hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f71afbd2d238f05fc62377a102459dbebc72d784 | 2,830 | py | Python | rdmo/core/management/commands/make_theme.py | m6121/rdmo | db3990c7525138c6ce9634fc3e5b6b8ee9b915c8 | [
"Apache-2.0"
] | 77 | 2016-08-09T11:40:20.000Z | 2022-03-06T11:03:26.000Z | rdmo/core/management/commands/make_theme.py | m6121/rdmo | db3990c7525138c6ce9634fc3e5b6b8ee9b915c8 | [
"Apache-2.0"
] | 377 | 2016-07-01T13:59:36.000Z | 2022-03-30T13:53:19.000Z | rdmo/core/management/commands/make_theme.py | m6121/rdmo | db3990c7525138c6ce9634fc3e5b6b8ee9b915c8 | [
"Apache-2.0"
] | 47 | 2016-06-23T11:32:19.000Z | 2022-03-01T11:34:37.000Z | from shutil import copyfile
from pathlib import Path
from django.apps import apps
from django.conf import settings
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    """Management command that bootstraps a local RDMO theme app.

    It copies selected static files and templates from the installed ``rdmo``
    package into a theme directory and makes sure the theme is activated in
    ``config/settings/local.py``.
    """

    def setup(self, options):
        # Resolve the theme name and every path the command works with.
        self.theme_name = options['name']
        self.theme_path = Path(options['name'])
        self.rdmo_path = Path(apps.get_app_config('rdmo').path)
        self.local_path = Path().cwd() / 'config' / 'settings' / 'local.py'

    def copy(self, path):
        # Copy one file from the rdmo package into the theme, keeping the
        # sub-path below the package root; existing files are never clobbered.
        src = self.rdmo_path / path
        dst = self.theme_path / Path(*path.parts[1:])
        if dst.exists():
            print('Skip {} -> {}. Target file exists.'.format(src, dst))
        else:
            print('Copy {} -> {}.'.format(src, dst))
            dst.parent.mkdir(parents=True, exist_ok=True)
            copyfile(src, dst)

    def enable_theme(self):
        # Ensure local.py contains the INSTALLED_APPS line for the theme:
        # bail out if it is already active, uncomment it if it is commented
        # out, and append it if it is missing entirely.
        settings_line = "INSTALLED_APPS = ['{}'] + INSTALLED_APPS".format(self.theme_name)
        replaced = False
        local_settings = self.local_path.read_text().splitlines()
        for idx, entry in enumerate(local_settings):
            if entry == settings_line:
                # Already enabled, nothing to write.
                return
            if entry == '# ' + settings_line:
                local_settings[idx] = settings_line
                replaced = True
        if not replaced:
            local_settings.extend(['', settings_line, ''])
        self.local_path.write_text('\n'.join(local_settings))

    def add_arguments(self, parser):
        parser.add_argument('--name', action='store', default='rdmo_theme',
                            help='Module name for the theme.')
        parser.add_argument('--file', action='store',
                            help='Copy specific file/template, e.g. core/static/css/variables.scss.')

    def handle(self, *args, **options):
        self.setup(options)
        if options['file']:
            # Only copy the single requested file.
            self.copy(Path(options['file']))
        else:
            # Create the theme app skeleton and copy the default overrides.
            self.theme_path.mkdir(exist_ok=True)
            self.theme_path.joinpath('__init__.py').touch()
            self.theme_path.joinpath('locale').mkdir(exist_ok=True)
            self.copy(Path('core') / 'static' / 'core' / 'css' / 'variables.scss')
            for language, language_string in settings.LANGUAGES:
                self.copy(Path('core') / 'templates' / 'core' / 'home_text_{}.html'.format(language))
                self.copy(Path('core') / 'templates' / 'core' / 'about_text_{}.html'.format(language))
                self.copy(Path('core') / 'templates' / 'core' / 'footer_text_{}.html'.format(language))
        print('Enable theme by adding the necessary config line.')
        self.enable_theme()
        print('Done')
| 37.733333 | 127 | 0.608834 | from shutil import copyfile
from pathlib import Path
from django.apps import apps
from django.conf import settings
from django.core.management.base import BaseCommand
class Command(BaseCommand):
def setup(self, options):
self.theme_name = options['name']
self.theme_path = Path(options['name'])
self.rdmo_path = Path(apps.get_app_config('rdmo').path)
self.local_path = Path().cwd() / 'config' / 'settings' / 'local.py'
def copy(self, path):
source_path = self.rdmo_path / path
target_path = self.theme_path / Path(*path.parts[1:])
if target_path.exists():
print('Skip {} -> {}. Target file exists.'.format(source_path, target_path))
else:
print('Copy {} -> {}.'.format(source_path, target_path))
target_path.parent.mkdir(parents=True, exist_ok=True)
copyfile(source_path, target_path)
def enable_theme(self):
settings_line = 'INSTALLED_APPS = [\'{}\'] + INSTALLED_APPS'.format(self.theme_name)
replaced = False
local_settings = self.local_path.read_text().splitlines()
for i, line in enumerate(local_settings):
if line == settings_line:
return
if line == '# ' + settings_line:
local_settings[i] = settings_line
replaced = True
if not replaced:
local_settings.append('')
local_settings.append(settings_line)
local_settings.append('')
self.local_path.write_text('\n'.join(local_settings))
def add_arguments(self, parser):
parser.add_argument('--name', action='store', default='rdmo_theme', help='Module name for the theme.')
parser.add_argument('--file', action='store', help='Copy specific file/template, e.g. core/static/css/variables.scss.')
def handle(self, *args, **options):
self.setup(options)
if options['file']:
self.copy(Path(options['file']))
else:
self.theme_path.mkdir(exist_ok=True)
self.theme_path.joinpath('__init__.py').touch()
self.theme_path.joinpath('locale').mkdir(exist_ok=True)
self.copy(Path('core') / 'static' / 'core' / 'css' / 'variables.scss')
for language, language_string in settings.LANGUAGES:
self.copy(Path('core') / 'templates' / 'core' / 'home_text_{}.html'.format(language))
self.copy(Path('core') / 'templates' / 'core' / 'about_text_{}.html'.format(language))
self.copy(Path('core') / 'templates' / 'core' / 'footer_text_{}.html'.format(language))
print('Enable theme by adding the necessary config line.')
self.enable_theme()
print('Done')
| true | true |
f71afbdf559edc30ab16fc96de12394ee0fbf228 | 1,115 | py | Python | boards/tests/test_view_boards.py | pydjdev78/abc-for-app | a7b9852f1e51f2e901fe00092931a1e8a2bca913 | [
"MIT"
] | 4 | 2018-12-25T13:56:18.000Z | 2019-12-22T16:04:50.000Z | boards/tests/test_view_boards.py | bvermeulen/Django | e4ef21c2f1fb7d026207c25bd443252c6df354bf | [
"MIT"
] | 15 | 2019-12-10T06:22:19.000Z | 2022-03-11T23:46:49.000Z | boards/tests/test_view_boards.py | pydjdev78/abc-for-app | a7b9852f1e51f2e901fe00092931a1e8a2bca913 | [
"MIT"
] | 2 | 2021-02-16T18:52:19.000Z | 2021-03-30T16:40:46.000Z | from django.contrib.auth.models import User
from django.urls import reverse, resolve
from django.test import TestCase
from ..views import BoardListView
from ..models import Board
class BoardsTests(TestCase):
    """View tests for the board list page."""

    def setUp(self):
        # Create and log in a throw-away user, create one board, and request
        # the board list once so every test can inspect the same response.
        username = 'joe'
        password = '123'
        _ = User.objects.create_user(username=username,
                                     email='jane@doe.com', password=password)
        self.client.login(username=username, password=password)
        self.board = Board.objects.create(name='Django', description='Django board')
        boards_url = reverse('boards')
        self.response = self.client.get(boards_url)

    def test_boards_view_status_code(self):
        # The board list must render successfully for a logged-in user.
        self.assertEqual(self.response.status_code, 200)

    def test_boards_url_resolves_boards_view(self):
        # /boards/ must be routed to the class-based BoardListView.
        view = resolve('/boards/')
        self.assertEqual(view.func.view_class, BoardListView)

    def test_boards_view_contains_link_to_topics_page(self):
        # The rendered page must link to the topics page of the board.
        board_topics_url = reverse('board_topics', kwargs={'board_pk': self.board.pk})
        self.assertContains(self.response, f'href="{board_topics_url}"')
| 38.448276 | 86 | 0.69148 | from django.contrib.auth.models import User
from django.urls import reverse, resolve
from django.test import TestCase
from ..views import BoardListView
from ..models import Board
class BoardsTests(TestCase):
def setUp(self):
username = 'joe'
password = '123'
_ = User.objects.create_user(username=username,
email='jane@doe.com', password=password)
self.client.login(username=username, password=password)
self.board = Board.objects.create(name='Django', description='Django board')
url = reverse('boards')
self.response = self.client.get(url)
def test_boards_view_status_code(self):
self.assertEqual(self.response.status_code, 200)
def test_boards_url_resolves_boards_view(self):
view = resolve('/boards/')
self.assertEqual(view.func.view_class, BoardListView)
def test_boards_view_contains_link_to_topics_page(self):
board_topics_url = reverse('board_topics', kwargs={'board_pk': self.board.pk})
self.assertContains(self.response, f'href="{board_topics_url}"')
| true | true |
f71afc8fa1cfc528ee34eb6345a7ce015abf36d4 | 10,281 | py | Python | test/test_praat.py | hadware/pympi | f7ee43dff7e809395bd097849d2e7bc6e602096b | [
"MIT"
] | 1 | 2019-11-09T20:33:14.000Z | 2019-11-09T20:33:14.000Z | test/test_praat.py | git-em/pympi | ad5e52b15979b09ea43df5e25dcf1c5b280e99fb | [
"MIT"
] | null | null | null | test/test_praat.py | git-em/pympi | ad5e52b15979b09ea43df5e25dcf1c5b280e99fb | [
"MIT"
] | null | null | null | #!/bin/env python
# -*- coding: utf-8 -*-
import unittest
import tempfile
import os
from pympi.Praat import TextGrid
class PraatTest(unittest.TestCase):
    """Unit tests for ``pympi.Praat.TextGrid`` and its tier objects."""

    def setUp(self):
        # Fresh 20-second TextGrid for every test.
        self.tg = TextGrid(xmax=20)
        # NOTE(review): probably intended ``self.maxDiff`` (unittest's
        # attribute); kept as-is to preserve behavior.
        self.maxdiff = None

    # ------------------------------------------------------------------
    # TextGrid-level functions
    # ------------------------------------------------------------------
    def test_sort_tiers(self):
        for tier_name in ['t2', 't1', 't3', 't6', 't4', 't5']:
            self.tg.add_tier(tier_name)
        tiernames = ['t1', 't2', 't3', 't4', 't5', 't6']
        # Default sort is alphabetical by name.
        self.tg.sort_tiers()
        self.assertEqual([a[1] for a in self.tg.get_tier_name_num()],
                         tiernames)
        # A custom key function reverses the ordering.
        self.tg.sort_tiers(lambda x: list(reversed(tiernames)).index(x.name))
        self.assertEqual([a[1] for a in self.tg.get_tier_name_num()],
                         list(reversed(tiernames)))

    def test_add_tier(self):
        # Out-of-range positions are rejected.
        self.assertRaises(ValueError, self.tg.add_tier, 'a', number=-1)
        self.assertRaises(ValueError, self.tg.add_tier, 'a', number=10)
        self.tg.add_tier('tier1')
        self.assertEqual(len(self.tg.tiers), 1)
        self.assertEqual(self.tg.tiers[0].tier_type, 'IntervalTier')
        self.tg.add_tier('tier2', tier_type='TextTier')
        self.assertEqual(len(self.tg.tiers), 2)
        self.assertEqual(self.tg.tiers[1].tier_type, 'TextTier')
        self.tg.add_tier('tier3')
        self.assertEqual(len(self.tg.tiers), 3)
        self.assertEqual(['tier1', 'tier2', 'tier3'],
                         [a.name for a in self.tg.tiers])
        self.tg.add_tier('tier4', number=2)
        self.assertEqual(len(self.tg.tiers), 4)
        self.assertEqual(4, len(self.tg.tiers))

    def test_remove_tier(self):
        self.assertRaises(Exception, self.tg.remove_tier, -1)
        self.assertRaises(Exception, self.tg.remove_tier, 10)
        self.tg.add_tier('tier1')
        self.tg.add_tier('tier2')
        self.tg.add_tier('tier3')
        self.tg.add_tier('tier4', number=2)
        # Remove by 1-based position and by name, in turn.
        self.tg.remove_tier(3)
        self.assertEqual(len(self.tg.tiers), 3)
        self.assertEqual(['tier1', 'tier3', 'tier4'],
                         sorted(a.name for a in self.tg.tiers))
        self.tg.remove_tier('tier1')
        self.assertEqual(len(self.tg.tiers), 2)
        self.assertEqual(['tier3', 'tier4'],
                         sorted(a.name for a in self.tg.tiers))
        self.tg.remove_tier(2)
        self.assertEqual(len(self.tg.tiers), 1)
        self.assertEqual(['tier4'], [a.name for a in self.tg.tiers])
        self.tg.remove_tier('tier4')
        self.assertTrue(not self.tg.tiers)

    def test_get_tier(self):
        self.assertRaises(Exception, self.tg.get_tier, -1)
        self.assertRaises(Exception, self.tg.get_tier, 'a')
        self.assertRaises(Exception, self.tg.get_tier, 10)
        tier1 = self.tg.add_tier('tier1')
        tier2 = self.tg.add_tier('tier2')
        tier3 = self.tg.add_tier('tier3')
        # Lookup by name returns the exact tier object.
        self.assertEqual(tier1, self.tg.get_tier(tier1.name))
        self.assertEqual(tier3, self.tg.get_tier(tier3.name))
        self.assertEqual(self.tg.tiers[1], self.tg.get_tier(tier2.name))

    def test_change_tier_name(self):
        self.assertRaises(Exception,
                          self.tg.change_tier_name, -1, 'b')
        self.assertRaises(Exception,
                          self.tg.change_tier_name, 'a', 'b')
        self.assertRaises(Exception,
                          self.tg.change_tier_name, 10, 'b')
        self.tg.add_tier('tier1')
        tier2 = self.tg.add_tier('tier2')
        self.tg.add_tier('tier3')
        # Rename by name, by 1-based index, and back again.
        self.tg.change_tier_name('tier1', 'tier1a')
        self.assertEqual(['tier1a', 'tier2', 'tier3'],
                         [a.name for a in self.tg.tiers])
        self.tg.change_tier_name(self.tg.tiers.index(tier2)+1, 'tier2a')
        self.assertEqual(['tier1a', 'tier2a', 'tier3'],
                         [a.name for a in self.tg.tiers])
        self.tg.change_tier_name('tier1a', 'tier1')
        self.assertEqual(['tier1', 'tier2a', 'tier3'],
                         [a.name for a in self.tg.tiers])

    def test_get_tiers(self):
        self.tg.add_tier('tier1')
        self.tg.add_tier('tier2')
        self.tg.add_tier('tier3')
        self.assertEqual(self.tg.tiers,
                         list(self.tg.get_tiers()))

    def test_get_tier_name_num(self):
        self.tg.add_tier('tier1')
        self.tg.add_tier('tier2')
        self.tg.add_tier('tier3', number=2)
        # Numbering reflects insertion position, not insertion order.
        self.assertEqual([(1, 'tier1'), (2, 'tier3'), (3, 'tier2')],
                         list(self.tg.get_tier_name_num()))

    def test_to_file(self):
        # Round-trip through every supported codec and file mode.
        for codec in ['utf-8', 'latin_1', 'mac_roman']:
            self.tg = TextGrid(xmax=20)
            tier1 = self.tg.add_tier('tier')
            tier1.add_interval(1, 2, 'i1')
            tier1.add_interval(2, 3, 'i2')
            tier1.add_interval(4, 5, 'i3')
            tier4 = self.tg.add_tier('tier')
            tier4.add_interval(1, 2, u'i1ü')
            tier4.add_interval(2.0, 3, 'i2')
            tier4.add_interval(4, 5.0, 'i3')
            tier2 = self.tg.add_tier('tier2', tier_type='TextTier')
            tier2.add_point(1, u'p1ü')
            tier2.add_point(2, 'p1')
            tier2.add_point(3, 'p1')
            tempf = tempfile.mkstemp()[1]
            # Normal (long) text mode.
            self.tg.to_file(tempf, codec=codec)
            TextGrid(tempf, codec=codec)
            # Short text mode.
            self.tg.to_file(tempf, codec=codec, mode='s')
            TextGrid(tempf, codec=codec)
            # Binary mode (codec-independent).
            self.tg.to_file(tempf, mode='b')
            TextGrid(tempf)
            os.remove(tempf)

    def test_to_eaf(self):
        tier1 = self.tg.add_tier('tier1')
        tier2 = self.tg.add_tier('tier2', tier_type='TextTier')
        tier1.add_interval(0, 1, 'int1')
        tier1.add_interval(2, 3, 'int2')
        tier1.add_interval(5, 6, 'int3')
        tier2.add_point(1.5, 'point1')
        tier2.add_point(2.5, 'point2')
        tier2.add_point(3.5, 'point3')
        # Points are converted to 30 ms annotations (0.03 s point length).
        eaf = self.tg.to_eaf(True, 0.03)
        self.assertRaises(ValueError, self.tg.to_eaf, pointlength=-1)
        self.assertEqual(sorted(eaf.get_tier_names()),
                         sorted(['default', 'tier1', 'tier2']))
        self.assertEqual(sorted(eaf.get_annotation_data_for_tier('tier1')),
                         sorted([(0, 1000, 'int1'), (5000, 6000, 'int3'),
                                 (2000, 3000, 'int2')]))
        self.assertEqual(sorted(eaf.get_annotation_data_for_tier('tier2')),
                         sorted([(2500, 2530, 'point2'),
                                 (1500, 1530, 'point1'),
                                 (3500, 3530, 'point3')]))

    # ------------------------------------------------------------------
    # Tier-level functions
    # ------------------------------------------------------------------
    def setup_tier(self):
        # Helper: one interval tier and one point (text) tier.
        self.tier1 = self.tg.add_tier('tier1')
        self.tier2 = self.tg.add_tier('tier2', tier_type='TextTier')

    def test_add_point(self):
        self.setup_tier()
        # Points can only be added to a TextTier.
        self.assertRaises(Exception, self.tier1.add_point, 5, 'a')
        self.tier2.add_point(5, 't')
        self.assertEqual([(5, 't')], self.tier2.intervals)
        # Duplicate point times are rejected unless checking is disabled.
        self.assertRaises(Exception, self.tier2.add_point, 5, 'a')
        self.tier2.add_point(6, 'a')
        self.assertEqual([(5, 't'), (6, 'a')], self.tier2.intervals)
        self.tier2.add_point(5, 'a', False)

    def test_add_interval(self):
        self.setup_tier()
        # Intervals can only be added to an IntervalTier, and must have
        # begin < end.
        self.assertRaises(Exception,
                          self.tier2.add_interval, 5, 6, 'a')
        self.assertRaises(Exception, self.tier2.add_interval, 6, 5, 'a')
        self.tier1.add_interval(5, 6, 't')
        self.assertEqual([(5, 6, 't')], self.tier1.intervals)
        # Overlapping intervals are rejected unless checking is disabled.
        self.assertRaises(Exception, self.tier1.add_interval, 5.5, 6.5, 't')
        self.tier1.add_interval(6, 7, 'a')
        self.assertEqual([(5, 6, 't'), (6, 7, 'a')], self.tier1.intervals)
        self.tier1.add_interval(5.5, 6.5, 't', False)

    def test_remove_interval(self):
        self.setup_tier()
        self.assertRaises(Exception, self.tier2.remove_interval, 5)
        self.tier1.add_interval(5, 6, 'a')
        self.tier1.add_interval(6, 7, 'b')
        self.tier1.add_interval(7, 8, 'c')
        # Any time inside an interval removes it; boundaries remove the
        # interval that ends there; misses are silently ignored.
        self.tier1.remove_interval(5.5)
        self.assertEqual([(6, 7, 'b'), (7, 8, 'c')],
                         self.tier1.intervals)
        self.tier1.remove_interval(8)
        self.assertEqual([(6, 7, 'b')],
                         self.tier1.intervals)
        self.tier1.remove_interval(8)
        self.assertEqual([(6, 7, 'b')],
                         self.tier1.intervals)

    def test_remove_point(self):
        self.setup_tier()
        self.assertRaises(Exception, self.tier1.remove_point, 5)
        self.tier2.add_point(5, 'a')
        self.tier2.add_point(6, 'b')
        self.tier2.add_point(7, 'c')
        self.tier2.remove_point(5)
        self.assertEqual([(6, 'b'), (7, 'c')],
                         self.tier2.intervals)
        self.tier2.remove_point(7)
        self.assertEqual([(6, 'b')],
                         self.tier2.intervals)
        # Removing a missing point is a no-op.
        self.tier2.remove_point(7)
        self.assertEqual([(6, 'b')],
                         self.tier2.intervals)

    def test_get_intervals(self):
        self.setup_tier()
        self.tier1.add_interval(5, 6, 'a')
        self.tier1.add_interval(7, 8, 'c')
        self.tier1.add_interval(6, 7, 'b')
        self.assertEqual([(5, 6, 'a'), (6, 7, 'b'), (7, 8, 'c')],
                         sorted(self.tier1.get_intervals()))
        self.tier2.add_point(5, 'a')
        self.tier2.add_point(7, 'c')
        self.tier2.add_point(6, 'b')
        self.assertEqual([(5, 'a'), (6, 'b'), (7, 'c')],
                         sorted(self.tier2.get_intervals()))

    def test_clear_intervals(self):
        self.setup_tier()
        self.tier1.add_interval(5, 6, 'a')
        self.tier1.add_interval(6, 7, 'b')
        self.tier1.add_interval(7, 8, 'c')
        self.tier1.clear_intervals()
        self.assertEqual([], self.tier1.intervals)
        self.tier2.add_point(5, 'a')
        self.tier2.add_point(6, 'b')
        self.tier2.add_point(7, 'c')
        self.tier2.clear_intervals()
        self.assertEqual([], self.tier2.intervals)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| 37.797794 | 77 | 0.563661 |
import unittest
import tempfile
import os
from pympi.Praat import TextGrid
class PraatTest(unittest.TestCase):
def setUp(self):
self.tg = TextGrid(xmax=20)
self.maxdiff = None
def test_sort_tiers(self):
self.tg.add_tier('t2')
self.tg.add_tier('t1')
self.tg.add_tier('t3')
self.tg.add_tier('t6')
self.tg.add_tier('t4')
self.tg.add_tier('t5')
tiernames = ['t1', 't2', 't3', 't4', 't5', 't6']
self.tg.sort_tiers()
self.assertEqual([a[1] for a in self.tg.get_tier_name_num()],
tiernames)
self.tg.sort_tiers(lambda x: list(reversed(tiernames)).index(x.name))
self.assertEqual([a[1] for a in self.tg.get_tier_name_num()],
list(reversed(tiernames)))
def test_add_tier(self):
self.assertRaises(ValueError, self.tg.add_tier, 'a', number=-1)
self.assertRaises(ValueError, self.tg.add_tier, 'a', number=10)
self.tg.add_tier('tier1')
self.assertEqual(len(self.tg.tiers), 1)
self.assertEqual(self.tg.tiers[0].tier_type, 'IntervalTier')
self.tg.add_tier('tier2', tier_type='TextTier')
self.assertEqual(len(self.tg.tiers), 2)
self.assertEqual(self.tg.tiers[1].tier_type, 'TextTier')
self.tg.add_tier('tier3')
self.assertEqual(len(self.tg.tiers), 3)
self.assertEqual(['tier1', 'tier2', 'tier3'],
[a.name for a in self.tg.tiers])
self.tg.add_tier('tier4', number=2)
self.assertEqual(len(self.tg.tiers), 4)
self.assertEqual(4, len(self.tg.tiers))
def test_remove_tier(self):
self.assertRaises(Exception, self.tg.remove_tier, -1)
self.assertRaises(Exception, self.tg.remove_tier, 10)
self.tg.add_tier('tier1')
self.tg.add_tier('tier2')
self.tg.add_tier('tier3')
self.tg.add_tier('tier4', number=2)
self.tg.remove_tier(3)
self.assertEqual(len(self.tg.tiers), 3)
self.assertEqual(['tier1', 'tier3', 'tier4'],
sorted(a.name for a in self.tg.tiers))
self.tg.remove_tier('tier1')
self.assertEqual(len(self.tg.tiers), 2)
self.assertEqual(['tier3', 'tier4'],
sorted(a.name for a in self.tg.tiers))
self.tg.remove_tier(2)
self.assertEqual(len(self.tg.tiers), 1)
self.assertEqual(['tier4'], [a.name for a in self.tg.tiers])
self.tg.remove_tier('tier4')
self.assertTrue(not self.tg.tiers)
def test_get_tier(self):
self.assertRaises(Exception, self.tg.get_tier, -1)
self.assertRaises(Exception, self.tg.get_tier, 'a')
self.assertRaises(Exception, self.tg.get_tier, 10)
tier1 = self.tg.add_tier('tier1')
tier2 = self.tg.add_tier('tier2')
tier3 = self.tg.add_tier('tier3')
self.assertEqual(tier1, self.tg.get_tier(tier1.name))
self.assertEqual(tier3, self.tg.get_tier(tier3.name))
self.assertEqual(self.tg.tiers[1], self.tg.get_tier(tier2.name))
def test_change_tier_name(self):
self.assertRaises(Exception,
self.tg.change_tier_name, -1, 'b')
self.assertRaises(Exception,
self.tg.change_tier_name, 'a', 'b')
self.assertRaises(Exception,
self.tg.change_tier_name, 10, 'b')
self.tg.add_tier('tier1')
tier2 = self.tg.add_tier('tier2')
self.tg.add_tier('tier3')
self.tg.change_tier_name('tier1', 'tier1a')
self.assertEqual(['tier1a', 'tier2', 'tier3'],
[a.name for a in self.tg.tiers])
self.tg.change_tier_name(self.tg.tiers.index(tier2)+1, 'tier2a')
self.assertEqual(['tier1a', 'tier2a', 'tier3'],
[a.name for a in self.tg.tiers])
self.tg.change_tier_name('tier1a', 'tier1')
self.assertEqual(['tier1', 'tier2a', 'tier3'],
[a.name for a in self.tg.tiers])
def test_get_tiers(self):
self.tg.add_tier('tier1')
self.tg.add_tier('tier2')
self.tg.add_tier('tier3')
self.assertEqual(self.tg.tiers,
list(self.tg.get_tiers()))
def test_get_tier_name_num(self):
self.tg.add_tier('tier1')
self.tg.add_tier('tier2')
self.tg.add_tier('tier3', number=2)
self.assertEqual([(1, 'tier1'), (2, 'tier3'), (3, 'tier2')],
list(self.tg.get_tier_name_num()))
def test_to_file(self):
for codec in ['utf-8', 'latin_1', 'mac_roman']:
self.tg = TextGrid(xmax=20)
tier1 = self.tg.add_tier('tier')
tier1.add_interval(1, 2, 'i1')
tier1.add_interval(2, 3, 'i2')
tier1.add_interval(4, 5, 'i3')
tier4 = self.tg.add_tier('tier')
tier4.add_interval(1, 2, u'i1ü')
tier4.add_interval(2.0, 3, 'i2')
tier4.add_interval(4, 5.0, 'i3')
tier2 = self.tg.add_tier('tier2', tier_type='TextTier')
tier2.add_point(1, u'p1ü')
tier2.add_point(2, 'p1')
tier2.add_point(3, 'p1')
tempf = tempfile.mkstemp()[1]
self.tg.to_file(tempf, codec=codec)
TextGrid(tempf, codec=codec)
self.tg.to_file(tempf, codec=codec, mode='s')
TextGrid(tempf, codec=codec)
self.tg.to_file(tempf, mode='b')
TextGrid(tempf)
os.remove(tempf)
def test_to_eaf(self):
tier1 = self.tg.add_tier('tier1')
tier2 = self.tg.add_tier('tier2', tier_type='TextTier')
tier1.add_interval(0, 1, 'int1')
tier1.add_interval(2, 3, 'int2')
tier1.add_interval(5, 6, 'int3')
tier2.add_point(1.5, 'point1')
tier2.add_point(2.5, 'point2')
tier2.add_point(3.5, 'point3')
eaf = self.tg.to_eaf(True, 0.03)
self.assertRaises(ValueError, self.tg.to_eaf, pointlength=-1)
self.assertEqual(sorted(eaf.get_tier_names()),
sorted(['default', 'tier1', 'tier2']))
self.assertEqual(sorted(eaf.get_annotation_data_for_tier('tier1')),
sorted([(0, 1000, 'int1'), (5000, 6000, 'int3'),
(2000, 3000, 'int2')]))
self.assertEqual(sorted(eaf.get_annotation_data_for_tier('tier2')),
sorted([(2500, 2530, 'point2'),
(1500, 1530, 'point1'),
(3500, 3530, 'point3')]))
def setup_tier(self):
self.tier1 = self.tg.add_tier('tier1')
self.tier2 = self.tg.add_tier('tier2', tier_type='TextTier')
def test_add_point(self):
self.setup_tier()
self.assertRaises(Exception, self.tier1.add_point, 5, 'a')
self.tier2.add_point(5, 't')
self.assertEqual([(5, 't')], self.tier2.intervals)
self.assertRaises(Exception, self.tier2.add_point, 5, 'a')
self.tier2.add_point(6, 'a')
self.assertEqual([(5, 't'), (6, 'a')], self.tier2.intervals)
self.tier2.add_point(5, 'a', False)
def test_add_interval(self):
self.setup_tier()
self.assertRaises(Exception,
self.tier2.add_interval, 5, 6, 'a')
self.assertRaises(Exception, self.tier2.add_interval, 6, 5, 'a')
self.tier1.add_interval(5, 6, 't')
self.assertEqual([(5, 6, 't')], self.tier1.intervals)
self.assertRaises(Exception, self.tier1.add_interval, 5.5, 6.5, 't')
self.tier1.add_interval(6, 7, 'a')
self.assertEqual([(5, 6, 't'), (6, 7, 'a')], self.tier1.intervals)
self.tier1.add_interval(5.5, 6.5, 't', False)
def test_remove_interval(self):
self.setup_tier()
self.assertRaises(Exception, self.tier2.remove_interval, 5)
self.tier1.add_interval(5, 6, 'a')
self.tier1.add_interval(6, 7, 'b')
self.tier1.add_interval(7, 8, 'c')
self.tier1.remove_interval(5.5)
self.assertEqual([(6, 7, 'b'), (7, 8, 'c')],
self.tier1.intervals)
self.tier1.remove_interval(8)
self.assertEqual([(6, 7, 'b')],
self.tier1.intervals)
self.tier1.remove_interval(8)
self.assertEqual([(6, 7, 'b')],
self.tier1.intervals)
def test_remove_point(self):
self.setup_tier()
self.assertRaises(Exception, self.tier1.remove_point, 5)
self.tier2.add_point(5, 'a')
self.tier2.add_point(6, 'b')
self.tier2.add_point(7, 'c')
self.tier2.remove_point(5)
self.assertEqual([(6, 'b'), (7, 'c')],
self.tier2.intervals)
self.tier2.remove_point(7)
self.assertEqual([(6, 'b')],
self.tier2.intervals)
self.tier2.remove_point(7)
self.assertEqual([(6, 'b')],
self.tier2.intervals)
def test_get_intervals(self):
self.setup_tier()
self.tier1.add_interval(5, 6, 'a')
self.tier1.add_interval(7, 8, 'c')
self.tier1.add_interval(6, 7, 'b')
self.assertEqual([(5, 6, 'a'), (6, 7, 'b'), (7, 8, 'c')],
sorted(self.tier1.get_intervals()))
self.tier2.add_point(5, 'a')
self.tier2.add_point(7, 'c')
self.tier2.add_point(6, 'b')
self.assertEqual([(5, 'a'), (6, 'b'), (7, 'c')],
sorted(self.tier2.get_intervals()))
def test_clear_intervals(self):
self.setup_tier()
self.tier1.add_interval(5, 6, 'a')
self.tier1.add_interval(6, 7, 'b')
self.tier1.add_interval(7, 8, 'c')
self.tier1.clear_intervals()
self.assertEqual([], self.tier1.intervals)
self.tier2.add_point(5, 'a')
self.tier2.add_point(6, 'b')
self.tier2.add_point(7, 'c')
self.tier2.clear_intervals()
self.assertEqual([], self.tier2.intervals)
if __name__ == '__main__':
unittest.main()
| true | true |
f71afce939f7d6088496f8152b5131beafd2e97c | 27,561 | py | Python | cbre/cbre_net.py | jameszhou-gl/CBRE | 53c952e0afc74518fc4223f0f20881336df20f95 | [
"Apache-2.0"
] | null | null | null | cbre/cbre_net.py | jameszhou-gl/CBRE | 53c952e0afc74518fc4223f0f20881336df20f95 | [
"Apache-2.0"
] | null | null | null | cbre/cbre_net.py | jameszhou-gl/CBRE | 53c952e0afc74518fc4223f0f20881336df20f95 | [
"Apache-2.0"
] | null | null | null | import tensorflow as tf
import numpy as np
from cbre.util import *
class CBRENet(object):
"""
cbre_net implements the cycly-balanced representation learning for counterfactual inference
The network is implemented as a tensorflow graph. The class constructor
creates an object containing relevant TF nodes as member variables.
"""
def __init__(self, x, t, y_, p_t, z_norm, flags, r_alpha, r_lambda, r_beta, do_in, do_out, data_x_dim):
"""
x The varibales of data
t The treatment applied to x, t.shape[1]==1
y_ The true outcome
p_t The treatment probability in all observations
z_norm todo unknown
flags The arg params
r_alpha The coefficient of reconstruction and cycle loss
r_lambda The coefficient of regularization of prediction network
r_beta The coefficient of gradient penalty in GAN
do_in The val of dropout_in
do_out The val of dropout_out
data_x_dim The dim of varibale x
"""
self.variables = {}
# wd_loss: regularization l2 loss
self.wd_loss = 0
if flags.nonlin.lower() == 'elu':
self.nonlin = tf.nn.elu
else:
self.nonlin = tf.nn.relu
self._build_graph(x, t, y_, p_t, z_norm, flags, r_alpha, r_lambda, r_beta, do_in, do_out, data_x_dim)
def _add_variable(self, var, name):
"""
Adds variables to the internal track-keeper
"""
basename = name
i = 0
while name in self.variables:
name = '%s_%d' % (basename, i) # @TODO: not consistent with TF internally if changed
i += 1
self.variables[name] = var
def _create_variable(self, var, name):
""" Create and adds variables to the internal track-keeper """
# tf.get_variable(name=name, initializer=var)
var = tf.Variable(var, name=name)
self._add_variable(var, name)
return var
def _create_variable_with_weight_decay(self, initializer, name, wd):
""" Create and adds variables to the internal track-keeper
and adds it to the list of weight decayed variables """
var = self._create_variable(initializer, name)
self.wd_loss += wd * tf.nn.l2_loss(var)
return var
def _build_graph(self, x, t, y_, p_t, z_norm, flags, r_alpha, r_lambda, r_beta, do_in, do_out, data_x_dim):
"""
Constructs a TensorFlow subgraph for causal effect inference.
Sets the following member variables (to TF nodes):
self.output The output prediction "y"
self.tot_loss The total objective to minimize
self.pred_loss The prediction term of the objective
self.weights_in The input/representation layer weights
self.weights_out The output/post-representation layer weights
self.weights_pred The (linear) prediction layer weights
self.h_rep The layer of the penalized representation
"""
self.x = x
self.t = t
self.y_ = y_
self.p_t = p_t
self.r_alpha = r_alpha
self.r_lambda = r_lambda
self.r_beta = r_beta
self.do_in = do_in
self.do_out = do_out
self.z_norm = z_norm
self.encoder_dim = flags.encoder_dim
encoder_dim = flags.encoder_dim
self.decoder_dim = flags.decoder_dim
self.predictor_dim = flags.predictor_dim
predictor_dim = flags.predictor_dim
mi_estimator_dim = flags.mi_estimator_dim
self.discriminator_dim = flags.discriminator_dim
discriminator_dim = flags.discriminator_dim
"""
Network Components
"""
'''
1. Encoder Network
'''
# Construct Encoder network layers, four layers with size 200
h_rep, h_rep_norm, weights_in = self._build_encoder(x, data_x_dim, flags)
'''
2. GAN
'''
d0, d1, dp, weights_dis, weights_discore = self._build_adversarial_graph(h_rep_norm, t, encoder_dim,
discriminator_dim, do_out,
flags)
# discriminator
# with sigmoid
# discriminator_loss = tf.reduce_mean(tf.nn.softplus(-d0)) + tf.reduce_mean(tf.nn.softplus(-d1) + d1) + dp
# without sigmoid
discriminator_loss = -tf.reduce_mean(d0) + tf.reduce_mean(d1) + r_beta * dp
# encoder
# with sigmoid
# rep_loss = tf.reduce_mean(tf.nn.softplus(-d1))
# without sigmoid
# todo rep_loss in paper: rep_loss = tf.reduce_mean(d0) - tf.reduce_mean(d1)
rep_loss = tf.reduce_mean(d0) - tf.reduce_mean(d1)
# rep_loss = -tf.reduce_mean(d1)
'''
3. Reconstruction
'''
# graph for reconstruction loss
x0, recons_x_0, x1, recons_x_1 = self._build_reconstruct_graph(x, t, data_x_dim, flags)
recons_loss = tf.sqrt(tf.reduce_mean(tf.square(x0 - recons_x_0)) + 1.0e-12) + tf.sqrt(
tf.reduce_mean(tf.square(x1 - recons_x_1)) + 1.0e-12)
'''
4. Cycle
'''
x0, cycle_x0, x1, cycle_x1 = self._build_cycle_graph(x, t, data_x_dim, flags)
cycle_loss = tf.sqrt(tf.reduce_mean(tf.square(x0 - cycle_x0)) + 1.0e-12) + tf.sqrt(
tf.reduce_mean(tf.square(x1 - cycle_x1)) + 1.0e-12)
'''
Predict Networks
'''
y, weights_out, weights_pred = self._build_output_graph(h_rep_norm, t, encoder_dim, predictor_dim, do_out,
flags)
""" Compute sample reweighting """
if flags.reweight_sample:
w_t = t / (2 * p_t)
w_c = (1 - t) / (2 * 1 - p_t)
sample_weight = w_t + w_c
else:
sample_weight = 1.0
self.sample_weight = sample_weight
risk = tf.reduce_mean(sample_weight * tf.square(y_ - y))
pred_error = tf.sqrt(tf.reduce_mean(tf.square(y_ - y)) + 1.0e-12)
""" Regularization """
if flags.p_lambda > 0 and flags.rep_weight_decay:
for i in range(0, flags.layer_num_encoder):
if not (flags.varsel and i == 0): # No penalty on W in variable selection
self.wd_loss += tf.nn.l2_loss(weights_in[i])
""" Total error """
tot_error = risk
if flags.p_lambda > 0:
tot_error = tot_error + r_lambda * self.wd_loss + recons_loss + cycle_loss
if flags.coef_recons > 0:
tot_error += flags.coef_recons * recons_loss
if flags.coef_cycle:
tot_error += flags.coef_cycle * cycle_loss
if flags.coef_d:
tot_error += flags.coef_d * discriminator_loss
if flags.varsel:
self.w_proj = tf.placeholder("float", shape=[data_x_dim], name='w_proj')
self.projection = weights_in[0].assign(self.w_proj)
self.output = y
self.tot_loss = tot_error
self.discriminator_loss = discriminator_loss
self.rep_loss = rep_loss
self.rec_loss = recons_loss
self.cycle_loss = cycle_loss
self.recons_cycle_loss = recons_loss + cycle_loss
self.pred_loss = pred_error
self.weights_in = weights_in
self.weights_out = weights_out
self.weights_dis = weights_dis
self.weights_discore = weights_discore
self.weights_pred = weights_pred
self.h_rep = h_rep
self.h_rep_norm = h_rep_norm
self.dp = dp
def _build_output_0(self, h_input, encoder_dim, predictor_dim, do_out, flags):
h_out = [h_input]
dims = [encoder_dim] + ([predictor_dim] * flags.layer_num_predictor)
with tf.variable_scope('pred_0') as scope:
weights_out = []
biases_out = []
for i in range(0, flags.layer_num_predictor):
wo = tf.get_variable(name='w_{}'.format(i),
initializer=tf.random_normal([dims[i], dims[i + 1]],
stddev=flags.weight_init / np.sqrt(dims[i])))
weights_out.append(wo)
# biases_out.append(tf.Variable(tf.zeros([1, predictor_dim])))
biases_out.append(tf.get_variable(name='b_{}'.format(i), initializer=tf.zeros([1, predictor_dim])))
z = tf.matmul(h_out[i], weights_out[i]) + biases_out[i]
h_out.append(self.nonlin(z))
h_out[i + 1] = tf.nn.dropout(h_out[i + 1], do_out)
weights_pred = self._create_variable(tf.random_normal([predictor_dim, 1],
stddev=flags.weight_init / np.sqrt(predictor_dim)),
'w_pred')
weights_pred = tf.get_variable(name='w_pred', initializer=tf.random_normal([predictor_dim, 1],
stddev=flags.weight_init / np.sqrt(
predictor_dim)))
bias_pred = tf.get_variable(initializer=tf.zeros([1]), name='b_pred')
if flags.varsel or flags.layer_num_predictor == 0:
self.wd_loss += tf.nn.l2_loss(
tf.slice(weights_pred, [0, 0], [predictor_dim - 1, 1])) # don't penalize treatment coefficient
else:
self.wd_loss += tf.nn.l2_loss(weights_pred)
""" Construct linear classifier """
h_pred = h_out[-1]
y = tf.matmul(h_pred, weights_pred) + bias_pred
return y, weights_out, weights_pred
def _build_output_1(self, h_input, encoder_dim, predictor_dim, do_out, flags):
h_out = [h_input]
dims = [encoder_dim] + ([predictor_dim] * flags.layer_num_predictor)
with tf.variable_scope('pred_1') as scope:
weights_out = []
biases_out = []
for i in range(0, flags.layer_num_predictor):
wo = tf.get_variable(name='w_{}'.format(i),
initializer=tf.random_normal([dims[i], dims[i + 1]],
stddev=flags.weight_init / np.sqrt(dims[i])))
weights_out.append(wo)
# biases_out.append(tf.Variable(tf.zeros([1, predictor_dim])))
biases_out.append(tf.get_variable(name='b_{}'.format(i), initializer=tf.zeros([1, predictor_dim])))
z = tf.matmul(h_out[i], weights_out[i]) + biases_out[i]
h_out.append(self.nonlin(z))
h_out[i + 1] = tf.nn.dropout(h_out[i + 1], do_out)
weights_pred = self._create_variable(tf.random_normal([predictor_dim, 1],
stddev=flags.weight_init / np.sqrt(predictor_dim)),
'w_pred')
weights_pred = tf.get_variable(name='w_pred', initializer=tf.random_normal([predictor_dim, 1],
stddev=flags.weight_init / np.sqrt(
predictor_dim)))
bias_pred = tf.get_variable(initializer=tf.zeros([1]), name='b_pred')
if flags.varsel or flags.layer_num_predictor == 0:
self.wd_loss += tf.nn.l2_loss(
tf.slice(weights_pred, [0, 0], [predictor_dim - 1, 1])) # don't penalize treatment coefficient
else:
self.wd_loss += tf.nn.l2_loss(weights_pred)
""" Construct linear classifier """
h_pred = h_out[-1]
y = tf.matmul(h_pred, weights_pred) + bias_pred
return y, weights_out, weights_pred
    def _build_output_graph(self, rep, t, encoder_dim, predictor_dim, do_out, flags):
        """Construct the output/regression layers.

        With flags.split_output, samples are routed to separate heads per
        treatment group and the per-group predictions are stitched back into
        the original row order.
        """
        if flags.split_output:
            i0 = tf.to_int32(tf.where(t < 1)[:, 0])  # row indices of control samples (t=0)
            i1 = tf.to_int32(tf.where(t > 0)[:, 0])  # row indices of treated samples (t=1)
            rep0 = tf.gather(rep, i0)
            rep1 = tf.gather(rep, i1)
            y0, weights_out0, weights_pred0 = self._build_output_0(rep0, encoder_dim, predictor_dim, do_out, flags)
            y1, weights_out1, weights_pred1 = self._build_output_1(rep1, encoder_dim, predictor_dim, do_out, flags)
            # Interleave the per-group predictions back to the input ordering.
            y = tf.dynamic_stitch([i0, i1], [y0, y1])
            weights_out = weights_out0 + weights_out1
            weights_pred = weights_pred0 + weights_pred1
        else:
            # NOTE(review): this branch is effectively unsupported — it builds
            # the concatenated input but returns None for all outputs, and the
            # tf.concat(dim, values) argument order predates TF 1.0.  Confirm
            # that split_output is always set before relying on this path.
            h_input = tf.concat(1, [rep, t])
            # y, weights_out, weights_pred = self._build_output(h_input, encoder_dim + 1, predictor_dim, do_out, flags)
            y, weights_out, weights_pred = None, None, None
        return y, weights_out, weights_pred
    def _build_encoder(self, x, data_x_dim, flags):
        """Build the shared representation encoder.

        Maps raw covariates x ([?, data_x_dim]) through layer_num_encoder
        fully connected layers of width self.encoder_dim, with optional batch
        normalization and input dropout (self.do_in).  The scope is reused
        (tf.AUTO_REUSE) so the reconstruction/cycle graphs share weights.

        Returns:
            h_rep: raw final-layer representation.
            h_rep_norm: row-normalized representation if
                flags.normalization == 'divide', otherwise a copy of h_rep.
            weights_in: list of per-layer weight variables.
        """
        with tf.variable_scope('encoder', reuse=tf.AUTO_REUSE) as scope:
            weights_in = []
            biases_in = []
            if flags.batch_norm:
                bn_biases = []
                bn_scales = []
            h_in = [x]
            for i in range(0, flags.layer_num_encoder):
                if i == 0:
                    """ If using variable selection, first layer is just rescaling"""
                    if flags.varsel:
                        # Per-feature scaling weights, initialized uniformly.
                        weights_in.append(tf.get_variable(name='wg_{}'.format(i),
                                                          initializer=1.0 / data_x_dim * tf.ones([data_x_dim])))
                    else:
                        wg = tf.get_variable(name='wg_{}'.format(i),
                                             initializer=tf.random_normal([data_x_dim, self.encoder_dim],
                                                                          stddev=flags.weight_init / np.sqrt(
                                                                              data_x_dim)))
                        weights_in.append(wg)
                else:
                    wg = tf.get_variable(name='wg_{}'.format(i),
                                         initializer=tf.random_normal([self.encoder_dim, self.encoder_dim],
                                                                      stddev=flags.weight_init / np.sqrt(
                                                                          self.encoder_dim)))
                    weights_in.append(wg)
                biases_in.append(tf.get_variable(name='bi_{}'.format(i), initializer=tf.zeros([1, self.encoder_dim])))
                # z equals outcome of each layer in Encoder Network.
                z = tf.matmul(h_in[i], weights_in[i]) + biases_in[i]
                if flags.batch_norm:
                    batch_mean, batch_var = tf.nn.moments(z, [0])
                    if flags.normalization == 'bn_fixed':
                        # Fixed affine parameters (offset 0, scale 1).
                        z = tf.nn.batch_normalization(z, batch_mean, batch_var, 0, 1, 1e-3)
                    else:
                        # Learned offset/scale per layer.
                        # bn_biases.append(tf.Variable(tf.zeros([self.encoder_dim])))
                        bn_biases.append(
                            tf.get_variable(name='bn_b_{}'.format(i), initializer=tf.zeros([self.encoder_dim])))
                        # bn_scales.append(tf.Variable(tf.ones([self.encoder_dim])))
                        bn_scales.append(
                            tf.get_variable(name='bn_s_{}'.format(i), initializer=tf.ones([self.encoder_dim])))
                        z = tf.nn.batch_normalization(z, batch_mean, batch_var, bn_biases[-1], bn_scales[-1], 1e-3)
                h_in.append(self.nonlin(z))
                h_in[i + 1] = tf.nn.dropout(h_in[i + 1], self.do_in)
            h_rep = h_in[-1]
            # todo normalization meaning?
            if flags.normalization == 'divide':
                # Project each row onto (approximately) the unit sphere; the
                # epsilon guards against division by zero for all-zero rows.
                h_rep_norm = h_rep / safe_sqrt(tf.reduce_sum(tf.square(h_rep), axis=1, keep_dims=True) + 1.0e-12)
            else:
                h_rep_norm = 1.0 * h_rep
        return h_rep, h_rep_norm, weights_in
    def _build_decoder(self, h_rep, data_x_dim, flags, suffix='0'):
        """Build a decoder that maps a representation back to covariate space.

        Args:
            h_rep: representation tensor, shape [?, flags.encoder_dim].
            data_x_dim: dimensionality of the original covariates (output width).
            flags: configuration flags (decoder_dim, layer_num_decoder, ...).
            suffix: scope suffix ('0' or '1') selecting the per-group decoder;
                scopes are reused (tf.AUTO_REUSE) across graph constructions.

        Returns:
            recons_x: reconstructed covariates, shape [?, data_x_dim].
            weights_in: list of per-layer weight variables.
        """
        with tf.variable_scope('decoder_' + suffix, reuse=tf.AUTO_REUSE) as scope:
            weights_in = []
            biases_in = []
            recons_x = [h_rep]
            decoder_dim = flags.decoder_dim
            for i in range(0, flags.layer_num_decoder):
                if i == 0:
                    # First layer: encoder_dim -> decoder_dim.
                    weights_in.append(tf.get_variable(name='wg_{}'.format(i),
                                                      initializer=tf.random_normal([flags.encoder_dim, decoder_dim],
                                                                                   stddev=flags.weight_init / np.sqrt(
                                                                                       flags.encoder_dim))))
                    biases_in.append(tf.get_variable(name='bi_{}'.format(i), initializer=tf.zeros([1, decoder_dim])))
                elif i == flags.layer_num_decoder - 1:
                    # Last layer: decoder_dim -> data_x_dim (covariate space).
                    weights_in.append(
                        tf.get_variable(name='wg_{}'.format(i), initializer=tf.random_normal([decoder_dim, data_x_dim],
                                                                                             stddev=flags.weight_init / np.sqrt(
                                                                                                 decoder_dim))))
                    biases_in.append(tf.get_variable(name='bi_{}'.format(i), initializer=tf.zeros([1, data_x_dim])))
                else:
                    # Hidden layers: decoder_dim -> decoder_dim.
                    weights_in.append(
                        tf.get_variable(name='wg_{}'.format(i), initializer=tf.random_normal([decoder_dim, decoder_dim],
                                                                                             stddev=flags.weight_init / np.sqrt(
                                                                                                 decoder_dim))))
                    biases_in.append(tf.get_variable(name='bi_{}'.format(i), initializer=tf.zeros([1, decoder_dim])))
                # z equals outcome of each layer in Encoder Network.
                z = tf.matmul(recons_x[i], weights_in[i]) + biases_in[i]
                recons_x.append(self.nonlin(z))
                recons_x[i + 1] = tf.nn.dropout(recons_x[i + 1], self.do_in)
            recons_x = recons_x[-1]
        return recons_x, weights_in
def _build_discriminator_graph_mine(self, x, hrep, data_x_dim, encoder_dim, mi_estimator_dim, flags):
""" Construct MI estimation layers """
# two layers with size 200
with tf.variable_scope('gmi') as scope:
input_num = tf.shape(x)[0]
x_shuffle = tf.random_shuffle(x)
x_conc = tf.concat([x, x_shuffle], axis=0)
y_conc = tf.concat([hrep, hrep], axis=0)
# forward
# [25, 200]
weights_mi_x = self._create_variable(tf.random_normal([data_x_dim, mi_estimator_dim],
stddev=flags.weight_init / np.sqrt(data_x_dim)),
'weights_mi_x')
biases_mi_x = self._create_variable(tf.zeros([1, mi_estimator_dim]), 'biases_mi_x')
# [, 200]
lin_x = tf.matmul(x_conc, weights_mi_x) + biases_mi_x
# [200, 200]
weights_mi_y = self._create_variable(tf.random_normal([encoder_dim, mi_estimator_dim],
stddev=flags.weight_init / np.sqrt(encoder_dim)),
'weights_mi_y')
biases_mi_y = self._create_variable(tf.zeros([1, mi_estimator_dim]), 'biases_mi_y')
# [, 200]
lin_y = tf.matmul(y_conc, weights_mi_y) + biases_mi_y
# lin_conc = tf.nn.relu(lin_x + lin_y)
lin_conc = self.nonlin(lin_x + lin_y)
weights_mi_pred = self._create_variable(tf.random_normal([mi_estimator_dim, 1],
stddev=flags.weight_init / np.sqrt(
mi_estimator_dim)),
'gmi_p')
biases_mi_pred = self._create_variable(tf.zeros([1, mi_estimator_dim]), 'biases_mi_pred')
gmi_output = tf.matmul(lin_conc, weights_mi_pred) + biases_mi_pred
# real estimator outcome: shape=[input_num, 1]
real_estimate = gmi_output[:input_num]
# fake estimator outcome: shape=[input_num, 1]
fake_estimate = gmi_output[input_num:]
return real_estimate, fake_estimate, weights_mi_x, weights_mi_y, weights_mi_pred
    def _build_discriminator_adversarial(self, hrep, encoder_dim, discriminator_dim, do_out, flags):
        """Construct the adversarial discriminator (critic) layers.

        A stack of layer_num_discriminator fully connected layers followed by
        a linear scoring layer.  The scope is reused (tf.AUTO_REUSE) so the
        same critic weights score both treatment groups and the interpolated
        points used for the gradient penalty.

        Returns:
            dis_score: critic score, shape [?, 1].
            weights_dis: list of hidden-layer weight variables.
            weights_discore: final scoring-layer weight variable.
        """
        with tf.variable_scope('discriminator', reuse=tf.AUTO_REUSE) as scope:
            h_dis = [hrep]
            weights_dis = []
            biases_dis = []
            for i in range(0, flags.layer_num_discriminator):
                if i == 0:
                    # Input layer: encoder_dim -> discriminator_dim.
                    weights_dis.append(tf.get_variable(name='wg_{}'.format(i),
                                                       initializer=tf.random_normal([encoder_dim, discriminator_dim],
                                                                                    stddev=flags.weight_init / np.sqrt(
                                                                                        encoder_dim))))
                else:
                    # Hidden layers: discriminator_dim -> discriminator_dim.
                    weights_dis.append(tf.get_variable(name='wg_{}'.format(i), initializer=tf.random_normal(
                        [discriminator_dim, discriminator_dim],
                        stddev=flags.weight_init / np.sqrt(
                            discriminator_dim))))
                biases_dis.append(tf.get_variable(name='bi_{}'.format(i), initializer=tf.zeros([1, discriminator_dim])))
                z = tf.matmul(h_dis[i], weights_dis[i]) + biases_dis[i]
                h_dis.append(self.nonlin(z))
                h_dis[i + 1] = tf.nn.dropout(h_dis[i + 1], do_out)
            # Final linear layer producing an unbounded scalar score per row.
            weights_discore = tf.get_variable(initializer=tf.random_normal([discriminator_dim, 1],
                                                                           stddev=flags.weight_init / np.sqrt(
                                                                               discriminator_dim)), name='dc_p')
            bias_dc = tf.get_variable(initializer=tf.zeros([1]), name='dc_b_p')
            h_score = h_dis[-1]
            dis_score = tf.matmul(h_score, weights_discore) + bias_dc
        return dis_score, weights_dis, weights_discore
    def _build_adversarial_graph(self, rep, t, encoder_dim, discriminator_dim, do_out, flags):
        """
        Construct adversarial discriminator

        Scores a max-pooled summary of each treatment group with a shared
        critic and computes a gradient penalty on points interpolated between
        the two group summaries.

        Returns:
            d0: critic score for the control-group summary.
            d1: critic score for the treated-group summary.
            gradient_penalty: mean squared deviation of the critic's gradient
                norm from 1 at the interpolated point.
            weights_dis, weights_discore: critic weight variables (shared
                across the three calls via scope reuse).
        """
        # three layers with size 200
        i0 = tf.to_int32(tf.where(t < 1)[:, 0])  # control-sample row indices
        i1 = tf.to_int32(tf.where(t > 0)[:, 0])  # treated-sample row indices
        rep0 = tf.gather(rep, i0)
        rep1 = tf.gather(rep, i1)
        # Collapse each group to a single summary row via element-wise max.
        z_rep0 = tf.reduce_max(rep0, axis=0, keep_dims=True)
        z_rep1 = tf.reduce_max(rep1, axis=0, keep_dims=True)
        # Condition the critic on self.z_norm, doubling the input width.
        z_rep0_conc = tf.concat([z_rep0, self.z_norm], axis=1)
        z_rep1_conc = tf.concat([z_rep1, self.z_norm], axis=1)
        d0, weights_dis, weights_discore = self._build_discriminator_adversarial(z_rep0_conc, encoder_dim + encoder_dim,
                                                                                 discriminator_dim,
                                                                                 do_out, flags)
        d1, weights_dis, weights_discore = self._build_discriminator_adversarial(z_rep1_conc, encoder_dim + encoder_dim,
                                                                                 discriminator_dim,
                                                                                 do_out, flags)
        # gradient penalty
        alpha_dist = tf.contrib.distributions.Uniform(low=0., high=1.)
        alpha = alpha_dist.sample((1, 1))
        interpolated = z_rep1 + alpha * (z_rep0 - z_rep1)
        interpolated_conc = tf.concat([interpolated, self.z_norm], axis=1)
        inte_logit, weights_dis, weights_discore = self._build_discriminator_adversarial(interpolated_conc,
                                                                                         encoder_dim + encoder_dim,
                                                                                         discriminator_dim, do_out,
                                                                                         flags)
        # Penalize deviation of the critic's gradient norm from 1; the epsilon
        # inside sqrt avoids a zero-gradient NaN.
        gradients = tf.gradients(inte_logit, [interpolated])[0]
        grad_l2 = tf.sqrt(tf.reduce_sum(tf.square(gradients), axis=[1]) + 1.0e-12)
        gradient_penalty = tf.reduce_mean(tf.square(grad_l2 - 1.0))
        return d0, d1, gradient_penalty, weights_dis, weights_discore
    def _build_reconstruct_graph(self, x, t, data_x_dim, flags):
        """Construct the graph used to compute the reconstruction loss.

        Each treatment group is encoded with the shared encoder and decoded
        with its group-specific decoder ('0' for control, '1' for treated).
        Encoder/decoder scopes are reused, so no new weights are created here.

        Parameters:
            x   The variables (covariates) of the data
            t   The treatment applied to x

        Returns:
            x0, recons_x_0  control-group inputs and their reconstructions
            x1, recons_x_1  treated-group inputs and their reconstructions
        """
        i0 = tf.to_int32(tf.where(t < 1)[:, 0])  # control-sample row indices
        i1 = tf.to_int32(tf.where(t > 0)[:, 0])  # treated-sample row indices
        x0 = tf.gather(x, i0)
        x1 = tf.gather(x, i1)
        h_rep_0, h_rep_norm_0, weights_in_0 = self._build_encoder(x0, data_x_dim, flags)
        h_rep_1, h_rep_norm_1, weights_in_1 = self._build_encoder(x1, data_x_dim, flags)
        # Decode the normalized representation with the group-matched decoder.
        recons_x_0, _ = self._build_decoder(h_rep_norm_0, data_x_dim, flags, suffix='0')
        recons_x_1, _ = self._build_decoder(h_rep_norm_1, data_x_dim, flags, suffix='1')
        return x0, recons_x_0, x1, recons_x_1
    def _build_cycle_graph(self, x, t, data_x_dim, flags):
        """Construct the graph used to compute the cycle-consistency loss.

        Each group is translated into the opposite group's covariate space
        (via that group's decoder) and then translated back; the cycle loss
        compares the round trip to the original.  All encoder/decoder scopes
        are reused, so no new weights are created here.

        Parameters:
            x   The variables (covariates) of the data
            t   The treatment applied to x

        Returns:
            x0, cycle_x0    control-group inputs and their x0 -> x1' -> x0 round trip
            x1, cycle_x1    treated-group inputs and their x1 -> x0' -> x1 round trip
        """
        i0 = tf.to_int32(tf.where(t < 1)[:, 0])  # control-sample row indices
        i1 = tf.to_int32(tf.where(t > 0)[:, 0])  # treated-sample row indices
        x0 = tf.gather(x, i0)
        x1 = tf.gather(x, i1)
        # cycle x0-x1'-x0
        _, h_rep_norm_0, _ = self._build_encoder(x0, data_x_dim, flags)
        temp_x_0_in_1, _ = self._build_decoder(h_rep_norm_0, data_x_dim, flags, suffix='1')
        _, cyc_h_rep_norm_0, _ = self._build_encoder(temp_x_0_in_1, data_x_dim, flags)
        cycle_x0, _ = self._build_decoder(cyc_h_rep_norm_0, data_x_dim, flags, suffix='0')
        # cycle x1-x0'-x1
        _, h_rep_norm_1, _ = self._build_encoder(x1, data_x_dim, flags)
        temp_x_1_in_0, _ = self._build_decoder(h_rep_norm_1, data_x_dim, flags, suffix='0')
        _, cyc_h_rep_norm_1, _ = self._build_encoder(temp_x_1_in_0, data_x_dim, flags)
        cycle_x1, _ = self._build_decoder(cyc_h_rep_norm_1, data_x_dim, flags, suffix='1')
        return x0, cycle_x0, x1, cycle_x1
| 48.43761 | 128 | 0.532274 | import tensorflow as tf
import numpy as np
from cbre.util import *
class CBRENet(object):
def __init__(self, x, t, y_, p_t, z_norm, flags, r_alpha, r_lambda, r_beta, do_in, do_out, data_x_dim):
self.variables = {}
self.wd_loss = 0
if flags.nonlin.lower() == 'elu':
self.nonlin = tf.nn.elu
else:
self.nonlin = tf.nn.relu
self._build_graph(x, t, y_, p_t, z_norm, flags, r_alpha, r_lambda, r_beta, do_in, do_out, data_x_dim)
def _add_variable(self, var, name):
basename = name
i = 0
while name in self.variables:
name = '%s_%d' % (basename, i)
i += 1
self.variables[name] = var
def _create_variable(self, var, name):
var = tf.Variable(var, name=name)
self._add_variable(var, name)
return var
def _create_variable_with_weight_decay(self, initializer, name, wd):
var = self._create_variable(initializer, name)
self.wd_loss += wd * tf.nn.l2_loss(var)
return var
def _build_graph(self, x, t, y_, p_t, z_norm, flags, r_alpha, r_lambda, r_beta, do_in, do_out, data_x_dim):
self.x = x
self.t = t
self.y_ = y_
self.p_t = p_t
self.r_alpha = r_alpha
self.r_lambda = r_lambda
self.r_beta = r_beta
self.do_in = do_in
self.do_out = do_out
self.z_norm = z_norm
self.encoder_dim = flags.encoder_dim
encoder_dim = flags.encoder_dim
self.decoder_dim = flags.decoder_dim
self.predictor_dim = flags.predictor_dim
predictor_dim = flags.predictor_dim
mi_estimator_dim = flags.mi_estimator_dim
self.discriminator_dim = flags.discriminator_dim
discriminator_dim = flags.discriminator_dim
h_rep, h_rep_norm, weights_in = self._build_encoder(x, data_x_dim, flags)
d0, d1, dp, weights_dis, weights_discore = self._build_adversarial_graph(h_rep_norm, t, encoder_dim,
discriminator_dim, do_out,
flags)
discriminator_loss = -tf.reduce_mean(d0) + tf.reduce_mean(d1) + r_beta * dp
rep_loss = tf.reduce_mean(d0) - tf.reduce_mean(d1)
x0, recons_x_0, x1, recons_x_1 = self._build_reconstruct_graph(x, t, data_x_dim, flags)
recons_loss = tf.sqrt(tf.reduce_mean(tf.square(x0 - recons_x_0)) + 1.0e-12) + tf.sqrt(
tf.reduce_mean(tf.square(x1 - recons_x_1)) + 1.0e-12)
x0, cycle_x0, x1, cycle_x1 = self._build_cycle_graph(x, t, data_x_dim, flags)
cycle_loss = tf.sqrt(tf.reduce_mean(tf.square(x0 - cycle_x0)) + 1.0e-12) + tf.sqrt(
tf.reduce_mean(tf.square(x1 - cycle_x1)) + 1.0e-12)
y, weights_out, weights_pred = self._build_output_graph(h_rep_norm, t, encoder_dim, predictor_dim, do_out,
flags)
if flags.reweight_sample:
w_t = t / (2 * p_t)
w_c = (1 - t) / (2 * 1 - p_t)
sample_weight = w_t + w_c
else:
sample_weight = 1.0
self.sample_weight = sample_weight
risk = tf.reduce_mean(sample_weight * tf.square(y_ - y))
pred_error = tf.sqrt(tf.reduce_mean(tf.square(y_ - y)) + 1.0e-12)
if flags.p_lambda > 0 and flags.rep_weight_decay:
for i in range(0, flags.layer_num_encoder):
if not (flags.varsel and i == 0):
self.wd_loss += tf.nn.l2_loss(weights_in[i])
tot_error = risk
if flags.p_lambda > 0:
tot_error = tot_error + r_lambda * self.wd_loss + recons_loss + cycle_loss
if flags.coef_recons > 0:
tot_error += flags.coef_recons * recons_loss
if flags.coef_cycle:
tot_error += flags.coef_cycle * cycle_loss
if flags.coef_d:
tot_error += flags.coef_d * discriminator_loss
if flags.varsel:
self.w_proj = tf.placeholder("float", shape=[data_x_dim], name='w_proj')
self.projection = weights_in[0].assign(self.w_proj)
self.output = y
self.tot_loss = tot_error
self.discriminator_loss = discriminator_loss
self.rep_loss = rep_loss
self.rec_loss = recons_loss
self.cycle_loss = cycle_loss
self.recons_cycle_loss = recons_loss + cycle_loss
self.pred_loss = pred_error
self.weights_in = weights_in
self.weights_out = weights_out
self.weights_dis = weights_dis
self.weights_discore = weights_discore
self.weights_pred = weights_pred
self.h_rep = h_rep
self.h_rep_norm = h_rep_norm
self.dp = dp
def _build_output_0(self, h_input, encoder_dim, predictor_dim, do_out, flags):
h_out = [h_input]
dims = [encoder_dim] + ([predictor_dim] * flags.layer_num_predictor)
with tf.variable_scope('pred_0') as scope:
weights_out = []
biases_out = []
for i in range(0, flags.layer_num_predictor):
wo = tf.get_variable(name='w_{}'.format(i),
initializer=tf.random_normal([dims[i], dims[i + 1]],
stddev=flags.weight_init / np.sqrt(dims[i])))
weights_out.append(wo)
biases_out.append(tf.get_variable(name='b_{}'.format(i), initializer=tf.zeros([1, predictor_dim])))
z = tf.matmul(h_out[i], weights_out[i]) + biases_out[i]
h_out.append(self.nonlin(z))
h_out[i + 1] = tf.nn.dropout(h_out[i + 1], do_out)
weights_pred = self._create_variable(tf.random_normal([predictor_dim, 1],
stddev=flags.weight_init / np.sqrt(predictor_dim)),
'w_pred')
weights_pred = tf.get_variable(name='w_pred', initializer=tf.random_normal([predictor_dim, 1],
stddev=flags.weight_init / np.sqrt(
predictor_dim)))
bias_pred = tf.get_variable(initializer=tf.zeros([1]), name='b_pred')
if flags.varsel or flags.layer_num_predictor == 0:
self.wd_loss += tf.nn.l2_loss(
tf.slice(weights_pred, [0, 0], [predictor_dim - 1, 1]))
else:
self.wd_loss += tf.nn.l2_loss(weights_pred)
h_pred = h_out[-1]
y = tf.matmul(h_pred, weights_pred) + bias_pred
return y, weights_out, weights_pred
def _build_output_1(self, h_input, encoder_dim, predictor_dim, do_out, flags):
h_out = [h_input]
dims = [encoder_dim] + ([predictor_dim] * flags.layer_num_predictor)
with tf.variable_scope('pred_1') as scope:
weights_out = []
biases_out = []
for i in range(0, flags.layer_num_predictor):
wo = tf.get_variable(name='w_{}'.format(i),
initializer=tf.random_normal([dims[i], dims[i + 1]],
stddev=flags.weight_init / np.sqrt(dims[i])))
weights_out.append(wo)
# biases_out.append(tf.Variable(tf.zeros([1, predictor_dim])))
biases_out.append(tf.get_variable(name='b_{}'.format(i), initializer=tf.zeros([1, predictor_dim])))
z = tf.matmul(h_out[i], weights_out[i]) + biases_out[i]
h_out.append(self.nonlin(z))
h_out[i + 1] = tf.nn.dropout(h_out[i + 1], do_out)
weights_pred = self._create_variable(tf.random_normal([predictor_dim, 1],
stddev=flags.weight_init / np.sqrt(predictor_dim)),
'w_pred')
weights_pred = tf.get_variable(name='w_pred', initializer=tf.random_normal([predictor_dim, 1],
stddev=flags.weight_init / np.sqrt(
predictor_dim)))
bias_pred = tf.get_variable(initializer=tf.zeros([1]), name='b_pred')
if flags.varsel or flags.layer_num_predictor == 0:
self.wd_loss += tf.nn.l2_loss(
tf.slice(weights_pred, [0, 0], [predictor_dim - 1, 1])) # don't penalize treatment coefficient
else:
self.wd_loss += tf.nn.l2_loss(weights_pred)
h_pred = h_out[-1]
y = tf.matmul(h_pred, weights_pred) + bias_pred
return y, weights_out, weights_pred
def _build_output_graph(self, rep, t, encoder_dim, predictor_dim, do_out, flags):
if flags.split_output:
i0 = tf.to_int32(tf.where(t < 1)[:, 0])
i1 = tf.to_int32(tf.where(t > 0)[:, 0])
rep0 = tf.gather(rep, i0)
rep1 = tf.gather(rep, i1)
y0, weights_out0, weights_pred0 = self._build_output_0(rep0, encoder_dim, predictor_dim, do_out, flags)
y1, weights_out1, weights_pred1 = self._build_output_1(rep1, encoder_dim, predictor_dim, do_out, flags)
y = tf.dynamic_stitch([i0, i1], [y0, y1])
weights_out = weights_out0 + weights_out1
weights_pred = weights_pred0 + weights_pred1
else:
h_input = tf.concat(1, [rep, t])
y, weights_out, weights_pred = None, None, None
return y, weights_out, weights_pred
def _build_encoder(self, x, data_x_dim, flags):
with tf.variable_scope('encoder', reuse=tf.AUTO_REUSE) as scope:
weights_in = []
biases_in = []
if flags.batch_norm:
bn_biases = []
bn_scales = []
h_in = [x]
for i in range(0, flags.layer_num_encoder):
if i == 0:
if flags.varsel:
weights_in.append(tf.get_variable(name='wg_{}'.format(i),
initializer=1.0 / data_x_dim * tf.ones([data_x_dim])))
else:
wg = tf.get_variable(name='wg_{}'.format(i),
initializer=tf.random_normal([data_x_dim, self.encoder_dim],
stddev=flags.weight_init / np.sqrt(
data_x_dim)))
weights_in.append(wg)
else:
wg = tf.get_variable(name='wg_{}'.format(i),
initializer=tf.random_normal([self.encoder_dim, self.encoder_dim],
stddev=flags.weight_init / np.sqrt(
self.encoder_dim)))
weights_in.append(wg)
biases_in.append(tf.get_variable(name='bi_{}'.format(i), initializer=tf.zeros([1, self.encoder_dim])))
z = tf.matmul(h_in[i], weights_in[i]) + biases_in[i]
if flags.batch_norm:
batch_mean, batch_var = tf.nn.moments(z, [0])
if flags.normalization == 'bn_fixed':
z = tf.nn.batch_normalization(z, batch_mean, batch_var, 0, 1, 1e-3)
else:
bn_biases.append(
tf.get_variable(name='bn_b_{}'.format(i), initializer=tf.zeros([self.encoder_dim])))
bn_scales.append(
tf.get_variable(name='bn_s_{}'.format(i), initializer=tf.ones([self.encoder_dim])))
z = tf.nn.batch_normalization(z, batch_mean, batch_var, bn_biases[-1], bn_scales[-1], 1e-3)
h_in.append(self.nonlin(z))
h_in[i + 1] = tf.nn.dropout(h_in[i + 1], self.do_in)
h_rep = h_in[-1]
if flags.normalization == 'divide':
h_rep_norm = h_rep / safe_sqrt(tf.reduce_sum(tf.square(h_rep), axis=1, keep_dims=True) + 1.0e-12)
else:
h_rep_norm = 1.0 * h_rep
return h_rep, h_rep_norm, weights_in
def _build_decoder(self, h_rep, data_x_dim, flags, suffix='0'):
with tf.variable_scope('decoder_' + suffix, reuse=tf.AUTO_REUSE) as scope:
weights_in = []
biases_in = []
recons_x = [h_rep]
decoder_dim = flags.decoder_dim
for i in range(0, flags.layer_num_decoder):
if i == 0:
weights_in.append(tf.get_variable(name='wg_{}'.format(i),
initializer=tf.random_normal([flags.encoder_dim, decoder_dim],
stddev=flags.weight_init / np.sqrt(
flags.encoder_dim))))
biases_in.append(tf.get_variable(name='bi_{}'.format(i), initializer=tf.zeros([1, decoder_dim])))
elif i == flags.layer_num_decoder - 1:
weights_in.append(
tf.get_variable(name='wg_{}'.format(i), initializer=tf.random_normal([decoder_dim, data_x_dim],
stddev=flags.weight_init / np.sqrt(
decoder_dim))))
biases_in.append(tf.get_variable(name='bi_{}'.format(i), initializer=tf.zeros([1, data_x_dim])))
else:
weights_in.append(
tf.get_variable(name='wg_{}'.format(i), initializer=tf.random_normal([decoder_dim, decoder_dim],
stddev=flags.weight_init / np.sqrt(
decoder_dim))))
biases_in.append(tf.get_variable(name='bi_{}'.format(i), initializer=tf.zeros([1, decoder_dim])))
z = tf.matmul(recons_x[i], weights_in[i]) + biases_in[i]
recons_x.append(self.nonlin(z))
recons_x[i + 1] = tf.nn.dropout(recons_x[i + 1], self.do_in)
recons_x = recons_x[-1]
return recons_x, weights_in
def _build_discriminator_graph_mine(self, x, hrep, data_x_dim, encoder_dim, mi_estimator_dim, flags):
with tf.variable_scope('gmi') as scope:
input_num = tf.shape(x)[0]
x_shuffle = tf.random_shuffle(x)
x_conc = tf.concat([x, x_shuffle], axis=0)
y_conc = tf.concat([hrep, hrep], axis=0)
weights_mi_x = self._create_variable(tf.random_normal([data_x_dim, mi_estimator_dim],
stddev=flags.weight_init / np.sqrt(data_x_dim)),
'weights_mi_x')
biases_mi_x = self._create_variable(tf.zeros([1, mi_estimator_dim]), 'biases_mi_x')
lin_x = tf.matmul(x_conc, weights_mi_x) + biases_mi_x
weights_mi_y = self._create_variable(tf.random_normal([encoder_dim, mi_estimator_dim],
stddev=flags.weight_init / np.sqrt(encoder_dim)),
'weights_mi_y')
biases_mi_y = self._create_variable(tf.zeros([1, mi_estimator_dim]), 'biases_mi_y')
lin_y = tf.matmul(y_conc, weights_mi_y) + biases_mi_y
lin_conc = self.nonlin(lin_x + lin_y)
weights_mi_pred = self._create_variable(tf.random_normal([mi_estimator_dim, 1],
stddev=flags.weight_init / np.sqrt(
mi_estimator_dim)),
'gmi_p')
biases_mi_pred = self._create_variable(tf.zeros([1, mi_estimator_dim]), 'biases_mi_pred')
gmi_output = tf.matmul(lin_conc, weights_mi_pred) + biases_mi_pred
real_estimate = gmi_output[:input_num]
fake_estimate = gmi_output[input_num:]
return real_estimate, fake_estimate, weights_mi_x, weights_mi_y, weights_mi_pred
def _build_discriminator_adversarial(self, hrep, encoder_dim, discriminator_dim, do_out, flags):
with tf.variable_scope('discriminator', reuse=tf.AUTO_REUSE) as scope:
h_dis = [hrep]
weights_dis = []
biases_dis = []
for i in range(0, flags.layer_num_discriminator):
if i == 0:
weights_dis.append(tf.get_variable(name='wg_{}'.format(i),
initializer=tf.random_normal([encoder_dim, discriminator_dim],
stddev=flags.weight_init / np.sqrt(
encoder_dim))))
else:
weights_dis.append(tf.get_variable(name='wg_{}'.format(i), initializer=tf.random_normal(
[discriminator_dim, discriminator_dim],
stddev=flags.weight_init / np.sqrt(
discriminator_dim))))
biases_dis.append(tf.get_variable(name='bi_{}'.format(i), initializer=tf.zeros([1, discriminator_dim])))
z = tf.matmul(h_dis[i], weights_dis[i]) + biases_dis[i]
h_dis.append(self.nonlin(z))
h_dis[i + 1] = tf.nn.dropout(h_dis[i + 1], do_out)
weights_discore = tf.get_variable(initializer=tf.random_normal([discriminator_dim, 1],
stddev=flags.weight_init / np.sqrt(
discriminator_dim)), name='dc_p')
bias_dc = tf.get_variable(initializer=tf.zeros([1]), name='dc_b_p')
h_score = h_dis[-1]
dis_score = tf.matmul(h_score, weights_discore) + bias_dc
return dis_score, weights_dis, weights_discore
def _build_adversarial_graph(self, rep, t, encoder_dim, discriminator_dim, do_out, flags):
i0 = tf.to_int32(tf.where(t < 1)[:, 0])
i1 = tf.to_int32(tf.where(t > 0)[:, 0])
rep0 = tf.gather(rep, i0)
rep1 = tf.gather(rep, i1)
z_rep0 = tf.reduce_max(rep0, axis=0, keep_dims=True)
z_rep1 = tf.reduce_max(rep1, axis=0, keep_dims=True)
z_rep0_conc = tf.concat([z_rep0, self.z_norm], axis=1)
z_rep1_conc = tf.concat([z_rep1, self.z_norm], axis=1)
d0, weights_dis, weights_discore = self._build_discriminator_adversarial(z_rep0_conc, encoder_dim + encoder_dim,
discriminator_dim,
do_out, flags)
d1, weights_dis, weights_discore = self._build_discriminator_adversarial(z_rep1_conc, encoder_dim + encoder_dim,
discriminator_dim,
do_out, flags)
alpha_dist = tf.contrib.distributions.Uniform(low=0., high=1.)
alpha = alpha_dist.sample((1, 1))
interpolated = z_rep1 + alpha * (z_rep0 - z_rep1)
interpolated_conc = tf.concat([interpolated, self.z_norm], axis=1)
inte_logit, weights_dis, weights_discore = self._build_discriminator_adversarial(interpolated_conc,
encoder_dim + encoder_dim,
discriminator_dim, do_out,
flags)
gradients = tf.gradients(inte_logit, [interpolated])[0]
grad_l2 = tf.sqrt(tf.reduce_sum(tf.square(gradients), axis=[1]) + 1.0e-12)
gradient_penalty = tf.reduce_mean(tf.square(grad_l2 - 1.0))
return d0, d1, gradient_penalty, weights_dis, weights_discore
def _build_reconstruct_graph(self, x, t, data_x_dim, flags):
i0 = tf.to_int32(tf.where(t < 1)[:, 0])
i1 = tf.to_int32(tf.where(t > 0)[:, 0])
x0 = tf.gather(x, i0)
x1 = tf.gather(x, i1)
h_rep_0, h_rep_norm_0, weights_in_0 = self._build_encoder(x0, data_x_dim, flags)
h_rep_1, h_rep_norm_1, weights_in_1 = self._build_encoder(x1, data_x_dim, flags)
recons_x_0, _ = self._build_decoder(h_rep_norm_0, data_x_dim, flags, suffix='0')
recons_x_1, _ = self._build_decoder(h_rep_norm_1, data_x_dim, flags, suffix='1')
return x0, recons_x_0, x1, recons_x_1
def _build_cycle_graph(self, x, t, data_x_dim, flags):
i0 = tf.to_int32(tf.where(t < 1)[:, 0])
i1 = tf.to_int32(tf.where(t > 0)[:, 0])
x0 = tf.gather(x, i0)
x1 = tf.gather(x, i1)
_, h_rep_norm_0, _ = self._build_encoder(x0, data_x_dim, flags)
temp_x_0_in_1, _ = self._build_decoder(h_rep_norm_0, data_x_dim, flags, suffix='1')
_, cyc_h_rep_norm_0, _ = self._build_encoder(temp_x_0_in_1, data_x_dim, flags)
cycle_x0, _ = self._build_decoder(cyc_h_rep_norm_0, data_x_dim, flags, suffix='0')
# cycle x1-x0'-x1
_, h_rep_norm_1, _ = self._build_encoder(x1, data_x_dim, flags)
temp_x_1_in_0, _ = self._build_decoder(h_rep_norm_1, data_x_dim, flags, suffix='0')
_, cyc_h_rep_norm_1, _ = self._build_encoder(temp_x_1_in_0, data_x_dim, flags)
cycle_x1, _ = self._build_decoder(cyc_h_rep_norm_1, data_x_dim, flags, suffix='1')
return x0, cycle_x0, x1, cycle_x1
| true | true |
f71afe637d8afd637eaa9306cb3f27585ad52570 | 887 | py | Python | setup.py | debdutgoswami/sorting-visualizer | e39e805acf22339b8ee06f8c8cd483e9c03ba3a4 | [
"MIT"
] | 3 | 2020-01-07T15:47:32.000Z | 2020-09-13T14:05:32.000Z | setup.py | debdutgoswami/sorting-visualizer | e39e805acf22339b8ee06f8c8cd483e9c03ba3a4 | [
"MIT"
] | 3 | 2020-10-04T18:03:36.000Z | 2020-10-08T07:13:40.000Z | setup.py | debdutgoswami/sorting-visualizer | e39e805acf22339b8ee06f8c8cd483e9c03ba3a4 | [
"MIT"
] | 3 | 2020-10-04T18:15:54.000Z | 2021-01-20T19:43:49.000Z | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="sorting_visualizer",
version="1.0",
author="Debdut Goswami",
author_email="debdutgoswami@gmail.com",
description="A package to visualize various sorting algorithms.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/debdutgoswami/sorting-visualizer",
download_url = 'https://github.com/debdutgoswami/sorting-visualizer/archive/v1.0.tar.gz',
keywords = ['SORT', 'ALGORITHM', 'VISUALIZE'],
packages=setuptools.find_packages(),
install_requires=[
'matplotlib'
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.7',
) | 32.851852 | 93 | 0.67531 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="sorting_visualizer",
version="1.0",
author="Debdut Goswami",
author_email="debdutgoswami@gmail.com",
description="A package to visualize various sorting algorithms.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/debdutgoswami/sorting-visualizer",
download_url = 'https://github.com/debdutgoswami/sorting-visualizer/archive/v1.0.tar.gz',
keywords = ['SORT', 'ALGORITHM', 'VISUALIZE'],
packages=setuptools.find_packages(),
install_requires=[
'matplotlib'
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.7',
) | true | true |
f71affe80caeb8141a00993c2bdfd94c37876974 | 383 | py | Python | scripts/get_repos.py | Ais105/course_project | a4ea3991756be2d12ae3fef9db6956f9d09c0c07 | [
"MIT"
] | null | null | null | scripts/get_repos.py | Ais105/course_project | a4ea3991756be2d12ae3fef9db6956f9d09c0c07 | [
"MIT"
] | null | null | null | scripts/get_repos.py | Ais105/course_project | a4ea3991756be2d12ae3fef9db6956f9d09c0c07 | [
"MIT"
] | 1 | 2020-02-18T20:56:57.000Z | 2020-02-18T20:56:57.000Z | import os
from github_client.client import GitHubClient
from utils.painter import paint
if __name__ == '__main__':
    # Credentials come from the environment; they are never hard-coded.
    user = os.environ['user_name']
    password = os.environ['user_password']
    client = GitHubClient(user, password)
    client.connect()
    repositories = client.get_repositories()
    # NOTE(review): 'user' is reused here — it first holds the login-name
    # string and is then rebound to the authenticated user object returned by
    # get_user().  Consider a distinct name for clarity.
    user = client.get_user()
    paint([user.login], [repositories], 500, 10000)
| 29.461538 | 51 | 0.718016 | import os
from github_client.client import GitHubClient
from utils.painter import paint
if __name__ == '__main__':
user = os.environ['user_name']
password = os.environ['user_password']
client = GitHubClient(user, password)
client.connect()
repositories = client.get_repositories()
user = client.get_user()
paint([user.login], [repositories], 500, 10000)
| true | true |
f71b00645a1360df4f8b7496608b98342bb43f7f | 8,243 | py | Python | gym_acnportal/gym_acnsim/envs/tests/test_action_spaces.py | caltech-netlab/gym-acnportal | cacd2e4aa9159a3bf7f0b8e3db2dbb0832d76e46 | [
"BSD-3-Clause"
] | null | null | null | gym_acnportal/gym_acnsim/envs/tests/test_action_spaces.py | caltech-netlab/gym-acnportal | cacd2e4aa9159a3bf7f0b8e3db2dbb0832d76e46 | [
"BSD-3-Clause"
] | 3 | 2021-04-28T14:43:32.000Z | 2021-04-28T14:58:04.000Z | gym_acnportal/gym_acnsim/envs/tests/test_action_spaces.py | sunash/gym-acnportal | cacd2e4aa9159a3bf7f0b8e3db2dbb0832d76e46 | [
"BSD-3-Clause"
] | 1 | 2020-05-12T19:13:51.000Z | 2020-05-12T19:13:51.000Z | # coding=utf-8
""" Tests for SimAction and action space functions. """
import unittest
from typing import Callable, Dict, List, Any
from unittest.mock import create_autospec
import numpy as np
from gym import Space
from ..action_spaces import (
SimAction,
single_charging_schedule,
zero_centered_single_charging_schedule,
)
from ...interfaces import GymTrainedInterface
class TestSimAction(unittest.TestCase):
    """Tests for the generic SimAction container."""

    # noinspection PyMissingOrEmptyDocstring
    @classmethod
    def setUpClass(cls) -> None:
        # The space function is produced by create_autospec and is thus a
        # Mock at runtime; typing has no Mock type, hence the Any annotation.
        cls.space_function: Any = create_autospec(lambda interface: Space())
        cls.to_schedule: Callable[
            [GymTrainedInterface, np.ndarray], Dict[str, List[float]]
        ] = lambda interface, array: {"a": [0]}
        cls.name: str = "stub_action"
        cls.sim_action: SimAction = SimAction(
            cls.space_function, cls.to_schedule, cls.name
        )
        cls.interface: GymTrainedInterface = create_autospec(GymTrainedInterface)

    def test_correct_on_init_sim_action_name(self) -> None:
        """The name given at construction is stored unchanged."""
        self.assertEqual(self.sim_action.name, self.name)

    def test_get_space(self) -> None:
        """get_space delegates exactly once to the wrapped space function."""
        self.sim_action.get_space(self.interface)
        self.space_function.assert_called_once()

    def test_get_schedule(self) -> None:
        """get_schedule returns whatever the to_schedule callable yields."""
        pilot_signals: np.ndarray = np.array([[1, 0], [0, 1]])
        schedule = self.sim_action.get_schedule(self.interface, pilot_signals)
        self.assertEqual(schedule, {"a": [0]})
class TestSingleChargingSchedule(unittest.TestCase):
    """Tests for the action space built by single_charging_schedule.

    Three mocked interfaces are exercised: min pilot signal of zero,
    negative, and positive ("deadband") on the second station only.
    """
    # Some class variables are defined outside of setUpClass so that
    # the code inspector knows that inherited classes have these
    # attributes.
    max_rate: float = 16.0
    min_rate: float = 0.0
    negative_rate: float = -4.0
    deadband_rate: float = 6.0
    # noinspection PyMissingOrEmptyDocstring
    @classmethod
    def setUpClass(cls) -> None:
        cls.sim_action: SimAction = single_charging_schedule()
        cls.station_ids: List[str] = ["T1", "T2"]
        cls.offset: float = 0.5
        def _interface_builder(interface: Any, min_rate: float) -> Any:
            # Attach station ids and pilot-signal limits to a mocked
            # interface; only the second station uses the given min_rate,
            # the first always reports cls.min_rate.
            interface.station_ids = cls.station_ids
            interface.max_pilot_signal = lambda station_id: cls.max_rate
            interface.min_pilot_signal = lambda station_id: (
                min_rate if station_id == cls.station_ids[1] else cls.min_rate
            )
            return interface
        cls.interface: Any = _interface_builder(
            create_autospec(GymTrainedInterface), cls.min_rate
        )
        cls.interface_negative_min: Any = _interface_builder(
            create_autospec(GymTrainedInterface), cls.negative_rate
        )
        cls.interface_deadband_min: Any = _interface_builder(
            create_autospec(GymTrainedInterface), cls.deadband_rate
        )
    def test_correct_on_init_single_name(self) -> None:
        """The factory names its action 'single schedule'."""
        self.assertEqual(self.sim_action.name, "single schedule")
    def _test_space_function_helper(
        self, interface: GymTrainedInterface, min_rate: float, max_rate: float
    ) -> None:
        """Assert shape, bounds, and dtype of the space for *interface*."""
        out_space: Space = self.sim_action.get_space(interface)
        self.assertEqual(out_space.shape, (len(self.station_ids),))
        np.testing.assert_equal(out_space.low, 2 * [min_rate])
        np.testing.assert_equal(out_space.high, 2 * [max_rate])
        self.assertEqual(out_space.dtype, "float")
    def test_single_space_function(self) -> None:
        """Zero minimum: bounds are [min_rate, max_rate] for both stations."""
        self._test_space_function_helper(self.interface, self.min_rate, self.max_rate)
    def test_single_space_function_negative_min(self) -> None:
        """A negative min on station 2 lowers the bound for all entries."""
        self._test_space_function_helper(
            self.interface_negative_min, self.negative_rate, self.max_rate
        )
    def test_single_space_function_deadband_min(self) -> None:
        """A positive (deadband) min still yields a lower bound of min_rate."""
        self._test_space_function_helper(
            self.interface_deadband_min, self.min_rate, self.max_rate
        )
    def test_single_to_schedule(self) -> None:
        """In-bounds pilot array maps to a one-entry list per station id."""
        good_schedule: Dict[str, List[float]] = self.sim_action.get_schedule(
            self.interface,
            np.array(
                [self.min_rate + self.offset, (self.max_rate - self.min_rate) / 2]
            ),
        )
        self.assertEqual(
            good_schedule,
            {
                self.station_ids[0]: [self.min_rate + self.offset],
                self.station_ids[1]: [(self.max_rate - self.min_rate) / 2],
            },
        )
    def test_single_to_bad_schedule(self) -> None:
        """Out-of-bounds values are passed through without clamping."""
        # The get_schedule function does not test if the input schedule
        # array is within the action space.
        bad_schedule: Dict[str, List[float]] = self.sim_action.get_schedule(
            self.interface,
            np.array([self.min_rate - self.offset, self.max_rate + self.offset]),
        )
        self.assertEqual(
            bad_schedule,
            {
                self.station_ids[0]: [self.min_rate - self.offset],
                self.station_ids[1]: [self.max_rate + self.offset],
            },
        )
    def test_single_error_schedule(self) -> None:
        """A 2-D pilot array raises TypeError."""
        with self.assertRaises(TypeError):
            _ = self.sim_action.get_schedule(
                self.interface,
                np.array(
                    [[self.min_rate - self.offset], [self.max_rate + self.offset]]
                ),
            )
class TestZeroCenteredSingleChargingSchedule(TestSingleChargingSchedule):
    """Tests for the zero-centered variant of the single schedule.

    Inherits rates and mocked interfaces from TestSingleChargingSchedule;
    bounds and schedules are shifted by (max + min) / 2.
    """
    # noinspection PyMissingOrEmptyDocstring
    @classmethod
    def setUpClass(cls) -> None:
        super().setUpClass()
        cls.sim_action: SimAction = zero_centered_single_charging_schedule()
        # Expected bounds after centering: each limit minus the midpoint
        # of the interface's [min, max] pilot range.
        cls.shifted_max = cls.max_rate - (cls.max_rate + cls.min_rate) / 2
        # One expected lower bound per mocked interface: zero-min,
        # negative-min, and deadband-min (in that order).
        cls.shifted_minimums = [
            cls.min_rate - (cls.max_rate + cls.min_rate) / 2,
            cls.negative_rate - (cls.max_rate + cls.negative_rate) / 2,
            cls.min_rate - (cls.max_rate + cls.deadband_rate) / 2,
        ]
        cls.negative_max_shift = cls.max_rate - (cls.max_rate + cls.negative_rate) / 2
    def test_correct_on_init_single_name(self) -> None:
        """The factory names its action 'zero-centered single schedule'."""
        self.assertEqual(self.sim_action.name, "zero-centered single schedule")
    def test_single_space_function(self) -> None:
        """Zero-min interface: bounds are shifted by the range midpoint."""
        self._test_space_function_helper(
            self.interface, self.shifted_minimums[0], self.shifted_max
        )
    def test_single_space_function_negative_min(self) -> None:
        """Negative-min interface: both bounds use the negative midpoint."""
        self._test_space_function_helper(
            self.interface_negative_min,
            self.shifted_minimums[1],
            self.negative_max_shift,
        )
    def test_single_space_function_deadband_min(self) -> None:
        """Deadband-min interface: lower bound shifts by the deadband midpoint."""
        self._test_space_function_helper(
            self.interface_deadband_min, self.shifted_minimums[2], self.shifted_max
        )
    def test_single_to_bad_schedule(self) -> None:
        """Out-of-bounds inputs are un-centered (midpoint added back), not clamped."""
        # The get_schedule function does not test if the input schedule
        # array is within the action space.
        bad_schedule: Dict[str, List[float]] = self.sim_action.get_schedule(
            self.interface,
            np.array([self.min_rate - self.offset, self.max_rate + self.offset]),
        )
        self.assertEqual(
            bad_schedule,
            {
                self.station_ids[0]: [
                    self.min_rate - self.offset + (self.max_rate + self.min_rate) / 2
                ],
                self.station_ids[1]: [
                    self.max_rate + self.offset + (self.max_rate + self.min_rate) / 2
                ],
            },
        )
    def test_single_to_schedule(self) -> None:
        """Centered inputs map back to the raw [min_rate, max_rate] pilots."""
        good_schedule: Dict[str, List[float]] = self.sim_action.get_schedule(
            self.interface,
            np.array(
                [
                    self.min_rate - (self.max_rate + self.min_rate) / 2,
                    self.max_rate - (self.max_rate + self.min_rate) / 2,
                ]
            ),
        )
        self.assertEqual(
            good_schedule,
            {
                self.station_ids[0]: [self.min_rate],
                self.station_ids[1]: [self.max_rate],
            },
        )
# Allow running this test module directly (python test_action_spaces.py).
if __name__ == "__main__":
    unittest.main()
| 37.298643 | 86 | 0.626592 |
import unittest
from typing import Callable, Dict, List, Any
from unittest.mock import create_autospec
import numpy as np
from gym import Space
from ..action_spaces import (
SimAction,
single_charging_schedule,
zero_centered_single_charging_schedule,
)
from ...interfaces import GymTrainedInterface
class TestSimAction(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
cls.space_function: Any = create_autospec(lambda interface: Space())
cls.to_schedule: Callable[
[GymTrainedInterface, np.ndarray], Dict[str, List[float]]
] = lambda interface, array: {"a": [0]}
cls.name: str = "stub_action"
cls.sim_action: SimAction = SimAction(
cls.space_function, cls.to_schedule, cls.name
)
cls.interface: GymTrainedInterface = create_autospec(GymTrainedInterface)
def test_correct_on_init_sim_action_name(self) -> None:
self.assertEqual(self.sim_action.name, self.name)
def test_get_space(self) -> None:
self.sim_action.get_space(self.interface)
self.space_function.assert_called_once()
def test_get_schedule(self) -> None:
array: np.ndarray = np.array([[1, 0], [0, 1]])
self.assertEqual(
self.sim_action.get_schedule(self.interface, array), {"a": [0]}
)
class TestSingleChargingSchedule(unittest.TestCase):
# Some class variables are defined outside of setUpClass so that
# the code inspector knows that inherited classes have these
# attributes.
max_rate: float = 16.0
min_rate: float = 0.0
negative_rate: float = -4.0
deadband_rate: float = 6.0
# noinspection PyMissingOrEmptyDocstring
@classmethod
def setUpClass(cls) -> None:
cls.sim_action: SimAction = single_charging_schedule()
cls.station_ids: List[str] = ["T1", "T2"]
cls.offset: float = 0.5
def _interface_builder(interface: Any, min_rate: float) -> Any:
interface.station_ids = cls.station_ids
interface.max_pilot_signal = lambda station_id: cls.max_rate
interface.min_pilot_signal = lambda station_id: (
min_rate if station_id == cls.station_ids[1] else cls.min_rate
)
return interface
cls.interface: Any = _interface_builder(
create_autospec(GymTrainedInterface), cls.min_rate
)
cls.interface_negative_min: Any = _interface_builder(
create_autospec(GymTrainedInterface), cls.negative_rate
)
cls.interface_deadband_min: Any = _interface_builder(
create_autospec(GymTrainedInterface), cls.deadband_rate
)
def test_correct_on_init_single_name(self) -> None:
self.assertEqual(self.sim_action.name, "single schedule")
def _test_space_function_helper(
self, interface: GymTrainedInterface, min_rate: float, max_rate: float
) -> None:
out_space: Space = self.sim_action.get_space(interface)
self.assertEqual(out_space.shape, (len(self.station_ids),))
np.testing.assert_equal(out_space.low, 2 * [min_rate])
np.testing.assert_equal(out_space.high, 2 * [max_rate])
self.assertEqual(out_space.dtype, "float")
def test_single_space_function(self) -> None:
self._test_space_function_helper(self.interface, self.min_rate, self.max_rate)
def test_single_space_function_negative_min(self) -> None:
self._test_space_function_helper(
self.interface_negative_min, self.negative_rate, self.max_rate
)
def test_single_space_function_deadband_min(self) -> None:
self._test_space_function_helper(
self.interface_deadband_min, self.min_rate, self.max_rate
)
def test_single_to_schedule(self) -> None:
good_schedule: Dict[str, List[float]] = self.sim_action.get_schedule(
self.interface,
np.array(
[self.min_rate + self.offset, (self.max_rate - self.min_rate) / 2]
),
)
self.assertEqual(
good_schedule,
{
self.station_ids[0]: [self.min_rate + self.offset],
self.station_ids[1]: [(self.max_rate - self.min_rate) / 2],
},
)
def test_single_to_bad_schedule(self) -> None:
# The get_schedule function does not test if the input schedule
# array is within the action space.
bad_schedule: Dict[str, List[float]] = self.sim_action.get_schedule(
self.interface,
np.array([self.min_rate - self.offset, self.max_rate + self.offset]),
)
self.assertEqual(
bad_schedule,
{
self.station_ids[0]: [self.min_rate - self.offset],
self.station_ids[1]: [self.max_rate + self.offset],
},
)
def test_single_error_schedule(self) -> None:
with self.assertRaises(TypeError):
_ = self.sim_action.get_schedule(
self.interface,
np.array(
[[self.min_rate - self.offset], [self.max_rate + self.offset]]
),
)
class TestZeroCenteredSingleChargingSchedule(TestSingleChargingSchedule):
# noinspection PyMissingOrEmptyDocstring
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
cls.sim_action: SimAction = zero_centered_single_charging_schedule()
cls.shifted_max = cls.max_rate - (cls.max_rate + cls.min_rate) / 2
cls.shifted_minimums = [
cls.min_rate - (cls.max_rate + cls.min_rate) / 2,
cls.negative_rate - (cls.max_rate + cls.negative_rate) / 2,
cls.min_rate - (cls.max_rate + cls.deadband_rate) / 2,
]
cls.negative_max_shift = cls.max_rate - (cls.max_rate + cls.negative_rate) / 2
def test_correct_on_init_single_name(self) -> None:
self.assertEqual(self.sim_action.name, "zero-centered single schedule")
def test_single_space_function(self) -> None:
self._test_space_function_helper(
self.interface, self.shifted_minimums[0], self.shifted_max
)
def test_single_space_function_negative_min(self) -> None:
self._test_space_function_helper(
self.interface_negative_min,
self.shifted_minimums[1],
self.negative_max_shift,
)
def test_single_space_function_deadband_min(self) -> None:
self._test_space_function_helper(
self.interface_deadband_min, self.shifted_minimums[2], self.shifted_max
)
def test_single_to_bad_schedule(self) -> None:
# The get_schedule function does not test if the input schedule
# array is within the action space.
bad_schedule: Dict[str, List[float]] = self.sim_action.get_schedule(
self.interface,
np.array([self.min_rate - self.offset, self.max_rate + self.offset]),
)
self.assertEqual(
bad_schedule,
{
self.station_ids[0]: [
self.min_rate - self.offset + (self.max_rate + self.min_rate) / 2
],
self.station_ids[1]: [
self.max_rate + self.offset + (self.max_rate + self.min_rate) / 2
],
},
)
def test_single_to_schedule(self) -> None:
good_schedule: Dict[str, List[float]] = self.sim_action.get_schedule(
self.interface,
np.array(
[
self.min_rate - (self.max_rate + self.min_rate) / 2,
self.max_rate - (self.max_rate + self.min_rate) / 2,
]
),
)
self.assertEqual(
good_schedule,
{
self.station_ids[0]: [self.min_rate],
self.station_ids[1]: [self.max_rate],
},
)
if __name__ == "__main__":
unittest.main()
| true | true |
f71b01275c21a9328c6c1e5ce44454451a5dbe4c | 8,256 | py | Python | docs/conf.py | cic79/django-1.6-fine-uploader | 14ed9ca3e01ed9680760368da7c277aedb8dfde2 | [
"MIT"
] | 36 | 2017-02-10T18:39:03.000Z | 2022-03-23T19:52:38.000Z | docs/conf.py | cic79/django-1.6-fine-uploader | 14ed9ca3e01ed9680760368da7c277aedb8dfde2 | [
"MIT"
] | 9 | 2017-02-11T20:33:31.000Z | 2019-04-12T19:02:19.000Z | docs/conf.py | cic79/django-1.6-fine-uploader | 14ed9ca3e01ed9680760368da7c277aedb8dfde2 | [
"MIT"
] | 19 | 2017-03-19T23:54:05.000Z | 2020-09-02T14:42:57.000Z | # -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# PEP 8: one module per import statement.
import os
import sys

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# Make the package importable when Sphinx runs from the docs/ directory:
# append the repository root (the parent of the current working directory).
cwd = os.getcwd()
parent = os.path.dirname(cwd)
sys.path.append(parent)

import django_fine_uploader
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django_fine_uploader'
copyright = u'2017, Douglas Miranda'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = django_fine_uploader.__version__
# The full version, including alpha/beta/rc tags.
release = django_fine_uploader.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-fine-uploaderdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-fine-uploader.tex', u'django_fine_uploader Documentation',
u'Douglas Miranda', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-fine-uploader', u'django_fine_uploader Documentation',
[u'Douglas Miranda'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-fine-uploader', u'django_fine_uploader Documentation',
u'Douglas Miranda', 'django-fine-uploader', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 32.376471 | 82 | 0.721294 |
import sys, os
cwd = os.getcwd()
parent = os.path.dirname(cwd)
sys.path.append(parent)
import django_fine_uploader
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'django_fine_uploader'
copyright = u'2017, Douglas Miranda'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = django_fine_uploader.__version__
# The full version, including alpha/beta/rc tags.
release = django_fine_uploader.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-fine-uploaderdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-fine-uploader.tex', u'django_fine_uploader Documentation',
u'Douglas Miranda', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-fine-uploader', u'django_fine_uploader Documentation',
[u'Douglas Miranda'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-fine-uploader', u'django_fine_uploader Documentation',
u'Douglas Miranda', 'django-fine-uploader', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
| true | true |
f71b01c4adc02f98e92a5b0a74684be8020686f2 | 397 | py | Python | diploma/urls.py | taras-ua/diplomawork | c7540d5c674194ec0da965be18bd3becaf69d5f3 | [
"Apache-2.0"
] | 1 | 2015-05-08T09:07:06.000Z | 2015-05-08T09:07:06.000Z | diploma/urls.py | taras-ua/diplomawork | c7540d5c674194ec0da965be18bd3becaf69d5f3 | [
"Apache-2.0"
] | null | null | null | diploma/urls.py | taras-ua/diplomawork | c7540d5c674194ec0da965be18bd3becaf69d5f3 | [
"Apache-2.0"
] | null | null | null | from django.conf import settings
from django.conf.urls import patterns, url
# from django.contrib import admin
urlpatterns = patterns('',
url(r'^$', 'app.views.home', name='home'),
url(r'^graph/$', 'app.views.graph', name='graph'),
url(r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT}),
# url(r'^admin/', include(admin.site.urls)),
) | 36.090909 | 104 | 0.662469 | from django.conf import settings
from django.conf.urls import patterns, url
urlpatterns = patterns('',
url(r'^$', 'app.views.home', name='home'),
url(r'^graph/$', 'app.views.graph', name='graph'),
url(r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT}),
) | true | true |
f71b01e05fff8dd74f109eed0f8a0d197aea5341 | 2,498 | py | Python | datasets/readers/ccpd.py | ckxy/part-of-hitogata | 76402d48a336fcd964d0e64bb01d959e8f07f296 | [
"MIT"
] | null | null | null | datasets/readers/ccpd.py | ckxy/part-of-hitogata | 76402d48a336fcd964d0e64bb01d959e8f07f296 | [
"MIT"
] | null | null | null | datasets/readers/ccpd.py | ckxy/part-of-hitogata | 76402d48a336fcd964d0e64bb01d959e8f07f296 | [
"MIT"
] | null | null | null | import os
import numpy as np
from addict import Dict
from PIL import Image
from .reader import Reader
from .builder import READER
__all__ = ['CCPD2019FolderReader']
@READER.register_module()
class CCPD2019FolderReader(Reader):
    """Reader for a flat folder of CCPD2019 license-plate images.

    Each file name encodes the plate text before the first '-' / '_'
    separator; every character is mapped to its index in ``self.chars``.
    """

    def __init__(self, root, **kwargs):
        """
        Args:
            root: directory containing the CCPD image files.
            **kwargs: forwarded to the base ``Reader``.
        """
        super(CCPD2019FolderReader, self).__init__(**kwargs)
        self.root = root
        # Chinese province abbreviations, digits, and letters appearing on
        # CCPD plates; '-' is the trailing blank/padding symbol.
        self.chars = ('京', '沪', '津', '渝', '冀', '晋', '蒙', '辽', '吉', '黑',
                      '苏', '浙', '皖', '闽', '赣', '鲁', '豫', '鄂', '湘', '粤',
                      '桂', '琼', '川', '贵', '云', '藏', '陕', '甘', '青', '宁',
                      '新',
                      '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
                      'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K',
                      'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',
                      'W', 'X', 'Y', 'Z', 'I', 'O', '-')
        # Bug fix: use the explicit ``root`` parameter. ``root`` is bound to
        # the named parameter and therefore never appears in ``kwargs``, so
        # the previous ``kwargs['root']`` always raised KeyError.
        self.img_paths = sorted(os.listdir(root))
        assert len(self.img_paths) > 0, 'no images found in {}'.format(root)

    def get_dataset_info(self):
        """Return the index range and the character alphabet."""
        return range(len(self.img_paths)), Dict({'chars': self.chars})

    def get_data_info(self, index):
        """Return the (h, w) size of image ``index``."""
        # Bug fix: open the image under its full path; the previous code
        # indexed the first *character* of the file name.
        img = Image.open(os.path.join(self.root, self.img_paths[index]))
        w, h = img.size
        return dict(h=h, w=w)

    def __call__(self, index):
        """Load image ``index`` and decode its plate-label sequence.

        Returns:
            dict with the image, its original size as float32 [h, w], the
            file path, per-character class indices, and sequence length.
        """
        path = os.path.join(self.root, self.img_paths[index])
        img = self.read_image(path)
        w, h = img.size
        base_name = os.path.basename(self.img_paths[index])
        img_name, _ = os.path.splitext(base_name)
        # The plate text is everything before the first '-' and '_'.
        img_name = img_name.split("-")[0].split("_")[0]
        words = [self.chars.index(c) for c in img_name]
        return dict(
            image=img,
            ori_size=np.array([h, w]).astype(np.float32),
            path=path,
            seq=words,
            seq_length=len(words),
        )

    def __repr__(self):
        return 'CCPD2019FolderReader(root={}, {})'.format(self.root, super(CCPD2019FolderReader, self).__repr__())
| 36.202899 | 134 | 0.502002 | import os
import numpy as np
from addict import Dict
from PIL import Image
from .reader import Reader
from .builder import READER
__all__ = ['CCPD2019FolderReader']
@READER.register_module()
class CCPD2019FolderReader(Reader):
def __init__(self, root, **kwargs):
super(CCPD2019FolderReader, self).__init__(**kwargs)
self.root = root
self.chars = ('京', '沪', '津', '渝', '冀', '晋', '蒙', '辽', '吉', '黑',
'苏', '浙', '皖', '闽', '赣', '鲁', '豫', '鄂', '湘', '粤',
'桂', '琼', '川', '贵', '云', '藏', '陕', '甘', '青', '宁',
'新',
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K',
'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',
'W', 'X', 'Y', 'Z', 'I', 'O', '-')
self.img_paths = sorted(os.listdir(kwargs['root']))
assert len(self.img_paths) > 0
def get_dataset_info(self):
return range(len(self.img_paths)), Dict({'chars': self.chars})
def get_data_info(self, index):
img = Image.open(self.img_paths[index][0])
w, h = img.size
return dict(h=h, w=w)
def __call__(self, index):
img = self.read_image(os.path.join(self.root, self.img_paths[index]))
w, h = img.size
path = os.path.join(self.root, self.img_paths[index])
base_name = os.path.basename(self.img_paths[index])
img_name, suffix = os.path.splitext(base_name)
img_name = img_name.split("-")[0].split("_")[0]
words = []
for c in img_name:
words.append(self.chars.index(c))
return dict(
image=img,
ori_size=np.array([h, w]).astype(np.float32),
path=path,
seq=words,
seq_length=len(words)
)
def __repr__(self):
return 'CCPD2019FolderReader(root={}, {})'.format(self.root, super(CCPD2019FolderReader, self).__repr__())
| true | true |
f71b0315edb287f8456c42472f645c6b7bd59cdc | 383 | py | Python | app/src/asgi.py | swelanauguste/kingship | d5c302b22383eb769d22f41e69e0c48e638aec92 | [
"MIT"
] | 5 | 2022-02-04T19:23:26.000Z | 2022-02-26T10:15:25.000Z | src/asgi.py | AnvarKhan/django-rest-api | b2f60bbd7ebcf0977dc13ceffd9a3a4f631a03ee | [
"Apache-2.0"
] | 1 | 2022-01-15T16:22:30.000Z | 2022-01-15T16:22:30.000Z | src/asgi.py | AnvarKhan/django-rest-api | b2f60bbd7ebcf0977dc13ceffd9a3a4f631a03ee | [
"Apache-2.0"
] | 1 | 2022-03-31T15:02:47.000Z | 2022-03-31T15:02:47.000Z | """
ASGI config for src project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'src.settings')
application = get_asgi_application()
| 22.529412 | 78 | 0.780679 |
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'src.settings')
application = get_asgi_application()
| true | true |
f71b0388863894e2666b6a38ce7c0c7eb1c0da2c | 462 | py | Python | codemate/exceptions.py | DavidMeu/codemate | fcdc7591c8a1cd5922ddab1a3ec7a0dae37576c3 | [
"MIT"
] | null | null | null | codemate/exceptions.py | DavidMeu/codemate | fcdc7591c8a1cd5922ddab1a3ec7a0dae37576c3 | [
"MIT"
] | null | null | null | codemate/exceptions.py | DavidMeu/codemate | fcdc7591c8a1cd5922ddab1a3ec7a0dae37576c3 | [
"MIT"
] | null | null | null | import black
class GenerationError(Exception):
"""Represents an exception while generating the Python syntax"""
class PythonSyntaxError(GenerationError):
"""Represents an exception in the python syntax"""
class InputError(GenerationError, black.InvalidInput):
"""Raised when the generated Python code isn't valid by black"""
class SaveFileError(GenerationError, OSError):
"""Raised when the generated Python code file can't be created"""
| 25.666667 | 69 | 0.757576 | import black
class GenerationError(Exception):
class PythonSyntaxError(GenerationError):
class InputError(GenerationError, black.InvalidInput):
class SaveFileError(GenerationError, OSError):
| true | true |
f71b0396126fe3f7506f8a4954c971dd92305753 | 7,390 | py | Python | rlkit/samplers/data_collector/path_collector.py | YeeCY/PASF | 95e548d365ea5da482c56408539d9a1514ef246b | [
"MIT"
] | 4 | 2021-12-23T20:55:52.000Z | 2022-03-14T04:57:02.000Z | rlkit/samplers/data_collector/path_collector.py | YeeCY/PASF | 95e548d365ea5da482c56408539d9a1514ef246b | [
"MIT"
] | null | null | null | rlkit/samplers/data_collector/path_collector.py | YeeCY/PASF | 95e548d365ea5da482c56408539d9a1514ef246b | [
"MIT"
] | 1 | 2022-01-14T01:32:04.000Z | 2022-01-14T01:32:04.000Z | from collections import deque, OrderedDict
from functools import partial
import numpy as np
from rlkit.core.eval_util import create_stats_ordered_dict
from rlkit.samplers.data_collector.base import PathCollector
from rlkit.samplers.rollout_functions import rollout
class ActionAgent():
def __init__(self):
self._actions = None
self._step = 0
def reset(self):
self._step = 0
def set_action(self, actions):
self._actions = actions
def get_action(self, *args, **kwargs):
action = self._actions[self._step]
self._step += 1
return action, []
class MdpPathCollector(PathCollector):
def __init__(
self,
env,
policy,
max_num_epoch_paths_saved=None,
render=False,
render_kwargs=None,
rollout_fn=rollout,
save_env_in_snapshot=True,
):
if render_kwargs is None:
render_kwargs = {}
self._env = env
self._policy = policy
self._max_num_epoch_paths_saved = max_num_epoch_paths_saved
self._epoch_paths = deque(maxlen=self._max_num_epoch_paths_saved)
self._render = render
self._render_kwargs = render_kwargs
self._rollout_fn = rollout_fn
self._action_agent = ActionAgent()
self._num_steps_total = 0
self._num_paths_total = 0
self._save_env_in_snapshot = save_env_in_snapshot
def collect_new_paths(
self,
max_path_length,
num_steps,
discard_incomplete_paths,
):
paths = []
num_steps_collected = 0
while num_steps_collected < num_steps:
max_path_length_this_loop = min( # Do not go over num_steps
max_path_length,
num_steps - num_steps_collected,
)
path = self._rollout_fn(
self._env,
self._policy,
max_path_length=max_path_length_this_loop,
render=self._render,
render_kwargs=self._render_kwargs,
)
path_len = len(path['actions'])
if (
path_len != max_path_length
and not path['terminals'][-1]
and discard_incomplete_paths
):
break
num_steps_collected += path_len
paths.append(path)
self._num_paths_total += len(paths)
self._num_steps_total += num_steps_collected
self._epoch_paths.extend(paths)
return paths
def collect_aligned_paths(self, path_actions, discard_incomplete_paths=True):
paths = []
num_steps_collected = 0
for p in path_actions:
max_path_length = len(p)
self._action_agent.set_action(p)
path = self._rollout_fn(
self._env,
self._action_agent,
max_path_length=max_path_length,
render=self._render,
render_kwargs=self._render_kwargs,
)
path_len = len(path['actions'])
if (
path_len != max_path_length
and not path['terminals'][-1]
and discard_incomplete_paths
):
break
num_steps_collected += path_len
paths.append(path)
self._num_paths_total += len(paths)
self._num_steps_total += num_steps_collected
return paths
def get_epoch_paths(self):
return self._epoch_paths
def end_epoch(self, epoch):
self._epoch_paths = deque(maxlen=self._max_num_epoch_paths_saved)
def get_diagnostics(self):
path_lens = [len(path['actions']) for path in self._epoch_paths]
stats = OrderedDict([
('num steps total', self._num_steps_total),
('num paths total', self._num_paths_total),
])
stats.update(create_stats_ordered_dict(
"path length",
path_lens,
always_show_all_stats=True,
))
return stats
def get_snapshot(self):
snapshot_dict = dict(
policy=self._policy,
num_steps_total=self._num_steps_total,
num_paths_total=self._num_paths_total,
)
if self._save_env_in_snapshot:
snapshot_dict['env'] = self._env
return snapshot_dict
def load_from_snapshot(self, snapshot):
self._policy = snapshot['policy']
self._num_steps_total = snapshot['num_steps_total']
self._num_paths_total = snapshot['num_paths_total']
if self._save_env_in_snapshot:
assert 'env' in snapshot
if hasattr(self._env, '_custom_goal_sampler'):
snapshot['env']._custom_goal_sampler = self._env._custom_goal_sampler
self._env = snapshot['env']
class GoalConditionedPathCollector(MdpPathCollector):
def __init__(
self,
*args,
observation_key='observation',
desired_goal_key='desired_goal',
goal_sampling_mode=None,
**kwargs
):
def obs_processor(o):
return np.hstack((o[observation_key], o[desired_goal_key]))
rollout_fn = partial(
rollout,
preprocess_obs_for_policy_fn=obs_processor,
)
super().__init__(*args, rollout_fn=rollout_fn, **kwargs)
self._observation_key = observation_key
self._desired_goal_key = desired_goal_key
self._goal_sampling_mode = goal_sampling_mode
def collect_new_paths(self, *args, **kwargs):
self._env.goal_sampling_mode = self._goal_sampling_mode
return super().collect_new_paths(*args, **kwargs)
def get_snapshot(self):
snapshot = super().get_snapshot()
snapshot.update(
observation_key=self._observation_key,
desired_goal_key=self._desired_goal_key,
)
return snapshot
def load_from_snapshot(self, snapshot):
super().load_from_snapshot(snapshot)
self._observation_key = snapshot['observation_key']
self._desired_goal_key = snapshot['desired_goal_key']
class ObsDictPathCollector(MdpPathCollector):
def __init__(
self,
*args,
observation_key='observation',
**kwargs
):
def obs_processor(obs):
return obs[observation_key]
rollout_fn = partial(
rollout,
preprocess_obs_for_policy_fn=obs_processor,
)
super().__init__(*args, rollout_fn=rollout_fn, **kwargs)
self._observation_key = observation_key
def get_snapshot(self):
snapshot = super().get_snapshot()
snapshot.update(
observation_key=self._observation_key,
)
return snapshot
class VAEWrappedEnvPathCollector(GoalConditionedPathCollector):
def __init__(
self,
env,
policy,
decode_goals=False,
**kwargs
):
"""Expects env is VAEWrappedEnv"""
super().__init__(env, policy, **kwargs)
self._decode_goals = decode_goals
def collect_new_paths(self, *args, **kwargs):
self._env.decode_goals = self._decode_goals
return super().collect_new_paths(*args, **kwargs)
| 31.446809 | 85 | 0.600406 | from collections import deque, OrderedDict
from functools import partial
import numpy as np
from rlkit.core.eval_util import create_stats_ordered_dict
from rlkit.samplers.data_collector.base import PathCollector
from rlkit.samplers.rollout_functions import rollout
class ActionAgent():
def __init__(self):
self._actions = None
self._step = 0
def reset(self):
self._step = 0
def set_action(self, actions):
self._actions = actions
def get_action(self, *args, **kwargs):
action = self._actions[self._step]
self._step += 1
return action, []
class MdpPathCollector(PathCollector):
def __init__(
self,
env,
policy,
max_num_epoch_paths_saved=None,
render=False,
render_kwargs=None,
rollout_fn=rollout,
save_env_in_snapshot=True,
):
if render_kwargs is None:
render_kwargs = {}
self._env = env
self._policy = policy
self._max_num_epoch_paths_saved = max_num_epoch_paths_saved
self._epoch_paths = deque(maxlen=self._max_num_epoch_paths_saved)
self._render = render
self._render_kwargs = render_kwargs
self._rollout_fn = rollout_fn
self._action_agent = ActionAgent()
self._num_steps_total = 0
self._num_paths_total = 0
self._save_env_in_snapshot = save_env_in_snapshot
def collect_new_paths(
self,
max_path_length,
num_steps,
discard_incomplete_paths,
):
paths = []
num_steps_collected = 0
while num_steps_collected < num_steps:
max_path_length_this_loop = min(
max_path_length,
num_steps - num_steps_collected,
)
path = self._rollout_fn(
self._env,
self._policy,
max_path_length=max_path_length_this_loop,
render=self._render,
render_kwargs=self._render_kwargs,
)
path_len = len(path['actions'])
if (
path_len != max_path_length
and not path['terminals'][-1]
and discard_incomplete_paths
):
break
num_steps_collected += path_len
paths.append(path)
self._num_paths_total += len(paths)
self._num_steps_total += num_steps_collected
self._epoch_paths.extend(paths)
return paths
def collect_aligned_paths(self, path_actions, discard_incomplete_paths=True):
paths = []
num_steps_collected = 0
for p in path_actions:
max_path_length = len(p)
self._action_agent.set_action(p)
path = self._rollout_fn(
self._env,
self._action_agent,
max_path_length=max_path_length,
render=self._render,
render_kwargs=self._render_kwargs,
)
path_len = len(path['actions'])
if (
path_len != max_path_length
and not path['terminals'][-1]
and discard_incomplete_paths
):
break
num_steps_collected += path_len
paths.append(path)
self._num_paths_total += len(paths)
self._num_steps_total += num_steps_collected
return paths
def get_epoch_paths(self):
return self._epoch_paths
def end_epoch(self, epoch):
self._epoch_paths = deque(maxlen=self._max_num_epoch_paths_saved)
def get_diagnostics(self):
path_lens = [len(path['actions']) for path in self._epoch_paths]
stats = OrderedDict([
('num steps total', self._num_steps_total),
('num paths total', self._num_paths_total),
])
stats.update(create_stats_ordered_dict(
"path length",
path_lens,
always_show_all_stats=True,
))
return stats
def get_snapshot(self):
snapshot_dict = dict(
policy=self._policy,
num_steps_total=self._num_steps_total,
num_paths_total=self._num_paths_total,
)
if self._save_env_in_snapshot:
snapshot_dict['env'] = self._env
return snapshot_dict
def load_from_snapshot(self, snapshot):
self._policy = snapshot['policy']
self._num_steps_total = snapshot['num_steps_total']
self._num_paths_total = snapshot['num_paths_total']
if self._save_env_in_snapshot:
assert 'env' in snapshot
if hasattr(self._env, '_custom_goal_sampler'):
snapshot['env']._custom_goal_sampler = self._env._custom_goal_sampler
self._env = snapshot['env']
class GoalConditionedPathCollector(MdpPathCollector):
def __init__(
self,
*args,
observation_key='observation',
desired_goal_key='desired_goal',
goal_sampling_mode=None,
**kwargs
):
def obs_processor(o):
return np.hstack((o[observation_key], o[desired_goal_key]))
rollout_fn = partial(
rollout,
preprocess_obs_for_policy_fn=obs_processor,
)
super().__init__(*args, rollout_fn=rollout_fn, **kwargs)
self._observation_key = observation_key
self._desired_goal_key = desired_goal_key
self._goal_sampling_mode = goal_sampling_mode
def collect_new_paths(self, *args, **kwargs):
self._env.goal_sampling_mode = self._goal_sampling_mode
return super().collect_new_paths(*args, **kwargs)
def get_snapshot(self):
snapshot = super().get_snapshot()
snapshot.update(
observation_key=self._observation_key,
desired_goal_key=self._desired_goal_key,
)
return snapshot
def load_from_snapshot(self, snapshot):
super().load_from_snapshot(snapshot)
self._observation_key = snapshot['observation_key']
self._desired_goal_key = snapshot['desired_goal_key']
class ObsDictPathCollector(MdpPathCollector):
def __init__(
self,
*args,
observation_key='observation',
**kwargs
):
def obs_processor(obs):
return obs[observation_key]
rollout_fn = partial(
rollout,
preprocess_obs_for_policy_fn=obs_processor,
)
super().__init__(*args, rollout_fn=rollout_fn, **kwargs)
self._observation_key = observation_key
def get_snapshot(self):
snapshot = super().get_snapshot()
snapshot.update(
observation_key=self._observation_key,
)
return snapshot
class VAEWrappedEnvPathCollector(GoalConditionedPathCollector):
def __init__(
self,
env,
policy,
decode_goals=False,
**kwargs
):
super().__init__(env, policy, **kwargs)
self._decode_goals = decode_goals
def collect_new_paths(self, *args, **kwargs):
self._env.decode_goals = self._decode_goals
return super().collect_new_paths(*args, **kwargs)
| true | true |
f71b041f1c1924df958e173865289e1f39ee38d2 | 1,367 | py | Python | azure/mgmt/network/v2017_06_01/models/effective_network_security_group_list_result.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | azure/mgmt/network/v2017_06_01/models/effective_network_security_group_list_result.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | azure/mgmt/network/v2017_06_01/models/effective_network_security_group_list_result.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class EffectiveNetworkSecurityGroupListResult(Model):
"""Response for list effective network security groups API service call.
Variables are only populated by the server, and will be ignored when
sending a request.
:param value: A list of effective network security groups.
:type value:
list[~azure.mgmt.network.v2017_06_01.models.EffectiveNetworkSecurityGroup]
:ivar next_link: The URL to get the next set of results.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[EffectiveNetworkSecurityGroup]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(self, value=None):
self.value = value
self.next_link = None
| 34.175 | 80 | 0.597659 |
from msrest.serialization import Model
class EffectiveNetworkSecurityGroupListResult(Model):
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[EffectiveNetworkSecurityGroup]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(self, value=None):
self.value = value
self.next_link = None
| true | true |
f71b05b697f301dc72ecb335f3b5caf31b92323a | 1,766 | py | Python | plaidrl/exploration_strategies/ou_strategy.py | charliec443/plaid-rl | 2e8fbf389af9efecd41361df80e40e0bf932056d | [
"MIT"
] | null | null | null | plaidrl/exploration_strategies/ou_strategy.py | charliec443/plaid-rl | 2e8fbf389af9efecd41361df80e40e0bf932056d | [
"MIT"
] | null | null | null | plaidrl/exploration_strategies/ou_strategy.py | charliec443/plaid-rl | 2e8fbf389af9efecd41361df80e40e0bf932056d | [
"MIT"
] | null | null | null | import numpy as np
import numpy.random as nr
from plaidrl.exploration_strategies.base import RawExplorationStrategy
class OUStrategy(RawExplorationStrategy):
"""
This strategy implements the Ornstein-Uhlenbeck process, which adds
time-correlated noise to the actions taken by the deterministic policy.
The OU process satisfies the following stochastic differential equation:
dxt = theta*(mu - xt)*dt + sigma*dWt
where Wt denotes the Wiener process
Based on the rllab implementation.
"""
def __init__(
self,
action_space,
mu=0,
theta=0.15,
max_sigma=0.3,
min_sigma=None,
decay_period=100000,
):
if min_sigma is None:
min_sigma = max_sigma
self.mu = mu
self.theta = theta
self.sigma = max_sigma
self._max_sigma = max_sigma
if min_sigma is None:
min_sigma = max_sigma
self._min_sigma = min_sigma
self._decay_period = decay_period
self.dim = np.prod(action_space.low.shape)
self.low = action_space.low
self.high = action_space.high
self.state = np.ones(self.dim) * self.mu
self.reset()
def reset(self):
self.state = np.ones(self.dim) * self.mu
def evolve_state(self):
x = self.state
dx = self.theta * (self.mu - x) + self.sigma * nr.randn(len(x))
self.state = x + dx
return self.state
def get_action_from_raw_action(self, action, t=0, **kwargs):
ou_state = self.evolve_state()
self.sigma = self._max_sigma - (self._max_sigma - self._min_sigma) * min(
1.0, t * 1.0 / self._decay_period
)
return np.clip(action + ou_state, self.low, self.high)
| 30.448276 | 81 | 0.624575 | import numpy as np
import numpy.random as nr
from plaidrl.exploration_strategies.base import RawExplorationStrategy
class OUStrategy(RawExplorationStrategy):
def __init__(
self,
action_space,
mu=0,
theta=0.15,
max_sigma=0.3,
min_sigma=None,
decay_period=100000,
):
if min_sigma is None:
min_sigma = max_sigma
self.mu = mu
self.theta = theta
self.sigma = max_sigma
self._max_sigma = max_sigma
if min_sigma is None:
min_sigma = max_sigma
self._min_sigma = min_sigma
self._decay_period = decay_period
self.dim = np.prod(action_space.low.shape)
self.low = action_space.low
self.high = action_space.high
self.state = np.ones(self.dim) * self.mu
self.reset()
def reset(self):
self.state = np.ones(self.dim) * self.mu
def evolve_state(self):
x = self.state
dx = self.theta * (self.mu - x) + self.sigma * nr.randn(len(x))
self.state = x + dx
return self.state
def get_action_from_raw_action(self, action, t=0, **kwargs):
ou_state = self.evolve_state()
self.sigma = self._max_sigma - (self._max_sigma - self._min_sigma) * min(
1.0, t * 1.0 / self._decay_period
)
return np.clip(action + ou_state, self.low, self.high)
| true | true |
f71b05f85e4726833080015f2927cdaf291362a9 | 5,561 | py | Python | backend/api/tests/expected_data.py | INSRapperswil/nornir-web | 458e6b24bc373197044b4b7b5da74f16f93a9459 | [
"MIT"
] | 2 | 2021-06-01T08:33:04.000Z | 2021-08-20T04:22:39.000Z | backend/api/tests/expected_data.py | INSRapperswil/nornir-web | 458e6b24bc373197044b4b7b5da74f16f93a9459 | [
"MIT"
] | null | null | null | backend/api/tests/expected_data.py | INSRapperswil/nornir-web | 458e6b24bc373197044b4b7b5da74f16f93a9459 | [
"MIT"
] | null | null | null | expected_inventory_list = {
'count': 2,
'results': [{'detail': 'http://testserver/api/inventories/1/',
'groups_file': 'web_nornir/nornir_config/example_config/groups.yaml',
'hosts_file': 'web_nornir/nornir_config/example_config/hosts.yaml',
'id': 1,
'name': 'Example',
'type': 1},
{'detail': 'http://testserver/api/inventories/2/',
'groups_file': 'web_nornir/nornir_config/inslab_config/groups.yaml',
'hosts_file': 'web_nornir/nornir_config/inslab_config/hosts.yaml',
'id': 2,
'name': 'INS Lab',
'type': 1}],
}
expected_jobtemplate_list = {
'count': 5,
'next': None,
'previous': None,
'results': [{'created_by': 1,
'created_name': 'thomastest',
'description': 'This prints a hello world',
'detail': 'http://testserver/api/templates/1/',
'file_name': 'hello_world.py',
'function_name': 'job_function',
'id': 1,
'name': 'hello_world',
'package_path': '/web_nornir/job_templates/',
'variables': []},
{'created_by': 1,
'created_name': 'thomastest',
'description': 'Lists all CDP neighbors',
'detail': 'http://testserver/api/templates/2/',
'file_name': 'get_cdp_neighbors.py',
'function_name': 'job_function',
'id': 2,
'name': 'Get CDP Neighbors',
'package_path': '/web_nornir/job_templates/',
'variables': []},
{'created_by': 1,
'created_name': 'thomastest',
'description': 'Gets brief information about all interfaces, sh '
'ip int br',
'detail': 'http://testserver/api/templates/3/',
'file_name': 'get_interfaces.py',
'function_name': 'job_function',
'id': 3,
'name': 'Get Interfaces',
'package_path': '/web_nornir/job_templates/',
'variables': []},
{'created_by': 1,
'created_name': 'thomastest',
'description': 'Pings a chosen network device and reports if '
'reachable',
'detail': 'http://testserver/api/templates/4/',
'file_name': 'ping.py',
'function_name': 'job_function',
'id': 4,
'name': 'Ping Device',
'package_path': '/web_nornir/job_templates/',
'variables': ['target']},
{'created_by': 1,
'created_name': 'thomastest',
'description': 'Gets all configuration from device',
'detail': 'http://testserver/api/templates/5/',
'file_name': 'get_configuration.py',
'function_name': 'job_function',
'id': 5,
'name': 'Get Configuration',
'package_path': '/web_nornir/job_templates/',
'variables': []},
]
}
expected_task_list = {
'count': 3,
'next': None,
'previous': None,
'results': [{'created_by': 2,
'created_name': 'norbert',
'date_finished': None,
'date_scheduled': None,
'date_started': None,
'detail': 'http://testserver/api/tasks/3/',
'filters': {},
'id': 3,
'inventory': 2,
'inventory_name': 'INS Lab',
'name': 'Get interfaces of INS lab',
'result': {},
'status': 0,
'template': 3,
'template_name': 'Get Interfaces',
'is_template': False,
'variables': {}},
{'created_by': 2,
'created_name': 'norbert',
'date_finished': None,
'date_scheduled': None,
'date_started': None,
'detail': 'http://testserver/api/tasks/2/',
'filters': {},
'id': 2,
'inventory': 2,
'inventory_name': 'INS Lab',
'name': 'Get CDP neighbors of INS lab',
'result': {},
'status': 0,
'template': 2,
'template_name': 'Get CDP Neighbors',
'is_template': False,
'variables': {}},
{'created_by': 1,
'created_name': 'thomastest',
'date_finished': None,
'date_scheduled': None,
'date_started': None,
'detail': 'http://testserver/api/tasks/1/',
'filters': {},
'id': 1,
'inventory': 1,
'inventory_name': 'Example',
'name': 'Get Hello World',
'result': {},
'status': 0,
'template': 1,
'template_name': 'hello_world',
'is_template': False,
'variables': {}}
],
}
| 41.81203 | 87 | 0.420428 | expected_inventory_list = {
'count': 2,
'results': [{'detail': 'http://testserver/api/inventories/1/',
'groups_file': 'web_nornir/nornir_config/example_config/groups.yaml',
'hosts_file': 'web_nornir/nornir_config/example_config/hosts.yaml',
'id': 1,
'name': 'Example',
'type': 1},
{'detail': 'http://testserver/api/inventories/2/',
'groups_file': 'web_nornir/nornir_config/inslab_config/groups.yaml',
'hosts_file': 'web_nornir/nornir_config/inslab_config/hosts.yaml',
'id': 2,
'name': 'INS Lab',
'type': 1}],
}
expected_jobtemplate_list = {
'count': 5,
'next': None,
'previous': None,
'results': [{'created_by': 1,
'created_name': 'thomastest',
'description': 'This prints a hello world',
'detail': 'http://testserver/api/templates/1/',
'file_name': 'hello_world.py',
'function_name': 'job_function',
'id': 1,
'name': 'hello_world',
'package_path': '/web_nornir/job_templates/',
'variables': []},
{'created_by': 1,
'created_name': 'thomastest',
'description': 'Lists all CDP neighbors',
'detail': 'http://testserver/api/templates/2/',
'file_name': 'get_cdp_neighbors.py',
'function_name': 'job_function',
'id': 2,
'name': 'Get CDP Neighbors',
'package_path': '/web_nornir/job_templates/',
'variables': []},
{'created_by': 1,
'created_name': 'thomastest',
'description': 'Gets brief information about all interfaces, sh '
'ip int br',
'detail': 'http://testserver/api/templates/3/',
'file_name': 'get_interfaces.py',
'function_name': 'job_function',
'id': 3,
'name': 'Get Interfaces',
'package_path': '/web_nornir/job_templates/',
'variables': []},
{'created_by': 1,
'created_name': 'thomastest',
'description': 'Pings a chosen network device and reports if '
'reachable',
'detail': 'http://testserver/api/templates/4/',
'file_name': 'ping.py',
'function_name': 'job_function',
'id': 4,
'name': 'Ping Device',
'package_path': '/web_nornir/job_templates/',
'variables': ['target']},
{'created_by': 1,
'created_name': 'thomastest',
'description': 'Gets all configuration from device',
'detail': 'http://testserver/api/templates/5/',
'file_name': 'get_configuration.py',
'function_name': 'job_function',
'id': 5,
'name': 'Get Configuration',
'package_path': '/web_nornir/job_templates/',
'variables': []},
]
}
expected_task_list = {
'count': 3,
'next': None,
'previous': None,
'results': [{'created_by': 2,
'created_name': 'norbert',
'date_finished': None,
'date_scheduled': None,
'date_started': None,
'detail': 'http://testserver/api/tasks/3/',
'filters': {},
'id': 3,
'inventory': 2,
'inventory_name': 'INS Lab',
'name': 'Get interfaces of INS lab',
'result': {},
'status': 0,
'template': 3,
'template_name': 'Get Interfaces',
'is_template': False,
'variables': {}},
{'created_by': 2,
'created_name': 'norbert',
'date_finished': None,
'date_scheduled': None,
'date_started': None,
'detail': 'http://testserver/api/tasks/2/',
'filters': {},
'id': 2,
'inventory': 2,
'inventory_name': 'INS Lab',
'name': 'Get CDP neighbors of INS lab',
'result': {},
'status': 0,
'template': 2,
'template_name': 'Get CDP Neighbors',
'is_template': False,
'variables': {}},
{'created_by': 1,
'created_name': 'thomastest',
'date_finished': None,
'date_scheduled': None,
'date_started': None,
'detail': 'http://testserver/api/tasks/1/',
'filters': {},
'id': 1,
'inventory': 1,
'inventory_name': 'Example',
'name': 'Get Hello World',
'result': {},
'status': 0,
'template': 1,
'template_name': 'hello_world',
'is_template': False,
'variables': {}}
],
}
| true | true |
f71b0609c2bed09adba0e74d664508aaf13cf106 | 506 | py | Python | data/scripts/templates/object/tangible/item/quest/force_sensitive/shared_fs_craft_puzzle_decryption_chip.py | obi-two/GameServer | 7d37024e2291a97d49522610cd8f1dbe5666afc2 | [
"MIT"
] | 20 | 2015-02-23T15:11:56.000Z | 2022-03-18T20:56:48.000Z | data/scripts/templates/object/tangible/item/quest/force_sensitive/shared_fs_craft_puzzle_decryption_chip.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | null | null | null | data/scripts/templates/object/tangible/item/quest/force_sensitive/shared_fs_craft_puzzle_decryption_chip.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | 20 | 2015-04-04T16:35:59.000Z | 2022-03-24T14:54:37.000Z | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/item/quest/force_sensitive/shared_fs_craft_puzzle_decryption_chip.iff"
result.attribute_template_id = -1
result.stfName("quest_item_n","fs_craft_puzzle_decryption_chip")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | 29.764706 | 106 | 0.754941 | true | true | |
f71b0616df00ef53eb05436aa01e9020ee471bc6 | 7,155 | py | Python | examples/python-guide/advanced_example.py | sdwivedi/LightGBM | f5ec54fbaca8bd5f72cdecbf755216c6278aafe3 | [
"MIT"
] | 3 | 2020-04-01T15:31:10.000Z | 2020-04-13T12:30:37.000Z | examples/python-guide/advanced_example.py | sdwivedi/LightGBM | f5ec54fbaca8bd5f72cdecbf755216c6278aafe3 | [
"MIT"
] | 1 | 2020-09-01T03:42:10.000Z | 2020-09-01T03:42:10.000Z | examples/python-guide/advanced_example.py | sdwivedi/LightGBM | f5ec54fbaca8bd5f72cdecbf755216c6278aafe3 | [
"MIT"
] | 7 | 2021-04-20T09:27:54.000Z | 2022-03-07T11:41:38.000Z | # coding: utf-8
import json
import lightgbm as lgb
import pandas as pd
import numpy as np
from sklearn.metrics import mean_squared_error
try:
import cPickle as pickle
except BaseException:
import pickle
print('Loading data...')
# load or create your dataset
df_train = pd.read_csv('../binary_classification/binary.train', header=None, sep='\t')
df_test = pd.read_csv('../binary_classification/binary.test', header=None, sep='\t')
W_train = pd.read_csv('../binary_classification/binary.train.weight', header=None)[0]
W_test = pd.read_csv('../binary_classification/binary.test.weight', header=None)[0]
y_train = df_train[0]
y_test = df_test[0]
X_train = df_train.drop(0, axis=1)
X_test = df_test.drop(0, axis=1)
num_train, num_feature = X_train.shape
# create dataset for lightgbm
# if you want to re-use data, remember to set free_raw_data=False
lgb_train = lgb.Dataset(X_train, y_train,
weight=W_train, free_raw_data=False)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train,
weight=W_test, free_raw_data=False)
# specify your configurations as a dict
params = {
'boosting_type': 'gbdt',
'objective': 'binary',
'metric': 'binary_logloss',
'num_leaves': 31,
'learning_rate': 0.05,
'feature_fraction': 0.9,
'bagging_fraction': 0.8,
'bagging_freq': 5,
'verbose': 0
}
# generate feature names
feature_name = ['feature_' + str(col) for col in range(num_feature)]
print('Starting training...')
# feature_name and categorical_feature
gbm = lgb.train(params,
lgb_train,
num_boost_round=10,
valid_sets=lgb_train, # eval training data
feature_name=feature_name,
categorical_feature=[21])
print('Finished first 10 rounds...')
# check feature name
print('7th feature name is:', lgb_train.feature_name[6])
print('Saving model...')
# save model to file
gbm.save_model('model.txt')
print('Dumping model to JSON...')
# dump model to JSON (and save to file)
model_json = gbm.dump_model()
with open('model.json', 'w+') as f:
json.dump(model_json, f, indent=4)
# feature names
print('Feature names:', gbm.feature_name())
# feature importances
print('Feature importances:', list(gbm.feature_importance()))
print('Loading model to predict...')
# load model to predict
bst = lgb.Booster(model_file='model.txt')
# can only predict with the best iteration (or the saving iteration)
y_pred = bst.predict(X_test)
# eval with loaded model
print("The rmse of loaded model's prediction is:", mean_squared_error(y_test, y_pred) ** 0.5)
print('Dumping and loading model with pickle...')
# dump model with pickle
with open('model.pkl', 'wb') as fout:
pickle.dump(gbm, fout)
# load model with pickle to predict
with open('model.pkl', 'rb') as fin:
pkl_bst = pickle.load(fin)
# can predict with any iteration when loaded in pickle way
y_pred = pkl_bst.predict(X_test, num_iteration=7)
# eval with loaded model
print("The rmse of pickled model's prediction is:", mean_squared_error(y_test, y_pred) ** 0.5)
# continue training
# init_model accepts:
# 1. model file name
# 2. Booster()
gbm = lgb.train(params,
lgb_train,
num_boost_round=10,
init_model='model.txt',
valid_sets=lgb_eval)
print('Finished 10 - 20 rounds with model file...')
# decay learning rates
# learning_rates accepts:
# 1. list/tuple with length = num_boost_round
# 2. function(curr_iter)
gbm = lgb.train(params,
lgb_train,
num_boost_round=10,
init_model=gbm,
learning_rates=lambda iter: 0.05 * (0.99 ** iter),
valid_sets=lgb_eval)
print('Finished 20 - 30 rounds with decay learning rates...')
# change other parameters during training
gbm = lgb.train(params,
lgb_train,
num_boost_round=10,
init_model=gbm,
valid_sets=lgb_eval,
callbacks=[lgb.reset_parameter(bagging_fraction=[0.7] * 5 + [0.6] * 5)])
print('Finished 30 - 40 rounds with changing bagging_fraction...')
# self-defined objective function
# f(preds: array, train_data: Dataset) -> grad: array, hess: array
# log likelihood loss
def loglikelihood(preds, train_data):
labels = train_data.get_label()
preds = 1. / (1. + np.exp(-preds))
grad = preds - labels
hess = preds * (1. - preds)
return grad, hess
# self-defined eval metric
# f(preds: array, train_data: Dataset) -> name: string, eval_result: float, is_higher_better: bool
# binary error
# NOTE: when you do customized loss function, the default prediction value is margin
# This may make built-in evalution metric calculate wrong results
# For example, we are doing log likelihood loss, the prediction is score before logistic transformation
# Keep this in mind when you use the customization
def binary_error(preds, train_data):
labels = train_data.get_label()
preds = 1. / (1. + np.exp(-preds))
return 'error', np.mean(labels != (preds > 0.5)), False
gbm = lgb.train(params,
lgb_train,
num_boost_round=10,
init_model=gbm,
fobj=loglikelihood,
feval=binary_error,
valid_sets=lgb_eval)
print('Finished 40 - 50 rounds with self-defined objective function and eval metric...')
# another self-defined eval metric
# f(preds: array, train_data: Dataset) -> name: string, eval_result: float, is_higher_better: bool
# accuracy
# NOTE: when you do customized loss function, the default prediction value is margin
# This may make built-in evalution metric calculate wrong results
# For example, we are doing log likelihood loss, the prediction is score before logistic transformation
# Keep this in mind when you use the customization
def accuracy(preds, train_data):
labels = train_data.get_label()
preds = 1. / (1. + np.exp(-preds))
return 'accuracy', np.mean(labels == (preds > 0.5)), True
gbm = lgb.train(params,
lgb_train,
num_boost_round=10,
init_model=gbm,
fobj=loglikelihood,
feval=lambda preds, train_data: [binary_error(preds, train_data),
accuracy(preds, train_data)],
valid_sets=lgb_eval)
print('Finished 50 - 60 rounds with self-defined objective function '
'and multiple self-defined eval metrics...')
print('Starting a new training job...')
# callback
def reset_metrics():
def callback(env):
lgb_eval_new = lgb.Dataset(X_test, y_test, reference=lgb_train)
if env.iteration - env.begin_iteration == 5:
print('Add a new valid dataset at iteration 5...')
env.model.add_valid(lgb_eval_new, 'new_valid')
callback.before_iteration = True
callback.order = 0
return callback
gbm = lgb.train(params,
lgb_train,
num_boost_round=10,
valid_sets=lgb_train,
callbacks=[reset_metrics()])
print('Finished first 10 rounds with callback function...')
| 32.821101 | 103 | 0.665409 |
import json
import lightgbm as lgb
import pandas as pd
import numpy as np
from sklearn.metrics import mean_squared_error
try:
import cPickle as pickle
except BaseException:
import pickle
print('Loading data...')
df_train = pd.read_csv('../binary_classification/binary.train', header=None, sep='\t')
df_test = pd.read_csv('../binary_classification/binary.test', header=None, sep='\t')
W_train = pd.read_csv('../binary_classification/binary.train.weight', header=None)[0]
W_test = pd.read_csv('../binary_classification/binary.test.weight', header=None)[0]
y_train = df_train[0]
y_test = df_test[0]
X_train = df_train.drop(0, axis=1)
X_test = df_test.drop(0, axis=1)
num_train, num_feature = X_train.shape
lgb_train = lgb.Dataset(X_train, y_train,
weight=W_train, free_raw_data=False)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train,
weight=W_test, free_raw_data=False)
params = {
'boosting_type': 'gbdt',
'objective': 'binary',
'metric': 'binary_logloss',
'num_leaves': 31,
'learning_rate': 0.05,
'feature_fraction': 0.9,
'bagging_fraction': 0.8,
'bagging_freq': 5,
'verbose': 0
}
feature_name = ['feature_' + str(col) for col in range(num_feature)]
print('Starting training...')
gbm = lgb.train(params,
lgb_train,
num_boost_round=10,
valid_sets=lgb_train,
feature_name=feature_name,
categorical_feature=[21])
print('Finished first 10 rounds...')
print('7th feature name is:', lgb_train.feature_name[6])
print('Saving model...')
gbm.save_model('model.txt')
print('Dumping model to JSON...')
model_json = gbm.dump_model()
with open('model.json', 'w+') as f:
json.dump(model_json, f, indent=4)
print('Feature names:', gbm.feature_name())
print('Feature importances:', list(gbm.feature_importance()))
print('Loading model to predict...')
bst = lgb.Booster(model_file='model.txt')
y_pred = bst.predict(X_test)
print("The rmse of loaded model's prediction is:", mean_squared_error(y_test, y_pred) ** 0.5)
print('Dumping and loading model with pickle...')
# dump model with pickle
with open('model.pkl', 'wb') as fout:
pickle.dump(gbm, fout)
# load model with pickle to predict
with open('model.pkl', 'rb') as fin:
pkl_bst = pickle.load(fin)
# can predict with any iteration when loaded in pickle way
y_pred = pkl_bst.predict(X_test, num_iteration=7)
# eval with loaded model
print("The rmse of pickled model's prediction is:", mean_squared_error(y_test, y_pred) ** 0.5)
gbm = lgb.train(params,
lgb_train,
num_boost_round=10,
init_model='model.txt',
valid_sets=lgb_eval)
print('Finished 10 - 20 rounds with model file...')
gbm = lgb.train(params,
lgb_train,
num_boost_round=10,
init_model=gbm,
learning_rates=lambda iter: 0.05 * (0.99 ** iter),
valid_sets=lgb_eval)
print('Finished 20 - 30 rounds with decay learning rates...')
gbm = lgb.train(params,
lgb_train,
num_boost_round=10,
init_model=gbm,
valid_sets=lgb_eval,
callbacks=[lgb.reset_parameter(bagging_fraction=[0.7] * 5 + [0.6] * 5)])
print('Finished 30 - 40 rounds with changing bagging_fraction...')
def loglikelihood(preds, train_data):
labels = train_data.get_label()
preds = 1. / (1. + np.exp(-preds))
grad = preds - labels
hess = preds * (1. - preds)
return grad, hess
def binary_error(preds, train_data):
labels = train_data.get_label()
preds = 1. / (1. + np.exp(-preds))
return 'error', np.mean(labels != (preds > 0.5)), False
gbm = lgb.train(params,
lgb_train,
num_boost_round=10,
init_model=gbm,
fobj=loglikelihood,
feval=binary_error,
valid_sets=lgb_eval)
print('Finished 40 - 50 rounds with self-defined objective function and eval metric...')
def accuracy(preds, train_data):
labels = train_data.get_label()
preds = 1. / (1. + np.exp(-preds))
return 'accuracy', np.mean(labels == (preds > 0.5)), True
gbm = lgb.train(params,
lgb_train,
num_boost_round=10,
init_model=gbm,
fobj=loglikelihood,
feval=lambda preds, train_data: [binary_error(preds, train_data),
accuracy(preds, train_data)],
valid_sets=lgb_eval)
print('Finished 50 - 60 rounds with self-defined objective function '
'and multiple self-defined eval metrics...')
print('Starting a new training job...')
def reset_metrics():
def callback(env):
lgb_eval_new = lgb.Dataset(X_test, y_test, reference=lgb_train)
if env.iteration - env.begin_iteration == 5:
print('Add a new valid dataset at iteration 5...')
env.model.add_valid(lgb_eval_new, 'new_valid')
callback.before_iteration = True
callback.order = 0
return callback
gbm = lgb.train(params,
lgb_train,
num_boost_round=10,
valid_sets=lgb_train,
callbacks=[reset_metrics()])
print('Finished first 10 rounds with callback function...')
| true | true |
f71b069e135a883bde77ccb48cb42ece31feb8eb | 5,298 | py | Python | zerver/management/commands/backup.py | TylerPham2000/zulip | 2e7aaba0dde5517b4a55cb0bd782f009be45e3ba | [
"Apache-2.0"
] | 3 | 2019-02-03T20:46:55.000Z | 2019-03-04T15:44:28.000Z | zerver/management/commands/backup.py | TylerPham2000/zulip | 2e7aaba0dde5517b4a55cb0bd782f009be45e3ba | [
"Apache-2.0"
] | 10 | 2018-11-26T23:16:45.000Z | 2019-02-18T23:17:03.000Z | zerver/management/commands/backup.py | TylerPham2000/zulip | 2e7aaba0dde5517b4a55cb0bd782f009be45e3ba | [
"Apache-2.0"
] | 2 | 2021-07-02T14:15:24.000Z | 2021-08-16T12:31:49.000Z | import os
import re
import tempfile
from argparse import ArgumentParser, RawTextHelpFormatter
from typing import Any
from django.conf import settings
from django.db import connection
from django.utils.timezone import now as timezone_now
from scripts.lib.zulip_tools import TIMESTAMP_FORMAT, parse_os_release, run
from version import ZULIP_VERSION
from zerver.lib.management import ZulipBaseCommand
from zerver.logging_handlers import try_git_describe
class Command(ZulipBaseCommand):
# Fix support for multi-line usage strings
def create_parser(self, *args: Any, **kwargs: Any) -> ArgumentParser:
parser = super().create_parser(*args, **kwargs)
parser.formatter_class = RawTextHelpFormatter
return parser
def add_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument("--output", help="Filename of output tarball")
parser.add_argument("--skip-db", action="store_true", help="Skip database backup")
parser.add_argument("--skip-uploads", action="store_true", help="Skip uploads backup")
def handle(self, *args: Any, **options: Any) -> None:
timestamp = timezone_now().strftime(TIMESTAMP_FORMAT)
with tempfile.TemporaryDirectory(
prefix=f"zulip-backup-{timestamp}-",
) as tmp:
os.mkdir(os.path.join(tmp, "zulip-backup"))
members = []
paths = []
with open(os.path.join(tmp, "zulip-backup", "zulip-version"), "w") as f:
print(ZULIP_VERSION, file=f)
git = try_git_describe()
if git:
print(git, file=f)
members.append("zulip-backup/zulip-version")
with open(os.path.join(tmp, "zulip-backup", "os-version"), "w") as f:
print(
"{ID} {VERSION_ID}".format(**parse_os_release()),
file=f,
)
members.append("zulip-backup/os-version")
with open(os.path.join(tmp, "zulip-backup", "postgres-version"), "w") as f:
print(connection.pg_version, file=f)
members.append("zulip-backup/postgres-version")
if settings.DEVELOPMENT:
members.append(
os.path.join(settings.DEPLOY_ROOT, "zproject", "dev-secrets.conf"),
)
paths.append(
("zproject", os.path.join(settings.DEPLOY_ROOT, "zproject")),
)
else:
members.append("/etc/zulip")
paths.append(("settings", "/etc/zulip"))
if not options["skip_db"]:
pg_dump_command = [
"pg_dump",
"--format=directory",
"--file=" + os.path.join(tmp, "zulip-backup", "database"),
"--host=" + settings.DATABASES["default"]["HOST"],
"--port=" + settings.DATABASES["default"]["PORT"],
"--username=" + settings.DATABASES["default"]["USER"],
"--dbname=" + settings.DATABASES["default"]["NAME"],
"--no-password",
]
os.environ["PGPASSWORD"] = settings.DATABASES["default"]["PASSWORD"]
run(
pg_dump_command,
cwd=tmp,
)
members.append("zulip-backup/database")
if (
not options["skip_uploads"]
and settings.LOCAL_UPLOADS_DIR is not None
and os.path.exists(
os.path.join(settings.DEPLOY_ROOT, settings.LOCAL_UPLOADS_DIR),
)
):
members.append(
os.path.join(settings.DEPLOY_ROOT, settings.LOCAL_UPLOADS_DIR),
)
paths.append(
(
"uploads",
os.path.join(settings.DEPLOY_ROOT, settings.LOCAL_UPLOADS_DIR),
),
)
assert not any("|" in name or "|" in path for name, path in paths)
transform_args = [
r"--transform=s|^{}(/.*)?$|zulip-backup/{}\1|x".format(
re.escape(path),
name.replace("\\", r"\\"),
)
for name, path in paths
]
try:
if options["output"] is None:
tarball_path = tempfile.NamedTemporaryFile(
prefix=f"zulip-backup-{timestamp}-",
suffix=".tar.gz",
delete=False,
).name
else:
tarball_path = options["output"]
run(
[
"tar",
f"--directory={tmp}",
"-cPzf",
tarball_path,
*transform_args,
"--",
*members,
]
)
print(f"Backup tarball written to {tarball_path}")
except BaseException:
if options["output"] is None:
os.unlink(tarball_path)
raise
| 38.391304 | 94 | 0.495281 | import os
import re
import tempfile
from argparse import ArgumentParser, RawTextHelpFormatter
from typing import Any
from django.conf import settings
from django.db import connection
from django.utils.timezone import now as timezone_now
from scripts.lib.zulip_tools import TIMESTAMP_FORMAT, parse_os_release, run
from version import ZULIP_VERSION
from zerver.lib.management import ZulipBaseCommand
from zerver.logging_handlers import try_git_describe
class Command(ZulipBaseCommand):
def create_parser(self, *args: Any, **kwargs: Any) -> ArgumentParser:
parser = super().create_parser(*args, **kwargs)
parser.formatter_class = RawTextHelpFormatter
return parser
def add_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument("--output", help="Filename of output tarball")
parser.add_argument("--skip-db", action="store_true", help="Skip database backup")
parser.add_argument("--skip-uploads", action="store_true", help="Skip uploads backup")
def handle(self, *args: Any, **options: Any) -> None:
timestamp = timezone_now().strftime(TIMESTAMP_FORMAT)
with tempfile.TemporaryDirectory(
prefix=f"zulip-backup-{timestamp}-",
) as tmp:
os.mkdir(os.path.join(tmp, "zulip-backup"))
members = []
paths = []
with open(os.path.join(tmp, "zulip-backup", "zulip-version"), "w") as f:
print(ZULIP_VERSION, file=f)
git = try_git_describe()
if git:
print(git, file=f)
members.append("zulip-backup/zulip-version")
with open(os.path.join(tmp, "zulip-backup", "os-version"), "w") as f:
print(
"{ID} {VERSION_ID}".format(**parse_os_release()),
file=f,
)
members.append("zulip-backup/os-version")
with open(os.path.join(tmp, "zulip-backup", "postgres-version"), "w") as f:
print(connection.pg_version, file=f)
members.append("zulip-backup/postgres-version")
if settings.DEVELOPMENT:
members.append(
os.path.join(settings.DEPLOY_ROOT, "zproject", "dev-secrets.conf"),
)
paths.append(
("zproject", os.path.join(settings.DEPLOY_ROOT, "zproject")),
)
else:
members.append("/etc/zulip")
paths.append(("settings", "/etc/zulip"))
if not options["skip_db"]:
pg_dump_command = [
"pg_dump",
"--format=directory",
"--file=" + os.path.join(tmp, "zulip-backup", "database"),
"--host=" + settings.DATABASES["default"]["HOST"],
"--port=" + settings.DATABASES["default"]["PORT"],
"--username=" + settings.DATABASES["default"]["USER"],
"--dbname=" + settings.DATABASES["default"]["NAME"],
"--no-password",
]
os.environ["PGPASSWORD"] = settings.DATABASES["default"]["PASSWORD"]
run(
pg_dump_command,
cwd=tmp,
)
members.append("zulip-backup/database")
if (
not options["skip_uploads"]
and settings.LOCAL_UPLOADS_DIR is not None
and os.path.exists(
os.path.join(settings.DEPLOY_ROOT, settings.LOCAL_UPLOADS_DIR),
)
):
members.append(
os.path.join(settings.DEPLOY_ROOT, settings.LOCAL_UPLOADS_DIR),
)
paths.append(
(
"uploads",
os.path.join(settings.DEPLOY_ROOT, settings.LOCAL_UPLOADS_DIR),
),
)
assert not any("|" in name or "|" in path for name, path in paths)
transform_args = [
r"--transform=s|^{}(/.*)?$|zulip-backup/{}\1|x".format(
re.escape(path),
name.replace("\\", r"\\"),
)
for name, path in paths
]
try:
if options["output"] is None:
tarball_path = tempfile.NamedTemporaryFile(
prefix=f"zulip-backup-{timestamp}-",
suffix=".tar.gz",
delete=False,
).name
else:
tarball_path = options["output"]
run(
[
"tar",
f"--directory={tmp}",
"-cPzf",
tarball_path,
*transform_args,
"--",
*members,
]
)
print(f"Backup tarball written to {tarball_path}")
except BaseException:
if options["output"] is None:
os.unlink(tarball_path)
raise
| true | true |
f71b06a727f087f2bc7415f3706874d40d893939 | 92 | py | Python | arvestust/serializers/mixins/__init__.py | lehvitus/arvestust | 2d508317b744eaf12a643a398ff95723893a046a | [
"BSD-3-Clause"
] | 1 | 2021-09-17T23:45:27.000Z | 2021-09-17T23:45:27.000Z | arvestust/serializers/mixins/__init__.py | lehvitus/arvestust | 2d508317b744eaf12a643a398ff95723893a046a | [
"BSD-3-Clause"
] | 3 | 2020-07-25T05:40:54.000Z | 2020-08-11T04:01:19.000Z | arvestust/serializers/mixins/__init__.py | lehvitus/arvestust | 2d508317b744eaf12a643a398ff95723893a046a | [
"BSD-3-Clause"
] | null | null | null | # arvestust:serializers:mixins
from .arvestust_record import ArvestustRecordSerializerMixin
| 30.666667 | 60 | 0.891304 |
from .arvestust_record import ArvestustRecordSerializerMixin
| true | true |
f71b08cb676579ac4fac189d7b267ecde83114fa | 784 | py | Python | secret.py | nora0706/gcp_site | 1be5df86f239112e485cb6a089abf14622fd6b55 | [
"MIT"
] | null | null | null | secret.py | nora0706/gcp_site | 1be5df86f239112e485cb6a089abf14622fd6b55 | [
"MIT"
] | null | null | null | secret.py | nora0706/gcp_site | 1be5df86f239112e485cb6a089abf14622fd6b55 | [
"MIT"
] | null | null | null | import os
from google.cloud import secretmanager
class Secret:
def __init__(self):
# Create the Secret Manager client.
self.client = secretmanager.SecretManagerServiceClient()
self.project_id = os.getenv('GOOGLE_CLOUD_PROJECT')
def get_secret(self, secret_id):
# Build the parent name from the project.
name = f"projects/{self.project_id}/secrets/{secret_id}/versions/latest"
# Access the secret version.
response = self.client.access_secret_version(request={"name": name})
# Print the secret payload.
#
# WARNING: Do not print the secret in a production environment - this
# snippet is showing how to access the secret material.
return response.payload.data.decode("UTF-8")
| 34.086957 | 80 | 0.678571 | import os
from google.cloud import secretmanager
class Secret:
def __init__(self):
self.client = secretmanager.SecretManagerServiceClient()
self.project_id = os.getenv('GOOGLE_CLOUD_PROJECT')
def get_secret(self, secret_id):
name = f"projects/{self.project_id}/secrets/{secret_id}/versions/latest"
response = self.client.access_secret_version(request={"name": name})
return response.payload.data.decode("UTF-8")
| true | true |
f71b08ee83709fcec57f27b82915c939fb73d449 | 1,341 | py | Python | tag_generator.py | TREYWANGCQU/blog.reaticle.com | 6caa7cdecdb527c8dec0002d0e431632b9823376 | [
"CC0-1.0"
] | 1 | 2021-07-24T16:54:05.000Z | 2021-07-24T16:54:05.000Z | tag_generator.py | TREYWANGCQU/treywangcqu.github.io | 6caa7cdecdb527c8dec0002d0e431632b9823376 | [
"CC0-1.0"
] | null | null | null | tag_generator.py | TREYWANGCQU/treywangcqu.github.io | 6caa7cdecdb527c8dec0002d0e431632b9823376 | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python
'''
tag_generator.py
Copyright 2017 Long Qian
Contact: lqian8@jhu.edu
This script creates tags for your Jekyll blog hosted by Github page.
No plugins required.
'''
import glob
import os
post_dir = '_posts/'
tag_dir = 'tag/'
filenames = glob.glob(post_dir + '*md')
total_tags = []
for filename in filenames:
f = open(filename, 'r', encoding='utf8')
crawl = False
for line in f:
if crawl:
current_tags = line.strip().split()
if current_tags ==[]:
continue
if current_tags[0]== 'tags:':
total_tags.extend(current_tags[1:])
crawl = False
break
if line.strip() == '---':
if not crawl:
crawl = True
else:
crawl = False
break
f.close()
total_tags = set(total_tags)
old_tags = glob.glob(tag_dir + '*.md')
for tag in old_tags:
os.remove(tag)
if not os.path.exists(tag_dir):
os.makedirs(tag_dir)
for tag in total_tags:
tag_filename = tag_dir + tag + '.md'
f = open(tag_filename, 'a')
write_str = '---\nlayout: tagpage\ntitle: \"Tag: ' + tag + '\"\ntag: ' + tag + '\nrobots: noindex\n---\n'
f.write(write_str)
f.close()
print("Tags generated, count", total_tags.__len__())
| 23.946429 | 109 | 0.56525 |
import glob
import os
post_dir = '_posts/'
tag_dir = 'tag/'
filenames = glob.glob(post_dir + '*md')
total_tags = []
for filename in filenames:
f = open(filename, 'r', encoding='utf8')
crawl = False
for line in f:
if crawl:
current_tags = line.strip().split()
if current_tags ==[]:
continue
if current_tags[0]== 'tags:':
total_tags.extend(current_tags[1:])
crawl = False
break
if line.strip() == '---':
if not crawl:
crawl = True
else:
crawl = False
break
f.close()
total_tags = set(total_tags)
old_tags = glob.glob(tag_dir + '*.md')
for tag in old_tags:
os.remove(tag)
if not os.path.exists(tag_dir):
os.makedirs(tag_dir)
for tag in total_tags:
tag_filename = tag_dir + tag + '.md'
f = open(tag_filename, 'a')
write_str = '---\nlayout: tagpage\ntitle: \"Tag: ' + tag + '\"\ntag: ' + tag + '\nrobots: noindex\n---\n'
f.write(write_str)
f.close()
print("Tags generated, count", total_tags.__len__())
| true | true |
f71b09215d4861e1ba4d13dd94a6b1b30cfd4265 | 950 | py | Python | checkenv.py | SmaleZ/vcl_diayn | b2c47a681675b405d2011bc4a43c3914f3af4ecc | [
"MIT"
] | null | null | null | checkenv.py | SmaleZ/vcl_diayn | b2c47a681675b405d2011bc4a43c3914f3af4ecc | [
"MIT"
] | null | null | null | checkenv.py | SmaleZ/vcl_diayn | b2c47a681675b405d2011bc4a43c3914f3af4ecc | [
"MIT"
] | null | null | null | from env_wrapper import DIAYN_Skill_Wrapper
from stable_baselines3 import SAC
from stable_baselines3.common.env_checker import check_env
import malmoenv
import gym
from pathlib import Path
xml = Path('/home/zilizhang/DIAYN/mobchase_single_agent.xml').read_text()
env = malmoenv.make()
env.init(xml, 9000)
total_timesteps = 3000
num_skills = 3
print(env.reward_range)
env = DIAYN_Skill_Wrapper(env, num_skills=num_skills)
#
# #check_env(env)
# obs = env.reset()
# env = gym.make('Walker2DMuJoCoEnv-v0')
n_steps = 10
obs = env.reset()
done = False
for _ in range(n_steps):
# Random action
if not done:
action = env.action_space.sample()
obs, reward, done, info = env.step(action)
print("shape of observation", obs.shape)
print("current action:", env.action_space)
else:
print("has done")
# action = env.action_space.sample()
# obs, reward, done, info = env.step(action)
# print(reward)
| 27.142857 | 73 | 0.709474 | from env_wrapper import DIAYN_Skill_Wrapper
from stable_baselines3 import SAC
from stable_baselines3.common.env_checker import check_env
import malmoenv
import gym
from pathlib import Path
xml = Path('/home/zilizhang/DIAYN/mobchase_single_agent.xml').read_text()
env = malmoenv.make()
env.init(xml, 9000)
total_timesteps = 3000
num_skills = 3
print(env.reward_range)
env = DIAYN_Skill_Wrapper(env, num_skills=num_skills)
obs = env.reset()
done = False
for _ in range(n_steps):
if not done:
action = env.action_space.sample()
obs, reward, done, info = env.step(action)
print("shape of observation", obs.shape)
print("current action:", env.action_space)
else:
print("has done")
| true | true |
f71b0955c31f832ac1f4829e34136ad342dd11b3 | 11,350 | py | Python | hoomd/md/charge.py | PetersResearchGroup/PCND | 584768cc683a6df0152ead69b567d05b781aab2b | [
"BSD-3-Clause"
] | null | null | null | hoomd/md/charge.py | PetersResearchGroup/PCND | 584768cc683a6df0152ead69b567d05b781aab2b | [
"BSD-3-Clause"
] | null | null | null | hoomd/md/charge.py | PetersResearchGroup/PCND | 584768cc683a6df0152ead69b567d05b781aab2b | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2009-2017 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
# Maintainer: joaander / All Developers are free to add commands for new features
R""" Electrostatic potentials.
Charged interactions are usually long ranged, and for computational efficiency this is split
into two parts, one part computed in real space and on in Fourier space. You don't need to worry about this
implementation detail, however, as charge commands in hoomd automatically initialize and configure both the long
and short range parts.
Only one method of computing charged interactions should be used at a time. Otherwise, they would add together and
produce incorrect results.
"""
from hoomd.md import force;
from hoomd import _hoomd
from hoomd.md import _md
from hoomd.md import pair;
from hoomd.md import nlist as nl # to avoid naming conflicts
import hoomd;
import math;
import sys;
from math import sqrt
class pppm(force._force):
R""" Long-range electrostatics computed with the PPPM method.
Args:
group (:py:mod:`hoomd.group`): Group on which to apply long range PPPM forces. The short range part is always applied between
all particles.
nlist (:py:mod:`hoomd.md.nlist`): Neighbor list
`D. LeBard et. al. 2012 <http://dx.doi.org/10.1039/c1sm06787g>`_ describes the PPPM implementation details in
HOOMD-blue. Please cite it if you utilize the PPPM functionality in your work.
:py:class:`pppm` specifies **both** the long-ranged **and** short range parts of the electrostatic
force should be computed between all charged particles in the simulation. In other words, :py:class:`pppm`
initializes and sets all parameters for its own :py:class:`hoomd.md.pair.ewald`, so do not specify an additional one.
The command supports additional screening of interactions, according to the Ewald summation for Yukawa potentials.
This is useful if one wants to compute a screened interaction (i.e. a solution to the linerized Poisson-Boltzmann
equation), yet the cut-off radius is so large that the computation with a purely short-ranged potential would become
inefficient. In that case, the inverse Debye screening length can be supplied using :py:meth`set_params()`.
Also see `Salin, G and Caillol, J. 2000, <http://dx.doi.org/10.1063/1.1326477>`.
Parameters:
- Nx - Number of grid points in x direction
- Ny - Number of grid points in y direction
- Nz - Number of grid points in z direction
- order - Number of grid points in each direction to assign charges to
- :math:`r_{\mathrm{cut}}` - Cutoff for the short-ranged part of the electrostatics calculation
Parameters Nx, Ny, Nz, order, :math:`r_{\mathrm{cut}}` must be set using
:py:meth:`set_params()` before any :py:func:`hoomd.run()` can take place.
See :ref:`page-units` for information on the units assigned to charges in hoomd.
Note:
:py:class:`pppm` takes a particle group as an option. This should be the group of all charged particles
(:py:func:`hoomd.group.charged`). However, note that this group is static and determined at the time
:py:class:`pppm` is specified. If you are going to add charged particles at a later point in the simulation
with the data access API, ensure that this group includes those particles as well.
.. important::
In MPI simulations, the number of grid point along every dimensions must be a power of two.
Example::
charged = group.charged();
pppm = charge.pppm(group=charged)
"""
def __init__(self, group, nlist):
hoomd.util.print_status_line();
# initialize the base class
force._force.__init__(self);
# register the citation
c = hoomd.cite.article(cite_key='dnlebard2012',
author=['D N LeBard', 'B G Levine', 'S A Barr', 'A Jusufi', 'S Sanders', 'M L Klein', 'A Z Panagiotopoulos'],
title='Self-assembly of coarse-grained ionic surfactants accelerated by graphics processing units',
journal='Journal of Computational Physics',
volume=8,
number=8,
pages='2385-2397',
month='',
year='2012',
doi='10.1039/c1sm06787g',
feature='PPPM')
hoomd.cite._ensure_global_bib().add(c)
# create the c++ mirror class
# PPPM itself doesn't really need a neighbor list, so subscribe call back as None
self.nlist = nlist
self.nlist.subscribe(lambda : None)
self.nlist.update_rcut()
if not hoomd.context.exec_conf.isCUDAEnabled():
self.cpp_force = _md.PPPMForceCompute(hoomd.context.current.system_definition, self.nlist.cpp_nlist, group.cpp_group);
else:
self.cpp_force = _md.PPPMForceComputeGPU(hoomd.context.current.system_definition, self.nlist.cpp_nlist, group.cpp_group);
hoomd.context.current.system.addCompute(self.cpp_force, self.force_name);
# error check flag - must be set to true by set_params in order for the run() to commence
self.params_set = False;
# initialize the short range part of electrostatics
hoomd.util.quiet_status();
self.ewald = pair.ewald(r_cut = False, nlist = self.nlist);
hoomd.util.unquiet_status();
# overrride disable and enable to work with both of the forces
def disable(self, log=False):
hoomd.util.print_status_line();
hoomd.util.quiet_status();
force._force.disable(self, log);
self.ewald.disable(log);
hoomd.util.unquiet_status();
def enable(self):
hoomd.util.print_status_line();
hoomd.util.quiet_status();
force._force.enable(self);
self.ewald.enable();
hoomd.util.unquiet_status();
def set_params(self, Nx, Ny, Nz, order, rcut, alpha = 0.0):
    """ Sets PPPM parameters.

    Determines the Ewald splitting parameter kappa by bisecting the root of
    diffpr() (the k-space vs real-space error balance), then configures the
    short-range ewald pair coefficients for every type pair and forwards the
    parameters to the C++ force.

    Args:
        Nx (int): Number of grid points in x direction
        Ny (int): Number of grid points in y direction
        Nz (int): Number of grid points in z direction
        order (int): Number of grid points in each direction to assign charges to
        rcut (float): Cutoff for the short-ranged part of the electrostatics calculation
        alpha (float, **optional**): Debye screening parameter (in units 1/distance)
    .. versionadded:: 2.1
    Examples::
        pppm.set_params(Nx=64, Ny=64, Nz=64, order=6, rcut=2.0)
    Note that the Fourier transforms are much faster for number of grid points of the form 2^N.

    Raises:
        RuntimeError: if the system is not 3D, the root is not bracketed,
            or the bisection does not converge.
    """
    hoomd.util.print_status_line();
    # PPPM is only defined for 3D boxes
    if hoomd.context.current.system_definition.getNDimensions() != 3:
        hoomd.context.msg.error("System must be 3 dimensional\n");
        raise RuntimeError("Cannot compute PPPM");
    self.params_set = True;
    # get sum of charges and of squared charges
    # NOTE(review): q is never used below - confirm it can be dropped.
    q = self.cpp_force.getQSum();
    q2 = self.cpp_force.getQ2Sum();
    N = hoomd.context.current.system_definition.getParticleData().getNGlobal()
    box = hoomd.context.current.system_definition.getParticleData().getGlobalBox()
    Lx = box.getL().x
    Ly = box.getL().y
    Lz = box.getL().z
    # grid spacing along each axis
    hx = Lx/Nx
    hy = Ly/Ny
    hz = Lz/Nz
    # bracket the root of diffpr(kappa) between gew1 = 0 and gew2 = 10/hmin
    gew1 = 0.0
    kappa = gew1
    f = diffpr(hx, hy, hz, Lx, Ly, Lz, N, order, kappa, q2, rcut)
    hmin = min(hx, hy, hz)
    gew2 = 10.0/hmin
    kappa = gew2
    fmid = diffpr(hx, hy, hz, Lx, Ly, Lz, N, order, kappa, q2, rcut)
    # bisection requires a sign change over the bracket
    if f*fmid >= 0.0:
        hoomd.context.msg.error("f*fmid >= 0.0\n");
        raise RuntimeError("Cannot compute PPPM");
    # orient the bracket so that dgew points from the negative-f edge (rtb)
    # towards the other edge
    if f < 0.0:
        dgew=gew2-gew1
        rtb = gew1
    else:
        dgew=gew1-gew2
        rtb = gew2
    ncount = 0
    # classic bisection: halve the interval until it is below 1e-5 wide
    while math.fabs(dgew) > 0.00001 and fmid != 0.0:
        dgew *= 0.5
        kappa = rtb + dgew
        fmid = diffpr(hx, hy, hz, Lx, Ly, Lz, N, order, kappa, q2, rcut)
        if fmid <= 0.0:
            rtb = kappa
        ncount += 1
        # NOTE(review): ncount is an int compared against the float 10000.0;
        # it works, but a plain 10000 would be clearer.
        if ncount > 10000.0:
            hoomd.context.msg.error("kappa not converging\n");
            raise RuntimeError("Cannot compute PPPM");
    # apply the converged kappa to the short-range ewald part for every
    # (type_i, type_j) pair
    ntypes = hoomd.context.current.system_definition.getParticleData().getNTypes();
    type_list = [];
    for i in range(0,ntypes):
        type_list.append(hoomd.context.current.system_definition.getParticleData().getNameByType(i));
    hoomd.util.quiet_status();
    for i in range(0,ntypes):
        for j in range(0,ntypes):
            self.ewald.pair_coeff.set(type_list[i], type_list[j], kappa = kappa, alpha = alpha, r_cut=rcut)
    hoomd.util.unquiet_status();
    # set the parameters for the appropriate type
    self.cpp_force.setParams(Nx, Ny, Nz, order, kappa, rcut, alpha);
def update_coeffs(self):
    """Internal run() hook: verify that set_params() has been called.

    Raises:
        RuntimeError: if PPPM parameters were never set.
    """
    if not self.params_set:
        hoomd.context.msg.error("Coefficients for PPPM are not set. Call set_coeff prior to run()\n");
        raise RuntimeError("Error initializing run");
    # warn when diameter shifting is active, since (per the message below)
    # PPPM may then not correct for all excluded interactions
    if self.nlist.cpp_nlist.getDiameterShift():
        hoomd.context.msg.warning("Neighbor diameter shifting is enabled, PPPM may not correct for all excluded interactions\n");
def diffpr(hx, hy, hz, xprd, yprd, zprd, N, order, kappa, q2, rcut):
    """Return the k-space RMS error estimate minus the real-space one for
    a given Ewald splitting parameter kappa.

    The root of this function in kappa balances the two error
    contributions; set_params() locates it by bisection.
    """
    # per-axis k-space error contributions
    err_x = rms(hx, xprd, N, order, kappa, q2)
    err_y = rms(hy, yprd, N, order, kappa, q2)
    err_z = rms(hz, zprd, N, order, kappa, q2)
    kspace_prec = math.sqrt(err_x * err_x + err_y * err_y + err_z * err_z) / math.sqrt(3.0)
    real_prec = 2.0 * q2 * math.exp(-kappa * kappa * rcut * rcut) / math.sqrt(N * rcut * xprd * yprd * zprd)
    return kspace_prec - real_prec
def rms(h, prd, N, order, kappa, q2):
    """Estimate the RMS k-space force error along one box axis.

    Args:
        h: grid spacing along this axis.
        prd: box length along this axis.
        N: total number of particles.
        order: charge-assignment (interpolation) order, 1 <= order <= 7.
        kappa: Ewald splitting parameter.
        q2: sum of squared charges.

    Returns:
        The truncated-series RMS error estimate used by diffpr()/set_params().
    """
    # Expansion coefficients acons[order][m] of the error series; the row
    # index is the interpolation order (1..7), row 0 is an unused placeholder.
    acons = (
        (0.0,),
        (2.0 / 3.0,),
        (1.0 / 50.0, 5.0 / 294.0),
        (1.0 / 588.0, 7.0 / 1440.0, 21.0 / 3872.0),
        (1.0 / 4320.0, 3.0 / 1936.0, 7601.0 / 2271360.0, 143.0 / 28800.0),
        (1.0 / 23232.0, 7601.0 / 13628160.0, 143.0 / 69120.0,
         517231.0 / 106536960.0, 106640677.0 / 11737571328.0),
        (691.0 / 68140800.0, 13.0 / 57600.0, 47021.0 / 35512320.0,
         9694607.0 / 2095994880.0, 733191589.0 / 59609088000.0,
         326190917.0 / 11700633600.0),
        (1.0 / 345600.0, 3617.0 / 35512320.0, 745739.0 / 838397952.0,
         56399353.0 / 12773376000.0, 25091609.0 / 1560084480.0,
         1755948832039.0 / 36229939200000.0, 4887769399.0 / 37838389248.0),
    )
    # Truncated power series in (h*kappa)^2.  Named 'series' so the builtin
    # sum() is not shadowed (the original used 'sum' as a local).
    series = 0.0
    for m in range(order):
        series += acons[order][m] * pow(h * kappa, 2.0 * m)
    return q2 * pow(h * kappa, order) * math.sqrt(
        kappa * prd * math.sqrt(2.0 * math.pi) * series / N) / prd / prd
| 40.974729 | 134 | 0.628458 |
from hoomd.md import force;
from hoomd import _hoomd
from hoomd.md import _md
from hoomd.md import pair;
from hoomd.md import nlist as nl
import hoomd;
import math;
import sys;
from math import sqrt
class pppm(force._force):
    """Long-range electrostatics via the PPPM method.

    Couples a k-space PPPM force (C++ side) with a short-range pair.ewald
    force on the same neighbor list.  set_params() must be called before
    run(); update_coeffs() enforces this.
    """
    def __init__(self, group, nlist):
        """Create the C++ PPPM force for *group* and its short-range ewald pair."""
        hoomd.util.print_status_line();
        # initialize the base class
        force._force.__init__(self);
        # register the citation for the PPPM feature
        c = hoomd.cite.article(cite_key='dnlebard2012',
                               author=['D N LeBard', 'B G Levine', 'S A Barr', 'A Jusufi', 'S Sanders', 'M L Klein', 'A Z Panagiotopoulos'],
                               title='Self-assembly of coarse-grained ionic surfactants accelerated by graphics processing units',
                               journal='Journal of Computational Physics',
                               volume=8,
                               number=8,
                               pages='2385-2397',
                               month='',
                               year='2012',
                               doi='10.1039/c1sm06787g',
                               feature='PPPM')
        hoomd.cite._ensure_global_bib().add(c)
        # PPPM itself does not consume the neighbor list; subscribe a no-op callback
        self.nlist = nlist
        self.nlist.subscribe(lambda : None)
        self.nlist.update_rcut()
        # pick the CPU or GPU implementation based on the execution configuration
        if not hoomd.context.exec_conf.isCUDAEnabled():
            self.cpp_force = _md.PPPMForceCompute(hoomd.context.current.system_definition, self.nlist.cpp_nlist, group.cpp_group);
        else:
            self.cpp_force = _md.PPPMForceComputeGPU(hoomd.context.current.system_definition, self.nlist.cpp_nlist, group.cpp_group);
        hoomd.context.current.system.addCompute(self.cpp_force, self.force_name);
        # error check flag - must be set to true by set_params in order for the run() to commence
        self.params_set = False;
        # initialize the short range part of electrostatics
        hoomd.util.quiet_status();
        self.ewald = pair.ewald(r_cut = False, nlist = self.nlist);
        hoomd.util.unquiet_status();
    # overrride disable and enable to work with both of the forces
    def disable(self, log=False):
        """Disable both the PPPM part and the paired short-range ewald force."""
        hoomd.util.print_status_line();
        hoomd.util.quiet_status();
        force._force.disable(self, log);
        self.ewald.disable(log);
        hoomd.util.unquiet_status();
    def enable(self):
        """Enable both the PPPM part and the paired short-range ewald force."""
        hoomd.util.print_status_line();
        hoomd.util.quiet_status();
        force._force.enable(self);
        self.ewald.enable();
        hoomd.util.unquiet_status();
    def set_params(self, Nx, Ny, Nz, order, rcut, alpha = 0.0):
        """Set PPPM parameters; finds kappa by bisecting the root of diffpr().

        Args:
            Nx, Ny, Nz (int): grid points along each axis.
            order (int): charge-assignment order.
            rcut (float): short-range cutoff.
            alpha (float): Debye screening parameter (1/distance).

        Raises:
            RuntimeError: if the system is not 3D, the root is not
                bracketed, or the bisection does not converge.
        """
        hoomd.util.print_status_line();
        if hoomd.context.current.system_definition.getNDimensions() != 3:
            hoomd.context.msg.error("System must be 3 dimensional\n");
            raise RuntimeError("Cannot compute PPPM");
        self.params_set = True;
        # get sum of charges and of squared charges
        # NOTE(review): q is never used below - confirm it can be dropped.
        q = self.cpp_force.getQSum();
        q2 = self.cpp_force.getQ2Sum();
        N = hoomd.context.current.system_definition.getParticleData().getNGlobal()
        box = hoomd.context.current.system_definition.getParticleData().getGlobalBox()
        Lx = box.getL().x
        Ly = box.getL().y
        Lz = box.getL().z
        # grid spacing along each axis
        hx = Lx/Nx
        hy = Ly/Ny
        hz = Lz/Nz
        # bracket the root of diffpr(kappa) between 0 and 10/hmin
        gew1 = 0.0
        kappa = gew1
        f = diffpr(hx, hy, hz, Lx, Ly, Lz, N, order, kappa, q2, rcut)
        hmin = min(hx, hy, hz)
        gew2 = 10.0/hmin
        kappa = gew2
        fmid = diffpr(hx, hy, hz, Lx, Ly, Lz, N, order, kappa, q2, rcut)
        # bisection requires a sign change over the bracket
        if f*fmid >= 0.0:
            hoomd.context.msg.error("f*fmid >= 0.0\n");
            raise RuntimeError("Cannot compute PPPM");
        if f < 0.0:
            dgew=gew2-gew1
            rtb = gew1
        else:
            dgew=gew1-gew2
            rtb = gew2
        ncount = 0
        # classic bisection: halve the interval until below 1e-5 wide
        while math.fabs(dgew) > 0.00001 and fmid != 0.0:
            dgew *= 0.5
            kappa = rtb + dgew
            fmid = diffpr(hx, hy, hz, Lx, Ly, Lz, N, order, kappa, q2, rcut)
            if fmid <= 0.0:
                rtb = kappa
            ncount += 1
            if ncount > 10000.0:
                hoomd.context.msg.error("kappa not converging\n");
                raise RuntimeError("Cannot compute PPPM");
        # apply the converged kappa to every (type_i, type_j) ewald pair
        ntypes = hoomd.context.current.system_definition.getParticleData().getNTypes();
        type_list = [];
        for i in range(0,ntypes):
            type_list.append(hoomd.context.current.system_definition.getParticleData().getNameByType(i));
        hoomd.util.quiet_status();
        for i in range(0,ntypes):
            for j in range(0,ntypes):
                self.ewald.pair_coeff.set(type_list[i], type_list[j], kappa = kappa, alpha = alpha, r_cut=rcut)
        hoomd.util.unquiet_status();
        # set the parameters for the appropriate type
        self.cpp_force.setParams(Nx, Ny, Nz, order, kappa, rcut, alpha);
    def update_coeffs(self):
        """Internal run() hook: verify that set_params() has been called."""
        if not self.params_set:
            hoomd.context.msg.error("Coefficients for PPPM are not set. Call set_coeff prior to run()\n");
            raise RuntimeError("Error initializing run");
        # warn: with diameter shifting, PPPM may not correct all exclusions
        if self.nlist.cpp_nlist.getDiameterShift():
            hoomd.context.msg.warning("Neighbor diameter shifting is enabled, PPPM may not correct for all excluded interactions\n");
def diffpr(hx, hy, hz, xprd, yprd, zprd, N, order, kappa, q2, rcut):
    """Difference between the k-space and real-space RMS error estimates
    at splitting parameter kappa; set_params() bisects this for its root.
    """
    # per-axis k-space error contributions
    err_x = rms(hx, xprd, N, order, kappa, q2)
    err_y = rms(hy, yprd, N, order, kappa, q2)
    err_z = rms(hz, zprd, N, order, kappa, q2)
    kspace_prec = math.sqrt(err_x * err_x + err_y * err_y + err_z * err_z) / math.sqrt(3.0)
    real_prec = 2.0 * q2 * math.exp(-kappa * kappa * rcut * rcut) / math.sqrt(N * rcut * xprd * yprd * zprd)
    return kspace_prec - real_prec
def rms(h, prd, N, order, kappa, q2):
    """Estimate the RMS k-space force error along one box axis.

    Args:
        h: grid spacing along this axis.
        prd: box length along this axis.
        N: total number of particles.
        order: charge-assignment (interpolation) order, 1 <= order <= 7.
        kappa: Ewald splitting parameter.
        q2: sum of squared charges.

    Returns:
        The truncated-series RMS error estimate used by diffpr()/set_params().
    """
    # Expansion coefficients acons[order][m]; row index is the interpolation
    # order (1..7), row 0 is an unused placeholder.
    acons = (
        (0.0,),
        (2.0 / 3.0,),
        (1.0 / 50.0, 5.0 / 294.0),
        (1.0 / 588.0, 7.0 / 1440.0, 21.0 / 3872.0),
        (1.0 / 4320.0, 3.0 / 1936.0, 7601.0 / 2271360.0, 143.0 / 28800.0),
        (1.0 / 23232.0, 7601.0 / 13628160.0, 143.0 / 69120.0,
         517231.0 / 106536960.0, 106640677.0 / 11737571328.0),
        (691.0 / 68140800.0, 13.0 / 57600.0, 47021.0 / 35512320.0,
         9694607.0 / 2095994880.0, 733191589.0 / 59609088000.0,
         326190917.0 / 11700633600.0),
        (1.0 / 345600.0, 3617.0 / 35512320.0, 745739.0 / 838397952.0,
         56399353.0 / 12773376000.0, 25091609.0 / 1560084480.0,
         1755948832039.0 / 36229939200000.0, 4887769399.0 / 37838389248.0),
    )
    # Truncated power series in (h*kappa)^2; named 'series' so the builtin
    # sum() is not shadowed (the original used 'sum' as a local).
    series = 0.0
    for m in range(order):
        series += acons[order][m] * pow(h * kappa, 2.0 * m)
    return q2 * pow(h * kappa, order) * math.sqrt(
        kappa * prd * math.sqrt(2.0 * math.pi) * series / N) / prd / prd
| true | true |
f71b09b86f70b649fd3f792fbe2c687f37f5e62d | 3,101 | py | Python | predict.py | afonchikk/Audio-Classification | 6acc7015ec847a64338f6300dca608a0752ba554 | [
"MIT"
] | null | null | null | predict.py | afonchikk/Audio-Classification | 6acc7015ec847a64338f6300dca608a0752ba554 | [
"MIT"
] | null | null | null | predict.py | afonchikk/Audio-Classification | 6acc7015ec847a64338f6300dca608a0752ba554 | [
"MIT"
] | null | null | null | from tensorflow.keras.models import load_model
from clean import downsample_mono, envelope
from kapre.time_frequency import STFT, Magnitude, ApplyFilterbank, MagnitudeToDecibel
from sklearn.preprocessing import LabelEncoder
import numpy as np
from glob import glob
import argparse
import os
import pandas as pd
from tqdm import tqdm
def make_prediction(args):
    """Run the trained model over every .wav file under args.src_dir and
    save the per-file mean class probabilities to logs/<args.pred_fn>.npy.

    Args:
        args: parsed argparse namespace with model_fn, pred_fn, src_dir,
            dt (window length, seconds), sr (sample rate) and threshold
            (envelope magnitude threshold) attributes.
    """
    # Load the Keras model; kapre layers must be registered as custom objects.
    model = load_model(args.model_fn,
                       custom_objects={'STFT': STFT,
                                       'Magnitude': Magnitude,
                                       'ApplyFilterbank': ApplyFilterbank,
                                       'MagnitudeToDecibel': MagnitudeToDecibel})
    # Collect the audio files; class names are the sub-directory names.
    wav_paths = glob('{}/**'.format(args.src_dir), recursive=True)
    # Bug fix: match the extension explicitly instead of the substring test
    # `'.wav' in x`, which also matched paths that merely contain ".wav".
    wav_paths = sorted(x.replace(os.sep, '/') for x in wav_paths
                       if x.endswith('.wav'))
    classes = sorted(os.listdir(args.src_dir))
    # Window length in samples is loop-invariant; hoist it out of the loop.
    # (Also dropped the unused LabelEncoder/y_true dead code.)
    step = int(args.sr * args.dt)
    results = []
    for wav_fn in tqdm(wav_paths):
        rate, wav = downsample_mono(wav_fn, args.sr)
        mask, env = envelope(wav, rate, threshold=args.threshold)
        clean_wav = wav[mask]
        # Split the cleaned signal into fixed-size windows, zero-padding the
        # final partial window so every sample has shape (step, 1).
        batch = []
        for i in range(0, clean_wav.shape[0], step):
            sample = clean_wav[i:i + step].reshape(-1, 1)
            if sample.shape[0] < step:
                tmp = np.zeros(shape=(step, 1), dtype=np.float32)
                tmp[:sample.shape[0], :] = sample
                sample = tmp
            batch.append(sample)
        X_batch = np.array(batch, dtype=np.float32)
        # Average the window-level probabilities into one file-level vector.
        y_prob = model.predict(X_batch)
        y_mean = np.mean(y_prob, axis=0)
        pred_class = int(np.argmax(y_mean))
        real_class = os.path.dirname(wav_fn).split('/')[-1]
        print('Actual class: {}, Predicted class: {}'.format(real_class, classes[pred_class]))
        results.append(y_mean)
    np.save(os.path.join('logs', args.pred_fn), np.array(results))
if __name__ == '__main__':
    # Command-line entry point: parse options and run the prediction pass.
    parser = argparse.ArgumentParser(description='Audio Classification Training')
    parser.add_argument('--model_fn', type=str, default='models/lstm.h5',
                        help='model file to make predictions')
    parser.add_argument('--pred_fn', type=str, default='y_pred',
                        help='fn to write predictions in logs dir')
    parser.add_argument('--src_dir', type=str, default='wavfiles',
                        help='directory containing wavfiles to predict')
    parser.add_argument('--dt', type=float, default=1.0,
                        help='time in seconds to sample audio')
    parser.add_argument('--sr', type=int, default=16000,
                        help='sample rate of clean audio')
    # Bug fix: threshold was declared type=str, so a command-line override
    # produced a string where a numeric envelope threshold is expected;
    # the default (20) was already an int.
    parser.add_argument('--threshold', type=int, default=20,
                        help='threshold magnitude for np.int16 dtype')
    args, _ = parser.parse_known_args()
    make_prediction(args)
| 42.479452 | 90 | 0.609158 | from tensorflow.keras.models import load_model
from clean import downsample_mono, envelope
from kapre.time_frequency import STFT, Magnitude, ApplyFilterbank, MagnitudeToDecibel
from sklearn.preprocessing import LabelEncoder
import numpy as np
from glob import glob
import argparse
import os
import pandas as pd
from tqdm import tqdm
def make_prediction(args):
    """Run the trained model over every .wav file under args.src_dir and
    save the per-file mean class probabilities to logs/<args.pred_fn>.npy.

    Args:
        args: parsed argparse namespace with model_fn, pred_fn, src_dir,
            dt, sr and threshold attributes.
    """
    # load the Keras model; kapre layers must be registered as custom objects
    model = load_model(args.model_fn,
                       custom_objects={'STFT': STFT,
                                       'Magnitude': Magnitude,
                                       'ApplyFilterbank': ApplyFilterbank,
                                       'MagnitudeToDecibel': MagnitudeToDecibel})
    # collect the audio files; class names are the sub-directory names
    wav_paths = glob('{}/**'.format(args.src_dir), recursive=True)
    # NOTE(review): substring test also matches paths merely containing
    # ".wav"; endswith('.wav') would be stricter.
    wav_paths = sorted([x.replace(os.sep, '/') for x in wav_paths if '.wav' in x])
    classes = sorted(os.listdir(args.src_dir))
    # NOTE(review): labels/le/y_true are computed but never used below.
    labels = [os.path.split(x)[0].split('/')[-1] for x in wav_paths]
    le = LabelEncoder()
    y_true = le.fit_transform(labels)
    results = []
    for z, wav_fn in tqdm(enumerate(wav_paths), total=len(wav_paths)):
        rate, wav = downsample_mono(wav_fn, args.sr)
        mask, env = envelope(wav, rate, threshold=args.threshold)
        clean_wav = wav[mask]
        # window length in samples
        step = int(args.sr * args.dt)
        # split into fixed-size windows, zero-padding the final partial one
        batch = []
        for i in range(0, clean_wav.shape[0], step):
            sample = clean_wav[i:i + step]
            sample = sample.reshape(-1, 1)
            if sample.shape[0] < step:
                tmp = np.zeros(shape=(step, 1), dtype=np.float32)
                tmp[:sample.shape[0], :] = sample.flatten().reshape(-1, 1)
                sample = tmp
            batch.append(sample)
        X_batch = np.array(batch, dtype=np.float32)
        # average window-level probabilities into one file-level vector
        y_pred = model.predict(X_batch)
        y_mean = np.mean(y_pred, axis=0)
        y_pred = np.argmax(y_mean)
        real_class = os.path.dirname(wav_fn).split('/')[-1]
        print('Actual class: {}, Predicted class: {}'.format(real_class, classes[y_pred]))
        results.append(y_mean)
    np.save(os.path.join('logs', args.pred_fn), np.array(results))
if __name__ == '__main__':
    # Command-line entry point: parse options and run the prediction pass.
    parser = argparse.ArgumentParser(description='Audio Classification Training')
    parser.add_argument('--model_fn', type=str, default='models/lstm.h5',
                        help='model file to make predictions')
    parser.add_argument('--pred_fn', type=str, default='y_pred',
                        help='fn to write predictions in logs dir')
    parser.add_argument('--src_dir', type=str, default='wavfiles',
                        help='directory containing wavfiles to predict')
    parser.add_argument('--dt', type=float, default=1.0,
                        help='time in seconds to sample audio')
    parser.add_argument('--sr', type=int, default=16000,
                        help='sample rate of clean audio')
    # Bug fix: threshold was declared type=str, so a command-line override
    # produced a string where a numeric envelope threshold is expected;
    # the default (20) was already an int.
    parser.add_argument('--threshold', type=int, default=20,
                        help='threshold magnitude for np.int16 dtype')
    args, _ = parser.parse_known_args()
    make_prediction(args)
| true | true |
f71b09c92a4cd91f0806d99eac65622c1baec8ea | 9,692 | py | Python | simglucose/controller/PaperController.py | electr0de/APControllerProjectGit | 141ac08e716d6ac8cebe7b144b744744024d8939 | [
"MIT"
] | null | null | null | simglucose/controller/PaperController.py | electr0de/APControllerProjectGit | 141ac08e716d6ac8cebe7b144b744744024d8939 | [
"MIT"
] | null | null | null | simglucose/controller/PaperController.py | electr0de/APControllerProjectGit | 141ac08e716d6ac8cebe7b144b744744024d8939 | [
"MIT"
] | null | null | null | from functools import partial
from pprint import pprint
import matplotlib.pyplot as plt
# import test2
from simglucose.controller.base import Controller
#from datetime import datetime, timedelta, time
import numpy as np
import math
# Maximum relative change (5%) allowed per update of a basal/bolus rate.
percent_value = 0.05


# PEP 8 (E731): named callables are defined with def, not lambda assignment.
def sign(x):
    """Return +1.0 or -1.0 matching the sign of x."""
    return math.copysign(1, x)


def normalize_f(x):
    """Linearly map a glucose reading from [39, 600] (presumably mg/dL -
    confirm) onto [0, 1]."""
    return (x - 39) / (600 - 39)
class PaperRLController(Controller):
    """Actor-critic reinforcement-learning controller for closed-loop
    insulin dosing: adapts the basal rate and the per-meal bolus
    (carbohydrate-ratio) values using hyper-/hypoglycaemia features
    extracted from recent glucose readings.
    """
    def __init__(self, a_hyper=1, a_hypo=10, current_breakfast_bolus=0.0, current_lunch_bolus=0.0,
                 current_dinner_bolus=0.0, current_basal_rate=0.0, current_snack_bolus=0.0, init_state=None):
        super().__init__(init_state)
        np.random.seed(1)
        self.a_hyper = a_hyper
        # NOTE(review): stored as 'hypo' while its counterpart is 'a_hyper';
        # neither attribute is read elsewhere in this class - confirm naming.
        self.hypo = a_hypo
        # normalized band edges: GL ~ 90, GH ~ 150 (presumably mg/dL)
        self.GL = normalize_f(90)
        self.GH = normalize_f(150)
        self.current_basal_rate = current_basal_rate
        self.current_breakfast_bolus = current_breakfast_bolus  # bolus means IC ratio
        self.current_lunch_bolus = current_lunch_bolus
        self.current_dinner_bolus = current_dinner_bolus
        # self.current_snack_bolus = current_snack_bolus
        # actor weights (theta) for the basal and bolus policies
        self.basal_theta = []
        self.bolus_theta = []
        # np.random.seed(2)
        # self.bolus_theta = np.random.rand(2).tolist()
        # h blends the learned (actor) action with the rule-based action Ps
        self.h = 0.5
        # exploration-noise scale factor
        self.c_sigma = 0.05
        # step size applied to the policy output when changing rates
        self.m = 0.5
        self.previous_basal_rate = 0.0
        np.random.seed(55)
        # critic weights, initialised uniformly in [-1, 1)
        self.w = (np.random.rand(2) * 2 - 1).tolist()
        # eligibility-trace decay, discount factor and learning rates
        self._lambda = 0.5
        self.gamma = 0.9
        self.z = [0.0, 0.0]
        self.a = 0.5
        self.beta = 0.5
        self.beta_basal = 0.5
        # weight of the hypoglycaemia feature in the cost signal
        self.value_factor = 10
        # self.time_array = []
        # self.theta_array_1 = []
        # self.theta_array_2 = []
        # self.bolus_time_array = []
        # self.F_1_array = []
        # self.F_2_array = []
        # plt.figure(200)
        # self.fig, self.axis = plt.subplots(4)
        # plt.show()
        # self.axis[0].set_title(" Hyper feature for basal")
        # self.axis[1].set_title(" Hypo feature for basal")
        # self.axis[2].set_title("Hyper theta for basal")
        # self.axis[3].set_title(" Hypo theta for basal")
        # cached critic values from the previous update, one per policy
        self.previous_state_basal = None
        self.previous_state_breakfast = None
        self.previous_state_lunch = None
        self.previous_state_dinner = None
    def extract_features(self, array):
        """Return (F_hyper, F_hypo): the mean normalized excursion above 150
        and below 90 in `array`; each is 0 when there is no excursion.
        """
        M_hyper = []
        M_hypo = []
        for element in array:
            if element > 150:
                M_hyper.append(normalize_f(element))
            elif element < 90:
                M_hypo.append(normalize_f(element))
        F_hyper = sum([element - self.GH for element in M_hyper]) * 1 / len(M_hyper) if M_hyper else 0
        F_hypo = sum([self.GL - element for element in M_hypo]) * 1 / len(M_hypo) if M_hypo else 0
        return (F_hyper, F_hypo)
    def calculate_basal(self, previous_state, basal_array, time):
        """Update and return the basal insulin rate from the latest glucose
        window (basal_array) and the preceding one (previous_state).
        """
        F_hyper, F_hypo = self.extract_features(basal_array)
        F_hyper_prev, F_hypo_prev = self.extract_features(previous_state)
        #
        # self.F_1_array.append(F_hyper)
        # self.F_2_array.append(F_hypo)
        # self.time_array.append(time)
        #
        # self.axis[0].plot(self.time_array, self.F_1_array)
        #
        # self.axis[1].plot(self.time_array, self.F_2_array)
        #
        # plt.pause(0.001)
        # rule-based action: reduce basal in proportion to hypoglycaemia
        Ps = None
        if F_hypo == 0.0:
            Ps = 0
        elif F_hypo > 0.0 and F_hyper == 0.0:
            Ps = -0.1 * F_hypo
        elif F_hypo > 0.0 and F_hyper > 0.0:
            Ps = -0.05 * F_hypo
        assert Ps is not None, "No conditions matched"
        P = self.perform_update(Ps, (F_hyper_prev, F_hypo_prev), (F_hyper, F_hypo), True)
        self.previous_basal_rate = self.current_basal_rate
        br_change = self.m * P * self.current_basal_rate
        # uncomment to enable 5 % change
        # percent_value = 0
        # clamp the applied change to +/- percent_value of the current rate
        if abs(br_change / self.current_basal_rate) > percent_value:
            self.current_basal_rate += self.current_basal_rate * percent_value * sign(br_change)
            print(" used % changed")
        else:
            self.current_basal_rate += br_change
            print(" didn't use % changed")
        return self.current_basal_rate
    def calculate_bolus(self, previous_state, next_state, food_counter, time):
        """Update and return the bolus (IC ratio) for the meal selected by
        food_counter (0=breakfast, 1=lunch, 2=dinner); other values return 0.
        """
        F_hyper, F_hypo = self.extract_features(next_state)
        F_hyper_prev, F_hypo_prev = self.extract_features(previous_state)
        #
        # self.F_1_array.append(F_hyper)
        # self.F_2_array.append(F_hypo)
        # self.bolus_time_array.append(time)
        #
        # self.axis[0].plot(self.bolus_time_array, self.F_1_array)
        # self.axis[1].plot(self.bolus_time_array, self.F_2_array)
        # rule-based action: raise the IC ratio (weaker bolus) on hypo signs
        Ps = None
        if F_hypo == 0.0:
            Ps = 0
        elif F_hypo > 0.0 and F_hyper == 0.0:
            Ps = +0.1 * F_hypo
        elif F_hypo > 0.0 and F_hyper > 0.0:
            Ps = +0.05 * F_hypo
        assert Ps is not None, "No conditions matched"
        P = self.perform_update(Ps, (F_hyper_prev, F_hypo_prev), (F_hyper, F_hypo), False, food_counter)
        if food_counter == 0:
            self.current_breakfast_bolus = self.update_bolus(self.current_breakfast_bolus, P)
            return self.current_breakfast_bolus
        if food_counter == 1:
            self.current_lunch_bolus = self.update_bolus(self.current_lunch_bolus, P)
            return self.current_lunch_bolus
        if food_counter == 2:
            self.current_dinner_bolus = self.update_bolus(self.current_dinner_bolus, P)
            return self.current_dinner_bolus
        # if food_counter == 3:
        #     self.current_snack_bolus = self.update_bolus(self.current_snack_bolus, P)
        #     return self.current_snack_bolus
        return 0.0
    def perform_update(self, Ps, F_old, F, coming_from, food_counter=None):
        """One actor-critic step.

        Args:
            Ps: rule-based action for the current features.
            F_old, F: (F_hyper, F_hypo) tuples of the previous/current window.
            coming_from: True for the basal policy, False for a bolus policy.
            food_counter: meal index for the bolus policies; other values
                short-circuit and return 0.

        Returns:
            Pe, the exploration-noised action actually taken.
        """
        if coming_from:
            theta = self.basal_theta
            previous_state = self.previous_state_basal
        else:
            theta = self.bolus_theta
            if food_counter == 0:
                previous_state = self.previous_state_breakfast
            elif food_counter == 1:
                previous_state = self.previous_state_lunch
            elif food_counter == 2:
                previous_state = self.previous_state_dinner
            else:
                return 0
        # theta = self.theta
        print(f"theta: {theta}")
        # actor action Pa, blended deterministic action Pd, noisy action Pe
        Pa = sum([element1 * element2 for element1, element2 in zip(F, theta)])
        Pd = self.h * Pa + (1 - self.h) * Ps
        sigma = self.c_sigma * (F[0] ** 2 + F[1] ** 2)
        Pe = Pd + np.random.normal(0, sigma)
        cost = 1 * F[0] + self.value_factor * F[1]
        # NOTE(review): falsy test also treats a stored value of exactly 0.0
        # as "missing" and recomputes it - confirm this is intended.
        if not previous_state:
            previous_state = sum(
                [((Pe - Pd) / sigma ** 2 * self.h * element1) * element2 for element1, element2 in zip(F_old, self.w)])
        next_value = sum(
            [((Pe - Pd) / sigma ** 2 * self.h * element1) * element2 for element1, element2 in zip(F, self.w)])
        # TD error, critic update and eligibility-trace update
        d = cost + self.gamma * next_value - previous_state
        self.w = [element1 + self.a * d * element2 for element1, element2 in zip(self.w, self.z)]
        self.z = [self._lambda * element1 + element2 for element1, element2 in zip(self.z, F)]
        # actor (policy-gradient) update for the selected policy
        if coming_from:
            self.basal_theta = [element1 - self.beta_basal * d * (Pe - Pd) / sigma ** 2 * self.h * element2 for
                                element1, element2 in zip(self.basal_theta, F)]
            self.previous_state_basal = next_value
        else:
            self.bolus_theta = [element1 - self.beta * d * (Pe - Pd) / sigma ** 2 * self.h * element2 for
                                element1, element2 in zip(self.bolus_theta, F)]
            if food_counter == 0:
                self.previous_state_breakfast = next_value
            elif food_counter == 1:
                self.previous_state_lunch = next_value
            else:
                self.previous_state_dinner = next_value
        # NOTE(review): sigma was already used as a divisor above, so a zero
        # sigma raises ZeroDivisionError before this assert can fire.
        assert sigma > 0.0000001, "sigma is too low"
        # self.theta_array_1.append(self.theta[0])
        # self.theta_array_2.append(self.theta[1])
        # self.axis[2].plot(self.time_array, self.theta_array_1)
        # self.axis[3].plot(self.time_array, self.theta_array_2)
        return Pe
    def update_bolus(self, old_bolus, P):
        """Apply policy output P to a bolus value, clamped to percent_value."""
        fusion_rate = old_bolus + self.m * P * old_bolus
        # NOTE(review): l is computed but unused since the fusion line below
        # is commented out.
        l = 1 if (self.current_basal_rate > self.previous_basal_rate and fusion_rate < old_bolus) or (
                self.current_basal_rate < self.previous_basal_rate and fusion_rate > old_bolus) else 0
        # fusion_rate = l * old_bolus + (1 - l) * fusion_rate
        bl_change = fusion_rate - old_bolus
        # clamp the applied change to +/- percent_value of the current bolus
        if abs(bl_change / old_bolus) > percent_value:
            old_bolus += sign(bl_change) * old_bolus * percent_value
            print(" used % changed")
        else:
            old_bolus += bl_change
            print(" didn't use % changed")
        return old_bolus
# if __name__ == '__main__':
#
# GL = normalize_f(90)
# GH = normalize_f(150)
#
# def extract_features(array):
# M_hyper = []
# M_hypo = []
#
# for element in array:
# if element > 150:
# M_hyper.append(normalize_f(element))
# elif element < 90:
# M_hypo.append(normalize_f(element))
#
# F_hyper = sum([element - GH for element in M_hyper]) * 1 / len(M_hyper) if M_hyper else 0
#
# F_hypo = sum([GL - element for element in M_hypo]) * 1 / len(M_hypo) if M_hypo else 0
#
# return (F_hyper, F_hypo)
#
# array = test2.array
# print(extract_features(array))
| 35.501832 | 119 | 0.596574 | from functools import partial
from pprint import pprint
import matplotlib.pyplot as plt
from simglucose.controller.base import Controller
import numpy as np
import math
# Maximum relative change (5%) allowed per update of a basal/bolus rate.
percent_value = 0.05


# PEP 8 (E731): named callables are defined with def, not lambda assignment.
def sign(x):
    """Return +1.0 or -1.0 matching the sign of x."""
    return math.copysign(1, x)


def normalize_f(x):
    """Linearly map a glucose reading from [39, 600] (presumably mg/dL -
    confirm) onto [0, 1]."""
    return (x - 39) / (600 - 39)
class PaperRLController(Controller):
    """Actor-critic reinforcement-learning controller for closed-loop
    insulin dosing: adapts the basal rate and the per-meal bolus
    (carbohydrate-ratio) values using hyper-/hypoglycaemia features
    extracted from recent glucose readings.
    """
    def __init__(self, a_hyper=1, a_hypo=10, current_breakfast_bolus=0.0, current_lunch_bolus=0.0,
                 current_dinner_bolus=0.0, current_basal_rate=0.0, current_snack_bolus=0.0, init_state=None):
        super().__init__(init_state)
        np.random.seed(1)
        self.a_hyper = a_hyper
        # NOTE(review): stored as 'hypo' while its counterpart is 'a_hyper';
        # neither attribute is read elsewhere in this class - confirm naming.
        self.hypo = a_hypo
        # normalized band edges: GL ~ 90, GH ~ 150 (presumably mg/dL)
        self.GL = normalize_f(90)
        self.GH = normalize_f(150)
        self.current_basal_rate = current_basal_rate
        self.current_breakfast_bolus = current_breakfast_bolus
        self.current_lunch_bolus = current_lunch_bolus
        self.current_dinner_bolus = current_dinner_bolus
        # actor weights (theta) for the basal and bolus policies
        self.basal_theta = []
        self.bolus_theta = []
        # h blends the learned (actor) action with the rule-based action Ps
        self.h = 0.5
        # exploration-noise scale factor
        self.c_sigma = 0.05
        # step size applied to the policy output when changing rates
        self.m = 0.5
        self.previous_basal_rate = 0.0
        np.random.seed(55)
        # critic weights, initialised uniformly in [-1, 1)
        self.w = (np.random.rand(2) * 2 - 1).tolist()
        # eligibility-trace decay, discount factor and learning rates
        self._lambda = 0.5
        self.gamma = 0.9
        self.z = [0.0, 0.0]
        self.a = 0.5
        self.beta = 0.5
        self.beta_basal = 0.5
        # weight of the hypoglycaemia feature in the cost signal
        self.value_factor = 10
        # cached critic values from the previous update, one per policy
        self.previous_state_basal = None
        self.previous_state_breakfast = None
        self.previous_state_lunch = None
        self.previous_state_dinner = None
    def extract_features(self, array):
        """Return (F_hyper, F_hypo): the mean normalized excursion above 150
        and below 90 in `array`; each is 0 when there is no excursion.
        """
        M_hyper = []
        M_hypo = []
        for element in array:
            if element > 150:
                M_hyper.append(normalize_f(element))
            elif element < 90:
                M_hypo.append(normalize_f(element))
        F_hyper = sum([element - self.GH for element in M_hyper]) * 1 / len(M_hyper) if M_hyper else 0
        F_hypo = sum([self.GL - element for element in M_hypo]) * 1 / len(M_hypo) if M_hypo else 0
        return (F_hyper, F_hypo)
    def calculate_basal(self, previous_state, basal_array, time):
        """Update and return the basal insulin rate from the latest glucose
        window (basal_array) and the preceding one (previous_state).
        """
        F_hyper, F_hypo = self.extract_features(basal_array)
        F_hyper_prev, F_hypo_prev = self.extract_features(previous_state)
        # rule-based action: reduce basal in proportion to hypoglycaemia
        Ps = None
        if F_hypo == 0.0:
            Ps = 0
        elif F_hypo > 0.0 and F_hyper == 0.0:
            Ps = -0.1 * F_hypo
        elif F_hypo > 0.0 and F_hyper > 0.0:
            Ps = -0.05 * F_hypo
        assert Ps is not None, "No conditions matched"
        P = self.perform_update(Ps, (F_hyper_prev, F_hypo_prev), (F_hyper, F_hypo), True)
        self.previous_basal_rate = self.current_basal_rate
        br_change = self.m * P * self.current_basal_rate
        # clamp the applied change to +/- percent_value of the current rate
        if abs(br_change / self.current_basal_rate) > percent_value:
            self.current_basal_rate += self.current_basal_rate * percent_value * sign(br_change)
            print(" used % changed")
        else:
            self.current_basal_rate += br_change
            print(" didn't use % changed")
        return self.current_basal_rate
    def calculate_bolus(self, previous_state, next_state, food_counter, time):
        """Update and return the bolus (IC ratio) for the meal selected by
        food_counter (0=breakfast, 1=lunch, 2=dinner); other values return 0.
        """
        F_hyper, F_hypo = self.extract_features(next_state)
        F_hyper_prev, F_hypo_prev = self.extract_features(previous_state)
        #
        # self.F_1_array.append(F_hyper)
        # self.F_2_array.append(F_hypo)
        # self.bolus_time_array.append(time)
        #
        # self.axis[0].plot(self.bolus_time_array, self.F_1_array)
        # self.axis[1].plot(self.bolus_time_array, self.F_2_array)
        # rule-based action: raise the IC ratio (weaker bolus) on hypo signs
        Ps = None
        if F_hypo == 0.0:
            Ps = 0
        elif F_hypo > 0.0 and F_hyper == 0.0:
            Ps = +0.1 * F_hypo
        elif F_hypo > 0.0 and F_hyper > 0.0:
            Ps = +0.05 * F_hypo
        assert Ps is not None, "No conditions matched"
        P = self.perform_update(Ps, (F_hyper_prev, F_hypo_prev), (F_hyper, F_hypo), False, food_counter)
        if food_counter == 0:
            self.current_breakfast_bolus = self.update_bolus(self.current_breakfast_bolus, P)
            return self.current_breakfast_bolus
        if food_counter == 1:
            self.current_lunch_bolus = self.update_bolus(self.current_lunch_bolus, P)
            return self.current_lunch_bolus
        if food_counter == 2:
            self.current_dinner_bolus = self.update_bolus(self.current_dinner_bolus, P)
            return self.current_dinner_bolus
        # if food_counter == 3:
        #     self.current_snack_bolus = self.update_bolus(self.current_snack_bolus, P)
        #     return self.current_snack_bolus
        return 0.0
    def perform_update(self, Ps, F_old, F, coming_from, food_counter=None):
        """One actor-critic step; returns Pe, the exploration-noised action.

        coming_from selects the basal (True) or bolus (False) policy;
        food_counter picks the meal for bolus policies, other values
        short-circuit and return 0.
        """
        if coming_from:
            theta = self.basal_theta
            previous_state = self.previous_state_basal
        else:
            theta = self.bolus_theta
            if food_counter == 0:
                previous_state = self.previous_state_breakfast
            elif food_counter == 1:
                previous_state = self.previous_state_lunch
            elif food_counter == 2:
                previous_state = self.previous_state_dinner
            else:
                return 0
        # theta = self.theta
        print(f"theta: {theta}")
        # actor action Pa, blended deterministic action Pd, noisy action Pe
        Pa = sum([element1 * element2 for element1, element2 in zip(F, theta)])
        Pd = self.h * Pa + (1 - self.h) * Ps
        sigma = self.c_sigma * (F[0] ** 2 + F[1] ** 2)
        Pe = Pd + np.random.normal(0, sigma)
        cost = 1 * F[0] + self.value_factor * F[1]
        # NOTE(review): falsy test also treats a stored value of exactly 0.0
        # as "missing" and recomputes it - confirm this is intended.
        if not previous_state:
            previous_state = sum(
                [((Pe - Pd) / sigma ** 2 * self.h * element1) * element2 for element1, element2 in zip(F_old, self.w)])
        next_value = sum(
            [((Pe - Pd) / sigma ** 2 * self.h * element1) * element2 for element1, element2 in zip(F, self.w)])
        # TD error, critic update and eligibility-trace update
        d = cost + self.gamma * next_value - previous_state
        self.w = [element1 + self.a * d * element2 for element1, element2 in zip(self.w, self.z)]
        self.z = [self._lambda * element1 + element2 for element1, element2 in zip(self.z, F)]
        # actor (policy-gradient) update for the selected policy
        if coming_from:
            self.basal_theta = [element1 - self.beta_basal * d * (Pe - Pd) / sigma ** 2 * self.h * element2 for
                                element1, element2 in zip(self.basal_theta, F)]
            self.previous_state_basal = next_value
        else:
            self.bolus_theta = [element1 - self.beta * d * (Pe - Pd) / sigma ** 2 * self.h * element2 for
                                element1, element2 in zip(self.bolus_theta, F)]
            if food_counter == 0:
                self.previous_state_breakfast = next_value
            elif food_counter == 1:
                self.previous_state_lunch = next_value
            else:
                self.previous_state_dinner = next_value
        # NOTE(review): sigma was already used as a divisor above, so a zero
        # sigma raises ZeroDivisionError before this assert can fire.
        assert sigma > 0.0000001, "sigma is too low"
        # self.theta_array_1.append(self.theta[0])
        # self.theta_array_2.append(self.theta[1])
        # self.axis[2].plot(self.time_array, self.theta_array_1)
        # self.axis[3].plot(self.time_array, self.theta_array_2)
        return Pe
    def update_bolus(self, old_bolus, P):
        """Apply policy output P to a bolus value, clamped to percent_value."""
        fusion_rate = old_bolus + self.m * P * old_bolus
        # NOTE(review): l is computed but unused since the fusion line below
        # is commented out.
        l = 1 if (self.current_basal_rate > self.previous_basal_rate and fusion_rate < old_bolus) or (
                self.current_basal_rate < self.previous_basal_rate and fusion_rate > old_bolus) else 0
        # fusion_rate = l * old_bolus + (1 - l) * fusion_rate
        bl_change = fusion_rate - old_bolus
        # clamp the applied change to +/- percent_value of the current bolus
        if abs(bl_change / old_bolus) > percent_value:
            old_bolus += sign(bl_change) * old_bolus * percent_value
            print(" used % changed")
        else:
            old_bolus += bl_change
            print(" didn't use % changed")
        return old_bolus
| true | true |
f71b09cc7eff04c4f945a4c71943c706e084229f | 43,925 | py | Python | benchmarks/ltl_maxplus/f3/maxplus_20_83.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | 3 | 2021-04-23T23:29:26.000Z | 2022-03-23T10:00:30.000Z | benchmarks/ltl_maxplus/f3/maxplus_20_83.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | null | null | null | benchmarks/ltl_maxplus/f3/maxplus_20_83.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | 1 | 2021-11-17T22:02:56.000Z | 2021-11-17T22:02:56.000Z |
from collections import Iterable
from mathsat import msat_term, msat_env
from mathsat import msat_make_true, msat_make_false
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_rational_type
from mathsat import msat_make_and as _msat_make_and
from mathsat import msat_make_or as _msat_make_or
from mathsat import msat_make_not
from mathsat import msat_make_leq, msat_make_equal
from mathsat import msat_make_number, msat_make_plus, msat_make_times
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next
def msat_make_and(menv: msat_env, *args):
    """N-ary conjunction built from the binary mathsat primitive.

    The empty conjunction is true; a single term is returned unchanged.
    """
    if not args:
        return msat_make_true(menv)
    acc = args[0]
    # left-fold the remaining terms into the accumulator
    for term in args[1:]:
        acc = _msat_make_and(menv, acc, term)
    return acc
def msat_make_or(menv: msat_env, *args):
    """N-ary disjunction built from the binary mathsat primitive.

    The empty disjunction is false; a single term is returned unchanged.
    """
    if not args:
        return msat_make_false(menv)
    acc = args[0]
    # left-fold the remaining terms into the accumulator
    for term in args[1:]:
        acc = _msat_make_or(menv, acc, term)
    return acc
def msat_make_minus(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Encode arg0 - arg1 as arg0 + (-1 * arg1)."""
    minus_one = msat_make_number(menv, "-1")
    negated = msat_make_times(menv, arg1, minus_one)
    return msat_make_plus(menv, arg0, negated)
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Return ``arg0 < arg1`` as the negation of ``arg0 >= arg1``."""
    return msat_make_not(menv, msat_make_geq(menv, arg0, arg1))
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Return ``arg0 >= arg1`` by flipping the arguments of ``<=``."""
    return msat_make_leq(menv, arg1, arg0)
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Return ``arg0 > arg1`` as the negation of ``arg0 <= arg1``."""
    return msat_make_not(menv, msat_make_leq(menv, arg0, arg1))
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Return ``arg0 -> arg1``, encoded as ``(not arg0) | arg1``."""
    return msat_make_or(menv, msat_make_not(menv, arg0), arg1)
def check_ltl(menv: msat_env, enc: LTLEncoder) -> (Iterable, msat_term,
                                                   msat_term, msat_term):
    """Build a 20-variable max-plus transition system plus an LTL property.

    For every state variable ``x_i`` the next-state value is the max-plus
    update ``x_i' = max_j (x_j + d)`` over a fixed set of (source, delay)
    pairs.  The max is encoded in the usual SMT way: ``x_i'`` is >= every
    candidate, and equal to at least one of them.

    The original file spelled out each of the 20 transitions as ~35 lines
    of copy-pasted term construction; here the (source, delay) pairs are
    tabulated and the terms are built in a loop.  The fold order of the
    conjunctions/disjunctions matches the original exactly, and MathSAT
    hash-conses terms, so the resulting formulas are identical.

    :param menv: MathSAT environment used for all term construction.
    :param enc: LTL encoder supplying the Until operator (``make_U``).
    :return: ``(curr2next, init, trans, ltl)`` where ``curr2next`` maps
        each current-state constant to its primed counterpart.
    """
    assert menv
    assert isinstance(menv, msat_env)
    assert enc
    assert isinstance(enc, LTLEncoder)
    real_type = msat_get_rational_type(menv)
    names = ["x_{}".format(idx) for idx in range(20)]
    xs = [msat_make_constant(menv,
                             msat_declare_function(menv, name, real_type))
          for name in names]
    x_xs = [msat_make_constant(menv,
                               msat_declare_function(menv, name_next(name),
                                                     real_type))
            for name in names]
    curr2next = {x: x_x for x, x_x in zip(xs, x_xs)}

    def num(val) -> msat_term:
        # Rational constant "<val>.0".  MathSAT hash-conses terms, so
        # repeated calls return the same term a pre-built constant would.
        return msat_make_number(menv, "{}.0".format(val))

    # DELAYS[i] lists the (source index j, delay d) pairs constraining
    # next(x_i): x_i' = max over the row of (x_j + d).
    DELAYS = [
        [(0, 8), (4, 9), (5, 9), (7, 12), (11, 20),
         (12, 15), (14, 12), (15, 5), (18, 1), (19, 5)],
        [(1, 20), (4, 16), (5, 17), (6, 6), (8, 19),
         (11, 13), (12, 1), (15, 5), (16, 1), (18, 15)],
        [(0, 4), (1, 1), (3, 12), (5, 18), (7, 13),
         (8, 12), (14, 17), (16, 14), (17, 1), (19, 16)],
        [(2, 9), (3, 17), (5, 2), (7, 5), (13, 20),
         (15, 4), (16, 20), (17, 7), (18, 11), (19, 3)],
        [(0, 14), (2, 2), (4, 13), (5, 4), (6, 5),
         (10, 17), (12, 16), (14, 15), (15, 15), (18, 9)],
        [(2, 17), (4, 2), (5, 18), (6, 17), (8, 20),
         (10, 7), (14, 2), (16, 19), (17, 12), (18, 13)],
        [(2, 8), (3, 2), (5, 13), (9, 15), (11, 12),
         (13, 2), (14, 18), (16, 17), (17, 7), (18, 11)],
        [(0, 3), (3, 11), (5, 12), (7, 3), (10, 5),
         (11, 5), (14, 5), (17, 20), (18, 14), (19, 10)],
        [(0, 15), (3, 9), (5, 4), (6, 16), (9, 3),
         (10, 18), (12, 1), (16, 7), (17, 14), (19, 10)],
        [(3, 5), (4, 4), (5, 19), (6, 9), (10, 5),
         (12, 12), (14, 7), (15, 12), (16, 20), (17, 3)],
        [(0, 20), (3, 12), (6, 18), (8, 8), (9, 8),
         (10, 2), (11, 16), (16, 18), (17, 20), (19, 11)],
        [(0, 8), (4, 4), (7, 2), (8, 12), (10, 17),
         (11, 17), (12, 19), (15, 9), (18, 20), (19, 11)],
        [(1, 20), (5, 1), (6, 18), (7, 14), (8, 13),
         (10, 17), (11, 9), (12, 8), (13, 14), (18, 12)],
        [(0, 10), (1, 15), (2, 4), (7, 13), (10, 15),
         (12, 17), (13, 19), (14, 7), (15, 3), (18, 15)],
        [(0, 1), (1, 1), (4, 16), (8, 20), (9, 12),
         (10, 9), (11, 15), (14, 11), (18, 9), (19, 7)],
        [(1, 17), (3, 18), (6, 18), (9, 5), (10, 16),
         (13, 5), (14, 14), (17, 10), (18, 13), (19, 9)],
        [(0, 14), (1, 2), (4, 3), (5, 18), (6, 8),
         (9, 17), (12, 17), (13, 2), (15, 4), (17, 1)],
        [(0, 16), (4, 14), (6, 20), (7, 15), (8, 2),
         (11, 5), (14, 13), (16, 10), (18, 4), (19, 1)],
        [(0, 9), (5, 12), (6, 19), (7, 11), (9, 9),
         (10, 19), (11, 20), (12, 2), (13, 17), (15, 7)],
        [(0, 6), (1, 18), (3, 2), (7, 4), (9, 1),
         (10, 2), (14, 11), (16, 2), (17, 16), (19, 5)],
    ]

    init = msat_make_true(menv)
    trans = msat_make_true(menv)
    # transitions: one max-plus update per state variable
    for i, row in enumerate(DELAYS):
        cands = [msat_make_plus(menv, xs[j], num(d)) for j, d in row]
        # x_i' is an upper bound on every candidate ...
        _t = msat_make_and(menv, *(msat_make_geq(menv, x_xs[i], cand)
                                   for cand in cands))
        # ... and is attained by at least one of them (so x_i' == max).
        _t = msat_make_and(menv, _t,
                           msat_make_or(menv,
                                        *(msat_make_equal(menv, x_xs[i], cand)
                                          for cand in cands)))
        trans = msat_make_and(menv, trans, _t)

    # ltl property: ((x_4 - x_10 > -8) & ((x_2 - x_12 > 17) U (x_3 - x_14 > 10)))
    ltl = msat_make_and(
        menv,
        msat_make_gt(menv, msat_make_minus(menv, xs[4], xs[10]),
                     msat_make_number(menv, "-8")),
        enc.make_U(msat_make_gt(menv, msat_make_minus(menv, xs[2], xs[12]),
                                msat_make_number(menv, "17")),
                   msat_make_gt(menv, msat_make_minus(menv, xs[3], xs[14]),
                                msat_make_number(menv, "10"))))
    return TermMap(curr2next), init, trans, ltl
| 54.02829 | 305 | 0.507388 |
from collections import Iterable
from mathsat import msat_term, msat_env
from mathsat import msat_make_true, msat_make_false
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_rational_type
from mathsat import msat_make_and as _msat_make_and
from mathsat import msat_make_or as _msat_make_or
from mathsat import msat_make_not
from mathsat import msat_make_leq, msat_make_equal
from mathsat import msat_make_number, msat_make_plus, msat_make_times
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next
def msat_make_and(menv: msat_env, *args):
    """Conjoin *args* left-to-right; the empty conjunction is ``true``.

    Wraps the binary ``_msat_make_and`` to accept any arity.
    """
    if len(args) == 0:
        return msat_make_true(menv)
    if len(args) == 1:
        return args[0]
    res = _msat_make_and(menv, args[0], args[1])
    for arg in args[2:]:
        res = _msat_make_and(menv, res, arg)
    return res
def msat_make_or(menv: msat_env, *args):
    """Disjoin *args* left-to-right; the empty disjunction is ``false``.

    Wraps the binary ``_msat_make_or`` to accept any arity.
    """
    if len(args) == 0:
        return msat_make_false(menv)
    if len(args) == 1:
        return args[0]
    res = _msat_make_or(menv, args[0], args[1])
    for arg in args[2:]:
        res = _msat_make_or(menv, res, arg)
    return res
def msat_make_minus(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Return ``arg0 - arg1``, encoded as ``arg0 + (-1 * arg1)``."""
    n_m1 = msat_make_number(menv, "-1")
    arg1 = msat_make_times(menv, arg1, n_m1)
    return msat_make_plus(menv, arg0, arg1)
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Return ``arg0 < arg1`` as the negation of ``arg0 >= arg1``."""
    geq = msat_make_geq(menv, arg0, arg1)
    return msat_make_not(menv, geq)
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Return ``arg0 >= arg1`` by flipping the arguments of ``<=``."""
    return msat_make_leq(menv, arg1, arg0)
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Return ``arg0 > arg1`` as the negation of ``arg0 <= arg1``."""
    leq = msat_make_leq(menv, arg0, arg1)
    return msat_make_not(menv, leq)
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Return ``arg0 -> arg1``, encoded as ``(not arg0) | arg1``."""
    n_arg0 = msat_make_not(menv, arg0)
    return msat_make_or(menv, n_arg0, arg1)
def check_ltl(menv: msat_env, enc: LTLEncoder) -> (Iterable, msat_term,
                                                  msat_term, msat_term):
    """Build a 20-variable max-plus transition system and an LTL property.

    The transition relation constrains each next-state variable x_i' to be
    the maximum of ten candidate values xs[j] + c: it must be >= every
    candidate AND equal to at least one of them.  The original generated
    code spelled these 20 constraint groups out verbatim; here they are
    produced from the DELTAS table, preserving the exact term-construction
    order (geq-conjunction, then equality-disjunction, then conjunction
    into `trans`) so the resulting msat terms are structurally identical.

    Returns:
        (TermMap curr->next, init term, trans term, ltl term)
    """
    assert menv
    assert isinstance(menv, msat_env)
    assert enc
    assert isinstance(enc, LTLEncoder)
    real_type = msat_get_rational_type(menv)
    names = ["x_{}".format(idx) for idx in range(20)]
    xs = [msat_declare_function(menv, name, real_type)
          for name in names]
    xs = [msat_make_constant(menv, x) for x in xs]
    x_xs = [msat_declare_function(menv, name_next(name), real_type)
            for name in names]
    x_xs = [msat_make_constant(menv, x_x) for x_x in x_xs]
    curr2next = {x: x_x for x, x_x in zip(xs, x_xs)}
    init = msat_make_true(menv)
    trans = msat_make_true(menv)

    # DELTAS[i] lists the (source index j, integer offset c) pairs feeding
    # x_i': the candidates are xs[j] + c (offsets are rendered as "c.0").
    DELTAS = [
        [(0, 8), (4, 9), (5, 9), (7, 12), (11, 20), (12, 15), (14, 12),
         (15, 5), (18, 1), (19, 5)],                                  # x_0'
        [(1, 20), (4, 16), (5, 17), (6, 6), (8, 19), (11, 13), (12, 1),
         (15, 5), (16, 1), (18, 15)],                                 # x_1'
        [(0, 4), (1, 1), (3, 12), (5, 18), (7, 13), (8, 12), (14, 17),
         (16, 14), (17, 1), (19, 16)],                                # x_2'
        [(2, 9), (3, 17), (5, 2), (7, 5), (13, 20), (15, 4), (16, 20),
         (17, 7), (18, 11), (19, 3)],                                 # x_3'
        [(0, 14), (2, 2), (4, 13), (5, 4), (6, 5), (10, 17), (12, 16),
         (14, 15), (15, 15), (18, 9)],                                # x_4'
        [(2, 17), (4, 2), (5, 18), (6, 17), (8, 20), (10, 7), (14, 2),
         (16, 19), (17, 12), (18, 13)],                               # x_5'
        [(2, 8), (3, 2), (5, 13), (9, 15), (11, 12), (13, 2), (14, 18),
         (16, 17), (17, 7), (18, 11)],                                # x_6'
        [(0, 3), (3, 11), (5, 12), (7, 3), (10, 5), (11, 5), (14, 5),
         (17, 20), (18, 14), (19, 10)],                               # x_7'
        [(0, 15), (3, 9), (5, 4), (6, 16), (9, 3), (10, 18), (12, 1),
         (16, 7), (17, 14), (19, 10)],                                # x_8'
        [(3, 5), (4, 4), (5, 19), (6, 9), (10, 5), (12, 12), (14, 7),
         (15, 12), (16, 20), (17, 3)],                                # x_9'
        [(0, 20), (3, 12), (6, 18), (8, 8), (9, 8), (10, 2), (11, 16),
         (16, 18), (17, 20), (19, 11)],                               # x_10'
        [(0, 8), (4, 4), (7, 2), (8, 12), (10, 17), (11, 17), (12, 19),
         (15, 9), (18, 20), (19, 11)],                                # x_11'
        [(1, 20), (5, 1), (6, 18), (7, 14), (8, 13), (10, 17), (11, 9),
         (12, 8), (13, 14), (18, 12)],                                # x_12'
        [(0, 10), (1, 15), (2, 4), (7, 13), (10, 15), (12, 17), (13, 19),
         (14, 7), (15, 3), (18, 15)],                                 # x_13'
        [(0, 1), (1, 1), (4, 16), (8, 20), (9, 12), (10, 9), (11, 15),
         (14, 11), (18, 9), (19, 7)],                                 # x_14'
        [(1, 17), (3, 18), (6, 18), (9, 5), (10, 16), (13, 5), (14, 14),
         (17, 10), (18, 13), (19, 9)],                                # x_15'
        [(0, 14), (1, 2), (4, 3), (5, 18), (6, 8), (9, 17), (12, 17),
         (13, 2), (15, 4), (17, 1)],                                  # x_16'
        [(0, 16), (4, 14), (6, 20), (7, 15), (8, 2), (11, 5), (14, 13),
         (16, 10), (18, 4), (19, 1)],                                 # x_17'
        [(0, 9), (5, 12), (6, 19), (7, 11), (9, 9), (10, 19), (11, 20),
         (12, 2), (13, 17), (15, 7)],                                 # x_18'
        [(0, 6), (1, 18), (3, 2), (7, 4), (9, 1), (10, 2), (14, 11),
         (16, 2), (17, 16), (19, 5)],                                 # x_19'
    ]

    for tgt, deltas in enumerate(DELTAS):
        # Candidate values for x_tgt'; each expr is used both in the lower
        # bound and in the tightness disjunction, so build it only once.
        exprs = [msat_make_plus(menv, xs[src],
                                msat_make_number(menv, "{}.0".format(off)))
                 for src, off in deltas]
        # x_tgt' is >= every candidate ...
        lower = msat_make_and(
            menv, *[msat_make_geq(menv, x_xs[tgt], e) for e in exprs])
        # ... and equal to at least one of them, hence exactly the maximum.
        tight = msat_make_or(
            menv, *[msat_make_equal(menv, x_xs[tgt], e) for e in exprs])
        trans = msat_make_and(menv, trans,
                              msat_make_and(menv, lower, tight))

    # Property: (x4 - x10 > -8) & ((x2 - x12 > 17) U (x3 - x14 > 10))
    ltl = msat_make_and(
        menv,
        msat_make_gt(menv, msat_make_minus(menv, xs[4], xs[10]),
                     msat_make_number(menv, "-8")),
        enc.make_U(
            msat_make_gt(menv, msat_make_minus(menv, xs[2], xs[12]),
                         msat_make_number(menv, "17")),
            msat_make_gt(menv, msat_make_minus(menv, xs[3], xs[14]),
                         msat_make_number(menv, "10"))))
    return TermMap(curr2next), init, trans, ltl
| true | true |
f71b0aee44ad99983b9dca55c4966839e2bc48a0 | 724 | py | Python | cogs/meme.py | toxic3918/fiirrd-bot | 3005fe4941a24cd5c5e496c67ce90323ccba8d08 | [
"MIT"
] | null | null | null | cogs/meme.py | toxic3918/fiirrd-bot | 3005fe4941a24cd5c5e496c67ce90323ccba8d08 | [
"MIT"
] | null | null | null | cogs/meme.py | toxic3918/fiirrd-bot | 3005fe4941a24cd5c5e496c67ce90323ccba8d08 | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands
import aiohttp
import random
class Meme(commands.Cog):
    """Cog exposing a ``meme`` command that posts a random post from r/memes."""

    def __init__(self, client):
        # The bot/client instance this cog is attached to.
        self.client = client

    @commands.command()
    async def meme(self, ctx):
        """Fetch reddit's r/memes listing and reply with a random post image."""
        async with aiohttp.ClientSession() as cs:
            async with cs.get("https://www.reddit.com/r/memes.json") as r:
                memes = await r.json()
        # Bug fix: random.randint(0, 25) is inclusive, but the listing holds
        # at most 25 posts (indices 0-24), so index 25 raised IndexError.
        # random.choice also adapts to listings shorter than 25 entries.
        post = random.choice(memes['data']['children'])['data']
        embed = discord.Embed(color=discord.Color.random())
        embed.set_image(url=post['url'])
        embed.set_footer(text=f"Requested By {ctx.author}")
        await ctx.send(embed=embed)
def setup(client):
client.add_cog(Meme(client)) | 27.846154 | 100 | 0.614641 | import discord
from discord.ext import commands
import aiohttp
import random
class Meme(commands.Cog):
    """Cog exposing a ``meme`` command that posts a random post from r/memes."""

    def __init__(self, client):
        # The bot/client instance this cog is attached to.
        self.client = client

    @commands.command()
    async def meme(self, ctx):
        """Fetch reddit's r/memes listing and reply with a random post image."""
        async with aiohttp.ClientSession() as cs:
            async with cs.get("https://www.reddit.com/r/memes.json") as r:
                memes = await r.json()
                embed = discord.Embed(color=discord.Color.random())
                # NOTE(review): randint(0, 25) is inclusive; a 25-item listing
                # only has indices 0-24, so this can raise IndexError — confirm
                # and consider random.choice(memes['data']['children']).
                embed.set_image(url=memes['data']['children'][random.randint(0, 25)]['data']['url'])
                embed.set_footer(text=f"Requested By {ctx.author}")
                await ctx.send(embed=embed)
def setup(client):
client.add_cog(Meme(client)) | true | true |
f71b0d762d4dc64602e981f1eb7b945440962f2b | 512 | py | Python | Client/setup.py | KostasPakas17/RSPET | de4356e40d803a7c224e2c919cac6a2d6c0a330f | [
"MIT"
] | 289 | 2016-03-15T21:48:09.000Z | 2022-03-16T23:04:11.000Z | Client/setup.py | crypticterminal/RSPET | de4356e40d803a7c224e2c919cac6a2d6c0a330f | [
"MIT"
] | 39 | 2016-04-30T10:14:29.000Z | 2017-10-23T21:08:10.000Z | Client/setup.py | crypticterminal/RSPET | de4356e40d803a7c224e2c919cac6a2d6c0a330f | [
"MIT"
] | 115 | 2016-03-15T20:25:57.000Z | 2021-11-08T23:49:31.000Z | '''
Written for DigitalOcean's Hacktoberfest!
Requires cx_Freeze and must be built on Windows :(
Unfortunately, neither cx_Freeze nor py2exe support cross platform compilation
thus, this particular solution was set into motion
'''
import sys
from cx_Freeze import setup, Executable
setup(
name = "RSPET Test", #Change these values to your liking
version = "0.1",
description = "A Test Executable",
executables = [Executable("rspet_client.py", base = "Win32GUI")]) | 28.444444 | 79 | 0.701172 |
import sys
from cx_Freeze import setup, Executable
setup(
name = "RSPET Test",
version = "0.1",
description = "A Test Executable",
executables = [Executable("rspet_client.py", base = "Win32GUI")]) | true | true |
f71b0e90747b4d9d2219b1202a357213e814bbef | 6,923 | py | Python | heron/tools/cli/src/python/execute.py | pjfanning/incubator-heron | 7db7c24733bd7e66ecfe704ea65f864d1fff4adc | [
"Apache-2.0"
] | 3,348 | 2016-05-25T16:04:31.000Z | 2018-03-28T17:46:14.000Z | heron/tools/cli/src/python/execute.py | pjfanning/incubator-heron | 7db7c24733bd7e66ecfe704ea65f864d1fff4adc | [
"Apache-2.0"
] | 1,542 | 2016-05-25T16:46:44.000Z | 2018-03-29T17:30:23.000Z | heron/tools/cli/src/python/execute.py | pjfanning/incubator-heron | 7db7c24733bd7e66ecfe704ea65f864d1fff4adc | [
"Apache-2.0"
] | 702 | 2016-05-25T16:07:43.000Z | 2018-03-27T06:31:07.000Z | #!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
''' execute.py '''
import contextlib
import os
import subprocess
import shlex
import tarfile
import tempfile
import traceback
from heron.common.src.python.utils.log import Log
from heron.tools.cli.src.python.result import SimpleResult, ProcessResult, Status
from heron.common.src.python import pex_loader
from heron.tools.cli.src.python import opts
from heron.tools.cli.src.python import jars
from heron.tools.common.src.python.utils import config
################################################################################
def heron_class(class_name, lib_jars, extra_jars=None, args=None, java_defines=None):
    '''
    Run a heron Java class locally in a subprocess.

    :param class_name: fully qualified Java class to run
    :param lib_jars: heron library jars needed on the classpath
    :param extra_jars: additional jars for the classpath
    :param args: command-line arguments passed to the class
    :param java_defines: java -D definitions (without the -D prefix)
    :return: ProcessResult wrapping the JVM process, or an error SimpleResult
    '''
    # Normalize optional parameters to empty lists.
    extra_jars = [] if extra_jars is None else extra_jars
    args = [] if args is None else args
    java_defines = [] if java_defines is None else java_defines

    # Each -D definition must be its own argv entry for the JVM.
    jvm_options = ['-D' + define for define in java_defines]

    java_path = config.get_java_path()
    if java_path is None:
        return SimpleResult(Status.InvocationError, "Unable to find java command")

    # Assemble the full command line; the JVM options must precede -cp.
    command = [java_path, "-client", "-Xmx1g"]
    command += jvm_options
    command += ["-cp", config.get_classpath(extra_jars + lib_jars)]
    command += [class_name]
    command += list(args)

    # Heron configuration is handed to the JVM through the environment.
    child_env = os.environ.copy()
    child_env['HERON_OPTIONS'] = opts.get_heron_config()

    Log.debug("Invoking class using command: `%s`", ' '.join(shlex.quote(a) for a in command))
    Log.debug("Heron options: {%s}", str(child_env["HERON_OPTIONS"]))

    # stdout carries the Java program's output; stderr carries diagnostics.
    # pylint: disable=consider-using-with
    process = subprocess.Popen(command, env=child_env, stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE, universal_newlines=True, bufsize=1)
    return ProcessResult(process)
def heron_tar(class_name, topology_tar, arguments, tmpdir_root, java_defines):
    '''
    Extract a topology tar archive and run `class_name` with its jars on the classpath.

    :param class_name: fully qualified Java class to execute
    :param topology_tar: path to the topology .tar/.tar.gz archive
    :param arguments: command-line arguments forwarded to the class
    :param tmpdir_root: directory under which a temporary extraction dir is created
    :param java_defines: java -D definitions (without the -D prefix)
    :return: result of heron_class (ProcessResult on success, SimpleResult on error)
    '''
    # Extract tar to a tmp folder.
    # NOTE(review): extractall() trusts member paths inside the archive; a
    # malicious tar could escape tmpdir (path traversal) — confirm the archive
    # source is trusted. The tmpdir is intentionally not deleted here, since
    # the spawned JVM reads the extracted jars.
    tmpdir = tempfile.mkdtemp(dir=tmpdir_root, prefix='tmp')
    with contextlib.closing(tarfile.open(topology_tar)) as tar:
        tar.extractall(path=tmpdir)

    # A tar generated by pants has all dependency jars under libs/
    # in addition to the topology jar at top level. Pants keeps
    # filename for jar and tar the same except for extension.
    topology_jar = os.path.basename(topology_tar).replace(".tar.gz", "").replace(".tar", "") + ".jar"

    extra_jars = [
        os.path.join(tmpdir, topology_jar),
        os.path.join(tmpdir, "*"),
        os.path.join(tmpdir, "libs/*")
    ]

    lib_jars = config.get_heron_libs(jars.topology_jars())

    # Now execute the class
    return heron_class(class_name, lib_jars, extra_jars, arguments, java_defines)
def heron_pex(topology_pex, topology_class_name, args=None):
    """Use a topology defined in a PEX.

    If topology_class_name is '-', the pex is executed directly as a subprocess
    (its __main__ defines the topology); otherwise the named Topology subclass
    is imported from the pex in-process and its write() method is invoked.
    """
    Log.debug("Importing %s from %s", topology_class_name, topology_pex)
    if topology_class_name == '-':
        # loading topology by running its main method (if __name__ == "__main__")
        heron_env = os.environ.copy()
        heron_env['HERON_OPTIONS'] = opts.get_heron_config()
        cmd = [topology_pex]
        if args is not None:
            cmd.extend(args)
        Log.debug("Invoking class using command: ``%s''", ' '.join(cmd))
        Log.debug('Heron options: {%s}', str(heron_env['HERON_OPTIONS']))
        # invoke the command with subprocess and print error message, if any
        # pylint: disable=consider-using-with
        process = subprocess.Popen(cmd, env=heron_env, stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE, universal_newlines=True, bufsize=1)
        # pylint: disable=fixme
        # todo(rli): improve python topology submission workflow
        return ProcessResult(process)
    try:
        # loading topology from Topology's subclass (no main method)
        # to support specifying the name of topology
        Log.debug("args: %s", args)
        if args is not None and isinstance(args, (list, tuple)) and len(args) > 0:
            opts.set_config('cmdline.topology.name', args[0])
        os.environ["HERON_OPTIONS"] = opts.get_heron_config()
        Log.debug("Heron options: {%s}", os.environ["HERON_OPTIONS"])
        pex_loader.load_pex(topology_pex)
        topology_class = pex_loader.import_and_get_class(topology_pex, topology_class_name)
        topology_class.write()
        return SimpleResult(Status.Ok)
    except Exception as ex:
        Log.debug(traceback.format_exc())
        err_context = f"Topology {topology_class_name} failed to be loaded from the given pex: {ex}"
        return SimpleResult(Status.HeronError, err_context)
    # NOTE(review): unreachable — both the try and except paths return above.
    return None
# pylint: disable=superfluous-parens
def heron_cpp(topology_binary, args=None):
    """
    Execute a C++ topology binary as a subprocess.

    :param topology_binary: path to the native topology executable
    :param args: optional extra command-line arguments
    :return: ProcessResult wrapping the spawned process
    """
    Log.debug("Executing %s", topology_binary)
    heron_env = os.environ.copy()
    heron_env['HERON_OPTIONS'] = opts.get_heron_config()
    cmd = [topology_binary]
    if args is not None:
        cmd.extend(args)
    Log.debug("Invoking binary using command: ``%s''", ' '.join(cmd))
    Log.debug('Heron options: {%s}', str(heron_env['HERON_OPTIONS']))
    # Echo the command on stdout as user-visible feedback. Fixed: the message
    # previously said "Invoking class" even though this path runs a binary.
    print(f"""Invoking binary using command: ``{' '.join(cmd)}''""")
    print(f"Heron options: {str(heron_env['HERON_OPTIONS'])}")
    # invoke the command with subprocess and print error message, if any
    # pylint: disable=consider-using-with
    proc = subprocess.Popen(cmd, env=heron_env, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE, universal_newlines=True, bufsize=1)
    return ProcessResult(proc)
| 38.675978 | 99 | 0.707497 |
import contextlib
import os
import subprocess
import shlex
import tarfile
import tempfile
import traceback
from heron.common.src.python.utils.log import Log
from heron.tools.cli.src.python.result import SimpleResult, ProcessResult, Status
from heron.common.src.python import pex_loader
from heron.tools.cli.src.python import opts
from heron.tools.cli.src.python import jars
from heron.tools.common.src.python.utils import config
eronError, err_context)
return None
# pylint: disable=superfluous-parens
def heron_cpp(topology_binary, args=None):
Log.debug("Executing %s", topology_binary)
heron_env = os.environ.copy()
heron_env['HERON_OPTIONS'] = opts.get_heron_config()
cmd = [topology_binary]
if args is not None:
cmd.extend(args)
Log.debug("Invoking binary using command: ``%s''", ' '.join(cmd))
Log.debug('Heron options: {%s}', str(heron_env['HERON_OPTIONS']))
print(f"""Invoking class using command: ``{' '.join(cmd)}''""")
print(f"Heron options: {str(heron_env['HERON_OPTIONS'])}")
# invoke the command with subprocess and print error message, if any
# pylint: disable=consider-using-with
proc = subprocess.Popen(cmd, env=heron_env, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, universal_newlines=True, bufsize=1)
return ProcessResult(proc)
| true | true |
f71b0f0cae9bc62f10f26544cdf87a9d56fde71e | 85,110 | py | Python | src/full_node/full_node.py | akubera/chia-blockchain | 91f038e2193755e2a6ca22e2160e2c8f547c23fe | [
"Apache-2.0"
] | null | null | null | src/full_node/full_node.py | akubera/chia-blockchain | 91f038e2193755e2a6ca22e2160e2c8f547c23fe | [
"Apache-2.0"
] | null | null | null | src/full_node/full_node.py | akubera/chia-blockchain | 91f038e2193755e2a6ca22e2160e2c8f547c23fe | [
"Apache-2.0"
] | null | null | null | import asyncio
import dataclasses
import logging
import random
import time
import traceback
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
import aiosqlite
from blspy import AugSchemeMPL
import src.server.ws_connection as ws # lgtm [py/import-and-import-from]
from src.consensus.block_creation import unfinished_block_to_full_block
from src.consensus.block_record import BlockRecord
from src.consensus.blockchain import Blockchain, ReceiveBlockResult
from src.consensus.constants import ConsensusConstants
from src.consensus.difficulty_adjustment import get_next_sub_slot_iters_and_difficulty
from src.consensus.make_sub_epoch_summary import next_sub_epoch_summary
from src.consensus.multiprocess_validation import PreValidationResult
from src.consensus.network_type import NetworkType
from src.consensus.pot_iterations import calculate_sp_iters
from src.full_node.block_store import BlockStore
from src.full_node.coin_store import CoinStore
from src.full_node.full_node_store import FullNodeStore
from src.full_node.mempool_manager import MempoolManager
from src.full_node.signage_point import SignagePoint
from src.full_node.sync_store import SyncStore
from src.full_node.weight_proof import WeightProofHandler
from src.protocols import farmer_protocol, full_node_protocol, timelord_protocol, wallet_protocol
from src.protocols.full_node_protocol import RejectBlocks, RequestBlocks, RespondBlock, RespondBlocks
from src.protocols.protocol_message_types import ProtocolMessageTypes
from src.server.node_discovery import FullNodePeers
from src.server.outbound_message import Message, NodeType, make_msg
from src.server.server import ChiaServer
from src.types.blockchain_format.classgroup import ClassgroupElement
from src.types.blockchain_format.pool_target import PoolTarget
from src.types.blockchain_format.sized_bytes import bytes32
from src.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from src.types.blockchain_format.vdf import CompressibleVDFField, VDFInfo, VDFProof
from src.types.end_of_slot_bundle import EndOfSubSlotBundle
from src.types.full_block import FullBlock
from src.types.header_block import HeaderBlock
from src.types.mempool_inclusion_status import MempoolInclusionStatus
from src.types.spend_bundle import SpendBundle
from src.types.unfinished_block import UnfinishedBlock
from src.util.errors import ConsensusError, Err
from src.util.genesis_wait import wait_for_genesis_challenge
from src.util.ints import uint8, uint32, uint64, uint128
from src.util.path import mkdir, path_from_root
class FullNode:
    """Chia full node: maintains the blockchain, mempool, and peer network.

    Declared attributes are populated in __init__ / regular_start, not here.
    """

    block_store: BlockStore
    full_node_store: FullNodeStore
    full_node_peers: Optional[FullNodePeers]
    sync_store: Any
    coin_store: CoinStore
    mempool_manager: MempoolManager
    # Shared sqlite connection backing block_store and coin_store.
    connection: aiosqlite.Connection
    # Background task running _sync(); created on demand in new_peak.
    _sync_task: Optional[asyncio.Task]
    blockchain: Blockchain
    config: Dict
    server: Any
    log: logging.Logger
    constants: ConsensusConstants
    # Set to True by _close() to stop long-running loops.
    _shut_down: bool
    root_path: Path
    state_changed_callback: Optional[Callable]
    timelord_lock: asyncio.Lock
    # True once regular_start has finished loading stores and the blockchain.
    initialized: bool
    def __init__(
        self,
        config: Dict,
        root_path: Path,
        consensus_constants: ConsensusConstants,
        name: Optional[str] = None,
    ):
        """Set up in-memory state and resolve the database path.

        Heavy initialization (DB connection, blockchain load) is deferred to
        _start / regular_start.

        :param config: full node section of the configuration
        :param root_path: chia root directory
        :param consensus_constants: consensus parameters for the network
        :param name: optional logger name (defaults to this module's name)
        """
        self.initialized = False
        self.root_path = root_path
        self.config = config
        self.server = None
        self._shut_down = False  # Set to true to close all infinite loops
        self.constants = consensus_constants
        self.pow_creation: Dict[uint32, asyncio.Event] = {}
        self.state_changed_callback: Optional[Callable] = None
        self.full_node_peers = None
        self.sync_store = None

        if name:
            self.log = logging.getLogger(name)
        else:
            self.log = logging.getLogger(__name__)

        # The configured path contains a "CHALLENGE" placeholder that is
        # substituted with the selected network name.
        db_path_replaced: str = config["database_path"].replace("CHALLENGE", config["selected_network"])
        self.db_path = path_from_root(root_path, db_path_replaced)
        mkdir(self.db_path.parent)
    def _set_state_changed_callback(self, callback: Callable):
        # Register the callback that _state_changed invokes on state transitions.
        self.state_changed_callback = callback
    async def regular_start(self):
        """Open the database, create all stores, and load the blockchain.

        Sets self.initialized = True at the end; on a non-empty chain also
        re-runs peak post-processing and optionally starts the background task
        that broadcasts uncompact blocks.
        """
        self.log.info("regular_start")
        self.connection = await aiosqlite.connect(self.db_path)
        self.block_store = await BlockStore.create(self.connection)
        self.full_node_store = await FullNodeStore.create(self.constants)
        self.sync_store = await SyncStore.create()
        self.coin_store = await CoinStore.create(self.connection)
        self.log.info("Initializing blockchain from disk")
        start_time = time.time()
        self.blockchain = await Blockchain.create(self.coin_store, self.block_store, self.constants)
        self.mempool_manager = MempoolManager(self.coin_store, self.constants)
        self.weight_proof_handler = WeightProofHandler(self.constants, self.blockchain)
        self._sync_task = None
        time_taken = time.time() - start_time
        if self.blockchain.get_peak() is None:
            self.log.info(f"Initialized with empty blockchain time taken: {int(time_taken)}s")
        else:
            self.log.info(
                f"Blockchain initialized to peak {self.blockchain.get_peak().header_hash} height"
                f" {self.blockchain.get_peak().height}, "
                f"time taken: {int(time_taken)}s"
            )
            # Seed the mempool with the loaded peak.
            pending_tx = await self.mempool_manager.new_peak(self.blockchain.get_peak())
            assert len(pending_tx) == 0  # no pending transactions when starting up

        peak: Optional[BlockRecord] = self.blockchain.get_peak()
        self.uncompact_task = None
        if peak is not None:
            full_peak = await self.blockchain.get_full_peak()
            await self.peak_post_processing(full_peak, peak, max(peak.height - 1, 0), None)
        if self.config["send_uncompact_interval"] != 0:
            assert self.config["target_uncompact_proofs"] != 0
            self.uncompact_task = asyncio.create_task(
                self.broadcast_uncompact_blocks(
                    self.config["send_uncompact_interval"],
                    self.config["target_uncompact_proofs"],
                )
            )
        self.initialized = True
async def delayed_start(self):
self.log.info("delayed_start")
config, constants = await wait_for_genesis_challenge(self.root_path, self.constants, "full_node")
self.config = config
self.constants = constants
await self.regular_start()
async def _start(self):
self.timelord_lock = asyncio.Lock()
# create the store (db) and full node instance
if self.constants.GENESIS_CHALLENGE is not None:
await self.regular_start()
else:
asyncio.create_task(self.delayed_start())
def set_server(self, server: ChiaServer):
self.server = server
try:
self.full_node_peers = FullNodePeers(
self.server,
self.root_path,
self.config["target_peer_count"] - self.config["target_outbound_peer_count"],
self.config["target_outbound_peer_count"],
self.config["peer_db_path"],
self.config["introducer_peer"],
self.config["peer_connect_interval"],
self.log,
)
asyncio.create_task(self.full_node_peers.start())
except Exception as e:
error_stack = traceback.format_exc()
self.log.error(f"Exception: {e}")
self.log.error(f"Exception in peer discovery: {e}")
self.log.error(f"Exception Stack: {error_stack}")
def _state_changed(self, change: str):
if self.state_changed_callback is not None:
self.state_changed_callback(change)
    async def short_sync_batch(self, peer: ws.WSChiaConnection, start_height: uint32, target_height: uint32) -> bool:
        """
        Tries to sync to a chain which is not too far in the future, by downloading batches of blocks. If the first
        block that we download is not connected to our chain, we return False and do an expensive long sync instead.
        Long sync is not preferred because it requires downloading and validating a weight proof.

        Args:
            peer: peer to sync from
            start_height: height that we should start downloading at. (Our peak is higher)
            target_height: target to sync to

        Returns:
            False if the fork point was not found, and we need to do a long sync. True otherwise.
        """
        # Don't trigger multiple batch syncs to the same peer
        if (
            peer.peer_node_id in self.sync_store.backtrack_syncing
            and self.sync_store.backtrack_syncing[peer.peer_node_id] > 0
        ):
            return True  # Don't batch sync, we are already in progress of a backtrack sync
        if peer.peer_node_id in self.sync_store.batch_syncing:
            return True  # Don't trigger a long sync
        self.sync_store.batch_syncing.add(peer.peer_node_id)

        self.log.info(f"Starting batch short sync from {start_height} to height {target_height}")
        if start_height > 0:
            # Probe the first block: if its parent is unknown to us, the fork
            # point is deeper than start_height and a long sync is required.
            first = await peer.request_block(full_node_protocol.RequestBlock(uint32(start_height), False))
            if first is None or not isinstance(first, full_node_protocol.RespondBlock):
                self.sync_store.batch_syncing.remove(peer.peer_node_id)
                raise ValueError(f"Error short batch syncing, could not fetch block at height {start_height}")
            if not self.blockchain.contains_block(first.block.prev_header_hash):
                self.log.info("Batch syncing stopped, this is a deep chain")
                self.sync_store.batch_syncing.remove(peer.peer_node_id)
                # First sb not connected to our blockchain, do a long sync instead
                return False

        batch_size = self.constants.MAX_BLOCK_COUNT_PER_REQUESTS
        try:
            # Fetch and validate blocks in windows of batch_size; validation and
            # peak updates happen under the blockchain lock.
            for height in range(start_height, target_height, batch_size):
                end_height = min(target_height, height + batch_size)
                request = RequestBlocks(uint32(height), uint32(end_height), True)
                response = await peer.request_blocks(request)
                if not response:
                    raise ValueError(f"Error short batch syncing, invalid/no response for {height}-{end_height}")
                async with self.blockchain.lock:
                    success, advanced_peak, fork_height = await self.receive_block_batch(response.blocks, peer, None)
                    if not success:
                        raise ValueError(f"Error short batch syncing, failed to validate blocks {height}-{end_height}")
                    if advanced_peak:
                        peak = self.blockchain.get_peak()
                        peak_fb: Optional[FullBlock] = await self.blockchain.get_full_peak()
                        assert peak is not None and peak_fb is not None and fork_height is not None
                        await self.peak_post_processing(peak_fb, peak, fork_height, peer)
                self.log.info(f"Added blocks {height}-{end_height}")
        except Exception:
            # Always clear the in-progress marker, then re-raise.
            self.sync_store.batch_syncing.remove(peer.peer_node_id)
            raise
        self.sync_store.batch_syncing.remove(peer.peer_node_id)
        return True
    async def short_sync_backtrack(
        self, peer: ws.WSChiaConnection, peak_height: uint32, target_height: uint32, target_unf_hash: bytes32
    ):
        """
        Performs a backtrack sync, where blocks are downloaded one at a time from newest to oldest. If we do not
        find the fork point 5 deeper than our peak, we return False and do a long sync instead.

        Args:
            peer: peer to sync from
            peak_height: height of our peak
            target_height: target height
            target_unf_hash: partial hash of the unfinished block of the target

        Returns:
            True iff we found the fork point, and we do not need to long sync.
        """
        try:
            # Track concurrent backtrack syncs per peer (also consulted by
            # short_sync_batch to avoid overlapping syncs).
            if peer.peer_node_id not in self.sync_store.backtrack_syncing:
                self.sync_store.backtrack_syncing[peer.peer_node_id] = 0
            self.sync_store.backtrack_syncing[peer.peer_node_id] += 1

            unfinished_block: Optional[UnfinishedBlock] = self.full_node_store.get_unfinished_block(target_unf_hash)
            curr_height: int = target_height
            found_fork_point = False
            responses = []
            # Walk backwards from the target until we hit a block whose parent
            # we already have, but at most 5 blocks below our own peak.
            while curr_height > peak_height - 5:
                # If we already have the unfinished block, don't fetch the transactions. In the normal case, we will
                # already have the unfinished block, from when it was broadcast, so we just need to download the header,
                # but not the transactions
                fetch_tx: bool = unfinished_block is None or curr_height != target_height
                curr = await peer.request_block(full_node_protocol.RequestBlock(uint32(curr_height), fetch_tx))
                if curr is None:
                    raise ValueError(f"Failed to fetch block {curr_height} from {peer.get_peer_info()}, timed out")
                # NOTE(review): the `curr is None` below is redundant — it was
                # already handled above; only the isinstance check matters here.
                if curr is None or not isinstance(curr, full_node_protocol.RespondBlock):
                    raise ValueError(
                        f"Failed to fetch block {curr_height} from {peer.get_peer_info()}, wrong type {type(curr)}"
                    )
                responses.append(curr)
                if self.blockchain.contains_block(curr.block.prev_header_hash) or curr_height == 0:
                    found_fork_point = True
                    break
                curr_height -= 1
            if found_fork_point:
                # Apply the downloaded blocks oldest-first.
                for response in reversed(responses):
                    await self.respond_block(response)
        except Exception as e:
            self.sync_store.backtrack_syncing[peer.peer_node_id] -= 1
            raise e
        self.sync_store.backtrack_syncing[peer.peer_node_id] -= 1
        return found_fork_point
    async def new_peak(self, request: full_node_protocol.NewPeak, peer: ws.WSChiaConnection):
        """
        We have received a notification of a new peak from a peer. This happens either when we have just connected,
        or when the peer has updated their peak.

        Args:
            request: information about the new peak
            peer: peer that sent the message

        """
        # Store this peak/peer combination in case we want to sync to it, and to keep track of peers
        self.sync_store.peer_has_block(request.header_hash, peer.peer_node_id, request.weight, request.height, True)

        if self.blockchain.contains_block(request.header_hash):
            return None

        # Not interested in less heavy peaks
        peak: Optional[BlockRecord] = self.blockchain.get_peak()
        curr_peak_height = uint32(0) if peak is None else peak.height
        if peak is not None and peak.weight > request.weight:
            return None

        if self.sync_store.get_sync_mode():
            # If peer connects while we are syncing, check if they have the block we are syncing towards
            peak_sync_hash = self.sync_store.get_sync_target_hash()
            peak_sync_height = self.sync_store.get_sync_target_height()
            if peak_sync_hash is not None and request.header_hash != peak_sync_hash and peak_sync_height is not None:
                peak_peers: Set[bytes32] = self.sync_store.get_peers_that_have_peak([peak_sync_hash])
                # Don't ask if we already know this peer has the peak
                if peer.peer_node_id not in peak_peers:
                    target_peak_response: Optional[RespondBlock] = await peer.request_block(
                        full_node_protocol.RequestBlock(uint32(peak_sync_height), False), timeout=10
                    )
                    if target_peak_response is not None and isinstance(target_peak_response, RespondBlock):
                        self.sync_store.peer_has_block(
                            peak_sync_hash,
                            peer.peer_node_id,
                            target_peak_response.block.weight,
                            peak_sync_height,
                            False,
                        )
        else:
            # Not currently syncing: pick the cheapest sync strategy that fits
            # how far behind we are.
            if request.height <= curr_peak_height + self.config["short_sync_blocks_behind_threshold"]:
                # This is the normal case of receiving the next block
                if await self.short_sync_backtrack(
                    peer, curr_peak_height, request.height, request.unfinished_reward_block_hash
                ):
                    return

            if request.height < self.constants.WEIGHT_PROOF_RECENT_BLOCKS:
                # This is the case of syncing up more than a few blocks, at the start of the chain
                # TODO(almog): fix weight proofs so they work at the beginning as well
                self.log.debug("Doing batch sync, no backup")
                await self.short_sync_batch(peer, uint32(0), request.height)
                return

            if request.height < curr_peak_height + self.config["sync_blocks_behind_threshold"]:
                # This case of being behind but not by so much
                if await self.short_sync_batch(peer, uint32(max(curr_peak_height - 6, 0)), request.height):
                    return

            # This is the either the case where we were not able to sync successfully (for example, due to the fork
            # point being in the past), or we are very far behind. Performs a long sync.
            self._sync_task = asyncio.create_task(self._sync())
    async def send_peak_to_timelords(
        self, peak_block: Optional[FullBlock] = None, peer: Optional[ws.WSChiaConnection] = None
    ):
        """
        Sends current peak to timelords.

        :param peak_block: block to announce; defaults to the current full peak
        :param peer: if given, send only to this timelord; otherwise broadcast
            to all connected timelords
        """
        if peak_block is None:
            peak_block = await self.blockchain.get_full_peak()
        if peak_block is not None:
            peak = self.blockchain.block_record(peak_block.header_hash)
            difficulty = self.blockchain.get_next_difficulty(peak.header_hash, False)
            ses: Optional[SubEpochSummary] = next_sub_epoch_summary(
                self.constants,
                self.blockchain,
                peak.required_iters,
                peak_block,
                True,
            )
            recent_rc = self.blockchain.get_recent_reward_challenges()

            # Walk back to the most recent challenge block or sub-slot start to
            # compute the iteration count of the last challenge/EOS boundary.
            curr = peak
            while not curr.is_challenge_block(self.constants) and not curr.first_in_sub_slot:
                curr = self.blockchain.block_record(curr.prev_hash)

            if curr.is_challenge_block(self.constants):
                last_csb_or_eos = curr.total_iters
            else:
                last_csb_or_eos = curr.ip_sub_slot_total_iters(self.constants)

            # Determine whether we are past a sub-epoch boundary whose summary
            # has not yet been included in any block.
            curr = peak
            passed_ses_height_but_not_yet_included = True
            while (curr.height % self.constants.SUB_EPOCH_BLOCKS) != 0:
                if curr.sub_epoch_summary_included:
                    passed_ses_height_but_not_yet_included = False
                curr = self.blockchain.block_record(curr.prev_hash)
            if curr.sub_epoch_summary_included or curr.height == 0:
                passed_ses_height_but_not_yet_included = False

            timelord_new_peak: timelord_protocol.NewPeakTimelord = timelord_protocol.NewPeakTimelord(
                peak_block.reward_chain_block,
                difficulty,
                peak.deficit,
                peak.sub_slot_iters,
                ses,
                recent_rc,
                last_csb_or_eos,
                passed_ses_height_but_not_yet_included,
            )

            msg = make_msg(ProtocolMessageTypes.new_peak_timelord, timelord_new_peak)
            if peer is None:
                await self.server.send_to_all([msg], NodeType.TIMELORD)
            else:
                await peer.new_peak_timelord(timelord_new_peak)
async def synced(self) -> bool:
curr: Optional[BlockRecord] = self.blockchain.get_peak()
if curr is None:
return False
while curr is not None and not curr.is_transaction_block:
curr = self.blockchain.try_block_record(curr.prev_hash)
now = time.time()
if (
curr is None
or curr.timestamp is None
or curr.timestamp < uint64(int(now - 60 * 7))
or self.sync_store.get_sync_mode()
):
return False
else:
return True
    async def on_connect(self, connection: ws.WSChiaConnection):
        """
        Whenever we connect to another node / wallet, send them our current heads. Also send heads to farmers
        and challenges to timelords.
        """
        self._state_changed("add_connection")
        self._state_changed("sync_mode")
        if self.full_node_peers is not None:
            asyncio.create_task(self.full_node_peers.on_connect(connection))

        # Stores/blockchain are not loaded yet (delayed start); nothing to send.
        if self.initialized is False:
            return

        if connection.connection_type is NodeType.FULL_NODE:
            # Send filter to node and request mempool items that are not in it (Only if we are currently synced)
            synced = await self.synced()
            peak_height = self.blockchain.get_peak_height()
            if synced and peak_height is not None and peak_height > self.constants.INITIAL_FREEZE_PERIOD:
                my_filter = self.mempool_manager.get_filter()
                mempool_request = full_node_protocol.RequestMempoolTransactions(my_filter)

                msg = make_msg(ProtocolMessageTypes.request_mempool_transactions, mempool_request)
                await connection.send_message(msg)

        peak_full: Optional[FullBlock] = await self.blockchain.get_full_peak()

        if peak_full is not None:
            peak: BlockRecord = self.blockchain.block_record(peak_full.header_hash)
            if connection.connection_type is NodeType.FULL_NODE:
                request_node = full_node_protocol.NewPeak(
                    peak.header_hash,
                    peak.height,
                    peak.weight,
                    peak.height,
                    peak_full.reward_chain_block.get_unfinished().get_hash(),
                )
                await connection.send_message(make_msg(ProtocolMessageTypes.new_peak, request_node))

            elif connection.connection_type is NodeType.WALLET:
                # If connected to a wallet, send the Peak
                request_wallet = wallet_protocol.NewPeakWallet(
                    peak.header_hash,
                    peak.height,
                    peak.weight,
                    peak.height,
                )
                await connection.send_message(make_msg(ProtocolMessageTypes.new_peak_wallet, request_wallet))

            elif connection.connection_type is NodeType.TIMELORD:
                await self.send_peak_to_timelords()
def on_disconnect(self, connection: ws.WSChiaConnection):
self.log.info(f"peer disconnected {connection.get_peer_info()}")
self._state_changed("close_connection")
self._state_changed("sync_mode")
if self.sync_store is not None:
self.sync_store.peer_disconnected(connection.peer_node_id)
def _num_needed_peers(self) -> int:
assert self.server is not None
assert self.server.all_connections is not None
diff = self.config["target_peer_count"] - len(self.server.all_connections)
return diff if diff >= 0 else 0
def _close(self):
self._shut_down = True
if self.blockchain is not None:
self.blockchain.shut_down()
if self.mempool_manager is not None:
self.mempool_manager.shut_down()
if self.full_node_peers is not None:
asyncio.create_task(self.full_node_peers.close())
if self.uncompact_task is not None:
self.uncompact_task.cancel()
    async def _await_closed(self):
        """Finish shutdown: cancel any in-flight sync task and close the DB."""
        try:
            if self._sync_task is not None:
                self._sync_task.cancel()
        except asyncio.TimeoutError:
            # NOTE(review): Task.cancel() does not raise TimeoutError; this
            # handler looks vestigial — confirm before removing.
            pass
        await self.connection.close()
    async def _sync(self):
        """
        Performs a full sync of the blockchain up to the peak.
            - Wait a few seconds for peers to send us their peaks
            - Select the heaviest peak, and request a weight proof from a peer with that peak
            - Validate the weight proof, and disconnect from the peer if invalid
            - Find the fork point to see where to start downloading blocks
            - Download blocks in batch (and in parallel) and verify them one at a time
            - Disconnect peers that provide invalid blocks or don't have the blocks
        """
        # Ensure we are only syncing once and not double calling this method
        if self.sync_store.get_sync_mode():
            return

        self.sync_store.set_sync_mode(True)
        self._state_changed("sync_mode")
        try:
            self.log.info("Starting to perform sync.")
            self.log.info("Waiting to receive peaks from peers.")
            # Wait until we have 3 peaks or up to a max of 30 seconds
            # NOTE(review): there is no break once >=3 peers have a peak, so
            # the loop runs all 300 iterations (merely skipping the sleep) —
            # confirm whether an early break was intended.
            peaks = []
            for i in range(300):
                peaks = [tup[0] for tup in self.sync_store.get_peak_of_each_peer().values()]
                if len(self.sync_store.get_peers_that_have_peak(peaks)) < 3:
                    if self._shut_down:
                        return
                    await asyncio.sleep(0.1)

            self.log.info(f"Collected a total of {len(peaks)} peaks.")
            self.sync_peers_handler = None

            # Based on responses from peers about the current peaks, see which peak is the heaviest
            # (similar to longest chain rule).
            target_peak = self.sync_store.get_heaviest_peak()

            if target_peak is None:
                raise RuntimeError("Not performing sync, no peaks collected")
            heaviest_peak_hash, heaviest_peak_height, heaviest_peak_weight = target_peak
            self.sync_store.set_peak_target(heaviest_peak_hash, heaviest_peak_height)

            self.log.info(f"Selected peak {heaviest_peak_height}, {heaviest_peak_hash}")
            # Check which peers are updated to this height

            # Ask every connected full node (in parallel) for the block at the
            # target height, and record which peers actually have it.
            peers = []
            coroutines = []
            for peer in self.server.all_connections.values():
                if peer.connection_type == NodeType.FULL_NODE:
                    peers.append(peer.peer_node_id)
                    coroutines.append(
                        peer.request_block(
                            full_node_protocol.RequestBlock(uint32(heaviest_peak_height), True), timeout=10
                        )
                    )
            for i, target_peak_response in enumerate(await asyncio.gather(*coroutines)):
                if target_peak_response is not None and isinstance(target_peak_response, RespondBlock):
                    self.sync_store.peer_has_block(
                        heaviest_peak_hash, peers[i], heaviest_peak_weight, heaviest_peak_height, False
                    )
            # TODO: disconnect from peer which gave us the heaviest_peak, if nobody has the peak

            peer_ids: Set[bytes32] = self.sync_store.get_peers_that_have_peak([heaviest_peak_hash])
            peers_with_peak: List = [c for c in self.server.all_connections.values() if c.peer_node_id in peer_ids]

            # Request weight proof from a random peer
            self.log.info(f"Total of {len(peers_with_peak)} peers with peak {heaviest_peak_height}")
            weight_proof_peer = random.choice(peers_with_peak)
            self.log.info(
                f"Requesting weight proof from peer {weight_proof_peer.peer_host} up to height"
                f" {heaviest_peak_height}"
            )

            if self.blockchain.get_peak() is not None and heaviest_peak_weight <= self.blockchain.get_peak().weight:
                raise ValueError("Not performing sync, already caught up.")

            request = full_node_protocol.RequestProofOfWeight(heaviest_peak_height, heaviest_peak_hash)
            response = await weight_proof_peer.request_proof_of_weight(request, timeout=180)

            # Disconnect from this peer, because they have not behaved properly
            if response is None or not isinstance(response, full_node_protocol.RespondProofOfWeight):
                await weight_proof_peer.close(600)
                raise RuntimeError(f"Weight proof did not arrive in time from peer: {weight_proof_peer.peer_host}")
            if response.wp.recent_chain_data[-1].reward_chain_block.height != heaviest_peak_height:
                await weight_proof_peer.close(600)
                raise RuntimeError(f"Weight proof had the wrong height: {weight_proof_peer.peer_host}")
            if response.wp.recent_chain_data[-1].reward_chain_block.weight != heaviest_peak_weight:
                await weight_proof_peer.close(600)
                raise RuntimeError(f"Weight proof had the wrong weight: {weight_proof_peer.peer_host}")

            try:
                validated, fork_point = await self.weight_proof_handler.validate_weight_proof(response.wp)
            except Exception as e:
                await weight_proof_peer.close(600)
                raise ValueError(f"Weight proof validation threw an error {e}")

            if not validated:
                await weight_proof_peer.close(600)
                raise ValueError("Weight proof validation failed")

            self.log.info(f"Re-checked peers: total of {len(peers_with_peak)} peers with peak {heaviest_peak_height}")
            # Ensures that the fork point does not change
            async with self.blockchain.lock:
                await self.blockchain.warmup(fork_point)
                await self.sync_from_fork_point(fork_point, heaviest_peak_height, heaviest_peak_hash)
        except asyncio.CancelledError:
            self.log.warning("Syncing failed, CancelledError")
        except Exception as e:
            tb = traceback.format_exc()
            self.log.error(f"Error with syncing: {type(e)}{tb}")
        finally:
            # Always leave sync mode and run post-sync cleanup, unless the
            # whole node is shutting down.
            if self._shut_down:
                return
            await self._finish_sync()
async def sync_from_fork_point(self, fork_point_height: int, target_peak_sb_height: uint32, peak_hash: bytes32):
    """
    Long-sync driver: download blocks in batches from the fork point up to the target
    peak, trying each peer that claims to have the target peak until a batch succeeds.

    Args:
        fork_point_height: height where our chain diverges from the target chain.
        target_peak_sb_height: height of the peak we are syncing towards.
        peak_hash: header hash of that peak, used to select download peers.

    Raises:
        RuntimeError: if no currently-connected peer claims to have ``peak_hash``.
    """
    self.log.info(f"Start syncing from fork point at {fork_point_height} up to {target_peak_sb_height}")
    peer_ids: Set[bytes32] = self.sync_store.get_peers_that_have_peak([peak_hash])
    peers_with_peak: List = [c for c in self.server.all_connections.values() if c.peer_node_id in peer_ids]
    if len(peers_with_peak) == 0:
        raise RuntimeError(f"Not syncing, no peers with header_hash {peak_hash} ")
    # Once our peak has advanced, we stop passing an explicit fork point to
    # receive_block_batch (see the call below).
    advanced_peak = False
    batch_size = self.constants.MAX_BLOCK_COUNT_PER_REQUESTS
    for i in range(fork_point_height, target_peak_sb_height, batch_size):
        start_height = i
        end_height = min(target_peak_sb_height, start_height + batch_size)
        request = RequestBlocks(uint32(start_height), uint32(end_height), True)
        self.log.info(f"Requesting blocks: {start_height} to {end_height}")
        batch_added = False
        to_remove = []
        # Try peers one at a time until one serves a batch that validates.
        for peer in peers_with_peak:
            if peer.closed:
                to_remove.append(peer)
                continue
            response = await peer.request_blocks(request, timeout=15)
            if response is None:
                # Timed out or misbehaved: drop this peer and try the next one.
                await peer.close()
                to_remove.append(peer)
                continue
            if isinstance(response, RejectBlocks):
                to_remove.append(peer)
                continue
            elif isinstance(response, RespondBlocks):
                success, advanced_peak, _ = await self.receive_block_batch(
                    response.blocks, peer, None if advanced_peak else uint32(fork_point_height)
                )
                if success is False:
                    await peer.close()
                    continue
                else:
                    batch_added = True
                break
        # Notify connected wallets about our (possibly advanced) peak after each batch.
        peak = self.blockchain.get_peak()
        assert peak is not None
        msg = make_msg(
            ProtocolMessageTypes.new_peak_wallet,
            wallet_protocol.NewPeakWallet(
                peak.header_hash,
                peak.height,
                peak.weight,
                uint32(max(peak.height - 1, uint32(0))),
            ),
        )
        await self.server.send_to_all([msg], NodeType.WALLET)
        for peer in to_remove:
            peers_with_peak.remove(peer)
        # Refresh the candidate peer list if connections changed while downloading.
        if self.sync_store.peers_changed.is_set():
            peer_ids = self.sync_store.get_peers_that_have_peak([peak_hash])
            peers_with_peak = [c for c in self.server.all_connections.values() if c.peer_node_id in peer_ids]
            self.log.info(f"Number of peers we are syncing from: {len(peers_with_peak)}")
            self.sync_store.peers_changed.clear()
        if batch_added is False:
            self.log.info(f"Failed to fetch blocks {start_height} to {end_height} from peers: {peers_with_peak}")
            break
        else:
            self.log.info(f"Added blocks {start_height} to {end_height}")
            # Trim in-memory block records to bound memory use during a long sync.
            self.blockchain.clean_block_record(
                min(
                    end_height - self.constants.BLOCKS_CACHE_SIZE,
                    peak.height - self.constants.BLOCKS_CACHE_SIZE,
                )
            )
async def receive_block_batch(
    self, all_blocks: List[FullBlock], peer: ws.WSChiaConnection, fork_point: Optional[uint32]
) -> Tuple[bool, bool, Optional[uint32]]:
    """
    Validate and add a batch of contiguous blocks received from ``peer``.

    Returns (success, advanced_peak, fork_height):
        success: False if any block failed validation or was disconnected.
        advanced_peak: True if at least one block became the new peak.
        fork_height: fork height from the last receive_block call
            (None only when pre-validation could not run at all).
    """
    advanced_peak = False
    fork_height: Optional[uint32] = uint32(0)
    # Skip the prefix of blocks we already have; only validate the remainder.
    blocks_to_validate: List[FullBlock] = []
    for i, block in enumerate(all_blocks):
        if not self.blockchain.contains_block(block.header_hash):
            blocks_to_validate = all_blocks[i:]
            break
    if len(blocks_to_validate) == 0:
        return True, False, fork_height
    pre_validate_start = time.time()
    # CPU-heavy checks are farmed out to worker processes up front for the whole batch.
    pre_validation_results: Optional[
        List[PreValidationResult]
    ] = await self.blockchain.pre_validate_blocks_multiprocessing(blocks_to_validate)
    self.log.debug(f"Block pre-validation time: {time.time() - pre_validate_start}")
    if pre_validation_results is None:
        return False, False, None
    for i, block in enumerate(blocks_to_validate):
        if pre_validation_results[i].error is not None:
            self.log.error(
                f"Invalid block from peer: {peer.get_peer_info()} {Err(pre_validation_results[i].error)}"
            )
            return False, advanced_peak, fork_height
        assert pre_validation_results[i].required_iters is not None
        # Only pass an explicit fork point until the peak has advanced once;
        # afterwards the blockchain derives it itself.
        (result, error, fork_height,) = await self.blockchain.receive_block(
            block, pre_validation_results[i], None if advanced_peak else fork_point
        )
        if result == ReceiveBlockResult.NEW_PEAK:
            advanced_peak = True
        elif result == ReceiveBlockResult.INVALID_BLOCK or result == ReceiveBlockResult.DISCONNECTED_BLOCK:
            if error is not None:
                self.log.error(f"Error: {error}, Invalid block from peer: {peer.get_peer_info()} ")
            return False, advanced_peak, fork_height
        block_record = self.blockchain.block_record(block.header_hash)
        # Keep weight-proof sub-epoch segments current when crossing a sub-epoch boundary.
        if block_record.sub_epoch_summary_included is not None:
            await self.weight_proof_handler.create_prev_sub_epoch_segments()
    if advanced_peak:
        self._state_changed("new_peak")
    self.log.debug(
        f"Total time for {len(blocks_to_validate)} blocks: {time.time() - pre_validate_start}, "
        f"advanced: {advanced_peak}"
    )
    return True, advanced_peak, fork_height
async def _finish_sync(self):
    """
    Finalize sync by setting sync mode to False, clearing all sync information, and adding any final
    blocks that we have finalized recently.
    """
    self.sync_store.set_sync_mode(False)
    self._state_changed("sync_mode")
    if self.server is None:
        return
    peak: Optional[BlockRecord] = self.blockchain.get_peak()
    async with self.blockchain.lock:
        await self.sync_store.clear_sync_info()
        peak_fb: FullBlock = await self.blockchain.get_full_peak()
        if peak is not None:
            # peak_post_processing must run under self.blockchain.lock (see its docstring).
            await self.peak_post_processing(peak_fb, peak, peak.height - 1, None)
    if peak is not None:
        # Warm the weight-proof cache so proofs can be served to syncing peers quickly.
        await self.weight_proof_handler.get_proof_of_weight(peak.header_hash)
        self._state_changed("block")
def has_valid_pool_sig(self, block: Union[UnfinishedBlock, FullBlock]):
    """
    Check the pool-target signature on a block that claims the genesis pre-farm
    pool target.

    A signature check is only required for blocks that (a) pay to the genesis
    pre-farm pool puzzle hash, (b) are not the genesis block itself, and (c) carry
    a pool public key. Every other block passes this check unconditionally.
    """
    foliage_data = block.foliage.foliage_block_data
    pre_farm_target = PoolTarget(self.constants.GENESIS_PRE_FARM_POOL_PUZZLE_HASH, uint32(0))
    needs_signature_check = (
        foliage_data.pool_target == pre_farm_target
        and block.foliage.prev_block_hash != self.constants.GENESIS_CHALLENGE
        and block.reward_chain_block.proof_of_space.pool_public_key is not None
    )
    if not needs_signature_check:
        return True
    # The pool signature must sign the serialized pool target under the pool key.
    return AugSchemeMPL.verify(
        block.reward_chain_block.proof_of_space.pool_public_key,
        bytes(foliage_data.pool_target),
        foliage_data.pool_signature,
    )
async def peak_post_processing(
    self, block: FullBlock, record: BlockRecord, fork_height: uint32, peer: Optional[ws.WSChiaConnection]
):
    """
    Must be called under self.blockchain.lock. This updates the internal state of the full node with the
    latest peak information. It also notifies peers about the new peak.

    Args:
        block: the full block that became the new peak.
        record: the block record corresponding to ``block``.
        fork_height: height at which this peak forked from the previous chain.
        peer: connection the block arrived on, if any; excluded from the full-node
            re-broadcast to avoid echoing it back.
    """
    difficulty = self.blockchain.get_next_difficulty(record.header_hash, False)
    sub_slot_iters = self.blockchain.get_next_slot_iters(record.header_hash, False)
    self.log.info(
        f"🌱 Updated peak to height {record.height}, weight {record.weight}, "
        f"hh {record.header_hash}, "
        f"forked at {fork_height}, rh: {record.reward_infusion_new_challenge}, "
        f"total iters: {record.total_iters}, "
        f"overflow: {record.overflow}, "
        f"deficit: {record.deficit}, "
        f"difficulty: {difficulty}, "
        f"sub slot iters: {sub_slot_iters}"
    )
    sub_slots = await self.blockchain.get_sp_and_ip_sub_slots(record.header_hash)
    assert sub_slots is not None
    if not self.sync_store.get_sync_mode():
        # In normal operation, bound the in-memory block record cache.
        self.blockchain.clean_block_records()
    added_eos, new_sps, new_ips = self.full_node_store.new_peak(
        record,
        block,
        sub_slots[0],
        sub_slots[1],
        fork_height != block.height - 1 and block.height != 0,
        self.blockchain,
    )
    if sub_slots[1] is None:
        assert record.ip_sub_slot_total_iters(self.constants) == 0
    # Ensure the signage point is also in the store, for consistency
    self.full_node_store.new_signage_point(
        record.signage_point_index,
        self.blockchain,
        record,
        record.sub_slot_iters,
        SignagePoint(
            block.reward_chain_block.challenge_chain_sp_vdf,
            block.challenge_chain_sp_proof,
            block.reward_chain_block.reward_chain_sp_vdf,
            block.reward_chain_sp_proof,
        ),
        skip_vdf_validation=True,
    )
    # Update the mempool (returns successful pending transactions added to the mempool)
    for bundle, result, spend_name in await self.mempool_manager.new_peak(self.blockchain.get_peak()):
        self.log.debug(f"Added transaction to mempool: {spend_name}")
        mempool_item = self.mempool_manager.get_mempool_item(spend_name)
        assert mempool_item is not None
        fees = mempool_item.fee
        assert fees >= 0
        assert result.cost is not None
        new_tx = full_node_protocol.NewTransaction(
            spend_name,
            result.cost,
            uint64(bundle.fees()),
        )
        msg = make_msg(ProtocolMessageTypes.new_transaction, new_tx)
        await self.server.send_to_all([msg], NodeType.FULL_NODE)
    # If there were pending end of slots that happen after this peak, broadcast them if they are added
    if added_eos is not None:
        broadcast = full_node_protocol.NewSignagePointOrEndOfSubSlot(
            added_eos.challenge_chain.challenge_chain_end_of_slot_vdf.challenge,
            added_eos.challenge_chain.get_hash(),
            uint8(0),
            added_eos.reward_chain.end_of_slot_vdf.challenge,
        )
        msg = make_msg(ProtocolMessageTypes.new_signage_point_or_end_of_sub_slot, broadcast)
        await self.server.send_to_all([msg], NodeType.FULL_NODE)
    # TODO: maybe add and broadcast new SP/IPs as well?
    if record.height % 1000 == 0:
        # Occasionally clear the seen list to keep it small
        self.full_node_store.clear_seen_unfinished_blocks()
    if self.sync_store.get_sync_mode() is False:
        await self.send_peak_to_timelords(block)
        # Tell full nodes about the new peak
        msg = make_msg(
            ProtocolMessageTypes.new_peak,
            full_node_protocol.NewPeak(
                record.header_hash,
                record.height,
                record.weight,
                fork_height,
                block.reward_chain_block.get_unfinished().get_hash(),
            ),
        )
        if peer is not None:
            await self.server.send_to_all_except([msg], NodeType.FULL_NODE, peer.peer_node_id)
        else:
            await self.server.send_to_all([msg], NodeType.FULL_NODE)
    # Tell wallets about the new peak
    msg = make_msg(
        ProtocolMessageTypes.new_peak_wallet,
        wallet_protocol.NewPeakWallet(
            record.header_hash,
            record.height,
            record.weight,
            fork_height,
        ),
    )
    await self.server.send_to_all([msg], NodeType.WALLET)
    self._state_changed("new_peak")
async def respond_block(
    self,
    respond_block: full_node_protocol.RespondBlock,
    peer: Optional[ws.WSChiaConnection] = None,
) -> Optional[Message]:
    """
    Receive a full block from a peer full node (or ourselves).

    Validates the block under the blockchain lock and adds it; on a new peak this
    also runs peak post-processing. Returns None in all non-raising cases; raises
    ConsensusError for invalid blocks.
    """
    block: FullBlock = respond_block.block
    if self.sync_store.get_sync_mode():
        # During long sync, blocks are added via the batch path instead.
        return None
    # Adds the block to seen, and check if it's seen before (which means header is in memory)
    header_hash = block.header_hash
    if self.blockchain.contains_block(header_hash):
        return None
    pre_validation_result: Optional[PreValidationResult] = None
    if block.is_transaction_block() and block.transactions_generator is None:
        # This is the case where we already had the unfinished block, and asked for this block without
        # the transactions (since we already had them). Therefore, here we add the transactions.
        unfinished_rh: bytes32 = block.reward_chain_block.get_unfinished().get_hash()
        unf_block: Optional[UnfinishedBlock] = self.full_node_store.get_unfinished_block(unfinished_rh)
        if unf_block is not None and unf_block.transactions_generator is not None:
            pre_validation_result = self.full_node_store.get_unfinished_block_result(unfinished_rh)
            assert pre_validation_result is not None
            block = dataclasses.replace(block, transactions_generator=unf_block.transactions_generator)
    async with self.blockchain.lock:
        # After acquiring the lock, check again, because another asyncio thread might have added it
        if self.blockchain.contains_block(header_hash):
            return None
        validation_start = time.time()
        # Tries to add the block to the blockchain, if we already validated transactions, don't do it again
        pre_validation_results: Optional[
            List[PreValidationResult]
        ] = await self.blockchain.pre_validate_blocks_multiprocessing([block], pre_validation_result is None)
        if pre_validation_results is None:
            raise ValueError(f"Failed to validate block {header_hash} height {block.height}")
        if pre_validation_results[0].error is not None:
            if Err(pre_validation_results[0].error) == Err.INVALID_PREV_BLOCK_HASH:
                # An unknown parent is treated as a disconnected block, not a hard failure.
                added: ReceiveBlockResult = ReceiveBlockResult.DISCONNECTED_BLOCK
                error_code: Optional[Err] = Err.INVALID_PREV_BLOCK_HASH
                fork_height: Optional[uint32] = None
            else:
                raise ValueError(
                    f"Failed to validate block {header_hash} height "
                    f"{block.height}: {Err(pre_validation_results[0].error).name}"
                )
        else:
            # Prefer the cached result from the unfinished block, when we have one.
            result_to_validate = (
                pre_validation_results[0] if pre_validation_result is None else pre_validation_result
            )
            assert result_to_validate.required_iters == pre_validation_results[0].required_iters
            added, error_code, fork_height = await self.blockchain.receive_block(block, result_to_validate, None)
        validation_time = time.time() - validation_start
        if added == ReceiveBlockResult.ALREADY_HAVE_BLOCK:
            return None
        elif added == ReceiveBlockResult.INVALID_BLOCK:
            assert error_code is not None
            self.log.error(f"Block {header_hash} at height {block.height} is invalid with code {error_code}.")
            raise ConsensusError(error_code, header_hash)
        elif added == ReceiveBlockResult.DISCONNECTED_BLOCK:
            self.log.info(f"Disconnected block {header_hash} at height {block.height}")
            return None
        elif added == ReceiveBlockResult.NEW_PEAK:
            # Only propagate blocks which extend the blockchain (becomes one of the heads)
            new_peak: Optional[BlockRecord] = self.blockchain.get_peak()
            assert new_peak is not None and fork_height is not None
            self.log.debug(f"Validation time for peak: {validation_time}")
            await self.peak_post_processing(block, new_peak, fork_height, peer)
        elif added == ReceiveBlockResult.ADDED_AS_ORPHAN:
            self.log.info(
                f"Received orphan block of height {block.height} rh " f"{block.reward_chain_block.get_hash()}"
            )
        else:
            # Should never reach here, all the cases are covered
            raise RuntimeError(f"Invalid result from receive_block {added}")
    # This code path is reached if added == ADDED_AS_ORPHAN or NEW_TIP
    peak = self.blockchain.get_peak()
    assert peak is not None
    # Removes all temporary data for old blocks
    clear_height = uint32(max(0, peak.height - 50))
    self.full_node_store.clear_candidate_blocks_below(clear_height)
    self.full_node_store.clear_unfinished_blocks_below(clear_height)
    if peak.height % 1000 == 0 and not self.sync_store.get_sync_mode():
        await self.sync_store.clear_sync_info()  # Occasionally clear sync peer info
    self._state_changed("block")
    return None
async def respond_unfinished_block(
    self,
    respond_unfinished_block: full_node_protocol.RespondUnfinishedBlock,
    peer: Optional[ws.WSChiaConnection],
    farmed_block: bool = False,
):
    """
    We have received an unfinished block, either created by us, or from another peer.
    We can validate it and if it's a good block, propagate it to other peers and
    timelords.

    Args:
        respond_unfinished_block: wrapper containing the unfinished block.
        peer: sender connection, if any; excluded from the full-node re-broadcast.
        farmed_block: True when we farmed this block ourselves (affects logging only).
    """
    block = respond_unfinished_block.unfinished_block
    if block.prev_header_hash != self.constants.GENESIS_CHALLENGE and not self.blockchain.contains_block(
        block.prev_header_hash
    ):
        # No need to request the parent, since the peer will send it to us anyway, via NewPeak
        self.log.debug("Received a disconnected unfinished block")
        return
    # Adds the unfinished block to seen, and check if it's seen before, to prevent
    # processing it twice. This searches for the exact version of the unfinished block (there can be many different
    # foliages for the same trunk). This is intentional, to prevent DOS attacks.
    # Note that it does not require that this block was successfully processed
    if self.full_node_store.seen_unfinished_block(block.get_hash()):
        return
    block_hash = block.reward_chain_block.get_hash()
    # This searched for the trunk hash (unfinished reward hash). If we have already added a block with the same
    # hash, return
    if self.full_node_store.get_unfinished_block(block_hash) is not None:
        return
    peak: Optional[BlockRecord] = self.blockchain.get_peak()
    if peak is not None:
        if block.total_iters < peak.sp_total_iters(self.constants):
            # This means this unfinished block is pretty far behind, it will not add weight to our chain
            return
    if block.prev_header_hash == self.constants.GENESIS_CHALLENGE:
        prev_b = None
    else:
        prev_b = self.blockchain.block_record(block.prev_header_hash)
    # Count the blocks in sub slot, and check if it's a new epoch
    if len(block.finished_sub_slots) > 0:
        num_blocks_in_ss = 1  # Curr
    else:
        curr = self.blockchain.try_block_record(block.prev_header_hash)
        num_blocks_in_ss = 2  # Curr and prev
        while (curr is not None) and not curr.first_in_sub_slot:
            curr = self.blockchain.try_block_record(curr.prev_hash)
            num_blocks_in_ss += 1
    if num_blocks_in_ss > self.constants.MAX_SUB_SLOT_BLOCKS:
        # TODO: potentially allow overflow blocks here, which count for the next slot
        self.log.warning("Too many blocks added, not adding block")
        return
    async with self.blockchain.lock:
        # TODO: pre-validate VDFs outside of lock
        validate_result = await self.blockchain.validate_unfinished_block(block)
        if validate_result.error is not None:
            raise ConsensusError(Err(validate_result.error))
    assert validate_result.required_iters is not None
    # Perform another check, in case we have already concurrently added the same unfinished block
    if self.full_node_store.get_unfinished_block(block_hash) is not None:
        return
    if block.prev_header_hash == self.constants.GENESIS_CHALLENGE:
        height = uint32(0)
    else:
        height = uint32(self.blockchain.block_record(block.prev_header_hash).height + 1)
    # Sub-epoch summary this block would trigger (if any) is forwarded to timelords.
    ses: Optional[SubEpochSummary] = next_sub_epoch_summary(
        self.constants,
        self.blockchain,
        validate_result.required_iters,
        block,
        True,
    )
    self.full_node_store.add_unfinished_block(height, block, validate_result)
    if farmed_block is True:
        self.log.info(f"🍀 ️Farmed unfinished_block {block_hash}")
    else:
        self.log.info(f"Added unfinished_block {block_hash}, not farmed")
    sub_slot_iters, difficulty = get_next_sub_slot_iters_and_difficulty(
        self.constants,
        len(block.finished_sub_slots) > 0,
        prev_b,
        self.blockchain,
    )
    # Determine the previous reward-chain hash that the timelord infuses on top of.
    if block.reward_chain_block.signage_point_index == 0:
        res = self.full_node_store.get_sub_slot(block.reward_chain_block.pos_ss_cc_challenge_hash)
        if res is None:
            if block.reward_chain_block.pos_ss_cc_challenge_hash == self.constants.GENESIS_CHALLENGE:
                rc_prev = self.constants.GENESIS_CHALLENGE
            else:
                self.log.warning(f"Do not have sub slot {block.reward_chain_block.pos_ss_cc_challenge_hash}")
                return
        else:
            rc_prev = res[0].reward_chain.get_hash()
    else:
        assert block.reward_chain_block.reward_chain_sp_vdf is not None
        rc_prev = block.reward_chain_block.reward_chain_sp_vdf.challenge
    timelord_request = timelord_protocol.NewUnfinishedBlock(
        block.reward_chain_block,
        difficulty,
        sub_slot_iters,
        block.foliage,
        ses,
        rc_prev,
    )
    msg = make_msg(ProtocolMessageTypes.new_unfinished_block, timelord_request)
    await self.server.send_to_all([msg], NodeType.TIMELORD)
    full_node_request = full_node_protocol.NewUnfinishedBlock(block.reward_chain_block.get_hash())
    msg = make_msg(ProtocolMessageTypes.new_unfinished_block, full_node_request)
    if peer is not None:
        await self.server.send_to_all_except([msg], NodeType.FULL_NODE, peer.peer_node_id)
    else:
        await self.server.send_to_all([msg], NodeType.FULL_NODE)
    self._state_changed("unfinished_block")
async def new_infusion_point_vdf(
    self, request: timelord_protocol.NewInfusionPointVDF, timelord_peer: Optional[ws.WSChiaConnection] = None
) -> Optional[Message]:
    """
    Combine a timelord's infusion-point VDFs with a stored unfinished block to build
    a finished FullBlock and submit it via respond_block. Returns None in all cases.
    """
    # Lookup unfinished blocks
    unfinished_block: Optional[UnfinishedBlock] = self.full_node_store.get_unfinished_block(
        request.unfinished_reward_hash
    )
    if unfinished_block is None:
        self.log.warning(
            f"Do not have unfinished reward chain block {request.unfinished_reward_hash}, cannot finish."
        )
        return None
    prev_b: Optional[BlockRecord] = None
    target_rc_hash = request.reward_chain_ip_vdf.challenge
    last_slot_cc_hash = request.challenge_chain_ip_vdf.challenge
    # Backtracks through end of slot objects, should work for multiple empty sub slots
    for eos, _, _ in reversed(self.full_node_store.finished_sub_slots):
        if eos is not None and eos.reward_chain.get_hash() == target_rc_hash:
            target_rc_hash = eos.reward_chain.end_of_slot_vdf.challenge
    if target_rc_hash == self.constants.GENESIS_CHALLENGE:
        prev_b = None
    else:
        # Find the prev block, starts looking backwards from the peak. target_rc_hash must be the hash of a block
        # and not an end of slot (since we just looked through the slots and backtracked)
        curr: Optional[BlockRecord] = self.blockchain.get_peak()
        for _ in range(10):
            if curr is None:
                break
            if curr.reward_infusion_new_challenge == target_rc_hash:
                # Found our prev block
                prev_b = curr
                break
            curr = self.blockchain.try_block_record(curr.prev_hash)
        # If not found, cache keyed on prev block
        if prev_b is None:
            self.full_node_store.add_to_future_ip(request)
            self.log.warning(f"Previous block is None, infusion point {request.reward_chain_ip_vdf.challenge}")
            return None
    finished_sub_slots: Optional[List[EndOfSubSlotBundle]] = self.full_node_store.get_finished_sub_slots(
        self.blockchain,
        prev_b,
        last_slot_cc_hash,
    )
    if finished_sub_slots is None:
        return None
    sub_slot_iters, difficulty = get_next_sub_slot_iters_and_difficulty(
        self.constants,
        len(finished_sub_slots) > 0,
        prev_b,
        self.blockchain,
    )
    # Total iterations at the start of the block's sub slot, to compute sp_total_iters.
    if unfinished_block.reward_chain_block.pos_ss_cc_challenge_hash == self.constants.GENESIS_CHALLENGE:
        sub_slot_start_iters = uint128(0)
    else:
        ss_res = self.full_node_store.get_sub_slot(unfinished_block.reward_chain_block.pos_ss_cc_challenge_hash)
        if ss_res is None:
            self.log.warning(f"Do not have sub slot {unfinished_block.reward_chain_block.pos_ss_cc_challenge_hash}")
            return None
        _, _, sub_slot_start_iters = ss_res
    sp_total_iters = uint128(
        sub_slot_start_iters
        + calculate_sp_iters(
            self.constants,
            sub_slot_iters,
            unfinished_block.reward_chain_block.signage_point_index,
        )
    )
    block: FullBlock = unfinished_block_to_full_block(
        unfinished_block,
        request.challenge_chain_ip_vdf,
        request.challenge_chain_ip_proof,
        request.reward_chain_ip_vdf,
        request.reward_chain_ip_proof,
        request.infused_challenge_chain_ip_vdf,
        request.infused_challenge_chain_ip_proof,
        finished_sub_slots,
        prev_b,
        self.blockchain,
        sp_total_iters,
        difficulty,
    )
    if not self.has_valid_pool_sig(block):
        self.log.warning("Trying to make a pre-farm block but height is not 0")
        return None
    try:
        await self.respond_block(full_node_protocol.RespondBlock(block))
    except Exception as e:
        self.log.warning(f"Consensus error validating block: {e}")
        if timelord_peer is not None:
            # Only sends to the timelord who sent us this VDF, to reset them to the correct peak
            await self.send_peak_to_timelords(peer=timelord_peer)
    return None
async def respond_end_of_sub_slot(
    self, request: full_node_protocol.RespondEndOfSubSlot, peer: ws.WSChiaConnection
) -> Tuple[Optional[Message], bool]:
    """
    Handle a finished end-of-sub-slot bundle from a peer.

    Returns (message, added): ``message`` is an optional follow-up request to send
    back to the peer (e.g. asking for the previous sub-slot first), ``added`` is
    True when the sub-slot is now in the store.
    """
    fetched_ss = self.full_node_store.get_sub_slot(request.end_of_slot_bundle.challenge_chain.get_hash())
    if fetched_ss is not None:
        # Already have the sub-slot
        return None, True
    async with self.timelord_lock:
        fetched_ss = self.full_node_store.get_sub_slot(
            request.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge
        )
        if (
            (fetched_ss is None)
            and request.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge
            != self.constants.GENESIS_CHALLENGE
        ):
            # If we don't have the prev, request the prev instead
            full_node_request = full_node_protocol.RequestSignagePointOrEndOfSubSlot(
                request.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge,
                uint8(0),
                bytes([0] * 32),
            )
            return (
                make_msg(ProtocolMessageTypes.request_signage_point_or_end_of_sub_slot, full_node_request),
                False,
            )
        peak = self.blockchain.get_peak()
        if peak is not None and peak.height > 2:
            next_sub_slot_iters = self.blockchain.get_next_slot_iters(peak.header_hash, True)
            next_difficulty = self.blockchain.get_next_difficulty(peak.header_hash, True)
        else:
            # Too early in the chain for adjustments; use the starting constants.
            next_sub_slot_iters = self.constants.SUB_SLOT_ITERS_STARTING
            next_difficulty = self.constants.DIFFICULTY_STARTING
        # Adds the sub slot and potentially get new infusions
        new_infusions = self.full_node_store.new_finished_sub_slot(
            request.end_of_slot_bundle,
            self.blockchain,
            peak,
            await self.blockchain.get_full_peak(),
        )
        # It may be an empty list, even if it's not None. Not None means added successfully
        if new_infusions is not None:
            self.log.info(
                f"⏲️ Finished sub slot, SP {self.constants.NUM_SPS_SUB_SLOT}/{self.constants.NUM_SPS_SUB_SLOT}, "
                f"{request.end_of_slot_bundle.challenge_chain.get_hash()}, "
                f"number of sub-slots: {len(self.full_node_store.finished_sub_slots)}, "
                f"RC hash: {request.end_of_slot_bundle.reward_chain.get_hash()}, "
                f"Deficit {request.end_of_slot_bundle.reward_chain.deficit}"
            )
            # Notify full nodes of the new sub-slot
            broadcast = full_node_protocol.NewSignagePointOrEndOfSubSlot(
                request.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge,
                request.end_of_slot_bundle.challenge_chain.get_hash(),
                uint8(0),
                request.end_of_slot_bundle.reward_chain.end_of_slot_vdf.challenge,
            )
            msg = make_msg(ProtocolMessageTypes.new_signage_point_or_end_of_sub_slot, broadcast)
            await self.server.send_to_all_except([msg], NodeType.FULL_NODE, peer.peer_node_id)
            # Infusion points that were waiting on this sub-slot can now be processed.
            for infusion in new_infusions:
                await self.new_infusion_point_vdf(infusion)
            # Notify farmers of the new sub-slot
            broadcast_farmer = farmer_protocol.NewSignagePoint(
                request.end_of_slot_bundle.challenge_chain.get_hash(),
                request.end_of_slot_bundle.challenge_chain.get_hash(),
                request.end_of_slot_bundle.reward_chain.get_hash(),
                next_difficulty,
                next_sub_slot_iters,
                uint8(0),
            )
            msg = make_msg(ProtocolMessageTypes.new_signage_point, broadcast_farmer)
            await self.server.send_to_all([msg], NodeType.FARMER)
            return None, True
        else:
            self.log.info(
                f"End of slot not added CC challenge "
                f"{request.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge}"
            )
            return None, False
async def respond_transaction(
    self,
    transaction: SpendBundle,
    spend_name: bytes32,
    peer: Optional[ws.WSChiaConnection] = None,
    test: bool = False,
) -> Tuple[MempoolInclusionStatus, Optional[Err]]:
    """
    Validate a spend bundle and try to add it to the mempool; on success, announce it
    to other full nodes. Returns (status, error).

    Args:
        transaction: the spend bundle to validate and add.
        spend_name: the spend bundle's name/id, used for dedup and broadcast.
        peer: sender connection, if any; excluded from the re-broadcast.
        test: when True, skips the synced() check (used by tests).
    """
    if self.sync_store.get_sync_mode():
        return MempoolInclusionStatus.FAILED, Err.NO_TRANSACTIONS_WHILE_SYNCING
    if not test and not (await self.synced()):
        return MempoolInclusionStatus.FAILED, Err.NO_TRANSACTIONS_WHILE_SYNCING
    peak_height = self.blockchain.get_peak_height()
    # No transactions in mempool in initial client. Remove 6 weeks after launch
    if (
        peak_height is None
        or peak_height <= self.constants.INITIAL_FREEZE_PERIOD
        or self.constants.NETWORK_TYPE == NetworkType.MAINNET
    ):
        return MempoolInclusionStatus.FAILED, Err.INITIAL_TRANSACTION_FREEZE
    if self.mempool_manager.seen(spend_name):
        return MempoolInclusionStatus.FAILED, Err.ALREADY_INCLUDING_TRANSACTION
    self.mempool_manager.add_and_maybe_pop_seen(spend_name)
    self.log.debug(f"Processing transaction: {spend_name}")
    # Ignore if syncing
    if self.sync_store.get_sync_mode():
        status = MempoolInclusionStatus.FAILED
        error: Optional[Err] = Err.NO_TRANSACTIONS_WHILE_SYNCING
    else:
        try:
            # Expensive pre-validation runs outside the blockchain lock.
            cost_result = await self.mempool_manager.pre_validate_spendbundle(transaction)
        except Exception as e:
            # Un-mark as seen so the transaction can be re-submitted later.
            self.mempool_manager.remove_seen(spend_name)
            raise e
        async with self.blockchain.lock:
            if self.mempool_manager.get_spendbundle(spend_name) is not None:
                self.mempool_manager.remove_seen(spend_name)
                return MempoolInclusionStatus.FAILED, Err.ALREADY_INCLUDING_TRANSACTION
            cost, status, error = await self.mempool_manager.add_spendbundle(transaction, cost_result, spend_name)
            if status == MempoolInclusionStatus.SUCCESS:
                self.log.debug(f"Added transaction to mempool: {spend_name}")
                # Only broadcast successful transactions, not pending ones. Otherwise it's a DOS
                # vector.
                mempool_item = self.mempool_manager.get_mempool_item(spend_name)
                assert mempool_item is not None
                fees = mempool_item.fee
                assert fees >= 0
                assert cost is not None
                new_tx = full_node_protocol.NewTransaction(
                    spend_name,
                    cost,
                    uint64(transaction.fees()),
                )
                msg = make_msg(ProtocolMessageTypes.new_transaction, new_tx)
                if peer is None:
                    await self.server.send_to_all([msg], NodeType.FULL_NODE)
                else:
                    await self.server.send_to_all_except([msg], NodeType.FULL_NODE, peer.peer_node_id)
            else:
                self.mempool_manager.remove_seen(spend_name)
                self.log.warning(
                    f"Wasn't able to add transaction with id {spend_name}, " f"status {status} error: {error}"
                )
    return status, error
async def _needs_compact_proof(
    self, vdf_info: VDFInfo, header_block: HeaderBlock, field_vdf: CompressibleVDFField
) -> bool:
    """
    Return True iff ``header_block`` contains ``vdf_info`` in the given VDF field and
    the proof currently stored for it is not yet compact. Returns False when the
    field does not match ``vdf_info`` at all.
    """

    def is_compact(proof) -> bool:
        # A compact proof carries no witness and is normalized to the identity element.
        return proof.witness_type == 0 and proof.normalized_to_identity

    if field_vdf == CompressibleVDFField.CC_EOS_VDF:
        for sub_slot in header_block.finished_sub_slots:
            if sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf == vdf_info:
                return not is_compact(sub_slot.proofs.challenge_chain_slot_proof)
    elif field_vdf == CompressibleVDFField.ICC_EOS_VDF:
        for sub_slot in header_block.finished_sub_slots:
            infused = sub_slot.infused_challenge_chain
            if infused is not None and infused.infused_challenge_chain_end_of_slot_vdf == vdf_info:
                assert sub_slot.proofs.infused_challenge_chain_slot_proof is not None
                return not is_compact(sub_slot.proofs.infused_challenge_chain_slot_proof)
    elif field_vdf == CompressibleVDFField.CC_SP_VDF:
        sp_vdf = header_block.reward_chain_block.challenge_chain_sp_vdf
        if sp_vdf is None:
            return False
        if vdf_info == sp_vdf:
            assert header_block.challenge_chain_sp_proof is not None
            return not is_compact(header_block.challenge_chain_sp_proof)
    elif field_vdf == CompressibleVDFField.CC_IP_VDF:
        if vdf_info == header_block.reward_chain_block.challenge_chain_ip_vdf:
            return not is_compact(header_block.challenge_chain_ip_proof)
    return False
async def _can_accept_compact_proof(
    self,
    vdf_info: VDFInfo,
    vdf_proof: VDFProof,
    height: uint32,
    header_hash: bytes32,
    field_vdf: CompressibleVDFField,
) -> bool:
    """
    - Checks if the provided proof is indeed compact.
    - Checks if proof verifies given the vdf_info from the start of sub-slot.
    - Checks if the provided vdf_info is correct, assuming it refers to the start of sub-slot.
    - Checks if the existing proof was non-compact. Ignore this proof if we already have a compact proof.
    """
    is_fully_compactified = await self.block_store.is_fully_compactified(header_hash)
    if is_fully_compactified is None or is_fully_compactified:
        # Unknown block, or all its proofs are already compact: nothing to replace.
        self.log.info(f"Already compactified block: {header_hash}. Ignoring.")
        return False
    # A compact proof must carry no witness and be normalized to the identity element.
    if vdf_proof.witness_type > 0 or not vdf_proof.normalized_to_identity:
        self.log.error(f"Received vdf proof is not compact: {vdf_proof}.")
        return False
    if not vdf_proof.is_valid(self.constants, ClassgroupElement.get_default_element(), vdf_info):
        self.log.error(f"Received compact vdf proof is not valid: {vdf_proof}.")
        return False
    header_block = await self.blockchain.get_header_block_by_height(height, header_hash)
    if header_block is None:
        self.log.error(f"Can't find block for given compact vdf. Height: {height} Header hash: {header_hash}")
        return False
    is_new_proof = await self._needs_compact_proof(vdf_info, header_block, field_vdf)
    if not is_new_proof:
        self.log.info(f"Duplicate compact proof. Height: {height}. Header hash: {header_hash}.")
    return is_new_proof
async def _replace_proof(
    self,
    vdf_info: VDFInfo,
    vdf_proof: VDFProof,
    height: uint32,
    field_vdf: CompressibleVDFField,
):
    """
    Replace the stored (non-compact) VDF proof matching ``vdf_info`` with the
    compact ``vdf_proof`` in every full block at ``height``, and persist the
    updated block.

    Args:
        vdf_info: the VDF info identifying which proof to replace.
        vdf_proof: the new compact proof.
        height: blockchain height of the block(s) to update.
        field_vdf: which of the block's VDF fields ``vdf_info`` refers to.
    """
    full_blocks = await self.block_store.get_full_blocks_at([height])
    assert len(full_blocks) > 0
    for block in full_blocks:
        new_block = None
        block_record = await self.blockchain.get_block_record_from_db(self.blockchain.height_to_hash(height))
        assert block_record is not None
        if field_vdf == CompressibleVDFField.CC_EOS_VDF:
            for index, sub_slot in enumerate(block.finished_sub_slots):
                if sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf == vdf_info:
                    new_proofs = dataclasses.replace(sub_slot.proofs, challenge_chain_slot_proof=vdf_proof)
                    new_subslot = dataclasses.replace(sub_slot, proofs=new_proofs)
                    # Copy the list: mutating block.finished_sub_slots in place would
                    # alter the original (frozen-by-convention) block object as well.
                    new_finished_subslots = list(block.finished_sub_slots)
                    new_finished_subslots[index] = new_subslot
                    new_block = dataclasses.replace(block, finished_sub_slots=new_finished_subslots)
                    break
        if field_vdf == CompressibleVDFField.ICC_EOS_VDF:
            for index, sub_slot in enumerate(block.finished_sub_slots):
                if (
                    sub_slot.infused_challenge_chain is not None
                    and sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf == vdf_info
                ):
                    new_proofs = dataclasses.replace(sub_slot.proofs, infused_challenge_chain_slot_proof=vdf_proof)
                    new_subslot = dataclasses.replace(sub_slot, proofs=new_proofs)
                    # Copy the list for the same reason as in the CC_EOS_VDF branch above.
                    new_finished_subslots = list(block.finished_sub_slots)
                    new_finished_subslots[index] = new_subslot
                    new_block = dataclasses.replace(block, finished_sub_slots=new_finished_subslots)
                    break
        if field_vdf == CompressibleVDFField.CC_SP_VDF:
            assert block.challenge_chain_sp_proof is not None
            new_block = dataclasses.replace(block, challenge_chain_sp_proof=vdf_proof)
        if field_vdf == CompressibleVDFField.CC_IP_VDF:
            new_block = dataclasses.replace(block, challenge_chain_ip_proof=vdf_proof)
        # Callers pre-check via _can_accept_compact_proof/_needs_compact_proof, so a
        # matching field must have been found.
        assert new_block is not None
        await self.block_store.add_full_block(new_block, block_record)
    async def respond_compact_vdf_timelord(self, request: timelord_protocol.RespondCompactProofOfTime):
        """Handle a compact VDF proof produced by a (bluebox) timelord.

        Validates and stores the compact proof, then announces it to all
        connected full nodes so they can fetch it too.
        """
        field_vdf = CompressibleVDFField(int(request.field_vdf))
        if not await self._can_accept_compact_proof(
            request.vdf_info, request.vdf_proof, request.height, request.header_hash, field_vdf
        ):
            return
        async with self.blockchain.lock:
            await self._replace_proof(request.vdf_info, request.vdf_proof, request.height, field_vdf)
        msg = make_msg(
            ProtocolMessageTypes.new_compact_vdf,
            full_node_protocol.NewCompactVDF(request.height, request.header_hash, request.field_vdf, request.vdf_info),
        )
        if self.server is not None:
            await self.server.send_to_all([msg], NodeType.FULL_NODE)
    async def new_compact_vdf(self, request: full_node_protocol.NewCompactVDF, peer: ws.WSChiaConnection):
        """Handle a peer's announcement of a new compact VDF.

        If the referenced block still has an uncompact proof for this field,
        request the compact proof from the announcing peer.
        """
        is_fully_compactified = await self.block_store.is_fully_compactified(request.header_hash)
        # Unknown block (None) or already fully compact: nothing to do.
        # NOTE(review): this branch returns False while the others return None.
        if is_fully_compactified is None or is_fully_compactified:
            return False
        header_block = await self.blockchain.get_header_block_by_height(request.height, request.header_hash)
        if header_block is None:
            return
        field_vdf = CompressibleVDFField(int(request.field_vdf))
        if await self._needs_compact_proof(request.vdf_info, header_block, field_vdf):
            msg = make_msg(
                ProtocolMessageTypes.request_compact_vdf,
                full_node_protocol.RequestCompactVDF(
                    request.height, request.header_hash, request.field_vdf, request.vdf_info
                ),
            )
            await peer.send_message(msg)
    async def request_compact_vdf(self, request: full_node_protocol.RequestCompactVDF, peer: ws.WSChiaConnection):
        """Serve a compact VDF proof we hold to a requesting peer.

        Looks up the proof matching `request.vdf_info` in the given field of
        the block's header and responds only if our stored proof is actually
        compact (witness_type == 0 and normalized to identity).
        """
        header_block = await self.blockchain.get_header_block_by_height(request.height, request.header_hash)
        if header_block is None:
            return
        vdf_proof: Optional[VDFProof] = None
        field_vdf = CompressibleVDFField(int(request.field_vdf))
        if field_vdf == CompressibleVDFField.CC_EOS_VDF:
            # Challenge chain end-of-slot proof: search the finished sub slots.
            for sub_slot in header_block.finished_sub_slots:
                if sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf == request.vdf_info:
                    vdf_proof = sub_slot.proofs.challenge_chain_slot_proof
                    break
        if field_vdf == CompressibleVDFField.ICC_EOS_VDF:
            # Infused challenge chain end-of-slot proof.
            for sub_slot in header_block.finished_sub_slots:
                if (
                    sub_slot.infused_challenge_chain is not None
                    and sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf == request.vdf_info
                ):
                    vdf_proof = sub_slot.proofs.infused_challenge_chain_slot_proof
                    break
        if (
            field_vdf == CompressibleVDFField.CC_SP_VDF
            and header_block.reward_chain_block.challenge_chain_sp_vdf == request.vdf_info
        ):
            vdf_proof = header_block.challenge_chain_sp_proof
        if (
            field_vdf == CompressibleVDFField.CC_IP_VDF
            and header_block.reward_chain_block.challenge_chain_ip_vdf == request.vdf_info
        ):
            vdf_proof = header_block.challenge_chain_ip_proof
        # Only compact proofs are served; anything else means the peer's request
        # does not match what we have.
        if vdf_proof is None or vdf_proof.witness_type > 0 or not vdf_proof.normalized_to_identity:
            self.log.error(f"{peer} requested compact vdf we don't have, height: {request.height}.")
            return
        compact_vdf = full_node_protocol.RespondCompactVDF(
            request.height,
            request.header_hash,
            request.field_vdf,
            request.vdf_info,
            vdf_proof,
        )
        msg = make_msg(ProtocolMessageTypes.respond_compact_vdf, compact_vdf)
        await peer.send_message(msg)
    async def respond_compact_vdf(self, request: full_node_protocol.RespondCompactVDF, peer: ws.WSChiaConnection):
        """Handle a compact VDF proof received from another full node.

        Validates and stores the proof, then re-announces it to all peers
        except the sender.
        """
        field_vdf = CompressibleVDFField(int(request.field_vdf))
        if not await self._can_accept_compact_proof(
            request.vdf_info, request.vdf_proof, request.height, request.header_hash, field_vdf
        ):
            return
        async with self.blockchain.lock:
            # Re-check under the lock in case another peer delivered the same proof first.
            if self.blockchain.seen_compact_proofs(request.vdf_info, request.height):
                return
            await self._replace_proof(request.vdf_info, request.vdf_proof, request.height, field_vdf)
        msg = make_msg(
            ProtocolMessageTypes.new_compact_vdf,
            full_node_protocol.NewCompactVDF(request.height, request.header_hash, request.field_vdf, request.vdf_info),
        )
        if self.server is not None:
            await self.server.send_to_all_except([msg], NodeType.FULL_NODE, peer.peer_node_id)
async def broadcast_uncompact_blocks(self, uncompact_interval_scan: int, target_uncompact_proofs: int):
min_height: Optional[int] = 0
try:
while not self._shut_down:
while self.sync_store.get_sync_mode():
if self._shut_down:
return
await asyncio.sleep(30)
broadcast_list: List[timelord_protocol.RequestCompactProofOfTime] = []
new_min_height = None
max_height = self.blockchain.get_peak_height()
if max_height is None:
await asyncio.sleep(30)
continue
# Calculate 'min_height' correctly the first time this task is launched, using the db.
assert min_height is not None
min_height = await self.block_store.get_first_not_compactified(min_height)
if min_height is None or min_height > max(0, max_height - 1000):
min_height = max(0, max_height - 1000)
batches_finished = 0
self.log.info("Scanning the blockchain for uncompact blocks.")
for h in range(min_height, max_height, 100):
# Got 10 times the target header count, sampling the target headers should contain
# enough randomness to split the work between blueboxes.
if len(broadcast_list) > target_uncompact_proofs * 10:
break
stop_height = min(h + 99, max_height)
headers = await self.blockchain.get_header_blocks_in_range(min_height, stop_height)
for header in headers.values():
prev_broadcast_list_len = len(broadcast_list)
expected_header_hash = self.blockchain.height_to_hash(header.height)
if header.header_hash != expected_header_hash:
continue
for sub_slot in header.finished_sub_slots:
if (
sub_slot.proofs.challenge_chain_slot_proof.witness_type > 0
or not sub_slot.proofs.challenge_chain_slot_proof.normalized_to_identity
):
broadcast_list.append(
timelord_protocol.RequestCompactProofOfTime(
sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf,
header.header_hash,
header.height,
uint8(CompressibleVDFField.CC_EOS_VDF),
)
)
if sub_slot.proofs.infused_challenge_chain_slot_proof is not None and (
sub_slot.proofs.infused_challenge_chain_slot_proof.witness_type > 0
or not sub_slot.proofs.infused_challenge_chain_slot_proof.normalized_to_identity
):
assert sub_slot.infused_challenge_chain is not None
broadcast_list.append(
timelord_protocol.RequestCompactProofOfTime(
sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf,
header.header_hash,
header.height,
uint8(CompressibleVDFField.ICC_EOS_VDF),
)
)
if header.challenge_chain_sp_proof is not None and (
header.challenge_chain_sp_proof.witness_type > 0
or not header.challenge_chain_sp_proof.normalized_to_identity
):
assert header.reward_chain_block.challenge_chain_sp_vdf is not None
broadcast_list.append(
timelord_protocol.RequestCompactProofOfTime(
header.reward_chain_block.challenge_chain_sp_vdf,
header.header_hash,
header.height,
uint8(CompressibleVDFField.CC_SP_VDF),
)
)
if (
header.challenge_chain_ip_proof.witness_type > 0
or not header.challenge_chain_ip_proof.normalized_to_identity
):
broadcast_list.append(
timelord_protocol.RequestCompactProofOfTime(
header.reward_chain_block.challenge_chain_ip_vdf,
header.header_hash,
header.height,
uint8(CompressibleVDFField.CC_IP_VDF),
)
)
# This is the first header with uncompact proofs. Store its height so next time we iterate
# only from here. Fix header block iteration window to at least 1000, so reorgs will be
# handled correctly.
if prev_broadcast_list_len == 0 and len(broadcast_list) > 0 and h <= max(0, max_height - 1000):
new_min_height = header.height
# Small sleep between batches.
batches_finished += 1
if batches_finished % 10 == 0:
await asyncio.sleep(1)
# We have no uncompact blocks, but mentain the block iteration window to at least 1000 blocks.
if new_min_height is None:
new_min_height = max(0, max_height - 1000)
min_height = new_min_height
if len(broadcast_list) > target_uncompact_proofs:
random.shuffle(broadcast_list)
broadcast_list = broadcast_list[:target_uncompact_proofs]
if self.sync_store.get_sync_mode():
continue
if self.server is not None:
for new_pot in broadcast_list:
msg = make_msg(ProtocolMessageTypes.request_compact_proof_of_time, new_pot)
await self.server.send_to_all([msg], NodeType.TIMELORD)
await asyncio.sleep(uncompact_interval_scan)
except Exception as e:
error_stack = traceback.format_exc()
self.log.error(f"Exception in broadcast_uncompact_blocks: {e}")
self.log.error(f"Exception Stack: {error_stack}")
| 49.367749 | 120 | 0.625814 | import asyncio
import dataclasses
import logging
import random
import time
import traceback
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
import aiosqlite
from blspy import AugSchemeMPL
import src.server.ws_connection as ws
from src.consensus.block_creation import unfinished_block_to_full_block
from src.consensus.block_record import BlockRecord
from src.consensus.blockchain import Blockchain, ReceiveBlockResult
from src.consensus.constants import ConsensusConstants
from src.consensus.difficulty_adjustment import get_next_sub_slot_iters_and_difficulty
from src.consensus.make_sub_epoch_summary import next_sub_epoch_summary
from src.consensus.multiprocess_validation import PreValidationResult
from src.consensus.network_type import NetworkType
from src.consensus.pot_iterations import calculate_sp_iters
from src.full_node.block_store import BlockStore
from src.full_node.coin_store import CoinStore
from src.full_node.full_node_store import FullNodeStore
from src.full_node.mempool_manager import MempoolManager
from src.full_node.signage_point import SignagePoint
from src.full_node.sync_store import SyncStore
from src.full_node.weight_proof import WeightProofHandler
from src.protocols import farmer_protocol, full_node_protocol, timelord_protocol, wallet_protocol
from src.protocols.full_node_protocol import RejectBlocks, RequestBlocks, RespondBlock, RespondBlocks
from src.protocols.protocol_message_types import ProtocolMessageTypes
from src.server.node_discovery import FullNodePeers
from src.server.outbound_message import Message, NodeType, make_msg
from src.server.server import ChiaServer
from src.types.blockchain_format.classgroup import ClassgroupElement
from src.types.blockchain_format.pool_target import PoolTarget
from src.types.blockchain_format.sized_bytes import bytes32
from src.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from src.types.blockchain_format.vdf import CompressibleVDFField, VDFInfo, VDFProof
from src.types.end_of_slot_bundle import EndOfSubSlotBundle
from src.types.full_block import FullBlock
from src.types.header_block import HeaderBlock
from src.types.mempool_inclusion_status import MempoolInclusionStatus
from src.types.spend_bundle import SpendBundle
from src.types.unfinished_block import UnfinishedBlock
from src.util.errors import ConsensusError, Err
from src.util.genesis_wait import wait_for_genesis_challenge
from src.util.ints import uint8, uint32, uint64, uint128
from src.util.path import mkdir, path_from_root
class FullNode:
    """Full node: maintains the blockchain, coin and mempool state, and
    exchanges blocks, transactions and proofs with peers over the network."""

    # Persistent stores (backed by the sqlite `connection` below)
    block_store: BlockStore
    full_node_store: FullNodeStore
    full_node_peers: Optional[FullNodePeers]
    sync_store: Any
    coin_store: CoinStore
    mempool_manager: MempoolManager
    connection: aiosqlite.Connection
    # Currently running long-sync task, if any
    _sync_task: Optional[asyncio.Task]
    blockchain: Blockchain
    config: Dict
    server: Any
    log: logging.Logger
    constants: ConsensusConstants
    _shut_down: bool
    root_path: Path
    # Invoked on state changes (e.g. new peak); set via _set_state_changed_callback
    state_changed_callback: Optional[Callable]
    timelord_lock: asyncio.Lock
    initialized: bool
    def __init__(
        self,
        config: Dict,
        root_path: Path,
        consensus_constants: ConsensusConstants,
        name: Optional[str] = None,
    ):
        """Initialize in-memory fields only; DB-backed stores are opened later
        in `_start` / `regular_start`.

        Args:
            config: the full node's configuration dictionary.
            root_path: root directory, used to resolve the database path.
            consensus_constants: consensus parameters for the selected network.
            name: optional logger name (defaults to this module's logger).
        """
        self.initialized = False
        self.root_path = root_path
        self.config = config
        self.server = None
        self._shut_down = False
        self.constants = consensus_constants
        self.pow_creation: Dict[uint32, asyncio.Event] = {}
        self.state_changed_callback: Optional[Callable] = None
        self.full_node_peers = None
        self.sync_store = None
        if name:
            self.log = logging.getLogger(name)
        else:
            self.log = logging.getLogger(__name__)
        # The "CHALLENGE" placeholder in the configured path is replaced by the
        # network name, so each network gets its own database file.
        db_path_replaced: str = config["database_path"].replace("CHALLENGE", config["selected_network"])
        self.db_path = path_from_root(root_path, db_path_replaced)
        mkdir(self.db_path.parent)
def _set_state_changed_callback(self, callback: Callable):
self.state_changed_callback = callback
    async def regular_start(self):
        """Open the database, create all stores, load the blockchain and
        mempool, and (optionally) launch the proof-compactification task.
        """
        self.log.info("regular_start")
        self.connection = await aiosqlite.connect(self.db_path)
        self.block_store = await BlockStore.create(self.connection)
        self.full_node_store = await FullNodeStore.create(self.constants)
        self.sync_store = await SyncStore.create()
        self.coin_store = await CoinStore.create(self.connection)
        self.log.info("Initializing blockchain from disk")
        start_time = time.time()
        self.blockchain = await Blockchain.create(self.coin_store, self.block_store, self.constants)
        self.mempool_manager = MempoolManager(self.coin_store, self.constants)
        self.weight_proof_handler = WeightProofHandler(self.constants, self.blockchain)
        self._sync_task = None
        time_taken = time.time() - start_time
        if self.blockchain.get_peak() is None:
            self.log.info(f"Initialized with empty blockchain time taken: {int(time_taken)}s")
        else:
            self.log.info(
                f"Blockchain initialized to peak {self.blockchain.get_peak().header_hash} height"
                f" {self.blockchain.get_peak().height}, "
                f"time taken: {int(time_taken)}s"
            )
        # A freshly loaded mempool should have nothing pending yet.
        pending_tx = await self.mempool_manager.new_peak(self.blockchain.get_peak())
        assert len(pending_tx) == 0
        peak: Optional[BlockRecord] = self.blockchain.get_peak()
        self.uncompact_task = None
        if peak is not None:
            # Re-run peak handling for the loaded peak; presumably this warms
            # derived state (peak_post_processing is defined elsewhere).
            full_peak = await self.blockchain.get_full_peak()
            await self.peak_post_processing(full_peak, peak, max(peak.height - 1, 0), None)
        if self.config["send_uncompact_interval"] != 0:
            assert self.config["target_uncompact_proofs"] != 0
            self.uncompact_task = asyncio.create_task(
                self.broadcast_uncompact_blocks(
                    self.config["send_uncompact_interval"],
                    self.config["target_uncompact_proofs"],
                )
            )
        self.initialized = True
async def delayed_start(self):
self.log.info("delayed_start")
config, constants = await wait_for_genesis_challenge(self.root_path, self.constants, "full_node")
self.config = config
self.constants = constants
await self.regular_start()
async def _start(self):
self.timelord_lock = asyncio.Lock()
if self.constants.GENESIS_CHALLENGE is not None:
await self.regular_start()
else:
asyncio.create_task(self.delayed_start())
def set_server(self, server: ChiaServer):
self.server = server
try:
self.full_node_peers = FullNodePeers(
self.server,
self.root_path,
self.config["target_peer_count"] - self.config["target_outbound_peer_count"],
self.config["target_outbound_peer_count"],
self.config["peer_db_path"],
self.config["introducer_peer"],
self.config["peer_connect_interval"],
self.log,
)
asyncio.create_task(self.full_node_peers.start())
except Exception as e:
error_stack = traceback.format_exc()
self.log.error(f"Exception: {e}")
self.log.error(f"Exception in peer discovery: {e}")
self.log.error(f"Exception Stack: {error_stack}")
def _state_changed(self, change: str):
if self.state_changed_callback is not None:
self.state_changed_callback(change)
    async def short_sync_batch(self, peer: ws.WSChiaConnection, start_height: uint32, target_height: uint32) -> bool:
        """Sync blocks from `start_height` to `target_height` from one peer in
        batches of MAX_BLOCK_COUNT_PER_REQUESTS.

        Returns True if the request was handled (or is already being handled by
        a batch/backtrack sync for this peer), False if the first block does
        not connect to our chain and a long sync should be used instead.
        """
        if (
            peer.peer_node_id in self.sync_store.backtrack_syncing
            and self.sync_store.backtrack_syncing[peer.peer_node_id] > 0
        ):
            return True  # Don't batch sync, we are already in progress of a backtrack sync
        if peer.peer_node_id in self.sync_store.batch_syncing:
            return True
        self.sync_store.batch_syncing.add(peer.peer_node_id)
        self.log.info(f"Starting batch short sync from {start_height} to height {target_height}")
        if start_height > 0:
            # Verify that the first block connects to our chain before batching.
            first = await peer.request_block(full_node_protocol.RequestBlock(uint32(start_height), False))
            if first is None or not isinstance(first, full_node_protocol.RespondBlock):
                self.sync_store.batch_syncing.remove(peer.peer_node_id)
                raise ValueError(f"Error short batch syncing, could not fetch block at height {start_height}")
            if not self.blockchain.contains_block(first.block.prev_header_hash):
                self.log.info("Batch syncing stopped, this is a deep chain")
                self.sync_store.batch_syncing.remove(peer.peer_node_id)
                # First sb not connected to our blockchain, do a long sync instead
                return False
        batch_size = self.constants.MAX_BLOCK_COUNT_PER_REQUESTS
        try:
            for height in range(start_height, target_height, batch_size):
                end_height = min(target_height, height + batch_size)
                request = RequestBlocks(uint32(height), uint32(end_height), True)
                response = await peer.request_blocks(request)
                if not response:
                    raise ValueError(f"Error short batch syncing, invalid/no response for {height}-{end_height}")
                async with self.blockchain.lock:
                    success, advanced_peak, fork_height = await self.receive_block_batch(response.blocks, peer, None)
                    if not success:
                        raise ValueError(f"Error short batch syncing, failed to validate blocks {height}-{end_height}")
                    if advanced_peak:
                        peak = self.blockchain.get_peak()
                        peak_fb: Optional[FullBlock] = await self.blockchain.get_full_peak()
                        assert peak is not None and peak_fb is not None and fork_height is not None
                        await self.peak_post_processing(peak_fb, peak, fork_height, peer)
                self.log.info(f"Added blocks {height}-{end_height}")
        except Exception:
            # Release the batch-sync slot before propagating the failure.
            self.sync_store.batch_syncing.remove(peer.peer_node_id)
            raise
        self.sync_store.batch_syncing.remove(peer.peer_node_id)
        return True
async def short_sync_backtrack(
self, peer: ws.WSChiaConnection, peak_height: uint32, target_height: uint32, target_unf_hash: bytes32
):
try:
if peer.peer_node_id not in self.sync_store.backtrack_syncing:
self.sync_store.backtrack_syncing[peer.peer_node_id] = 0
self.sync_store.backtrack_syncing[peer.peer_node_id] += 1
unfinished_block: Optional[UnfinishedBlock] = self.full_node_store.get_unfinished_block(target_unf_hash)
curr_height: int = target_height
found_fork_point = False
responses = []
while curr_height > peak_height - 5:
# If we already have the unfinished block, don't fetch the transactions. In the normal case, we will
fetch_tx: bool = unfinished_block is None or curr_height != target_height
curr = await peer.request_block(full_node_protocol.RequestBlock(uint32(curr_height), fetch_tx))
if curr is None:
raise ValueError(f"Failed to fetch block {curr_height} from {peer.get_peer_info()}, timed out")
if curr is None or not isinstance(curr, full_node_protocol.RespondBlock):
raise ValueError(
f"Failed to fetch block {curr_height} from {peer.get_peer_info()}, wrong type {type(curr)}"
)
responses.append(curr)
if self.blockchain.contains_block(curr.block.prev_header_hash) or curr_height == 0:
found_fork_point = True
break
curr_height -= 1
if found_fork_point:
for response in reversed(responses):
await self.respond_block(response)
except Exception as e:
self.sync_store.backtrack_syncing[peer.peer_node_id] -= 1
raise e
self.sync_store.backtrack_syncing[peer.peer_node_id] -= 1
return found_fork_point
    async def new_peak(self, request: full_node_protocol.NewPeak, peer: ws.WSChiaConnection):
        """Handle a peer's new-peak announcement.

        Records which peer has the peak, then decides how to catch up:
        backtrack sync (a few blocks behind), batch sync (moderately behind or
        near the start of the chain), or a full long sync via `_sync`.
        """
        self.sync_store.peer_has_block(request.header_hash, peer.peer_node_id, request.weight, request.height, True)
        if self.blockchain.contains_block(request.header_hash):
            return None
        peak: Optional[BlockRecord] = self.blockchain.get_peak()
        curr_peak_height = uint32(0) if peak is None else peak.height
        # Ignore peaks lighter than our current chain.
        if peak is not None and peak.weight > request.weight:
            return None
        if self.sync_store.get_sync_mode():
            # While long-syncing, just record additional peers that can serve the
            # current sync target.
            peak_sync_hash = self.sync_store.get_sync_target_hash()
            peak_sync_height = self.sync_store.get_sync_target_height()
            if peak_sync_hash is not None and request.header_hash != peak_sync_hash and peak_sync_height is not None:
                peak_peers: Set[bytes32] = self.sync_store.get_peers_that_have_peak([peak_sync_hash])
                if peer.peer_node_id not in peak_peers:
                    target_peak_response: Optional[RespondBlock] = await peer.request_block(
                        full_node_protocol.RequestBlock(uint32(peak_sync_height), False), timeout=10
                    )
                    if target_peak_response is not None and isinstance(target_peak_response, RespondBlock):
                        self.sync_store.peer_has_block(
                            peak_sync_hash,
                            peer.peer_node_id,
                            target_peak_response.block.weight,
                            peak_sync_height,
                            False,
                        )
        else:
            if request.height <= curr_peak_height + self.config["short_sync_blocks_behind_threshold"]:
                # This is the normal case of receiving the next block
                if await self.short_sync_backtrack(
                    peer, curr_peak_height, request.height, request.unfinished_reward_block_hash
                ):
                    return
            if request.height < self.constants.WEIGHT_PROOF_RECENT_BLOCKS:
                # This is the case of syncing up more than a few blocks, at the start of the chain
                # TODO(almog): fix weight proofs so they work at the beginning as well
                self.log.debug("Doing batch sync, no backup")
                await self.short_sync_batch(peer, uint32(0), request.height)
                return
            if request.height < curr_peak_height + self.config["sync_blocks_behind_threshold"]:
                # This case of being behind but not by so much
                if await self.short_sync_batch(peer, uint32(max(curr_peak_height - 6, 0)), request.height):
                    return
            # This is the either the case where we were not able to sync successfully (for example, due to the fork
            # point being in the past), or we are very far behind. Performs a long sync.
            self._sync_task = asyncio.create_task(self._sync())
    async def send_peak_to_timelords(
        self, peak_block: Optional[FullBlock] = None, peer: Optional[ws.WSChiaConnection] = None
    ):
        """Send the current (or given) peak to timelords.

        If `peer` is given, only that timelord is notified; otherwise the
        message is broadcast to all connected timelords.
        """
        if peak_block is None:
            peak_block = await self.blockchain.get_full_peak()
        if peak_block is not None:
            peak = self.blockchain.block_record(peak_block.header_hash)
            difficulty = self.blockchain.get_next_difficulty(peak.header_hash, False)
            ses: Optional[SubEpochSummary] = next_sub_epoch_summary(
                self.constants,
                self.blockchain,
                peak.required_iters,
                peak_block,
                True,
            )
            recent_rc = self.blockchain.get_recent_reward_challenges()
            # Walk back to the most recent challenge block or first-in-sub-slot
            # block to compute the last challenge-block/end-of-slot total iters.
            curr = peak
            while not curr.is_challenge_block(self.constants) and not curr.first_in_sub_slot:
                curr = self.blockchain.block_record(curr.prev_hash)
            if curr.is_challenge_block(self.constants):
                last_csb_or_eos = curr.total_iters
            else:
                last_csb_or_eos = curr.ip_sub_slot_total_iters(self.constants)
            # Determine whether we crossed a sub-epoch boundary whose summary has
            # not yet been included in any block.
            curr = peak
            passed_ses_height_but_not_yet_included = True
            while (curr.height % self.constants.SUB_EPOCH_BLOCKS) != 0:
                if curr.sub_epoch_summary_included:
                    passed_ses_height_but_not_yet_included = False
                curr = self.blockchain.block_record(curr.prev_hash)
            if curr.sub_epoch_summary_included or curr.height == 0:
                passed_ses_height_but_not_yet_included = False
            timelord_new_peak: timelord_protocol.NewPeakTimelord = timelord_protocol.NewPeakTimelord(
                peak_block.reward_chain_block,
                difficulty,
                peak.deficit,
                peak.sub_slot_iters,
                ses,
                recent_rc,
                last_csb_or_eos,
                passed_ses_height_but_not_yet_included,
            )
            msg = make_msg(ProtocolMessageTypes.new_peak_timelord, timelord_new_peak)
            if peer is None:
                await self.server.send_to_all([msg], NodeType.TIMELORD)
            else:
                await peer.new_peak_timelord(timelord_new_peak)
async def synced(self) -> bool:
curr: Optional[BlockRecord] = self.blockchain.get_peak()
if curr is None:
return False
while curr is not None and not curr.is_transaction_block:
curr = self.blockchain.try_block_record(curr.prev_hash)
now = time.time()
if (
curr is None
or curr.timestamp is None
or curr.timestamp < uint64(int(now - 60 * 7))
or self.sync_store.get_sync_mode()
):
return False
else:
return True
    async def on_connect(self, connection: ws.WSChiaConnection):
        """Handle a newly established connection: exchange mempool filters with
        full nodes and send our current peak to full nodes, wallets and
        timelords.
        """
        self._state_changed("add_connection")
        self._state_changed("sync_mode")
        if self.full_node_peers is not None:
            asyncio.create_task(self.full_node_peers.on_connect(connection))
        # Nothing useful to send before startup has finished.
        if self.initialized is False:
            return
        if connection.connection_type is NodeType.FULL_NODE:
            # Send filter to node and request mempool items that are not in it (Only if we are currently synced)
            synced = await self.synced()
            peak_height = self.blockchain.get_peak_height()
            if synced and peak_height is not None and peak_height > self.constants.INITIAL_FREEZE_PERIOD:
                my_filter = self.mempool_manager.get_filter()
                mempool_request = full_node_protocol.RequestMempoolTransactions(my_filter)
                msg = make_msg(ProtocolMessageTypes.request_mempool_transactions, mempool_request)
                await connection.send_message(msg)
        peak_full: Optional[FullBlock] = await self.blockchain.get_full_peak()
        if peak_full is not None:
            peak: BlockRecord = self.blockchain.block_record(peak_full.header_hash)
            if connection.connection_type is NodeType.FULL_NODE:
                request_node = full_node_protocol.NewPeak(
                    peak.header_hash,
                    peak.height,
                    peak.weight,
                    peak.height,
                    peak_full.reward_chain_block.get_unfinished().get_hash(),
                )
                await connection.send_message(make_msg(ProtocolMessageTypes.new_peak, request_node))
            elif connection.connection_type is NodeType.WALLET:
                # If connected to a wallet, send the Peak
                request_wallet = wallet_protocol.NewPeakWallet(
                    peak.header_hash,
                    peak.height,
                    peak.weight,
                    peak.height,
                )
                await connection.send_message(make_msg(ProtocolMessageTypes.new_peak_wallet, request_wallet))
            elif connection.connection_type is NodeType.TIMELORD:
                await self.send_peak_to_timelords()
    def on_disconnect(self, connection: ws.WSChiaConnection):
        """Handle a peer disconnect: log it, signal the state change, and clear
        the peer's sync bookkeeping."""
        self.log.info(f"peer disconnected {connection.get_peer_info()}")
        self._state_changed("close_connection")
        self._state_changed("sync_mode")
        # sync_store may still be None if the peer disconnects before startup finished.
        if self.sync_store is not None:
            self.sync_store.peer_disconnected(connection.peer_node_id)
def _num_needed_peers(self) -> int:
assert self.server is not None
assert self.server.all_connections is not None
diff = self.config["target_peer_count"] - len(self.server.all_connections)
return diff if diff >= 0 else 0
    def _close(self):
        """Begin shutdown (synchronous part): signal running loops and shut
        down subsystems; `_await_closed` completes the async part."""
        self._shut_down = True
        if self.blockchain is not None:
            self.blockchain.shut_down()
        if self.mempool_manager is not None:
            self.mempool_manager.shut_down()
        if self.full_node_peers is not None:
            asyncio.create_task(self.full_node_peers.close())
        if self.uncompact_task is not None:
            self.uncompact_task.cancel()
    async def _await_closed(self):
        """Finish shutdown: cancel any running sync task and close the DB."""
        try:
            if self._sync_task is not None:
                self._sync_task.cancel()
        except asyncio.TimeoutError:
            # NOTE(review): Task.cancel() does not raise TimeoutError, so this
            # handler looks dead — confirm before removing.
            pass
        await self.connection.close()
    async def _sync(self):
        """Perform a full (long) sync.

        Collects advertised peaks, picks the heaviest one, validates a weight
        proof for it, then downloads blocks from the fork point with
        `sync_from_fork_point`. Sync mode is set for the duration and cleared
        by `_finish_sync`.
        """
        # Ensure we are only syncing once and not double calling this method
        if self.sync_store.get_sync_mode():
            return
        self.sync_store.set_sync_mode(True)
        self._state_changed("sync_mode")
        try:
            self.log.info("Starting to perform sync.")
            self.log.info("Waiting to receive peaks from peers.")
            # Wait until we have 3 peaks or up to a max of 30 seconds
            # NOTE(review): the loop always runs 300 iterations (no break once 3
            # peaks are known); the remaining iterations simply don't sleep.
            peaks = []
            for i in range(300):
                peaks = [tup[0] for tup in self.sync_store.get_peak_of_each_peer().values()]
                if len(self.sync_store.get_peers_that_have_peak(peaks)) < 3:
                    if self._shut_down:
                        return
                    await asyncio.sleep(0.1)
            self.log.info(f"Collected a total of {len(peaks)} peaks.")
            self.sync_peers_handler = None
            # Based on responses from peers about the current peaks, see which peak is the heaviest
            # (similar to longest chain rule).
            target_peak = self.sync_store.get_heaviest_peak()
            if target_peak is None:
                raise RuntimeError("Not performing sync, no peaks collected")
            heaviest_peak_hash, heaviest_peak_height, heaviest_peak_weight = target_peak
            self.sync_store.set_peak_target(heaviest_peak_hash, heaviest_peak_height)
            self.log.info(f"Selected peak {heaviest_peak_height}, {heaviest_peak_hash}")
            # Check which peers are updated to this height
            peers = []
            coroutines = []
            for peer in self.server.all_connections.values():
                if peer.connection_type == NodeType.FULL_NODE:
                    peers.append(peer.peer_node_id)
                    coroutines.append(
                        peer.request_block(
                            full_node_protocol.RequestBlock(uint32(heaviest_peak_height), True), timeout=10
                        )
                    )
            for i, target_peak_response in enumerate(await asyncio.gather(*coroutines)):
                if target_peak_response is not None and isinstance(target_peak_response, RespondBlock):
                    self.sync_store.peer_has_block(
                        heaviest_peak_hash, peers[i], heaviest_peak_weight, heaviest_peak_height, False
                    )
            # TODO: disconnect from peer which gave us the heaviest_peak, if nobody has the peak
            peer_ids: Set[bytes32] = self.sync_store.get_peers_that_have_peak([heaviest_peak_hash])
            peers_with_peak: List = [c for c in self.server.all_connections.values() if c.peer_node_id in peer_ids]
            # Request weight proof from a random peer
            self.log.info(f"Total of {len(peers_with_peak)} peers with peak {heaviest_peak_height}")
            weight_proof_peer = random.choice(peers_with_peak)
            self.log.info(
                f"Requesting weight proof from peer {weight_proof_peer.peer_host} up to height"
                f" {heaviest_peak_height}"
            )
            if self.blockchain.get_peak() is not None and heaviest_peak_weight <= self.blockchain.get_peak().weight:
                raise ValueError("Not performing sync, already caught up.")
            request = full_node_protocol.RequestProofOfWeight(heaviest_peak_height, heaviest_peak_hash)
            response = await weight_proof_peer.request_proof_of_weight(request, timeout=180)
            # Disconnect from this peer, because they have not behaved properly
            if response is None or not isinstance(response, full_node_protocol.RespondProofOfWeight):
                await weight_proof_peer.close(600)
                raise RuntimeError(f"Weight proof did not arrive in time from peer: {weight_proof_peer.peer_host}")
            if response.wp.recent_chain_data[-1].reward_chain_block.height != heaviest_peak_height:
                await weight_proof_peer.close(600)
                raise RuntimeError(f"Weight proof had the wrong height: {weight_proof_peer.peer_host}")
            if response.wp.recent_chain_data[-1].reward_chain_block.weight != heaviest_peak_weight:
                await weight_proof_peer.close(600)
                raise RuntimeError(f"Weight proof had the wrong weight: {weight_proof_peer.peer_host}")
            try:
                validated, fork_point = await self.weight_proof_handler.validate_weight_proof(response.wp)
            except Exception as e:
                await weight_proof_peer.close(600)
                raise ValueError(f"Weight proof validation threw an error {e}")
            if not validated:
                await weight_proof_peer.close(600)
                raise ValueError("Weight proof validation failed")
            self.log.info(f"Re-checked peers: total of {len(peers_with_peak)} peers with peak {heaviest_peak_height}")
            # Ensures that the fork point does not change
            async with self.blockchain.lock:
                await self.blockchain.warmup(fork_point)
                await self.sync_from_fork_point(fork_point, heaviest_peak_height, heaviest_peak_hash)
        except asyncio.CancelledError:
            self.log.warning("Syncing failed, CancelledError")
        except Exception as e:
            tb = traceback.format_exc()
            self.log.error(f"Error with syncing: {type(e)}{tb}")
        finally:
            if self._shut_down:
                return
            await self._finish_sync()
    async def sync_from_fork_point(self, fork_point_height: int, target_peak_sb_height: uint32, peak_hash: bytes32):
        """Download and validate blocks from `fork_point_height` up to the sync
        target, rotating through peers that advertise the target peak.
        """
        self.log.info(f"Start syncing from fork point at {fork_point_height} up to {target_peak_sb_height}")
        peer_ids: Set[bytes32] = self.sync_store.get_peers_that_have_peak([peak_hash])
        peers_with_peak: List = [c for c in self.server.all_connections.values() if c.peer_node_id in peer_ids]
        if len(peers_with_peak) == 0:
            raise RuntimeError(f"Not syncing, no peers with header_hash {peak_hash} ")
        advanced_peak = False
        batch_size = self.constants.MAX_BLOCK_COUNT_PER_REQUESTS
        for i in range(fork_point_height, target_peak_sb_height, batch_size):
            start_height = i
            end_height = min(target_peak_sb_height, start_height + batch_size)
            request = RequestBlocks(uint32(start_height), uint32(end_height), True)
            self.log.info(f"Requesting blocks: {start_height} to {end_height}")
            batch_added = False
            to_remove = []
            # Try each candidate peer until one serves a valid batch.
            for peer in peers_with_peak:
                if peer.closed:
                    to_remove.append(peer)
                    continue
                response = await peer.request_blocks(request, timeout=15)
                if response is None:
                    await peer.close()
                    to_remove.append(peer)
                    continue
                if isinstance(response, RejectBlocks):
                    to_remove.append(peer)
                    continue
                elif isinstance(response, RespondBlocks):
                    success, advanced_peak, _ = await self.receive_block_batch(
                        response.blocks, peer, None if advanced_peak else uint32(fork_point_height)
                    )
                    if success is False:
                        await peer.close()
                        continue
                    else:
                        batch_added = True
                    break
            # Keep connected wallets informed of our progress after every batch.
            peak = self.blockchain.get_peak()
            assert peak is not None
            msg = make_msg(
                ProtocolMessageTypes.new_peak_wallet,
                wallet_protocol.NewPeakWallet(
                    peak.header_hash,
                    peak.height,
                    peak.weight,
                    uint32(max(peak.height - 1, uint32(0))),
                ),
            )
            await self.server.send_to_all([msg], NodeType.WALLET)
            for peer in to_remove:
                peers_with_peak.remove(peer)
            if self.sync_store.peers_changed.is_set():
                # Refresh the candidate-peer list if connections changed during the batch.
                peer_ids = self.sync_store.get_peers_that_have_peak([peak_hash])
                peers_with_peak = [c for c in self.server.all_connections.values() if c.peer_node_id in peer_ids]
                self.log.info(f"Number of peers we are syncing from: {len(peers_with_peak)}")
                self.sync_store.peers_changed.clear()
            if batch_added is False:
                self.log.info(f"Failed to fetch blocks {start_height} to {end_height} from peers: {peers_with_peak}")
                break
            else:
                self.log.info(f"Added blocks {start_height} to {end_height}")
                # Trim in-memory block records to bound memory usage during sync.
                self.blockchain.clean_block_record(
                    min(
                        end_height - self.constants.BLOCKS_CACHE_SIZE,
                        peak.height - self.constants.BLOCKS_CACHE_SIZE,
                    )
                )
    async def receive_block_batch(
        self, all_blocks: List[FullBlock], peer: ws.WSChiaConnection, fork_point: Optional[uint32]
    ) -> Tuple[bool, bool, Optional[uint32]]:
        """
        Pre-validates and adds a contiguous batch of blocks received from a peer.

        Returns a tuple (success, advanced_peak, fork_height):
        - success: False if any block failed pre-validation or was invalid/disconnected
        - advanced_peak: True if at least one block in the batch became the new peak
        - fork_height: last fork height reported by the blockchain (None on pre-validation failure)
        """
        advanced_peak = False
        fork_height: Optional[uint32] = uint32(0)
        # Skip the prefix of blocks we already have; only validate the rest.
        blocks_to_validate: List[FullBlock] = []
        for i, block in enumerate(all_blocks):
            if not self.blockchain.contains_block(block.header_hash):
                blocks_to_validate = all_blocks[i:]
                break
        if len(blocks_to_validate) == 0:
            # Entire batch is already known: nothing to do.
            return True, False, fork_height
        pre_validate_start = time.time()
        pre_validation_results: Optional[
            List[PreValidationResult]
        ] = await self.blockchain.pre_validate_blocks_multiprocessing(blocks_to_validate)
        self.log.debug(f"Block pre-validation time: {time.time() - pre_validate_start}")
        if pre_validation_results is None:
            return False, False, None
        for i, block in enumerate(blocks_to_validate):
            if pre_validation_results[i].error is not None:
                self.log.error(
                    f"Invalid block from peer: {peer.get_peer_info()} {Err(pre_validation_results[i].error)}"
                )
                return False, advanced_peak, fork_height
            assert pre_validation_results[i].required_iters is not None
            # Only pass the external fork point until the peak first advances;
            # after that the blockchain computes fork heights itself.
            (result, error, fork_height,) = await self.blockchain.receive_block(
                block, pre_validation_results[i], None if advanced_peak else uint32(fork_point_height)
            )
            if result == ReceiveBlockResult.NEW_PEAK:
                advanced_peak = True
            elif result == ReceiveBlockResult.INVALID_BLOCK or result == ReceiveBlockResult.DISCONNECTED_BLOCK:
                if error is not None:
                    self.log.error(f"Error: {error}, Invalid block from peer: {peer.get_peer_info()} ")
                return False, advanced_peak, fork_height
            block_record = self.blockchain.block_record(block.header_hash)
            if block_record.sub_epoch_summary_included is not None:
                # Crossed a sub-epoch boundary: regenerate weight proof segments.
                await self.weight_proof_handler.create_prev_sub_epoch_segments()
        if advanced_peak:
            self._state_changed("new_peak")
        self.log.debug(
            f"Total time for {len(blocks_to_validate)} blocks: {time.time() - pre_validate_start}, "
            f"advanced: {advanced_peak}"
        )
        return True, advanced_peak, fork_height
    async def _finish_sync(self):
        """
        Finalize a long sync: leave sync mode, clear sync info, and re-run peak
        post-processing so timelords, peers, and wallets learn of the final peak.
        """
        self.sync_store.set_sync_mode(False)
        self._state_changed("sync_mode")
        if self.server is None:
            # Node is shutting down; nothing to broadcast.
            return
        peak: Optional[BlockRecord] = self.blockchain.get_peak()
        async with self.blockchain.lock:
            await self.sync_store.clear_sync_info()
            peak_fb: FullBlock = await self.blockchain.get_full_peak()
            if peak is not None:
                # Re-announce the peak; fork height peak.height - 1 keeps wallets consistent.
                await self.peak_post_processing(peak_fb, peak, peak.height - 1, None)
        if peak is not None:
            # Warm the weight proof cache for the new peak.
            await self.weight_proof_handler.get_proof_of_weight(peak.header_hash)
            self._state_changed("block")
def has_valid_pool_sig(self, block: Union[UnfinishedBlock, FullBlock]):
if (
block.foliage.foliage_block_data.pool_target
== PoolTarget(self.constants.GENESIS_PRE_FARM_POOL_PUZZLE_HASH, uint32(0))
and block.foliage.prev_block_hash != self.constants.GENESIS_CHALLENGE
and block.reward_chain_block.proof_of_space.pool_public_key is not None
):
if not AugSchemeMPL.verify(
block.reward_chain_block.proof_of_space.pool_public_key,
bytes(block.foliage.foliage_block_data.pool_target),
block.foliage.foliage_block_data.pool_signature,
):
return False
return True
    async def peak_post_processing(
        self, block: FullBlock, record: BlockRecord, fork_height: uint32, peer: Optional[ws.WSChiaConnection]
    ):
        """
        Update full node state after a new peak and notify timelords, full nodes,
        and wallets. *block*/*record* describe the new peak; *fork_height* is where
        the chain forked from the previous peak; *peer* (if any) is the node the
        block came from and is excluded from the full-node broadcast.
        """
        difficulty = self.blockchain.get_next_difficulty(record.header_hash, False)
        sub_slot_iters = self.blockchain.get_next_slot_iters(record.header_hash, False)
        self.log.info(
            f"🌱 Updated peak to height {record.height}, weight {record.weight}, "
            f"hh {record.header_hash}, "
            f"forked at {fork_height}, rh: {record.reward_infusion_new_challenge}, "
            f"total iters: {record.total_iters}, "
            f"overflow: {record.overflow}, "
            f"deficit: {record.deficit}, "
            f"difficulty: {difficulty}, "
            f"sub slot iters: {sub_slot_iters}"
        )
        sub_slots = await self.blockchain.get_sp_and_ip_sub_slots(record.header_hash)
        assert sub_slots is not None
        if not self.sync_store.get_sync_mode():
            # Keep the in-memory block record cache bounded while not syncing.
            self.blockchain.clean_block_records()
        added_eos, new_sps, new_ips = self.full_node_store.new_peak(
            record,
            block,
            sub_slots[0],
            sub_slots[1],
            fork_height != block.height - 1 and block.height != 0,
            self.blockchain,
        )
        if sub_slots[1] is None:
            assert record.ip_sub_slot_total_iters(self.constants) == 0
        # Ensure the signage point is also in the store, for consistency
        self.full_node_store.new_signage_point(
            record.signage_point_index,
            self.blockchain,
            record,
            record.sub_slot_iters,
            SignagePoint(
                block.reward_chain_block.challenge_chain_sp_vdf,
                block.challenge_chain_sp_proof,
                block.reward_chain_block.reward_chain_sp_vdf,
                block.reward_chain_sp_proof,
            ),
            skip_vdf_validation=True,
        )
        # Update the mempool (returns successful pending transactions added to the mempool)
        for bundle, result, spend_name in await self.mempool_manager.new_peak(self.blockchain.get_peak()):
            self.log.debug(f"Added transaction to mempool: {spend_name}")
            mempool_item = self.mempool_manager.get_mempool_item(spend_name)
            assert mempool_item is not None
            fees = mempool_item.fee
            assert fees >= 0
            assert result.cost is not None
            new_tx = full_node_protocol.NewTransaction(
                spend_name,
                result.cost,
                uint64(bundle.fees()),
            )
            msg = make_msg(ProtocolMessageTypes.new_transaction, new_tx)
            await self.server.send_to_all([msg], NodeType.FULL_NODE)
        # If there were pending end of slots that happen after this peak, broadcast them if they are added
        if added_eos is not None:
            broadcast = full_node_protocol.NewSignagePointOrEndOfSubSlot(
                added_eos.challenge_chain.challenge_chain_end_of_slot_vdf.challenge,
                added_eos.challenge_chain.get_hash(),
                uint8(0),
                added_eos.reward_chain.end_of_slot_vdf.challenge,
            )
            msg = make_msg(ProtocolMessageTypes.new_signage_point_or_end_of_sub_slot, broadcast)
            await self.server.send_to_all([msg], NodeType.FULL_NODE)
        # TODO: maybe add and broadcast new SP/IPs as well?
        if record.height % 1000 == 0:
            # Occasionally clear the seen list to keep it small
            self.full_node_store.clear_seen_unfinished_blocks()
        if self.sync_store.get_sync_mode() is False:
            await self.send_peak_to_timelords(block)
            # Tell full nodes about the new peak
            msg = make_msg(
                ProtocolMessageTypes.new_peak,
                full_node_protocol.NewPeak(
                    record.header_hash,
                    record.height,
                    record.weight,
                    fork_height,
                    block.reward_chain_block.get_unfinished().get_hash(),
                ),
            )
            if peer is not None:
                await self.server.send_to_all_except([msg], NodeType.FULL_NODE, peer.peer_node_id)
            else:
                await self.server.send_to_all([msg], NodeType.FULL_NODE)
        # Tell wallets about the new peak
        msg = make_msg(
            ProtocolMessageTypes.new_peak_wallet,
            wallet_protocol.NewPeakWallet(
                record.header_hash,
                record.height,
                record.weight,
                fork_height,
            ),
        )
        await self.server.send_to_all([msg], NodeType.WALLET)
        self._state_changed("new_peak")
    async def respond_block(
        self,
        respond_block: full_node_protocol.RespondBlock,
        peer: Optional[ws.WSChiaConnection] = None,
    ) -> Optional[Message]:
        """
        Receive a full block, validate it, and add it to the blockchain. On a new
        peak, runs peak post-processing (which broadcasts to peers and wallets).
        Raises ConsensusError for invalid blocks; always returns None otherwise.
        """
        block: FullBlock = respond_block.block
        if self.sync_store.get_sync_mode():
            # During long sync, blocks are handled by the sync task instead.
            return None
        # Adds the block to seen, and check if it's seen before (which means header is in memory)
        header_hash = block.header_hash
        if self.blockchain.contains_block(header_hash):
            return None
        pre_validation_result: Optional[PreValidationResult] = None
        if block.is_transaction_block() and block.transactions_generator is None:
            # The generator was stripped; recover it (and its cached validation
            # result) from the matching unfinished block if we have one.
            unfinished_rh: bytes32 = block.reward_chain_block.get_unfinished().get_hash()
            unf_block: Optional[UnfinishedBlock] = self.full_node_store.get_unfinished_block(unfinished_rh)
            if unf_block is not None and unf_block.transactions_generator is not None:
                pre_validation_result = self.full_node_store.get_unfinished_block_result(unfinished_rh)
                assert pre_validation_result is not None
                block = dataclasses.replace(block, transactions_generator=unf_block.transactions_generator)
        async with self.blockchain.lock:
            # Re-check under the lock: another task may have added the block.
            if self.blockchain.contains_block(header_hash):
                return None
            validation_start = time.time()
            pre_validation_results: Optional[
                List[PreValidationResult]
            ] = await self.blockchain.pre_validate_blocks_multiprocessing([block], pre_validation_result is None)
            if pre_validation_results is None:
                raise ValueError(f"Failed to validate block {header_hash} height {block.height}")
            if pre_validation_results[0].error is not None:
                if Err(pre_validation_results[0].error) == Err.INVALID_PREV_BLOCK_HASH:
                    # A disconnected block is not fatal; it is simply not added.
                    added: ReceiveBlockResult = ReceiveBlockResult.DISCONNECTED_BLOCK
                    error_code: Optional[Err] = Err.INVALID_PREV_BLOCK_HASH
                    fork_height: Optional[uint32] = None
                else:
                    raise ValueError(
                        f"Failed to validate block {header_hash} height "
                        f"{block.height}: {Err(pre_validation_results[0].error).name}"
                    )
            else:
                result_to_validate = (
                    pre_validation_results[0] if pre_validation_result is None else pre_validation_result
                )
                assert result_to_validate.required_iters == pre_validation_results[0].required_iters
                added, error_code, fork_height = await self.blockchain.receive_block(block, result_to_validate, None)
            validation_time = time.time() - validation_start
            if added == ReceiveBlockResult.ALREADY_HAVE_BLOCK:
                return None
            elif added == ReceiveBlockResult.INVALID_BLOCK:
                assert error_code is not None
                self.log.error(f"Block {header_hash} at height {block.height} is invalid with code {error_code}.")
                raise ConsensusError(error_code, header_hash)
            elif added == ReceiveBlockResult.DISCONNECTED_BLOCK:
                self.log.info(f"Disconnected block {header_hash} at height {block.height}")
                return None
            elif added == ReceiveBlockResult.NEW_PEAK:
                # Only propagate blocks which extend the blockchain (becomes one of the heads)
                new_peak: Optional[BlockRecord] = self.blockchain.get_peak()
                assert new_peak is not None and fork_height is not None
                self.log.debug(f"Validation time for peak: {validation_time}")
                await self.peak_post_processing(block, new_peak, fork_height, peer)
            elif added == ReceiveBlockResult.ADDED_AS_ORPHAN:
                self.log.info(
                    f"Received orphan block of height {block.height} rh " f"{block.reward_chain_block.get_hash()}"
                )
            else:
                # Should never reach here, all the cases are covered
                raise RuntimeError(f"Invalid result from receive_block {added}")
        # This code path is reached if added == ADDED_AS_ORPHAN or NEW_TIP
        peak = self.blockchain.get_peak()
        assert peak is not None
        # Removes all temporary data for old blocks
        clear_height = uint32(max(0, peak.height - 50))
        self.full_node_store.clear_candidate_blocks_below(clear_height)
        self.full_node_store.clear_unfinished_blocks_below(clear_height)
        if peak.height % 1000 == 0 and not self.sync_store.get_sync_mode():
            await self.sync_store.clear_sync_info()  # Occasionally clear sync peer info
        self._state_changed("block")
        return None
    async def respond_unfinished_block(
        self,
        respond_unfinished_block: full_node_protocol.RespondUnfinishedBlock,
        peer: Optional[ws.WSChiaConnection],
        farmed_block: bool = False,
    ):
        """
        We have received an unfinished block, either created by us, or from another
        peer. Validate it and, if it is a good block, store it and propagate it to
        other full nodes and timelords.
        """
        block = respond_unfinished_block.unfinished_block
        if block.prev_header_hash != self.constants.GENESIS_CHALLENGE and not self.blockchain.contains_block(
            block.prev_header_hash
        ):
            # No need to request the parent, since the peer will send it to us anyway, via NewPeak
            self.log.debug("Received a disconnected unfinished block")
            return
        # Adds the unfinished block to seen, and check if it's seen before, to prevent processing it twice
        if self.full_node_store.seen_unfinished_block(block.get_hash()):
            return
        block_hash = block.reward_chain_block.get_hash()
        if self.full_node_store.get_unfinished_block(block_hash) is not None:
            return
        peak: Optional[BlockRecord] = self.blockchain.get_peak()
        if peak is not None:
            if block.total_iters < peak.sp_total_iters(self.constants):
                # This block is too far behind to ever extend our peak.
                return
        if block.prev_header_hash == self.constants.GENESIS_CHALLENGE:
            prev_b = None
        else:
            prev_b = self.blockchain.block_record(block.prev_header_hash)
        # Count blocks in the current sub slot to enforce MAX_SUB_SLOT_BLOCKS.
        if len(block.finished_sub_slots) > 0:
            num_blocks_in_ss = 1  # Curr
        else:
            curr = self.blockchain.try_block_record(block.prev_header_hash)
            num_blocks_in_ss = 2  # Curr and prev
            while (curr is not None) and not curr.first_in_sub_slot:
                curr = self.blockchain.try_block_record(curr.prev_hash)
                num_blocks_in_ss += 1
        if num_blocks_in_ss > self.constants.MAX_SUB_SLOT_BLOCKS:
            # TODO: potentially allow overflow blocks here, which count for the next slot
            self.log.warning("Too many blocks added, not adding block")
            return
        async with self.blockchain.lock:
            # TODO: pre-validate VDFs outside of lock
            validate_result = await self.blockchain.validate_unfinished_block(block)
            if validate_result.error is not None:
                raise ConsensusError(Err(validate_result.error))
        assert validate_result.required_iters is not None
        # Perform another check, in case we have already concurrently added the same unfinished block
        if self.full_node_store.get_unfinished_block(block_hash) is not None:
            return
        if block.prev_header_hash == self.constants.GENESIS_CHALLENGE:
            height = uint32(0)
        else:
            height = uint32(self.blockchain.block_record(block.prev_header_hash).height + 1)
        ses: Optional[SubEpochSummary] = next_sub_epoch_summary(
            self.constants,
            self.blockchain,
            validate_result.required_iters,
            block,
            True,
        )
        self.full_node_store.add_unfinished_block(height, block, validate_result)
        if farmed_block is True:
            self.log.info(f"🍀 ️Farmed unfinished_block {block_hash}")
        else:
            self.log.info(f"Added unfinished_block {block_hash}, not farmed")
        sub_slot_iters, difficulty = get_next_sub_slot_iters_and_difficulty(
            self.constants,
            len(block.finished_sub_slots) > 0,
            prev_b,
            self.blockchain,
        )
        # Determine the reward chain hash preceding the signage point (needed by timelords).
        if block.reward_chain_block.signage_point_index == 0:
            res = self.full_node_store.get_sub_slot(block.reward_chain_block.pos_ss_cc_challenge_hash)
            if res is None:
                if block.reward_chain_block.pos_ss_cc_challenge_hash == self.constants.GENESIS_CHALLENGE:
                    rc_prev = self.constants.GENESIS_CHALLENGE
                else:
                    self.log.warning(f"Do not have sub slot {block.reward_chain_block.pos_ss_cc_challenge_hash}")
                    return
            else:
                rc_prev = res[0].reward_chain.get_hash()
        else:
            assert block.reward_chain_block.reward_chain_sp_vdf is not None
            rc_prev = block.reward_chain_block.reward_chain_sp_vdf.challenge
        timelord_request = timelord_protocol.NewUnfinishedBlock(
            block.reward_chain_block,
            difficulty,
            sub_slot_iters,
            block.foliage,
            ses,
            rc_prev,
        )
        msg = make_msg(ProtocolMessageTypes.new_unfinished_block, timelord_request)
        await self.server.send_to_all([msg], NodeType.TIMELORD)
        full_node_request = full_node_protocol.NewUnfinishedBlock(block.reward_chain_block.get_hash())
        msg = make_msg(ProtocolMessageTypes.new_unfinished_block, full_node_request)
        if peer is not None:
            await self.server.send_to_all_except([msg], NodeType.FULL_NODE, peer.peer_node_id)
        else:
            await self.server.send_to_all([msg], NodeType.FULL_NODE)
        self._state_changed("unfinished_block")
    async def new_infusion_point_vdf(
        self, request: timelord_protocol.NewInfusionPointVDF, timelord_peer: Optional[ws.WSChiaConnection] = None
    ) -> Optional[Message]:
        """
        A timelord sent an infusion point VDF for an unfinished block we track.
        Combine them into a finished FullBlock and submit it via respond_block.
        Always returns None (messages to peers are sent out-of-band).
        """
        # Lookup unfinished blocks
        unfinished_block: Optional[UnfinishedBlock] = self.full_node_store.get_unfinished_block(
            request.unfinished_reward_hash
        )
        if unfinished_block is None:
            self.log.warning(
                f"Do not have unfinished reward chain block {request.unfinished_reward_hash}, cannot finish."
            )
            return None
        prev_b: Optional[BlockRecord] = None
        target_rc_hash = request.reward_chain_ip_vdf.challenge
        last_slot_cc_hash = request.challenge_chain_ip_vdf.challenge
        # Backtracks through end of slot objects, should work for multiple empty sub slots
        for eos, _, _ in reversed(self.full_node_store.finished_sub_slots):
            if eos is not None and eos.reward_chain.get_hash() == target_rc_hash:
                target_rc_hash = eos.reward_chain.end_of_slot_vdf.challenge
        if target_rc_hash == self.constants.GENESIS_CHALLENGE:
            prev_b = None
        else:
            # Find the prev block, starts looking backwards from the peak. target_rc_hash must be the hash of a block
            # and not an end of slot (since we just looked through the slots and backtracked)
            curr: Optional[BlockRecord] = self.blockchain.get_peak()
            for _ in range(10):
                if curr is None:
                    break
                if curr.reward_infusion_new_challenge == target_rc_hash:
                    # Found our prev block
                    prev_b = curr
                    break
                curr = self.blockchain.try_block_record(curr.prev_hash)
            # If not found, cache keyed on prev block
            if prev_b is None:
                self.full_node_store.add_to_future_ip(request)
                self.log.warning(f"Previous block is None, infusion point {request.reward_chain_ip_vdf.challenge}")
                return None
        finished_sub_slots: Optional[List[EndOfSubSlotBundle]] = self.full_node_store.get_finished_sub_slots(
            self.blockchain,
            prev_b,
            last_slot_cc_hash,
        )
        if finished_sub_slots is None:
            return None
        sub_slot_iters, difficulty = get_next_sub_slot_iters_and_difficulty(
            self.constants,
            len(finished_sub_slots) > 0,
            prev_b,
            self.blockchain,
        )
        # Total iters at the start of the sub slot containing this block's proof of space.
        if unfinished_block.reward_chain_block.pos_ss_cc_challenge_hash == self.constants.GENESIS_CHALLENGE:
            sub_slot_start_iters = uint128(0)
        else:
            ss_res = self.full_node_store.get_sub_slot(unfinished_block.reward_chain_block.pos_ss_cc_challenge_hash)
            if ss_res is None:
                self.log.warning(f"Do not have sub slot {unfinished_block.reward_chain_block.pos_ss_cc_challenge_hash}")
                return None
            _, _, sub_slot_start_iters = ss_res
        sp_total_iters = uint128(
            sub_slot_start_iters
            + calculate_sp_iters(
                self.constants,
                sub_slot_iters,
                unfinished_block.reward_chain_block.signage_point_index,
            )
        )
        block: FullBlock = unfinished_block_to_full_block(
            unfinished_block,
            request.challenge_chain_ip_vdf,
            request.challenge_chain_ip_proof,
            request.reward_chain_ip_vdf,
            request.reward_chain_ip_proof,
            request.infused_challenge_chain_ip_vdf,
            request.infused_challenge_chain_ip_proof,
            finished_sub_slots,
            prev_b,
            self.blockchain,
            sp_total_iters,
            difficulty,
        )
        if not self.has_valid_pool_sig(block):
            self.log.warning("Trying to make a pre-farm block but height is not 0")
            return None
        try:
            await self.respond_block(full_node_protocol.RespondBlock(block))
        except Exception as e:
            self.log.warning(f"Consensus error validating block: {e}")
            if timelord_peer is not None:
                # Only sends to the timelord who sent us this VDF, to reset them to the correct peak
                await self.send_peak_to_timelords(peer=timelord_peer)
        return None
    async def respond_end_of_sub_slot(
        self, request: full_node_protocol.RespondEndOfSubSlot, peer: ws.WSChiaConnection
    ) -> Tuple[Optional[Message], bool]:
        """
        Handle a finished end-of-sub-slot bundle from a peer. Returns (message,
        added): *message* is an optional follow-up request (for a missing previous
        sub slot), *added* indicates whether the sub slot entered the store.
        """
        fetched_ss = self.full_node_store.get_sub_slot(request.end_of_slot_bundle.challenge_chain.get_hash())
        if fetched_ss is not None:
            # Already have the sub-slot
            return None, True
        async with self.timelord_lock:
            fetched_ss = self.full_node_store.get_sub_slot(
                request.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge
            )
            if (
                (fetched_ss is None)
                and request.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge
                != self.constants.GENESIS_CHALLENGE
            ):
                # If we don't have the prev, request the prev instead
                full_node_request = full_node_protocol.RequestSignagePointOrEndOfSubSlot(
                    request.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge,
                    uint8(0),
                    bytes([0] * 32),
                )
                return (
                    make_msg(ProtocolMessageTypes.request_signage_point_or_end_of_sub_slot, full_node_request),
                    False,
                )
            peak = self.blockchain.get_peak()
            if peak is not None and peak.height > 2:
                next_sub_slot_iters = self.blockchain.get_next_slot_iters(peak.header_hash, True)
                next_difficulty = self.blockchain.get_next_difficulty(peak.header_hash, True)
            else:
                # Near genesis there is not enough history; use the starting constants.
                next_sub_slot_iters = self.constants.SUB_SLOT_ITERS_STARTING
                next_difficulty = self.constants.DIFFICULTY_STARTING
            # Adds the sub slot and potentially gets new infusions (None means not added).
            new_infusions = self.full_node_store.new_finished_sub_slot(
                request.end_of_slot_bundle,
                self.blockchain,
                peak,
                await self.blockchain.get_full_peak(),
            )
            if new_infusions is not None:
                self.log.info(
                    f"⏲️ Finished sub slot, SP {self.constants.NUM_SPS_SUB_SLOT}/{self.constants.NUM_SPS_SUB_SLOT}, "
                    f"{request.end_of_slot_bundle.challenge_chain.get_hash()}, "
                    f"number of sub-slots: {len(self.full_node_store.finished_sub_slots)}, "
                    f"RC hash: {request.end_of_slot_bundle.reward_chain.get_hash()}, "
                    f"Deficit {request.end_of_slot_bundle.reward_chain.deficit}"
                )
                # Notify full nodes of the new sub-slot
                broadcast = full_node_protocol.NewSignagePointOrEndOfSubSlot(
                    request.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge,
                    request.end_of_slot_bundle.challenge_chain.get_hash(),
                    uint8(0),
                    request.end_of_slot_bundle.reward_chain.end_of_slot_vdf.challenge,
                )
                msg = make_msg(ProtocolMessageTypes.new_signage_point_or_end_of_sub_slot, broadcast)
                await self.server.send_to_all_except([msg], NodeType.FULL_NODE, peer.peer_node_id)
                # Infuse any unfinished blocks that were waiting on this sub slot.
                for infusion in new_infusions:
                    await self.new_infusion_point_vdf(infusion)
                # Notify farmers of the new sub-slot
                broadcast_farmer = farmer_protocol.NewSignagePoint(
                    request.end_of_slot_bundle.challenge_chain.get_hash(),
                    request.end_of_slot_bundle.challenge_chain.get_hash(),
                    request.end_of_slot_bundle.reward_chain.get_hash(),
                    next_difficulty,
                    next_sub_slot_iters,
                    uint8(0),
                )
                msg = make_msg(ProtocolMessageTypes.new_signage_point, broadcast_farmer)
                await self.server.send_to_all([msg], NodeType.FARMER)
                return None, True
            else:
                self.log.info(
                    f"End of slot not added CC challenge "
                    f"{request.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge}"
                )
        return None, False
    async def respond_transaction(
        self,
        transaction: SpendBundle,
        spend_name: bytes32,
        peer: Optional[ws.WSChiaConnection] = None,
        test: bool = False,
    ) -> Tuple[MempoolInclusionStatus, Optional[Err]]:
        """
        Validate a spend bundle and try to add it to the mempool. On success the
        transaction is broadcast to other full nodes (excluding *peer*, if given).
        Returns the inclusion status and an error code on failure.
        """
        if self.sync_store.get_sync_mode():
            return MempoolInclusionStatus.FAILED, Err.NO_TRANSACTIONS_WHILE_SYNCING
        if not test and not (await self.synced()):
            return MempoolInclusionStatus.FAILED, Err.NO_TRANSACTIONS_WHILE_SYNCING
        peak_height = self.blockchain.get_peak_height()
        # No transactions in mempool in initial client. Remove 6 weeks after launch
        # NOTE(review): the `or NETWORK_TYPE == MAINNET` clause rejects every
        # transaction on mainnet regardless of height — confirm this permanent
        # freeze is intentional and not a mistaken `and`.
        if (
            peak_height is None
            or peak_height <= self.constants.INITIAL_FREEZE_PERIOD
            or self.constants.NETWORK_TYPE == NetworkType.MAINNET
        ):
            return MempoolInclusionStatus.FAILED, Err.INITIAL_TRANSACTION_FREEZE
        if self.mempool_manager.seen(spend_name):
            return MempoolInclusionStatus.FAILED, Err.ALREADY_INCLUDING_TRANSACTION
        self.mempool_manager.add_and_maybe_pop_seen(spend_name)
        self.log.debug(f"Processing transaction: {spend_name}")
        # Ignore if syncing
        if self.sync_store.get_sync_mode():
            status = MempoolInclusionStatus.FAILED
            error: Optional[Err] = Err.NO_TRANSACTIONS_WHILE_SYNCING
        else:
            try:
                cost_result = await self.mempool_manager.pre_validate_spendbundle(transaction)
            except Exception as e:
                # Drop from "seen" so the transaction can be retried later.
                self.mempool_manager.remove_seen(spend_name)
                raise e
            async with self.blockchain.lock:
                if self.mempool_manager.get_spendbundle(spend_name) is not None:
                    self.mempool_manager.remove_seen(spend_name)
                    return MempoolInclusionStatus.FAILED, Err.ALREADY_INCLUDING_TRANSACTION
                cost, status, error = await self.mempool_manager.add_spendbundle(transaction, cost_result, spend_name)
                if status == MempoolInclusionStatus.SUCCESS:
                    self.log.debug(f"Added transaction to mempool: {spend_name}")
                    # Only broadcast successful transactions, not pending ones. Otherwise it's a DOS
                    mempool_item = self.mempool_manager.get_mempool_item(spend_name)
                    assert mempool_item is not None
                    fees = mempool_item.fee
                    assert fees >= 0
                    assert cost is not None
                    new_tx = full_node_protocol.NewTransaction(
                        spend_name,
                        cost,
                        uint64(transaction.fees()),
                    )
                    msg = make_msg(ProtocolMessageTypes.new_transaction, new_tx)
                    if peer is None:
                        await self.server.send_to_all([msg], NodeType.FULL_NODE)
                    else:
                        await self.server.send_to_all_except([msg], NodeType.FULL_NODE, peer.peer_node_id)
                else:
                    self.mempool_manager.remove_seen(spend_name)
                    self.log.warning(
                        f"Wasn't able to add transaction with id {spend_name}, " f"status {status} error: {error}"
                    )
        return status, error
async def _needs_compact_proof(
self, vdf_info: VDFInfo, header_block: HeaderBlock, field_vdf: CompressibleVDFField
) -> bool:
if field_vdf == CompressibleVDFField.CC_EOS_VDF:
for sub_slot in header_block.finished_sub_slots:
if sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf == vdf_info:
if (
sub_slot.proofs.challenge_chain_slot_proof.witness_type == 0
and sub_slot.proofs.challenge_chain_slot_proof.normalized_to_identity
):
return False
return True
if field_vdf == CompressibleVDFField.ICC_EOS_VDF:
for sub_slot in header_block.finished_sub_slots:
if (
sub_slot.infused_challenge_chain is not None
and sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf == vdf_info
):
assert sub_slot.proofs.infused_challenge_chain_slot_proof is not None
if (
sub_slot.proofs.infused_challenge_chain_slot_proof.witness_type == 0
and sub_slot.proofs.infused_challenge_chain_slot_proof.normalized_to_identity
):
return False
return True
if field_vdf == CompressibleVDFField.CC_SP_VDF:
if header_block.reward_chain_block.challenge_chain_sp_vdf is None:
return False
if vdf_info == header_block.reward_chain_block.challenge_chain_sp_vdf:
assert header_block.challenge_chain_sp_proof is not None
if (
header_block.challenge_chain_sp_proof.witness_type == 0
and header_block.challenge_chain_sp_proof.normalized_to_identity
):
return False
return True
if field_vdf == CompressibleVDFField.CC_IP_VDF:
if vdf_info == header_block.reward_chain_block.challenge_chain_ip_vdf:
if (
header_block.challenge_chain_ip_proof.witness_type == 0
and header_block.challenge_chain_ip_proof.normalized_to_identity
):
return False
return True
return False
    async def _can_accept_compact_proof(
        self,
        vdf_info: VDFInfo,
        vdf_proof: VDFProof,
        height: uint32,
        header_hash: bytes32,
        field_vdf: CompressibleVDFField,
    ) -> bool:
        """
        Decide whether a received compact VDF proof should replace the stored one.
        Returns True only if the block is not fully compactified, the incoming
        proof is itself compact and valid, and the stored proof for this field is
        not already compact.
        """
        is_fully_compactified = await self.block_store.is_fully_compactified(header_hash)
        if is_fully_compactified is None or is_fully_compactified:
            self.log.info(f"Already compactified block: {header_hash}. Ignoring.")
            return False
        if vdf_proof.witness_type > 0 or not vdf_proof.normalized_to_identity:
            # The incoming proof must itself be compact.
            self.log.error(f"Received vdf proof is not compact: {vdf_proof}.")
            return False
        if not vdf_proof.is_valid(self.constants, ClassgroupElement.get_default_element(), vdf_info):
            self.log.error(f"Received compact vdf proof is not valid: {vdf_proof}.")
            return False
        header_block = await self.blockchain.get_header_block_by_height(height, header_hash)
        if header_block is None:
            self.log.error(f"Can't find block for given compact vdf. Height: {height} Header hash: {header_hash}")
            return False
        is_new_proof = await self._needs_compact_proof(vdf_info, header_block, field_vdf)
        if not is_new_proof:
            self.log.info(f"Duplicate compact proof. Height: {height}. Header hash: {header_hash}.")
        return is_new_proof
async def _replace_proof(
self,
vdf_info: VDFInfo,
vdf_proof: VDFProof,
height: uint32,
field_vdf: CompressibleVDFField,
):
full_blocks = await self.block_store.get_full_blocks_at([height])
assert len(full_blocks) > 0
for block in full_blocks:
new_block = None
block_record = await self.blockchain.get_block_record_from_db(self.blockchain.height_to_hash(height))
assert block_record is not None
if field_vdf == CompressibleVDFField.CC_EOS_VDF:
for index, sub_slot in enumerate(block.finished_sub_slots):
if sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf == vdf_info:
new_proofs = dataclasses.replace(sub_slot.proofs, challenge_chain_slot_proof=vdf_proof)
new_subslot = dataclasses.replace(sub_slot, proofs=new_proofs)
new_finished_subslots = block.finished_sub_slots
new_finished_subslots[index] = new_subslot
new_block = dataclasses.replace(block, finished_sub_slots=new_finished_subslots)
break
if field_vdf == CompressibleVDFField.ICC_EOS_VDF:
for index, sub_slot in enumerate(block.finished_sub_slots):
if (
sub_slot.infused_challenge_chain is not None
and sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf == vdf_info
):
new_proofs = dataclasses.replace(sub_slot.proofs, infused_challenge_chain_slot_proof=vdf_proof)
new_subslot = dataclasses.replace(sub_slot, proofs=new_proofs)
new_finished_subslots = block.finished_sub_slots
new_finished_subslots[index] = new_subslot
new_block = dataclasses.replace(block, finished_sub_slots=new_finished_subslots)
break
if field_vdf == CompressibleVDFField.CC_SP_VDF:
assert block.challenge_chain_sp_proof is not None
new_block = dataclasses.replace(block, challenge_chain_sp_proof=vdf_proof)
if field_vdf == CompressibleVDFField.CC_IP_VDF:
new_block = dataclasses.replace(block, challenge_chain_ip_proof=vdf_proof)
assert new_block is not None
await self.block_store.add_full_block(new_block, block_record)
async def respond_compact_vdf_timelord(self, request: timelord_protocol.RespondCompactProofOfTime):
field_vdf = CompressibleVDFField(int(request.field_vdf))
if not await self._can_accept_compact_proof(
request.vdf_info, request.vdf_proof, request.height, request.header_hash, field_vdf
):
return
async with self.blockchain.lock:
await self._replace_proof(request.vdf_info, request.vdf_proof, request.height, field_vdf)
msg = make_msg(
ProtocolMessageTypes.new_compact_vdf,
full_node_protocol.NewCompactVDF(request.height, request.header_hash, request.field_vdf, request.vdf_info),
)
if self.server is not None:
await self.server.send_to_all([msg], NodeType.FULL_NODE)
    async def new_compact_vdf(self, request: full_node_protocol.NewCompactVDF, peer: ws.WSChiaConnection):
        """
        A peer announced a compact VDF for a block. If our stored proof for that
        field is not yet compact, ask the peer for the actual proof.
        """
        is_fully_compactified = await self.block_store.is_fully_compactified(request.header_hash)
        if is_fully_compactified is None or is_fully_compactified:
            # NOTE(review): this path returns False while the others return None —
            # looks unintentional; confirm no caller relies on the return value.
            return False
        header_block = await self.blockchain.get_header_block_by_height(request.height, request.header_hash)
        if header_block is None:
            return
        field_vdf = CompressibleVDFField(int(request.field_vdf))
        if await self._needs_compact_proof(request.vdf_info, header_block, field_vdf):
            msg = make_msg(
                ProtocolMessageTypes.request_compact_vdf,
                full_node_protocol.RequestCompactVDF(
                    request.height, request.header_hash, request.field_vdf, request.vdf_info
                ),
            )
            await peer.send_message(msg)
    async def request_compact_vdf(self, request: full_node_protocol.RequestCompactVDF, peer: ws.WSChiaConnection):
        """
        A peer asked for one of our compact VDF proofs; look up the proof for the
        requested field in the header block and send it back if it is compact.
        """
        header_block = await self.blockchain.get_header_block_by_height(request.height, request.header_hash)
        if header_block is None:
            return
        vdf_proof: Optional[VDFProof] = None
        field_vdf = CompressibleVDFField(int(request.field_vdf))
        if field_vdf == CompressibleVDFField.CC_EOS_VDF:
            for sub_slot in header_block.finished_sub_slots:
                if sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf == request.vdf_info:
                    vdf_proof = sub_slot.proofs.challenge_chain_slot_proof
                    break
        if field_vdf == CompressibleVDFField.ICC_EOS_VDF:
            for sub_slot in header_block.finished_sub_slots:
                if (
                    sub_slot.infused_challenge_chain is not None
                    and sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf == request.vdf_info
                ):
                    vdf_proof = sub_slot.proofs.infused_challenge_chain_slot_proof
                    break
        if (
            field_vdf == CompressibleVDFField.CC_SP_VDF
            and header_block.reward_chain_block.challenge_chain_sp_vdf == request.vdf_info
        ):
            vdf_proof = header_block.challenge_chain_sp_proof
        if (
            field_vdf == CompressibleVDFField.CC_IP_VDF
            and header_block.reward_chain_block.challenge_chain_ip_vdf == request.vdf_info
        ):
            vdf_proof = header_block.challenge_chain_ip_proof
        if vdf_proof is None or vdf_proof.witness_type > 0 or not vdf_proof.normalized_to_identity:
            # Either we don't have the proof, or ours isn't compact yet.
            self.log.error(f"{peer} requested compact vdf we don't have, height: {request.height}.")
            return
        compact_vdf = full_node_protocol.RespondCompactVDF(
            request.height,
            request.header_hash,
            request.field_vdf,
            request.vdf_info,
            vdf_proof,
        )
        msg = make_msg(ProtocolMessageTypes.respond_compact_vdf, compact_vdf)
        await peer.send_message(msg)
async def respond_compact_vdf(self, request: full_node_protocol.RespondCompactVDF, peer: ws.WSChiaConnection):
field_vdf = CompressibleVDFField(int(request.field_vdf))
if not await self._can_accept_compact_proof(
request.vdf_info, request.vdf_proof, request.height, request.header_hash, field_vdf
):
return
async with self.blockchain.lock:
if self.blockchain.seen_compact_proofs(request.vdf_info, request.height):
return
await self._replace_proof(request.vdf_info, request.vdf_proof, request.height, field_vdf)
msg = make_msg(
ProtocolMessageTypes.new_compact_vdf,
full_node_protocol.NewCompactVDF(request.height, request.header_hash, request.field_vdf, request.vdf_info),
)
if self.server is not None:
await self.server.send_to_all_except([msg], NodeType.FULL_NODE, peer.peer_node_id)
    async def broadcast_uncompact_blocks(self, uncompact_interval_scan: int, target_uncompact_proofs: int):
        """Background task: periodically scan the chain for VDF proofs that are
        not yet compact (witness_type > 0 or not normalized to identity) and
        broadcast a sampled subset of RequestCompactProofOfTime messages to
        connected timelords.

        uncompact_interval_scan: seconds to sleep between full scans.
        target_uncompact_proofs: max number of requests broadcast per scan.
        """
        min_height: Optional[int] = 0
        try:
            while not self._shut_down:
                # Pause scanning entirely while the node is syncing.
                while self.sync_store.get_sync_mode():
                    if self._shut_down:
                        return
                    await asyncio.sleep(30)
                broadcast_list: List[timelord_protocol.RequestCompactProofOfTime] = []
                new_min_height = None
                max_height = self.blockchain.get_peak_height()
                if max_height is None:
                    await asyncio.sleep(30)
                    continue
                # Calculate 'min_height' correctly the first time this task is launched, using the db.
                assert min_height is not None
                min_height = await self.block_store.get_first_not_compactified(min_height)
                if min_height is None or min_height > max(0, max_height - 1000):
                    min_height = max(0, max_height - 1000)
                batches_finished = 0
                self.log.info("Scanning the blockchain for uncompact blocks.")
                for h in range(min_height, max_height, 100):
                    # Got 10 times the target header count, sampling the target headers should contain
                    # enough randomness to split the work between blueboxes.
                    if len(broadcast_list) > target_uncompact_proofs * 10:
                        break
                    stop_height = min(h + 99, max_height)
                    # NOTE(review): this fetches from min_height rather than h, so each
                    # batch re-reads all earlier headers in the window — presumably the
                    # first argument should be `h`; confirm against upstream.
                    headers = await self.blockchain.get_header_blocks_in_range(min_height, stop_height)
                    for header in headers.values():
                        prev_broadcast_list_len = len(broadcast_list)
                        # Skip orphaned headers that are no longer on the main chain.
                        expected_header_hash = self.blockchain.height_to_hash(header.height)
                        if header.header_hash != expected_header_hash:
                            continue
                        for sub_slot in header.finished_sub_slots:
                            if (
                                sub_slot.proofs.challenge_chain_slot_proof.witness_type > 0
                                or not sub_slot.proofs.challenge_chain_slot_proof.normalized_to_identity
                            ):
                                broadcast_list.append(
                                    timelord_protocol.RequestCompactProofOfTime(
                                        sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf,
                                        header.header_hash,
                                        header.height,
                                        uint8(CompressibleVDFField.CC_EOS_VDF),
                                    )
                                )
                            if sub_slot.proofs.infused_challenge_chain_slot_proof is not None and (
                                sub_slot.proofs.infused_challenge_chain_slot_proof.witness_type > 0
                                or not sub_slot.proofs.infused_challenge_chain_slot_proof.normalized_to_identity
                            ):
                                assert sub_slot.infused_challenge_chain is not None
                                broadcast_list.append(
                                    timelord_protocol.RequestCompactProofOfTime(
                                        sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf,
                                        header.header_hash,
                                        header.height,
                                        uint8(CompressibleVDFField.ICC_EOS_VDF),
                                    )
                                )
                        if header.challenge_chain_sp_proof is not None and (
                            header.challenge_chain_sp_proof.witness_type > 0
                            or not header.challenge_chain_sp_proof.normalized_to_identity
                        ):
                            assert header.reward_chain_block.challenge_chain_sp_vdf is not None
                            broadcast_list.append(
                                timelord_protocol.RequestCompactProofOfTime(
                                    header.reward_chain_block.challenge_chain_sp_vdf,
                                    header.header_hash,
                                    header.height,
                                    uint8(CompressibleVDFField.CC_SP_VDF),
                                )
                            )
                        if (
                            header.challenge_chain_ip_proof.witness_type > 0
                            or not header.challenge_chain_ip_proof.normalized_to_identity
                        ):
                            broadcast_list.append(
                                timelord_protocol.RequestCompactProofOfTime(
                                    header.reward_chain_block.challenge_chain_ip_vdf,
                                    header.header_hash,
                                    header.height,
                                    uint8(CompressibleVDFField.CC_IP_VDF),
                                )
                            )
                        # This is the first header with uncompact proofs. Store its height so next time we iterate
                        # only from here. Fix header block iteration window to at least 1000, so reorgs will be
                        # handled correctly.
                        if prev_broadcast_list_len == 0 and len(broadcast_list) > 0 and h <= max(0, max_height - 1000):
                            new_min_height = header.height
                    # Small sleep between batches.
                    batches_finished += 1
                    if batches_finished % 10 == 0:
                        await asyncio.sleep(1)
                # We have no uncompact blocks, but maintain the block iteration window to at least 1000 blocks.
                if new_min_height is None:
                    new_min_height = max(0, max_height - 1000)
                min_height = new_min_height
                # Sample down to the target count so work is split between blueboxes.
                if len(broadcast_list) > target_uncompact_proofs:
                    random.shuffle(broadcast_list)
                    broadcast_list = broadcast_list[:target_uncompact_proofs]
                if self.sync_store.get_sync_mode():
                    continue
                if self.server is not None:
                    for new_pot in broadcast_list:
                        msg = make_msg(ProtocolMessageTypes.request_compact_proof_of_time, new_pot)
                        await self.server.send_to_all([msg], NodeType.TIMELORD)
                await asyncio.sleep(uncompact_interval_scan)
        except Exception as e:
            error_stack = traceback.format_exc()
            self.log.error(f"Exception in broadcast_uncompact_blocks: {e}")
            self.log.error(f"Exception Stack: {error_stack}")
| true | true |
f71b10389f6b985a22b63f8c11ff239efa2dcf22 | 767 | py | Python | venv/lib/python3.8/site-packages/pkg_resources/_vendor/packaging/__about__.py | realxwx/leetcode-solve | 3a7d7d8e92a5fd5fecc347d141a1c532b92e763e | [
"Apache-2.0"
] | null | null | null | venv/lib/python3.8/site-packages/pkg_resources/_vendor/packaging/__about__.py | realxwx/leetcode-solve | 3a7d7d8e92a5fd5fecc347d141a1c532b92e763e | [
"Apache-2.0"
] | null | null | null | venv/lib/python3.8/site-packages/pkg_resources/_vendor/packaging/__about__.py | realxwx/leetcode-solve | 3a7d7d8e92a5fd5fecc347d141a1c532b92e763e | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020
# Author: xiaoweixiang
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
# Package metadata constants for the vendored ``packaging`` distribution.
from __future__ import absolute_import, division, print_function
# Names re-exported as the public metadata API of the package.
__all__ = [
    "__title__", "__summary__", "__uri__", "__version__", "__author__",
    "__email__", "__license__", "__copyright__",
]
__title__ = "packaging"
__summary__ = "Core utilities for Python packages"
__uri__ = "https://github.com/pypa/packaging"
__version__ = "16.8"
__author__ = "Donald Stufft and individual contributors"
__email__ = "donald@stufft.io"
__license__ = "BSD or Apache License, Version 2.0"
__copyright__ = "Copyright 2014-2016 %s" % __author__
from __future__ import absolute_import, division, print_function
__all__ = [
"__title__", "__summary__", "__uri__", "__version__", "__author__",
"__email__", "__license__", "__copyright__",
]
__title__ = "packaging"
__summary__ = "Core utilities for Python packages"
__uri__ = "https://github.com/pypa/packaging"
__version__ = "16.8"
__author__ = "Donald Stufft and individual contributors"
__email__ = "donald@stufft.io"
__license__ = "BSD or Apache License, Version 2.0"
__copyright__ = "Copyright 2014-2016 %s" % __author__
| true | true |
f71b11050a7b8a94c7408d4babb5e1fedbe1941d | 4,418 | py | Python | models/rdn.py | 0Miquel/LIIF-temporal | b992cb87cb9bdeba6d4c9bc3960b36ba52a1ba75 | [
"BSD-3-Clause"
] | 1 | 2021-08-09T22:43:22.000Z | 2021-08-09T22:43:22.000Z | models/rdn.py | 0Miquel/LIIF-temporal | b992cb87cb9bdeba6d4c9bc3960b36ba52a1ba75 | [
"BSD-3-Clause"
] | null | null | null | models/rdn.py | 0Miquel/LIIF-temporal | b992cb87cb9bdeba6d4c9bc3960b36ba52a1ba75 | [
"BSD-3-Clause"
] | null | null | null | # Residual Dense Network for Image Super-Resolution
# https://arxiv.org/abs/1802.08797
# modified from: https://github.com/thstkdgus35/EDSR-PyTorch
from argparse import Namespace
import torch
import torch.nn as nn
from models import register
class RDB_Conv(nn.Module):
    """One dense-block layer: 3D convolution + ReLU whose output is
    concatenated onto its input along the channel dimension."""

    def __init__(self, inChannels, growRate, kSize=3):
        super(RDB_Conv, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv3d(inChannels, growRate, kSize, padding=(kSize - 1) // 2, stride=1),
            nn.ReLU(),
        )

    def forward(self, x):
        # Dense connectivity: stack the new features after the incoming ones.
        return torch.cat((x, self.conv(x)), 1)
class RDB(nn.Module):
    """Residual Dense Block: a chain of densely connected RDB_Conv layers,
    1x1x1 local feature fusion, and a residual connection to the input."""

    def __init__(self, growRate0, growRate, nConvLayers, kSize=3):
        super(RDB, self).__init__()
        # Each layer receives the original channels plus all features grown so far.
        layers = [RDB_Conv(growRate0 + i * growRate, growRate) for i in range(nConvLayers)]
        self.convs = nn.Sequential(*layers)
        # Local Feature Fusion: squeeze the concatenated channels back to growRate0.
        self.LFF = nn.Conv3d(growRate0 + nConvLayers * growRate, growRate0, 1, padding=0, stride=1)

    def forward(self, x):
        fused = self.LFF(self.convs(x))
        return fused + x
class RDN(nn.Module):
    """Residual Dense Network trunk adapted to volumetric (5D NCDHW) inputs.
    Feature extraction uses nn.Conv3d; the original 2D layers are kept as
    commented-out references.
    """
    def __init__(self, args):
        # args: Namespace with G0, RDNkSize, RDNconfig, scale (list),
        # no_upsampling and n_colors (see make_rdn below).
        super(RDN, self).__init__()
        self.args = args
        r = args.scale[0]
        G0 = args.G0
        kSize = args.RDNkSize
        # number of RDB blocks, conv layers per block, growth channels
        self.D, C, G = {
            'A': (20, 6, 32),
            'B': (16, 8, 64),
        }[args.RDNconfig]
        # Shallow feature extraction net
        #self.SFENet1 = nn.Conv2d(args.n_colors, G0, kSize, padding=(kSize-1)//2, stride=1)
        #self.SFENet2 = nn.Conv2d(G0, G0, kSize, padding=(kSize-1)//2, stride=1)
        self.SFENet1 = nn.Conv3d(args.n_colors, G0, kSize, padding=(kSize-1)//2, stride=1)
        self.SFENet2 = nn.Conv3d(G0, G0, kSize, padding=(kSize-1)//2, stride=1)
        # Residual dense blocks and dense feature fusion
        self.RDBs = nn.ModuleList()
        for i in range(self.D):
            self.RDBs.append(
                RDB(growRate0 = G0, growRate = G, nConvLayers = C)
            )
        # Global Feature Fusion
        self.GFF = nn.Sequential(*[
            #nn.Conv2d(self.D * G0, G0, 1, padding=0, stride=1),
            #nn.Conv2d(G0, G0, kSize, padding=(kSize-1)//2, stride=1)
            nn.Conv3d(self.D * G0, G0, 1, padding=0, stride=1),
            nn.Conv3d(G0, G0, kSize, padding=(kSize - 1) // 2, stride=1)
        ])
        if args.no_upsampling:
            self.out_dim = G0
        else:
            self.out_dim = args.n_colors
            # Up-sampling net
            # NOTE(review): UPNet still uses Conv2d/PixelShuffle while the trunk
            # is Conv3d, so this branch presumably fails on 5D inputs — confirm
            # the model is only instantiated with no_upsampling=True.
            if r == 2 or r == 3:
                self.UPNet = nn.Sequential(*[
                    nn.Conv2d(G0, G * r * r, kSize, padding=(kSize-1)//2, stride=1),
                    nn.PixelShuffle(r),
                    nn.Conv2d(G, args.n_colors, kSize, padding=(kSize-1)//2, stride=1)
                ])
            elif r == 4:
                self.UPNet = nn.Sequential(*[
                    nn.Conv2d(G0, G * 4, kSize, padding=(kSize-1)//2, stride=1),
                    nn.PixelShuffle(2),
                    nn.Conv2d(G, G * 4, kSize, padding=(kSize-1)//2, stride=1),
                    nn.PixelShuffle(2),
                    nn.Conv2d(G, args.n_colors, kSize, padding=(kSize-1)//2, stride=1)
                ])
            else:
                raise ValueError("scale must be 2 or 3 or 4.")
    def forward(self, x):
        # Shallow features; f__1 is reused for the global residual connection.
        f__1 = self.SFENet1(x)
        x = self.SFENet2(f__1)
        RDBs_out = []
        for i in range(self.D):
            x = self.RDBs[i](x)
            RDBs_out.append(x)
        # Fuse every block's output, then add the shallow features back.
        x = self.GFF(torch.cat(RDBs_out,1))
        x += f__1
        if self.args.no_upsampling:
            return x
        else:
            return self.UPNet(x)
@register('rdn')
def make_rdn(G0=64, RDNkSize=3, RDNconfig='B',
             scale=2, no_upsampling=False):
    """Factory registered under 'rdn': packs the keyword options into a
    Namespace (input channels fixed to 3) and builds an RDN from it."""
    options = Namespace(
        G0=G0,
        RDNkSize=RDNkSize,
        RDNconfig=RDNconfig,
        scale=[scale],
        no_upsampling=no_upsampling,
        n_colors=3,
    )
    return RDN(options)
| 32.725926 | 92 | 0.51675 |
from argparse import Namespace
import torch
import torch.nn as nn
from models import register
class RDB_Conv(nn.Module):
def __init__(self, inChannels, growRate, kSize=3):
super(RDB_Conv, self).__init__()
Cin = inChannels
G = growRate
self.conv = nn.Sequential(*[
nn.Conv3d(Cin, G, kSize, padding=(kSize - 1) // 2, stride=1),
nn.ReLU()
])
def forward(self, x):
out = self.conv(x)
return torch.cat((x, out), 1)
class RDB(nn.Module):
def __init__(self, growRate0, growRate, nConvLayers, kSize=3):
super(RDB, self).__init__()
G0 = growRate0
G = growRate
C = nConvLayers
convs = []
for c in range(C):
convs.append(RDB_Conv(G0 + c*G, G))
self.convs = nn.Sequential(*convs)
self.LFF = nn.Conv3d(G0 + C * G, G0, 1, padding=0, stride=1)
def forward(self, x):
return self.LFF(self.convs(x)) + x
class RDN(nn.Module):
def __init__(self, args):
super(RDN, self).__init__()
self.args = args
r = args.scale[0]
G0 = args.G0
kSize = args.RDNkSize
self.D, C, G = {
'A': (20, 6, 32),
'B': (16, 8, 64),
}[args.RDNconfig]
self.SFENet1 = nn.Conv3d(args.n_colors, G0, kSize, padding=(kSize-1)//2, stride=1)
self.SFENet2 = nn.Conv3d(G0, G0, kSize, padding=(kSize-1)//2, stride=1)
self.RDBs = nn.ModuleList()
for i in range(self.D):
self.RDBs.append(
RDB(growRate0 = G0, growRate = G, nConvLayers = C)
)
self.GFF = nn.Sequential(*[
nn.Conv3d(self.D * G0, G0, 1, padding=0, stride=1),
nn.Conv3d(G0, G0, kSize, padding=(kSize - 1) // 2, stride=1)
])
if args.no_upsampling:
self.out_dim = G0
else:
self.out_dim = args.n_colors
if r == 2 or r == 3:
self.UPNet = nn.Sequential(*[
nn.Conv2d(G0, G * r * r, kSize, padding=(kSize-1)//2, stride=1),
nn.PixelShuffle(r),
nn.Conv2d(G, args.n_colors, kSize, padding=(kSize-1)//2, stride=1)
])
elif r == 4:
self.UPNet = nn.Sequential(*[
nn.Conv2d(G0, G * 4, kSize, padding=(kSize-1)//2, stride=1),
nn.PixelShuffle(2),
nn.Conv2d(G, G * 4, kSize, padding=(kSize-1)//2, stride=1),
nn.PixelShuffle(2),
nn.Conv2d(G, args.n_colors, kSize, padding=(kSize-1)//2, stride=1)
])
else:
raise ValueError("scale must be 2 or 3 or 4.")
def forward(self, x):
f__1 = self.SFENet1(x)
x = self.SFENet2(f__1)
RDBs_out = []
for i in range(self.D):
x = self.RDBs[i](x)
RDBs_out.append(x)
x = self.GFF(torch.cat(RDBs_out,1))
x += f__1
if self.args.no_upsampling:
return x
else:
return self.UPNet(x)
@register('rdn')
def make_rdn(G0=64, RDNkSize=3, RDNconfig='B',
scale=2, no_upsampling=False):
args = Namespace()
args.G0 = G0
args.RDNkSize = RDNkSize
args.RDNconfig = RDNconfig
args.scale = [scale]
args.no_upsampling = no_upsampling
args.n_colors = 3
return RDN(args)
| true | true |
f71b1176d7d60f57abf30cc89fa766a5c19610b3 | 588 | py | Python | oauth2/socialapp.py | DemocracyLab/DemocracyLab-CivicTechExchange | eec4715373679259318ff6c384c815acebf0831f | [
"MIT"
] | 64 | 2017-09-30T16:23:43.000Z | 2022-03-30T23:26:50.000Z | oauth2/socialapp.py | DemocracyLab/DemocracyLab-CivicTechExchange | eec4715373679259318ff6c384c815acebf0831f | [
"MIT"
] | 339 | 2017-10-26T06:59:14.000Z | 2022-03-10T22:34:29.000Z | oauth2/socialapp.py | DemocracyLab/DemocracyLab-CivicTechExchange | eec4715373679259318ff6c384c815acebf0831f | [
"MIT"
] | 58 | 2017-09-16T17:25:10.000Z | 2022-03-04T18:14:02.000Z | """
Decouples SocialApp client credentials from the database
"""
from django.conf import settings
class SocialAppMixin:
    """Provider mixin that supplies SocialApp OAuth2 credentials from Django
    settings instead of loading them from the database."""

    class Meta:
        abstract = True

    def get_app(self, request):
        """Return a transient ``SocialApp`` built from ``settings.SOCIAL_APPS``
        for this provider (used by OAuth2Client)."""
        entry = settings.SOCIAL_APPS.get(self.id)
        from allauth.socialaccount.models import SocialApp
        return SocialApp(
            id=entry.get('id'),
            name='SocialApp instance',
            provider=self.id,
            client_id=entry.get('client_id'),
            secret=entry.get('secret'),
            key='',
        )
class SocialAppMixin:
class Meta:
abstract = True
def get_app(self, request):
app = settings.SOCIAL_APPS.get(self.id)
from allauth.socialaccount.models import SocialApp
return SocialApp(
id=app.get('id'),
name='SocialApp instance',
provider=self.id,
client_id=app.get('client_id'),
secret=app.get('secret'),
key=''
)
| true | true |
f71b12227f1c54ad2eb63c792a779e62f95046dc | 1,353 | py | Python | ingredients/schema.py | gtg7784/Graphene-Django | 77fbbe54ea940f566da561edc492823ae7cc7643 | [
"MIT"
] | 1 | 2021-10-14T01:23:31.000Z | 2021-10-14T01:23:31.000Z | ingredients/schema.py | gtg7784/Graphene-Django | 77fbbe54ea940f566da561edc492823ae7cc7643 | [
"MIT"
] | 1 | 2021-09-22T19:41:41.000Z | 2021-09-22T19:41:41.000Z | ingredients/schema.py | gtg7784/Graphene-Django | 77fbbe54ea940f566da561edc492823ae7cc7643 | [
"MIT"
] | null | null | null | import graphene
from graphene_django.types import DjangoObjectType
from .models import Category, Ingredient
class CategoryType(DjangoObjectType):
    """GraphQL object type auto-derived from the Category model."""
    class Meta:
        model = Category
class IngredientType(DjangoObjectType):
    """GraphQL object type auto-derived from the Ingredient model."""
    class Meta:
        model = Ingredient
class Query(object):
    """Root GraphQL queries for categories and ingredients.

    Exposes single-object lookups by ``id`` or ``name`` plus ``all_*`` listings.
    """

    category = graphene.Field(CategoryType, id=graphene.Int(), name=graphene.String())
    all_categories = graphene.List(CategoryType)
    # Renamed from ``Ingredient``: graphene matches resolvers by field attribute
    # name, so ``resolve_ingredient`` was never invoked for the capitalized
    # field (which also shadowed the imported Ingredient model on the class).
    ingredient = graphene.Field(IngredientType, id=graphene.Int(), name=graphene.String())
    all_ingredients = graphene.List(IngredientType)

    def resolve_all_categories(self, info, **kwargs):
        """Return every Category."""
        return Category.objects.all()

    def resolve_all_ingredients(self, info, **kwargs):
        """Return every Ingredient."""
        return Ingredient.objects.all()

    def resolve_category(self, info, **kwargs):
        """Fetch one Category by primary key or unique name; None if neither given."""
        pk = kwargs.get('id')
        name = kwargs.get('name')
        if pk is not None:
            return Category.objects.get(pk=pk)
        if name is not None:
            return Category.objects.get(name=name)
        return None

    def resolve_ingredient(self, info, **kwargs):
        """Fetch one Ingredient by primary key or name; None if neither given."""
        # fixed: the keyword-arguments parameter was misspelled ``kwrags``
        pk = kwargs.get('id')
        name = kwargs.get('name')
        if pk is not None:
            return Ingredient.objects.get(pk=pk)
        if name is not None:
            return Ingredient.objects.get(name=name)
        return None
| 27.612245 | 90 | 0.659276 | import graphene
from graphene_django.types import DjangoObjectType
from .models import Category, Ingredient
class CategoryType(DjangoObjectType):
class Meta:
model = Category
class IngredientType(DjangoObjectType):
class Meta:
model = Ingredient
class Query(object):
category = graphene.Field(CategoryType, id=graphene.Int(), name=graphene.String())
all_categories = graphene.List(CategoryType)
Ingredient = graphene.Field(IngredientType, id=graphene.Int(), name=graphene.String())
all_ingredients = graphene.List(IngredientType)
def resolve_all_categories(self, info, **kwargs):
return Category.objects.all()
def resolve_all_ingredients(self, info, **kwargs):
return Ingredient.objects.all()
def resolve_category(self, info, **kwargs):
id = kwargs.get('id')
name = kwargs.get('name')
if id is not None:
return Category.objects.get(pk=id)
if name is not None:
return Category.objects.get(name=name)
return None
def resolve_ingredient(self, info, **kwrags):
id = kwrags.get('id')
name = kwrags.get('name')
if id is not None:
return Ingredient.objects.get(pk=id)
if name is not None:
return Ingredient.objects.get(name=name)
return None
| true | true |
f71b132f36f52a58f2c573303fc3826c3fffb90a | 6,341 | py | Python | holidays/countries/south_africa.py | Drill-D/python-holidays | f669856d9a441324d66ee3477c4d69a04e0a00ce | [
"MIT"
] | 48 | 2016-11-22T09:18:50.000Z | 2018-01-14T14:06:49.000Z | holidays/countries/south_africa.py | Drill-D/python-holidays | f669856d9a441324d66ee3477c4d69a04e0a00ce | [
"MIT"
] | 59 | 2016-12-03T15:52:36.000Z | 2018-01-16T09:37:15.000Z | holidays/countries/south_africa.py | Drill-D/python-holidays | f669856d9a441324d66ee3477c4d69a04e0a00ce | [
"MIT"
] | 51 | 2016-11-25T14:53:55.000Z | 2018-01-16T09:58:56.000Z | # -*- coding: utf-8 -*-
# python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2022
# ryanss <ryanssdev@icloud.com> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
from datetime import date, datetime
from dateutil.easter import easter
from dateutil.relativedelta import relativedelta as rd
from holidays.constants import FRI, SUN
from holidays.constants import (
JAN,
MAR,
APR,
MAY,
JUN,
JUL,
AUG,
SEP,
OCT,
NOV,
DEC,
)
from holidays.holiday_base import HolidayBase
class SouthAfrica(HolidayBase):
    """Public holidays of South Africa, including historic holidays that are
    no longer observed and one-off election/decree holidays."""
    country = "ZA"
    def __init__(self, **kwargs):
        # http://www.gov.za/about-sa/public-holidays
        # https://en.wikipedia.org/wiki/Public_holidays_in_South_Africa
        HolidayBase.__init__(self, **kwargs)
    def _populate(self, year):
        """Fill ``self`` with all holidays for ``year``."""
        # Observed since 1910, with a few name changes
        if year > 1909:
            self[date(year, 1, 1)] = "New Year's Day"
            e = easter(year)
            good_friday = e - rd(days=2)
            easter_monday = e + rd(days=1)
            self[good_friday] = "Good Friday"
            if year > 1979:
                self[easter_monday] = "Family Day"
            else:
                self[easter_monday] = "Easter Monday"
            # December 16 was renamed several times over the century.
            if 1909 < year < 1952:
                dec_16_name = "Dingaan's Day"
            elif 1951 < year < 1980:
                dec_16_name = "Day of the Covenant"
            elif 1979 < year < 1995:
                dec_16_name = "Day of the Vow"
            else:
                dec_16_name = "Day of Reconciliation"
            self[date(year, DEC, 16)] = dec_16_name
            self[date(year, DEC, 25)] = "Christmas Day"
            if year > 1979:
                dec_26_name = "Day of Goodwill"
            else:
                dec_26_name = "Boxing Day"
            self[date(year, 12, 26)] = dec_26_name
        # Observed since 1995/1/1
        if year > 1994:
            self[date(year, MAR, 21)] = "Human Rights Day"
            self[date(year, APR, 27)] = "Freedom Day"
            self[date(year, MAY, 1)] = "Workers' Day"
            self[date(year, JUN, 16)] = "Youth Day"
            self[date(year, AUG, 9)] = "National Women's Day"
            self[date(year, SEP, 24)] = "Heritage Day"
        # Once-off public holidays
        national_election = "National and provincial government elections"
        y2k = "Y2K changeover"
        local_election = "Local government elections"
        presidential = "By presidential decree"
        municipal_election = "Municipal elections"
        if year == 1999:
            self[date(1999, JUN, 2)] = national_election
            self[date(1999, DEC, 31)] = y2k
        if year == 2000:
            self[date(2000, JAN, 2)] = y2k
        if year == 2004:
            self[date(2004, APR, 14)] = national_election
        if year == 2006:
            self[date(2006, MAR, 1)] = local_election
        if year == 2008:
            self[date(2008, MAY, 2)] = presidential
        if year == 2009:
            self[date(2009, APR, 22)] = national_election
        if year == 2011:
            self[date(2011, MAY, 18)] = local_election
            self[date(2011, DEC, 27)] = presidential
        if year == 2014:
            self[date(2014, MAY, 7)] = national_election
        if year == 2016:
            self[date(2016, AUG, 3)] = local_election
        if year == 2019:
            self[date(2019, MAY, 8)] = national_election
        if year == 2021:
            self[date(2021, NOV, 1)] = municipal_election
        # As of 1995/1/1, whenever a public holiday falls on a Sunday,
        # it rolls over to the following Monday
        for k, v in list(self.items()):
            if (
                self.observed
                and year > 1994
                and k.weekday() == SUN
                and k.year == year
            ):
                # Walk forward past any already-occupied days.
                add_days = 1
                while self.get(k + rd(days=add_days)) is not None:
                    add_days += 1
                self[k + rd(days=add_days)] = v + " (Observed)"
        # Historic public holidays no longer observed
        if 1951 < year < 1974:
            self[date(year, APR, 6)] = "Van Riebeeck's Day"
        elif 1979 < year < 1995:
            self[date(year, APR, 6)] = "Founder's Day"
        if 1986 < year < 1990:
            historic_workers_day = datetime(year, MAY, 1)
            # observed on first Friday in May
            while historic_workers_day.weekday() != FRI:
                historic_workers_day += rd(days=1)
            self[historic_workers_day] = "Workers' Day"
        if 1909 < year < 1994:
            # e (Easter Sunday) is always defined here because year > 1909.
            ascension_day = e + rd(days=40)
            self[ascension_day] = "Ascension Day"
        if 1909 < year < 1952:
            self[date(year, MAY, 24)] = "Empire Day"
        if 1909 < year < 1961:
            self[date(year, MAY, 31)] = "Union Day"
        elif 1960 < year < 1994:
            self[date(year, MAY, 31)] = "Republic Day"
        if 1951 < year < 1961:
            queens_birthday = datetime(year, JUN, 7)
            # observed on second Monday in June
            while queens_birthday.weekday() != 0:
                queens_birthday += rd(days=1)
            self[queens_birthday] = "Queen's Birthday"
        if 1960 < year < 1974:
            self[date(year, JUL, 10)] = "Family Day"
        if 1909 < year < 1952:
            kings_birthday = datetime(year, AUG, 1)
            # observed on first Monday in August
            while kings_birthday.weekday() != 0:
                kings_birthday += rd(days=1)
            self[kings_birthday] = "King's Birthday"
        if 1951 < year < 1980:
            settlers_day = datetime(year, SEP, 1)
            # observed on first Monday in September
            while settlers_day.weekday() != 0:
                settlers_day += rd(days=1)
            self[settlers_day] = "Settlers' Day"
        if 1951 < year < 1994:
            self[date(year, OCT, 10)] = "Kruger Day"
class ZA(SouthAfrica):
    """ISO 3166-1 alpha-2 alias for SouthAfrica."""
    pass
class ZAF(SouthAfrica):
    """ISO 3166-1 alpha-3 alias for SouthAfrica."""
    pass
| 33.026042 | 78 | 0.543763 |
from datetime import date, datetime
from dateutil.easter import easter
from dateutil.relativedelta import relativedelta as rd
from holidays.constants import FRI, SUN
from holidays.constants import (
JAN,
MAR,
APR,
MAY,
JUN,
JUL,
AUG,
SEP,
OCT,
NOV,
DEC,
)
from holidays.holiday_base import HolidayBase
class SouthAfrica(HolidayBase):
country = "ZA"
def __init__(self, **kwargs):
HolidayBase.__init__(self, **kwargs)
def _populate(self, year):
if year > 1909:
self[date(year, 1, 1)] = "New Year's Day"
e = easter(year)
good_friday = e - rd(days=2)
easter_monday = e + rd(days=1)
self[good_friday] = "Good Friday"
if year > 1979:
self[easter_monday] = "Family Day"
else:
self[easter_monday] = "Easter Monday"
if 1909 < year < 1952:
dec_16_name = "Dingaan's Day"
elif 1951 < year < 1980:
dec_16_name = "Day of the Covenant"
elif 1979 < year < 1995:
dec_16_name = "Day of the Vow"
else:
dec_16_name = "Day of Reconciliation"
self[date(year, DEC, 16)] = dec_16_name
self[date(year, DEC, 25)] = "Christmas Day"
if year > 1979:
dec_26_name = "Day of Goodwill"
else:
dec_26_name = "Boxing Day"
self[date(year, 12, 26)] = dec_26_name
if year > 1994:
self[date(year, MAR, 21)] = "Human Rights Day"
self[date(year, APR, 27)] = "Freedom Day"
self[date(year, MAY, 1)] = "Workers' Day"
self[date(year, JUN, 16)] = "Youth Day"
self[date(year, AUG, 9)] = "National Women's Day"
self[date(year, SEP, 24)] = "Heritage Day"
national_election = "National and provincial government elections"
y2k = "Y2K changeover"
local_election = "Local government elections"
presidential = "By presidential decree"
municipal_election = "Municipal elections"
if year == 1999:
self[date(1999, JUN, 2)] = national_election
self[date(1999, DEC, 31)] = y2k
if year == 2000:
self[date(2000, JAN, 2)] = y2k
if year == 2004:
self[date(2004, APR, 14)] = national_election
if year == 2006:
self[date(2006, MAR, 1)] = local_election
if year == 2008:
self[date(2008, MAY, 2)] = presidential
if year == 2009:
self[date(2009, APR, 22)] = national_election
if year == 2011:
self[date(2011, MAY, 18)] = local_election
self[date(2011, DEC, 27)] = presidential
if year == 2014:
self[date(2014, MAY, 7)] = national_election
if year == 2016:
self[date(2016, AUG, 3)] = local_election
if year == 2019:
self[date(2019, MAY, 8)] = national_election
if year == 2021:
self[date(2021, NOV, 1)] = municipal_election
for k, v in list(self.items()):
if (
self.observed
and year > 1994
and k.weekday() == SUN
and k.year == year
):
add_days = 1
while self.get(k + rd(days=add_days)) is not None:
add_days += 1
self[k + rd(days=add_days)] = v + " (Observed)"
if 1951 < year < 1974:
self[date(year, APR, 6)] = "Van Riebeeck's Day"
elif 1979 < year < 1995:
self[date(year, APR, 6)] = "Founder's Day"
if 1986 < year < 1990:
historic_workers_day = datetime(year, MAY, 1)
while historic_workers_day.weekday() != FRI:
historic_workers_day += rd(days=1)
self[historic_workers_day] = "Workers' Day"
if 1909 < year < 1994:
ascension_day = e + rd(days=40)
self[ascension_day] = "Ascension Day"
if 1909 < year < 1952:
self[date(year, MAY, 24)] = "Empire Day"
if 1909 < year < 1961:
self[date(year, MAY, 31)] = "Union Day"
elif 1960 < year < 1994:
self[date(year, MAY, 31)] = "Republic Day"
if 1951 < year < 1961:
queens_birthday = datetime(year, JUN, 7)
# observed on second Monday in June
while queens_birthday.weekday() != 0:
queens_birthday += rd(days=1)
self[queens_birthday] = "Queen's Birthday"
if 1960 < year < 1974:
self[date(year, JUL, 10)] = "Family Day"
if 1909 < year < 1952:
kings_birthday = datetime(year, AUG, 1)
while kings_birthday.weekday() != 0:
kings_birthday += rd(days=1)
self[kings_birthday] = "King's Birthday"
if 1951 < year < 1980:
settlers_day = datetime(year, SEP, 1)
while settlers_day.weekday() != 0:
settlers_day += rd(days=1)
self[settlers_day] = "Settlers' Day"
if 1951 < year < 1994:
self[date(year, OCT, 10)] = "Kruger Day"
class ZA(SouthAfrica):
pass
class ZAF(SouthAfrica):
pass
| true | true |
f71b157abd84e0fcedf83134af5ef10acd81bdb3 | 1,972 | py | Python | tests/test_settings.py | Gilnaa/Hydra | 4d24863819bdcdd7c757e2dfb8a8996b009521b6 | [
"MIT"
] | 5 | 2019-07-11T09:24:29.000Z | 2020-10-07T08:11:29.000Z | tests/test_settings.py | Gilnaa/Hydras | 4d24863819bdcdd7c757e2dfb8a8996b009521b6 | [
"MIT"
] | 3 | 2019-11-05T11:33:30.000Z | 2020-08-20T12:15:29.000Z | tests/test_settings.py | Gilnaa/Hydra | 4d24863819bdcdd7c757e2dfb8a8996b009521b6 | [
"MIT"
] | 2 | 2018-12-17T12:56:53.000Z | 2018-12-24T14:09:50.000Z | #!/usr/bin/env python
from .utils import *
# This struct's endianness is of the "target"
class TargetStruct(Struct):
    # Default 0xAABB makes byte order visible: LE -> BB AA, BE -> AA BB.
    a = u16(0xAABB)
# while this struct's endianness is always big.
class SpecificStruct(Struct):
    # u16_be pins big-endian regardless of any settings.
    a = u16_be(0xAABB)
class SettingsTests(HydrasTestCase):
    """Checks the endianness-resolution priority: field-level settings beat
    per-call serialization settings, which beat the global HydraSettings."""
    def test_priority(self):
        s = SpecificStruct()
        h = TargetStruct()
        # 1. Global - Make sure that the serialized struct reacts to the global settings.
        HydraSettings.target_endian = Endianness.LITTLE
        self.assertEqual(h.serialize(), b'\xBB\xAA')
        HydraSettings.target_endian = Endianness.BIG
        self.assertEqual(h.serialize(), b'\xAA\xBB')
        # 2. Serialization-settings - Make sure that the struct uses the overridden endianness
        HydraSettings.target_endian = Endianness.LITTLE
        self.assertEqual(h.serialize(HydraSettings(target_endian=Endianness.BIG)), b'\xAA\xBB')
        self.assertEqual(h, TargetStruct.deserialize(b'\xAA\xBB', HydraSettings(target_endian=Endianness.BIG)))
        HydraSettings.target_endian = Endianness.BIG
        self.assertEqual(h, TargetStruct.deserialize(b'\xBB\xAA', HydraSettings(target_endian=Endianness.LITTLE)))
        # 3. Field-settings - Make sure that the BE fields ignore any settings
        HydraSettings.target_endian = Endianness.LITTLE
        self.assertEqual(s.serialize(), b'\xAA\xBB')
        HydraSettings.target_endian = Endianness.BIG
        self.assertEqual(s.serialize(), b'\xAA\xBB')
        self.assertEqual(s.serialize(HydraSettings(target_endian=Endianness.BIG)), b'\xAA\xBB')
        self.assertEqual(s.serialize(HydraSettings(target_endian=Endianness.LITTLE)), b'\xAA\xBB')
        self.assertEqual(SpecificStruct.deserialize(b'\xAA\xBB', HydraSettings(target_endian=Endianness.BIG)), s)
        self.assertEqual(SpecificStruct.deserialize(b'\xAA\xBB', HydraSettings(target_endian=Endianness.LITTLE)), s)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
from .utils import *
class TargetStruct(Struct):
a = u16(0xAABB)
# while this struct's endianness is always big.
class SpecificStruct(Struct):
a = u16_be(0xAABB)
class SettingsTests(HydrasTestCase):
def test_priority(self):
s = SpecificStruct()
h = TargetStruct()
HydraSettings.target_endian = Endianness.LITTLE
self.assertEqual(h.serialize(), b'\xBB\xAA')
HydraSettings.target_endian = Endianness.BIG
self.assertEqual(h.serialize(), b'\xAA\xBB')
HydraSettings.target_endian = Endianness.LITTLE
self.assertEqual(h.serialize(HydraSettings(target_endian=Endianness.BIG)), b'\xAA\xBB')
self.assertEqual(h, TargetStruct.deserialize(b'\xAA\xBB', HydraSettings(target_endian=Endianness.BIG)))
HydraSettings.target_endian = Endianness.BIG
self.assertEqual(h, TargetStruct.deserialize(b'\xBB\xAA', HydraSettings(target_endian=Endianness.LITTLE)))
HydraSettings.target_endian = Endianness.LITTLE
self.assertEqual(s.serialize(), b'\xAA\xBB')
HydraSettings.target_endian = Endianness.BIG
self.assertEqual(s.serialize(), b'\xAA\xBB')
self.assertEqual(s.serialize(HydraSettings(target_endian=Endianness.BIG)), b'\xAA\xBB')
self.assertEqual(s.serialize(HydraSettings(target_endian=Endianness.LITTLE)), b'\xAA\xBB')
self.assertEqual(SpecificStruct.deserialize(b'\xAA\xBB', HydraSettings(target_endian=Endianness.BIG)), s)
self.assertEqual(SpecificStruct.deserialize(b'\xAA\xBB', HydraSettings(target_endian=Endianness.LITTLE)), s)
if __name__ == '__main__':
unittest.main() | true | true |
f71b1592752b5eb648bd828db7dbdcaf6507e648 | 2,687 | py | Python | legocollector/inventory/migrations/0001_initial.py | ericziethen/legocollector | 06aa984a5998979e7aa9c59e94a38633d653de55 | [
"MIT"
] | 1 | 2020-12-21T22:23:09.000Z | 2020-12-21T22:23:09.000Z | legocollector/inventory/migrations/0001_initial.py | ericziethen/legocollector | 06aa984a5998979e7aa9c59e94a38633d653de55 | [
"MIT"
] | 150 | 2019-08-28T20:20:01.000Z | 2020-07-12T07:09:05.000Z | legocollector/inventory/migrations/0001_initial.py | ericziethen/legocollector | 06aa984a5998979e7aa9c59e94a38633d653de55 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.4 on 2019-09-01 00:56
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema: Color, PartCategory, Part and per-user
    UserPart rows. Avoid hand-editing once applied anywhere."""
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Color',
            fields=[
                ('id', models.IntegerField(editable=False, primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=200, unique=True)),
                ('rgb', models.CharField(max_length=6)),
                ('transparent', models.BooleanField()),
            ],
        ),
        migrations.CreateModel(
            name='PartCategory',
            fields=[
                ('id', models.IntegerField(editable=False, primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=200, unique=True)),
            ],
        ),
        migrations.CreateModel(
            name='Part',
            fields=[
                ('part_num', models.CharField(max_length=20, primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=250)),
                ('width', models.PositiveIntegerField(blank=True, null=True)),
                ('height', models.PositiveIntegerField(blank=True, null=True)),
                ('length', models.PositiveIntegerField(blank=True, null=True)),
                ('stud_count', models.PositiveIntegerField(blank=True, null=True)),
                ('multi_height', models.BooleanField(blank=True, null=True)),
                ('uneven_dimensions', models.BooleanField(blank=True, null=True)),
                ('category_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='parts', to='inventory.PartCategory')),
            ],
        ),
        migrations.CreateModel(
            name='UserPart',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('color', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_parts', to='inventory.Color')),
                ('part_num', models.ForeignKey(db_column='part_num_id', on_delete=django.db.models.deletion.CASCADE, related_name='user_parts', to='inventory.Part')),
                ('user_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_parts', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'unique_together': {('user_id', 'part_num', 'color')},
            },
        ),
    ]
| 44.783333 | 166 | 0.596576 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial inventory schema: Color, PartCategory, Part and UserPart."""
    initial = True
    dependencies = [
        # UserPart.user_id references the project's configured user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # Color lookup table; ids are assigned externally (editable=False,
        # integer PK rather than AutoField).
        migrations.CreateModel(
            name='Color',
            fields=[
                ('id', models.IntegerField(editable=False, primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=200, unique=True)),
                ('rgb', models.CharField(max_length=6)),
                ('transparent', models.BooleanField()),
            ],
        ),
        # Category lookup table, also externally keyed.
        migrations.CreateModel(
            name='PartCategory',
            fields=[
                ('id', models.IntegerField(editable=False, primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=200, unique=True)),
            ],
        ),
        # Part catalogue; part_num (a string) is the natural primary key.
        # Dimension fields are nullable because they are not known for
        # every part.
        migrations.CreateModel(
            name='Part',
            fields=[
                ('part_num', models.CharField(max_length=20, primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=250)),
                ('width', models.PositiveIntegerField(blank=True, null=True)),
                ('height', models.PositiveIntegerField(blank=True, null=True)),
                ('length', models.PositiveIntegerField(blank=True, null=True)),
                ('stud_count', models.PositiveIntegerField(blank=True, null=True)),
                ('multi_height', models.BooleanField(blank=True, null=True)),
                ('uneven_dimensions', models.BooleanField(blank=True, null=True)),
                ('category_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='parts', to='inventory.PartCategory')),
            ],
        ),
        # Join table: which user owns which part in which color; one row
        # per (user, part, color) combination.
        migrations.CreateModel(
            name='UserPart',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('color', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_parts', to='inventory.Color')),
                ('part_num', models.ForeignKey(db_column='part_num_id', on_delete=django.db.models.deletion.CASCADE, related_name='user_parts', to='inventory.Part')),
                ('user_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_parts', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'unique_together': {('user_id', 'part_num', 'color')},
            },
        ),
    ]
| true | true |
f71b1688f1e13f2ccad8ec2d54fddf589eeb3e82 | 831 | py | Python | src/encoded/tests/test_schema_annotation.py | procha2/encoded | e9f122362b71f3b8641023b8d2d5ad531d3484b7 | [
"MIT"
] | 102 | 2015-05-20T01:17:43.000Z | 2022-03-07T06:03:55.000Z | src/encoded/tests/test_schema_annotation.py | procha2/encoded | e9f122362b71f3b8641023b8d2d5ad531d3484b7 | [
"MIT"
] | 901 | 2015-01-07T23:11:57.000Z | 2022-03-18T13:56:12.000Z | src/encoded/tests/test_schema_annotation.py | procha2/encoded | e9f122362b71f3b8641023b8d2d5ad531d3484b7 | [
"MIT"
] | 65 | 2015-02-06T23:00:26.000Z | 2022-01-22T07:58:44.000Z | import pytest
def test_annotation_with_subtype(
        testapp,
        submitter_testapp,
        annotation_dhs,
        annotation_ccre_2,
        annotation_dataset
):
    """annotation_subtype is admin-only and limited to cCRE/rDHS annotations.

    testapp acts with admin permissions, submitter_testapp without;
    the fixtures are pre-made annotation objects of different types.
    """
    testapp.patch_json(
        annotation_dhs['@id'],
        {'annotation_subtype': 'all'},
        status=200)
    # annotation_subtype can only be submitted with admin permissions
    res = testapp.post_json('/annotation', annotation_ccre_2, status=201)
    submitter_testapp.patch_json(
        res.json['@graph'][0]['@id'],
        {'annotation_subtype': 'all'}, status=422)
    testapp.patch_json(
        res.json['@graph'][0]['@id'],
        {'annotation_subtype': 'all'}, status=200)
    # annotation_subtype may be submitted for cCRE or rDHS only
    testapp.patch_json(
        annotation_dataset['@id'],
        {'annotation_subtype': 'all'},
        status=422)
| 29.678571 | 73 | 0.649819 | import pytest
def test_annotation_with_subtype(
        testapp,
        submitter_testapp,
        annotation_dhs,
        annotation_ccre_2,
        annotation_dataset
):
    """annotation_subtype is admin-only and limited to cCRE/rDHS annotations.

    testapp acts with admin permissions, submitter_testapp without;
    the fixtures are pre-made annotation objects of different types.
    """
    testapp.patch_json(
        annotation_dhs['@id'],
        {'annotation_subtype': 'all'},
        status=200)
    # annotation_subtype can only be submitted with admin permissions
    res = testapp.post_json('/annotation', annotation_ccre_2, status=201)
    submitter_testapp.patch_json(
        res.json['@graph'][0]['@id'],
        {'annotation_subtype': 'all'}, status=422)
    testapp.patch_json(
        res.json['@graph'][0]['@id'],
        {'annotation_subtype': 'all'}, status=200)
    # annotation_subtype may be submitted for cCRE or rDHS only
    testapp.patch_json(
        annotation_dataset['@id'],
        {'annotation_subtype': 'all'},
        status=422)
| true | true |
f71b16a8b249c28b8c45d7997b87d3e69e8d654b | 592 | py | Python | server/shserver/Token.py | AsherYang/ThreeLine | 351dc8bfd1c0a536ffbf36ce8b1af953cc71f93a | [
"Apache-2.0"
] | 1 | 2017-05-02T10:02:28.000Z | 2017-05-02T10:02:28.000Z | server/shserver/Token.py | AsherYang/ThreeLine | 351dc8bfd1c0a536ffbf36ce8b1af953cc71f93a | [
"Apache-2.0"
] | null | null | null | server/shserver/Token.py | AsherYang/ThreeLine | 351dc8bfd1c0a536ffbf36ce8b1af953cc71f93a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
#-*- coding:utf-8 -*-
"""
Author: AsherYang
Email: 1181830457@qq.com
Date: 2017/7/24
"""
class Token():
    """Container for an access token, its lifetime and refresh time.

    The original implementation decorated both the getter and the
    intended setter of each attribute with ``@property`` under the same
    name, so the second definition silently replaced the first and
    every attribute access either recursed or raised ``TypeError``.
    Proper ``@<name>.setter`` pairs with private backing attributes
    restore the intended property interface without changing it.
    """

    @property
    def access_token(self):
        # The token string issued by the remote service.
        return self._access_token

    @access_token.setter
    def access_token(self, value):
        self._access_token = value

    @property
    def expire_in(self):
        # Token lifetime, presumably in seconds -- TODO confirm against caller.
        return self._expire_in

    @expire_in.setter
    def expire_in(self, value):
        self._expire_in = value

    @property
    def update_time(self):
        # Timestamp of the last token refresh.
        return self._update_time

    @update_time.setter
    def update_time(self, value):
        self._update_time = value
| 16.444444 | 34 | 0.621622 |
class Token():
    """Container for an access token, its lifetime and refresh time.

    The original implementation decorated both the getter and the
    intended setter of each attribute with ``@property`` under the same
    name, so the second definition silently replaced the first and
    every attribute access either recursed or raised ``TypeError``.
    Proper ``@<name>.setter`` pairs with private backing attributes
    restore the intended property interface without changing it.
    """

    @property
    def access_token(self):
        # The token string issued by the remote service.
        return self._access_token

    @access_token.setter
    def access_token(self, value):
        self._access_token = value

    @property
    def expire_in(self):
        # Token lifetime, presumably in seconds -- TODO confirm against caller.
        return self._expire_in

    @expire_in.setter
    def expire_in(self, value):
        self._expire_in = value

    @property
    def update_time(self):
        # Timestamp of the last token refresh.
        return self._update_time

    @update_time.setter
    def update_time(self, value):
        self._update_time = value
| true | true |
f71b16c4ec2d0b67810f480a086abbd14c87ad42 | 3,454 | py | Python | tempest/api/image/v2/test_images_metadefs_namespaces.py | mail2nsrajesh/tempest | 1a3b3dc50b418d3a15839830d7d1ff88c8c76cff | [
"Apache-2.0"
] | 1 | 2020-01-14T03:20:44.000Z | 2020-01-14T03:20:44.000Z | tempest/api/image/v2/test_images_metadefs_namespaces.py | mail2nsrajesh/tempest | 1a3b3dc50b418d3a15839830d7d1ff88c8c76cff | [
"Apache-2.0"
] | 1 | 2019-08-08T10:36:44.000Z | 2019-08-09T05:58:23.000Z | tempest/api/image/v2/test_images_metadefs_namespaces.py | mail2nsrajesh/tempest | 1a3b3dc50b418d3a15839830d7d1ff88c8c76cff | [
"Apache-2.0"
] | 5 | 2016-06-24T20:03:52.000Z | 2020-02-05T10:14:54.000Z | # Copyright 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.image import base
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
class MetadataNamespacesTest(base.BaseV2ImageTest):
    """Test the Metadata definition Namespaces basic functionality"""

    @decorators.idempotent_id('319b765e-7f3d-4b3d-8b37-3ca3876ee768')
    def test_basic_metadata_definition_namespaces(self):
        """Create/list/show/update/delete a metadef namespace end to end."""
        # get the available resource types and use one resource_type
        body = self.resource_types_client.list_resource_types()
        resource_name = body['resource_types'][0]['name']
        name = [{'name': resource_name}]
        namespace_name = data_utils.rand_name('namespace')
        # create the metadef namespace
        body = self.namespaces_client.create_namespace(
            namespace=namespace_name,
            visibility='public',
            description='Tempest',
            display_name=namespace_name,
            resource_type_associations=name,
            protected=True)
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self._cleanup_namespace, namespace_name)
        # list namespaces
        bodys = self.namespaces_client.list_namespaces()['namespaces']
        body = [namespace['namespace'] for namespace in bodys]
        self.assertIn(namespace_name, body)
        # get namespace details
        body = self.namespaces_client.show_namespace(namespace_name)
        self.assertEqual(namespace_name, body['namespace'])
        self.assertEqual('public', body['visibility'])
        # unable to delete protected namespace
        self.assertRaises(lib_exc.Forbidden,
                          self.namespaces_client.delete_namespace,
                          namespace_name)
        # update the visibility to private and protected to False
        body = self.namespaces_client.update_namespace(
            namespace=namespace_name,
            description='Tempest',
            visibility='private',
            display_name=namespace_name,
            protected=False)
        self.assertEqual('private', body['visibility'])
        self.assertEqual(False, body['protected'])
        # now able to delete the non-protected namespace
        self.namespaces_client.delete_namespace(namespace_name)

    def _cleanup_namespace(self, namespace_name):
        """Unprotect the namespace (deletion is refused while protected),
        then delete it; registered via addCleanup above."""
        body = self.namespaces_client.show_namespace(namespace_name)
        self.assertEqual(namespace_name, body['namespace'])
        body = self.namespaces_client.update_namespace(
            namespace=namespace_name,
            description='Tempest',
            visibility='private',
            display_name=namespace_name,
            protected=False)
        self.namespaces_client.delete_namespace(namespace_name)
| 44.857143 | 78 | 0.689925 |
from tempest.api.image import base
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
class MetadataNamespacesTest(base.BaseV2ImageTest):
    """Basic CRUD coverage for image metadata-definition namespaces."""

    @decorators.idempotent_id('319b765e-7f3d-4b3d-8b37-3ca3876ee768')
    def test_basic_metadata_definition_namespaces(self):
        """Create/list/show/update/delete a metadef namespace end to end."""
        # Pick any available resource type to associate with the namespace.
        body = self.resource_types_client.list_resource_types()
        resource_name = body['resource_types'][0]['name']
        name = [{'name': resource_name}]
        namespace_name = data_utils.rand_name('namespace')
        # Create the namespace public and protected.
        body = self.namespaces_client.create_namespace(
            namespace=namespace_name,
            visibility='public',
            description='Tempest',
            display_name=namespace_name,
            resource_type_associations=name,
            protected=True)
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self._cleanup_namespace, namespace_name)
        # It shows up in the namespace listing.
        bodys = self.namespaces_client.list_namespaces()['namespaces']
        body = [namespace['namespace'] for namespace in bodys]
        self.assertIn(namespace_name, body)
        # Show returns the expected attributes.
        body = self.namespaces_client.show_namespace(namespace_name)
        self.assertEqual(namespace_name, body['namespace'])
        self.assertEqual('public', body['visibility'])
        # A protected namespace cannot be deleted.
        self.assertRaises(lib_exc.Forbidden,
                          self.namespaces_client.delete_namespace,
                          namespace_name)
        # Flip visibility to private and drop protection.
        body = self.namespaces_client.update_namespace(
            namespace=namespace_name,
            description='Tempest',
            visibility='private',
            display_name=namespace_name,
            protected=False)
        self.assertEqual('private', body['visibility'])
        self.assertEqual(False, body['protected'])
        # Now deletion succeeds.
        self.namespaces_client.delete_namespace(namespace_name)

    def _cleanup_namespace(self, namespace_name):
        """Unprotect the namespace (deletion is refused while protected),
        then delete it; registered via addCleanup above."""
        body = self.namespaces_client.show_namespace(namespace_name)
        self.assertEqual(namespace_name, body['namespace'])
        body = self.namespaces_client.update_namespace(
            namespace=namespace_name,
            description='Tempest',
            visibility='private',
            display_name=namespace_name,
            protected=False)
        self.namespaces_client.delete_namespace(namespace_name)
| true | true |
f71b16cbd42077bfe153963de61a031de29f4b72 | 49,097 | py | Python | teuthology/task/kernel.py | sunilangadi2/teuthology | d19730ce070d52d0dd5e71443f02a8d1b7912493 | [
"MIT"
] | null | null | null | teuthology/task/kernel.py | sunilangadi2/teuthology | d19730ce070d52d0dd5e71443f02a8d1b7912493 | [
"MIT"
] | null | null | null | teuthology/task/kernel.py | sunilangadi2/teuthology | d19730ce070d52d0dd5e71443f02a8d1b7912493 | [
"MIT"
] | null | null | null | """
Kernel installation task
"""
import logging
import os
import re
import shlex
from io import StringIO
from teuthology.util.compat import urljoin
from teuthology import misc as teuthology
from teuthology.parallel import parallel
from teuthology.config import config as teuth_config
from teuthology.orchestra import run
from teuthology.exceptions import (
UnsupportedPackageTypeError,
ConfigError,
VersionNotFoundError,
)
from teuthology.packaging import (
install_package,
get_koji_build_info,
get_kojiroot_base_url,
get_koji_package_name,
get_koji_task_rpm_info,
get_koji_task_result,
get_builder_project,
)
log = logging.getLogger(__name__)

# Kernel spec used when the task config does not name a version.
CONFIG_DEFAULT = {'branch': 'master'}
# Default number of seconds to wait for remotes to come back after a reboot.
TIMEOUT_DEFAULT = 300

# Config keys that each select a kernel version; normalization ensures a
# role carries at most one of these after overrides are applied.
VERSION_KEYS = ['branch', 'tag', 'sha1', 'deb', 'rpm', 'koji', 'koji_task']
def normalize_config(ctx, config):
    """
    Expand a kernel task config into one keyed by fully-qualified roles.

    Generic role names (client, mon, osd, ...) are fanned out to the
    concrete roles present in the cluster (client.0, client.1, ...),
    while an entry for a specific role (e.g. osd.1) always wins over
    the generic entry for its type.  A config that is None/empty, or
    that only carries version/kdb/flavor keys, is treated as a single
    version spec and applied verbatim to every role in the cluster.

    For example, with 4 OSDs this::

        osd:
          tag: v3.0
          kdb: true
        osd.1:
          branch: new_btrfs
          kdb: false
        osd.3:
          deb: /path/to/linux-whatever.deb

    is transformed into::

        osd.0:
          tag: v3.0
          kdb: true
        osd.1:
          branch: new_btrfs
          kdb: false
        osd.2:
          tag: v3.0
          kdb: true
        osd.3:
          deb: /path/to/linux-whatever.deb

    :param ctx: Context
    :param config: Configuration
    :returns: dict mapping each concrete role to its own config copy
    """
    uniform_keys = VERSION_KEYS + ['kdb', 'flavor']
    if not config or all(k in uniform_keys for k in config.keys()):
        # Single version spec: replicate it across every role.
        spec = config if config else CONFIG_DEFAULT
        return {role: spec.copy()
                for role in teuthology.all_roles(ctx.cluster)}

    normalized = {}
    for role, role_config in config.items():
        if role_config is None:
            role_config = CONFIG_DEFAULT
        if '.' in role:
            # Fully-qualified role: take its entry as-is.
            normalized[role] = role_config.copy()
        else:
            # Generic role: fan out to every instance of that type,
            # letting any explicit per-role entry take precedence.
            for id_ in teuthology.all_roles_of_type(ctx.cluster, role):
                name = '{type}.{id}'.format(type=role, id=id_)
                if name not in config:
                    normalized[name] = role_config.copy()
    return normalized
def normalize_and_apply_overrides(ctx, config, overrides):
    """
    Normalize both the kernel task config and its overrides (see
    normalize_config() for what normalization means) and merge the
    overrides into the config.

    Version overrides need special care: a version specified with one
    key type (e.g. tag) must fully replace one specified with another
    (e.g. branch), so before deep-merging we strip every version key
    from any role whose override carries a version key of its own.

    Returns a (normalized config, timeout) tuple.

    :param ctx: Context
    :param config: Configuration
    """
    timeout = config.pop('timeout') if 'timeout' in config else TIMEOUT_DEFAULT
    config = normalize_config(ctx, config)
    log.debug('normalized config %s' % config)
    if 'timeout' in overrides:
        timeout = overrides.pop('timeout')
    if overrides:
        overrides = normalize_config(ctx, overrides)
        log.debug('normalized overrides %s' % overrides)

        # Drop conflicting version keys from the base config so that
        # deep_merge() cannot leave a stale version spec behind.
        for role, role_config in config.items():
            override = overrides.get(role, {})
            if any(k in override for k in VERSION_KEYS):
                for k in VERSION_KEYS:
                    role_config.pop(k, None)
        teuthology.deep_merge(config, overrides)

    return (config, timeout)
def validate_config(ctx, config):
    """
    Ensure that every role sharing a host agrees on which kernel to
    install.  While checking, redundant same-host entries are removed
    from config so only one entry per host remains.

    :param ctx: Context
    :param config: Configuration
    """
    for _, host_roles in ctx.cluster.remotes.items():
        host_kernel = None
        for role in host_roles:
            role_kernel = config.get(role, host_kernel)
            if host_kernel is None:
                # First kernel spec seen for this host.
                host_kernel = role_kernel
                continue
            if role_kernel is None:
                continue
            assert host_kernel == role_kernel, \
                "everything on the same host must use the same kernel"
            # Same spec repeated for another role on this host; drop it.
            config.pop(role, None)
def need_to_install(ctx, role, version):
    """
    Check to see if we need to install a kernel. Get the version of the
    currently running kernel, and compare it against the value passed in.

    :param ctx: Context
    :param role: Role
    :param version: value to compare against (used in checking), can be either
                    a utsrelease string (e.g. '3.13.0-rc3-ceph-00049-ge2817b3')
                    or a sha1.
    :returns: True if the running kernel differs (install needed),
              False if it already matches.
    """
    ret = True
    log.info('Checking kernel version of {role}, want "{ver}"...'.format(
        role=role, ver=version))
    uname_fp = StringIO()
    # Run "uname -r" on the one remote that holds this role.
    ctx.cluster.only(role).run(
        args=[
            'uname',
            '-r',
            ],
        stdout=uname_fp,
        )
    cur_version = uname_fp.getvalue().rstrip('\n')
    log.debug('current kernel version is {ver} vs {want}'.format(ver=cur_version,
                                                                 want=version))
    # A dot distinguishes a utsrelease string from a bare sha1.
    if '.' in str(version):
        # version is utsrelease, yay
        if cur_version == version:
            log.debug('utsrelease strings match, do not need to install')
            ret = False
    else:
        # version is sha1, need to try to extract sha1 from cur_version
        # (ceph kernel builds embed it as "-g<sha1>" or "_g<sha1>").
        match = re.search('[-_]g([0-9a-f]{6,40})', cur_version)
        if match:
            cur_sha1 = match.group(1)
            log.debug('extracting sha1, {ver} -> {sha1}'.format(
                ver=cur_version, sha1=cur_sha1))
            # Compare only the common prefix; either side may be abbreviated,
            # but insist on at least 6 hex digits to avoid false matches.
            m = min(len(cur_sha1), len(version))
            assert m >= 6, "cur_sha1 and/or version is too short, m = %d" % m
            if cur_sha1[0:m] == version[0:m]:
                log.debug('extracted sha1 matches, do not need to install')
                ret = False
        else:
            log.debug('failed to parse current kernel version')
    uname_fp.close()
    return ret
def install_firmware(ctx, config):
    """
    Ensure each remote has up-to-date linux-firmware: on rpm systems via
    the distro package, otherwise by cloning/updating the upstream
    linux-firmware git tree into /lib/firmware/updates.  Skipped
    entirely when any role runs a distro kernel.

    :param ctx: Context
    :param config: Configuration (role -> kernel spec)
    """
    linux_firmware_git_upstream = 'git://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git'
    uri = teuth_config.linux_firmware_git_url or linux_firmware_git_upstream
    fw_dir = '/lib/firmware/updates'

    for role in config.keys():
        if isinstance(config[role], str) and config[role].find('distro') >= 0:
            log.info('Skipping firmware on distro kernel');
            return
        (role_remote,) = ctx.cluster.only(role).remotes.keys()
        package_type = role_remote.os.package_type
        if package_type == 'rpm':
            # rpm distros ship firmware as a package; just upgrade it.
            role_remote.run(args=[
                'sudo', 'yum', 'upgrade', '-y', 'linux-firmware',
            ])
            continue
        log.info('Installing linux-firmware on {role}...'.format(role=role))
        role_remote.run(
            args=[
                # kludge around mysterious 0-byte .git/HEAD files
                'cd', fw_dir,
                run.Raw('&&'),
                'test', '-d', '.git',
                run.Raw('&&'),
                'test', '!', '-s', '.git/HEAD',
                run.Raw('&&'),
                'sudo', 'rm', '-rf', '.git',
                run.Raw(';'),
                # init
                'sudo', 'install', '-d', '-m0755', fw_dir,
                run.Raw('&&'),
                'cd', fw_dir,
                run.Raw('&&'),
                'sudo', 'git', 'init',
                ],
            )
        # Add the origin remote only if it is not configured yet.
        role_remote.run(
            args=[
                'sudo', 'git', '--git-dir=%s/.git' % fw_dir, 'config',
                '--get', 'remote.origin.url', run.Raw('>/dev/null'),
                run.Raw('||'),
                'sudo', 'git', '--git-dir=%s/.git' % fw_dir,
                'remote', 'add', 'origin', uri,
                ],
            )
        # In case the remote already existed, set its url
        role_remote.run(
            args=[
                'sudo', 'git', '--git-dir=%s/.git' % fw_dir, 'remote',
                'set-url', 'origin', uri, run.Raw('>/dev/null')
                ]
            )
        # Hard-reset the working tree to upstream master.
        role_remote.run(
            args=[
                'cd', fw_dir,
                run.Raw('&&'),
                'sudo', 'git', 'fetch', 'origin',
                run.Raw('&&'),
                'sudo', 'git', 'reset', '--hard', 'origin/master'
                ],
            )
def gitbuilder_pkg_name(remote):
    """
    Filename used for the kernel package on gitbuilder, chosen by the
    remote's package type: 'kernel.x86_64.rpm' for rpm-based distros,
    'linux-image.deb' for deb-based ones.

    :raises UnsupportedPackageTypeError: for any other package type
    """
    names_by_type = {
        'rpm': 'kernel.x86_64.rpm',
        'deb': 'linux-image.deb',
    }
    package_type = remote.os.package_type
    if package_type not in names_by_type:
        raise UnsupportedPackageTypeError(remote)
    return names_by_type[package_type]
def remote_pkg_path(remote):
    """
    Path on the remote where the kernel package is staged (copied over
    for local packages, downloaded to for gitbuilder packages) before
    being installed from.
    """
    pkg_name = gitbuilder_pkg_name(remote)
    return os.path.join('/tmp', pkg_name)
def download_kernel(ctx, config):
    """
    Supply each remote with a kernel package:
      - local kernels are copied over
      - koji and gitbuilder kernels are downloaded (in parallel)
      - nothing is done for distro kernels

    :param ctx: Context
    :param config: Configuration (role -> kernel spec: 'distro', a koji
                   build/task dict, a local path, or a sha1)
    """
    procs = {}
    for role, src in config.items():
        needs_download = False

        if src == 'distro':
            # don't need to download distro kernels
            log.debug("src is distro, skipping download");
            continue

        (role_remote,) = ctx.cluster.only(role).remotes.keys()
        if isinstance(src, dict):
            # we're downloading a kernel from koji, the src dict here
            # is the build_info retrieved from koji using get_koji_build_info
            if src.get("id"):
                build_id = src["id"]
                log.info("Downloading kernel with build_id {build_id} on {role}...".format(
                    build_id=build_id,
                    role=role
                ))
                needs_download = True
                baseurl = get_kojiroot_base_url(src)
                pkg_name = get_koji_package_name("kernel", src)
            elif src.get("task_id"):
                needs_download = True
                log.info("Downloading kernel with task_id {task_id} on {role}...".format(
                    task_id=src["task_id"],
                    role=role
                ))
                baseurl = src["base_url"]
                # this var is also poorly named as it's not the package name,
                # but the full name of the rpm file to download.
                pkg_name = src["rpm_name"]
        elif src.find('/') >= 0:
            # local package - src is path
            log.info('Copying kernel package {path} to {role}...'.format(
                path=src, role=role))
            role_remote.put_file(src,remote_pkg_path(role_remote))
        else:
            # gitbuilder package - src is sha1
            log.info('Downloading kernel {sha1} on {role}...'.format(
                sha1=src,
                role=role,
            ))
            needs_download = True
            builder = get_builder_project()(
                'kernel',
                {'sha1': src},
                ctx=ctx,
                remote=role_remote,
            )
            # shaman and classic gitbuilder lay out packages differently.
            if teuth_config.use_shaman:
                if role_remote.os.package_type == 'rpm':
                    arch = builder.arch
                    baseurl = urljoin(
                        builder.base_url,
                        '/'.join([arch, ''])
                    )
                    pkg_name = "kernel-%s.%s.rpm" % (
                        builder.version,
                        arch,
                    )
                elif role_remote.os.package_type == 'deb':
                    arch = 'amd64'  # FIXME
                    baseurl = urljoin(
                        builder.base_url,
                        '/'.join([
                            'pool', 'main', 'l',
                            'linux-%s' % builder.scm_version, ''
                        ])
                    )
                    pkg_name = 'linux-image-%s_%s_%s.deb' % (
                        builder.scm_version,
                        builder.version,
                        arch,
                    )
            else:
                baseurl = builder.base_url + "/"
                pkg_name = gitbuilder_pkg_name(role_remote)

            log.info("fetching, builder baseurl is %s", baseurl)

        if needs_download:
            # Fetch asynchronously; wget resolves pkg_name (fed on stdin)
            # against --base to form the full URL.
            proc = role_remote.run(
                args=[
                    'rm', '-f', remote_pkg_path(role_remote),
                    run.Raw('&&'),
                    'echo',
                    pkg_name,
                    run.Raw('|'),
                    'wget',
                    '-nv',
                    '-O',
                    remote_pkg_path(role_remote),
                    '--base={url}'.format(url=baseurl),
                    '--input-file=-',
                ],
                wait=False)
            procs[role_remote.name] = proc

    # Wait for all asynchronous downloads to finish.
    for name, proc in procs.items():
        log.debug('Waiting for download/copy to %s to complete...', name)
        proc.wait()
def _no_grub_link(in_file, remote, kernel_ver):
    """
    Copy and link kernel related files if grub cannot be used
    (as is the case in Arm kernels)

    :param in_file: kernel file or image file to be copied.
    :param remote: remote machine
    :param kernel_ver: kernel version
    """
    boot1 = '/boot/%s' % in_file
    boot2 = '%s.old' % boot1
    # Preserve the existing /boot/<name> as <name>.old, if present.
    remote.run(
        args=[
            'if', 'test', '-e', boot1, run.Raw(';'), 'then',
            'sudo', 'mv', boot1, boot2, run.Raw(';'), 'fi',],
        )
    # Point /boot/<name> at the versioned file for the new kernel.
    remote.run(
        args=['sudo', 'ln', '-s', '%s-%s' % (in_file, kernel_ver) , boot1, ],
        )
def install_latest_rh_kernel(ctx, config):
    """
    Update every remote in the cluster to the latest z-stream kernel
    (in parallel) and reboot so it takes effect.  Does nothing when
    the config carries a truthy 'skip' entry.
    """
    config = config or {}
    if config.get('skip'):
        return
    with parallel() as worker_pool:
        for remote in ctx.cluster.remotes.keys():
            worker_pool.spawn(update_rh_kernel, remote)
def update_rh_kernel(remote):
    """
    Run "yum update kernel" on one remote; if a new kernel was actually
    installed, reboot the node and reconnect.  Only rpm-based remotes
    are handled; others fall through doing nothing beyond logging uname.
    """
    package_type = remote.os.package_type
    remote.run(args=['uname', '-a'])
    import time
    if package_type == 'rpm':
        update_log = remote.sh('sudo yum update -y kernel')
        log.info(update_log)
        # yum prints "Installed" when a new kernel package went in.
        if not update_log.find("Installed") == -1:
            log.info("Kernel updated to latest z stream on %s", remote.shortname)
            log.info("Rebooting %s", remote.shortname)
            remote.run(args=['sudo', 'shutdown', '-r', 'now'], wait=False)
            # Give the reboot time to start before reconnecting.
            time.sleep(40)
            log.info("Reconnecting after reboot")
            remote.reconnect(timeout=300)
            remote.run(args=['uname', '-a'])
        elif not update_log.find('No packages marked for update') == -1:
            log.info("Latest version already installed on %s", remote.shortname)
def install_and_reboot(ctx, config):
    """
    Install and reboot the kernel.  This mostly performs remote
    installation operations.  The code does check for Arm images
    and skips grub operations if the kernel is Arm.  Otherwise, it
    extracts kernel titles from submenu entries and makes the appropriate
    grub calls.  The assumptions here are somewhat simplified in that
    it expects kernel entries to be present under submenu entries.

    :param ctx: Context
    :param config: Configuration (role -> kernel spec)
    """
    procs = {}
    kernel_title = ''
    for role, src in config.items():
        (role_remote,) = ctx.cluster.only(role).remotes.keys()
        if isinstance(src, str) and src.find('distro') >= 0:
            log.info('Installing distro kernel on {role}...'.format(role=role))
            # install_kernel handles bootloader setup and reboot itself.
            install_kernel(role_remote, version=src)
            continue

        log.info('Installing kernel {src} on {role}...'.format(src=src,
                                                               role=role))
        package_type = role_remote.os.package_type
        if package_type == 'rpm':
            proc = role_remote.run(
                args=[
                    'sudo',
                    'rpm',
                    '-ivh',
                    '--oldpackage',
                    '--replacefiles',
                    '--replacepkgs',
                    remote_pkg_path(role_remote),
                ])
            # install_kernel takes care of initrd/grub and reboots.
            install_kernel(role_remote, remote_pkg_path(role_remote))
            continue

        # TODO: Refactor this into install_kernel() so that it handles all
        # cases for both rpm and deb packages.
        proc = role_remote.run(
            args=[
                # install the kernel deb
                'sudo',
                'dpkg',
                '-i',
                remote_pkg_path(role_remote),
                ],
            )

        # collect kernel image name from the .deb
        kernel_title = get_image_version(role_remote,
                                         remote_pkg_path(role_remote))
        log.info('searching for kernel {}'.format(kernel_title))

        if kernel_title.endswith("-highbank"):
            # Arm (highbank) boxes have no grub; symlink the image and
            # initrd by hand and reboot asynchronously.
            _no_grub_link('vmlinuz', role_remote, kernel_title)
            _no_grub_link('initrd.img', role_remote, kernel_title)
            proc = role_remote.run(
                args=[
                    'sudo',
                    'shutdown',
                    '-r',
                    'now',
                    ],
                wait=False,
            )
            procs[role_remote.name] = proc
            continue

        # look for menuentry for our kernel, and collect any
        # submenu entries for their titles.  Assume that if our
        # kernel entry appears later in the file than a submenu entry,
        # it's actually nested under that submenu.  If it gets more
        # complex this will totally break.
        kernel_entries = role_remote.sh([
            'egrep',
            '(submenu|menuentry.*' + kernel_title + ').*{',
            '/boot/grub/grub.cfg'
        ]).split('\n')
        submenu_title = ''
        default_title = ''
        for l in kernel_entries:
            fields = shlex.split(l)
            if len(fields) >= 2:
                command, title = fields[:2]
                if command == 'submenu':
                    submenu_title = title + '>'
                if command == 'menuentry':
                    if title.endswith(kernel_title):
                        default_title = title
                        break
        log.info('submenu_title:{}'.format(submenu_title))
        log.info('default_title:{}'.format(default_title))

        proc = role_remote.run(
            args=[
                # use the title(s) to construct the content of
                # the grub menu entry, so we can default to it.
                '/bin/echo',
                '-e',
                r'cat <<EOF\nset default="' + submenu_title + \
                default_title + r'"\nEOF\n',
                # make it look like an emacs backup file so
                # unfortunately timed update-grub runs don't pick it
                # up yet; use sudo tee so we are able to write to /etc
                run.Raw('|'),
                'sudo',
                'tee',
                '--',
                '/etc/grub.d/01_ceph_kernel.tmp~',
                run.Raw('>/dev/null'),
                run.Raw('&&'),
                'sudo',
                'chmod',
                'a+x',
                '--',
                '/etc/grub.d/01_ceph_kernel.tmp~',
                run.Raw('&&'),
                'sudo',
                'mv',
                '--',
                '/etc/grub.d/01_ceph_kernel.tmp~',
                '/etc/grub.d/01_ceph_kernel',
                # update grub again so it accepts our default
                run.Raw('&&'),
                'sudo',
                'update-grub',
                run.Raw('&&'),
                'rm',
                remote_pkg_path(role_remote),
                run.Raw('&&'),
                # work around a systemd issue, where network gets shut down
                # before ssh can close its session
                run.Raw('('),
                'sleep',
                '1',
                run.Raw('&&'),
                'sudo',
                'shutdown',
                '-r',
                'now',
                run.Raw('&'),
                run.Raw(')'),
            ],
            wait=False,
        )
        procs[role_remote.name] = proc

    # Wait for all asynchronous grub-update/reboot commands to be issued.
    for name, proc in procs.items():
        log.debug('Waiting for install on %s to complete...', name)
        proc.wait()
def enable_disable_kdb(ctx, config):
    """
    Enable kdb on remote machines in use.  Disable on those that are
    not in use.  The console device differs per hardware class (mira
    boxes use ttyS2, everything else ttyS1).

    :param ctx: Context
    :param config: Configuration mapping role -> bool (enable kdb?)
    """
    for role, enable in config.items():
        (role_remote,) = ctx.cluster.only(role).remotes.keys()
        if "mira" in role_remote.name:
            serialdev = "ttyS2"
        else:
            serialdev = "ttyS1"
        if enable:
            log.info('Enabling kdb on {role}...'.format(role=role))
            try:
                role_remote.run(
                    args=[
                        'echo', serialdev,
                        run.Raw('|'),
                        'sudo', 'tee', '/sys/module/kgdboc/parameters/kgdboc'
                        ])
            except run.CommandFailedError:
                # The kgdboc sysfs knob is absent on kernels built
                # without kgdb support; that is not fatal.
                # (log.warn is a deprecated alias of log.warning)
                log.warning('Kernel does not support kdb')
        else:
            log.info('Disabling kdb on {role}...'.format(role=role))
            # Add true pipe so command doesn't fail on kernel without kdb support.
            try:
                role_remote.run(
                    args=[
                        'echo', '',
                        run.Raw('|'),
                        'sudo', 'tee', '/sys/module/kgdboc/parameters/kgdboc',
                        run.Raw('|'),
                        'true',
                        ])
            except run.CommandFailedError:
                log.warning('Kernel does not support kdb')
def wait_for_reboot(ctx, need_install, timeout, distro=False):
    """
    Loop reconnecting and checking kernel versions until
    they're all correct or the timeout is exceeded.

    :param ctx: Context
    :param need_install: dict of role -> expected kernel version; entries
                         are removed as each node comes up correctly.
    :param timeout: number of seconds before we time out.
    :param distro: treat entries as distro kernels (also auto-detected
                   per entry from the version string).
    """
    import time
    # do not try to reconnect immediately after triggering the reboot,
    # because the reboot sequence might not have started yet (!) --
    # see https://tracker.ceph.com/issues/44187
    time.sleep(30)
    starttime = time.time()
    while need_install:
        teuthology.reconnect(ctx, timeout)
        for client in list(need_install.keys()):
            if 'distro' in str(need_install[client]):
                distro = True
            log.info('Checking client {client} for new kernel version...'.format(client=client))
            try:
                if distro:
                    (remote,) = ctx.cluster.only(client).remotes.keys()
                    assert not need_to_install_distro(remote), \
                        'failed to install new distro kernel version within timeout'

                else:
                    assert not need_to_install(ctx, client, need_install[client]), \
                        'failed to install new kernel version within timeout'
                # This node is done; stop re-checking it.
                del need_install[client]
            except Exception:
                log.exception("Saw exception")
                # ignore connection resets and asserts while time is left
                if time.time() - starttime > timeout:
                    raise
        time.sleep(1)
def get_version_of_running_kernel(remote):
    """
    Version of the kernel currently running on the remote, normalized
    so it can be compared against "rpm -q kernel..." output.
    """
    dist_release = remote.os.name
    uname_r = remote.sh("uname -r").strip()
    if dist_release in ['opensuse', 'sle']:
        # On SUSE "uname -r" reports e.g. 4.12.14-lp151.28.36-default
        # while "rpm -q kernel-default" reports 4.12.14-lp151.28.36.1.x86_64;
        # strip the trailing "-default" so substring comparison works.
        return re.sub(r"-default$", "", uname_r)
    return uname_r
def need_to_install_distro(remote):
    """
    Installing kernels on rpm won't setup grub/boot into them.  This
    installs the newest kernel package and checks its version and
    compares against the running kernel (uname -r).  Similar check
    for deb.

    :returns: False if running the newest distro kernel. Returns the version of
              the newest if it is not running.
    """
    dist_release = remote.os.name
    package_type = remote.os.package_type
    current = get_version_of_running_kernel(remote)
    log.info("Running kernel on {node}: {version}".format(
        node=remote.shortname, version=current))
    installed_version = None
    if package_type == 'rpm':
        if dist_release in ['opensuse', 'sle']:
            install_stdout = remote.sh(
                'sudo zypper --non-interactive install kernel-default'
            )
        else:
            install_stdout = remote.sh(
                'sudo yum install -y kernel'
            )
        match = re.search(
            "Package (.*) already installed",
            install_stdout, flags=re.MULTILINE)
        if 'Nothing to do' in install_stdout:
            # Kernel package already present; probe yum's "reinstall"
            # prompt (answering "no") to learn which kernel is running.
            installed_version = match.groups()[0] if match else ''
            err_mess = StringIO()
            err_mess.truncate(0)
            remote.run(args=['echo', 'no', run.Raw('|'), 'sudo', 'yum',
                             'reinstall', 'kernel', run.Raw('||'), 'true'],
                       stderr=err_mess)
            reinstall_stderr = err_mess.getvalue()
            err_mess.close()
            if 'Skipping the running kernel' in reinstall_stderr:
                running_version = re.search(
                    "Skipping the running kernel: (.*)",
                    reinstall_stderr, flags=re.MULTILINE).groups()[0]
                if installed_version == running_version:
                    log.info(
                        'Newest distro kernel already installed and running')
                    return False
            else:
                # Installed but not running; reinstall so boot entries exist.
                remote.run(args=['sudo', 'yum', 'reinstall', '-y', 'kernel',
                                 run.Raw('||'), 'true'])
        newest = get_latest_image_version_rpm(remote)

    if package_type == 'deb':
        newest = get_latest_image_version_deb(remote, dist_release)

    # rpm versions may use '_' where uname uses '-'; accept either.
    if current in newest or current.replace('-', '_') in newest:
        log.info('Newest distro kernel installed and running')
        return False
    log.info(
        'Not newest distro kernel. Current: {cur} Expected: {new}'.format(
            cur=current, new=newest))
    return newest
def maybe_generate_initrd_rpm(remote, path, version):
    """
    Generate initrd with mkinitrd if the hooks that should make it
    happen on its own aren't there.

    :param path: rpm package path
    :param version: kernel version to generate initrd for
                    e.g. 3.18.0-rc6-ceph-00562-g79a9fa5
    """
    # If the rpm ships %post scripts that call installkernel or
    # kernel-install, initrd generation happens automatically.
    out = remote.sh(['rpm', '--scripts', '-qp', path])
    if 'bin/installkernel' in out or 'bin/kernel-install' in out:
        return

    log.info("No installkernel or kernel-install hook in %s, "
             "will generate initrd for %s", path, version)
    remote.run(
        args=[
            'sudo',
            'mkinitrd',
            '--allow-missing',
            '-f',  # overwrite existing initrd
            '/boot/initramfs-' + version + '.img',
            version,
        ])
def install_kernel(remote, path=None, version=None):
    """
    A bit of misnomer perhaps - the actual kernel package is installed
    elsewhere, this function deals with initrd and grub.  Currently the
    following cases are handled:
      - local, gitbuilder, distro for rpm packages
      - distro for deb packages - see TODO in install_and_reboot()

    Reboots the remote (asynchronously) in every handled case.

    TODO: reboots should be issued from install_and_reboot()

    :param path: package path (for local and gitbuilder cases)
    :param version: for RPM distro kernels, pass this to update_grub_rpm
    """
    dist_release = remote.os.name
    templ = "install_kernel(remote={remote}, path={path}, version={version})"
    log.debug(templ.format(remote=remote, path=path, version=version))
    package_type = remote.os.package_type
    if package_type == 'rpm':
        if dist_release in ['opensuse', 'sle']:
            # FIXME: no initrd/grub handling on SUSE yet; just reboot below.
            pass
        else:
            if path:
                version = get_image_version(remote, path)
                # This is either a gitbuilder or a local package and both of these
                # could have been built with upstream rpm targets with specs that
                # don't have a %post section at all, which means no initrd.
                maybe_generate_initrd_rpm(remote, path, version)
            elif not version or version == 'distro':
                version = get_latest_image_version_rpm(remote)
            update_grub_rpm(remote, version)
        remote.run( args=['sudo', 'shutdown', '-r', 'now'], wait=False )
        return

    if package_type == 'deb':
        newversion = get_latest_image_version_deb(remote, dist_release)
        if 'ubuntu' in dist_release:
            grub2conf = teuthology.get_file(remote,
                            '/boot/grub/grub.cfg', sudo=True).decode()
            submenu = ''
            menuentry = ''
            # Find the (possibly submenu-nested) menuentry for newversion.
            for line in grub2conf.split('\n'):
                if 'submenu' in line:
                    submenu = line.split('submenu ')[1]
                    # Ubuntu likes to be sneaky and change formatting of
                    # grub.cfg between quotes/doublequotes between versions
                    if submenu.startswith("'"):
                        submenu = submenu.split("'")[1]
                    if submenu.startswith('"'):
                        submenu = submenu.split('"')[1]
                if 'menuentry' in line:
                    if newversion in line and 'recovery' not in line:
                        menuentry = line.split('\'')[1]
                        break
            if submenu:
                # grub nests entries as "submenu>menuentry".
                grubvalue = submenu + '>' + menuentry
            else:
                grubvalue = menuentry
            grubfile = 'cat <<EOF\nset default="' + grubvalue + '"\nEOF'
            teuthology.delete_file(remote, '/etc/grub.d/01_ceph_kernel', sudo=True, force=True)
            teuthology.sudo_write_file(remote, '/etc/grub.d/01_ceph_kernel', StringIO(grubfile), '755')
            log.info('Distro Kernel Version: {version}'.format(version=newversion))
            remote.run(args=['sudo', 'update-grub'])
            remote.run(args=['sudo', 'shutdown', '-r', 'now'], wait=False )
            return

        if 'debian' in dist_release:
            grub2_kernel_select_generic(remote, newversion, 'deb')
            log.info('Distro Kernel Version: {version}'.format(version=newversion))
            remote.run( args=['sudo', 'shutdown', '-r', 'now'], wait=False )
            return
def update_grub_rpm(remote, newversion):
    """
    Updates grub file to boot new kernel version on both legacy grub/grub2.

    :param remote: remote to update
    :param newversion: kernel version string to make the boot default
    """
    grub='grub2'
    # Check if grub2 is installed; fall back to legacy grub if not.
    try:
        remote.run(args=['sudo', 'rpm', '-qi', 'grub2-tools'])
    except Exception:
        grub = 'legacy'
    log.info('Updating Grub Version: {grub}'.format(grub=grub))
    if grub == 'legacy':
        data = ''
        # Write new legacy grub entry.
        newgrub = generate_legacy_grub_entry(remote, newversion)
        for line in newgrub:
            data += line + '\n'
        # Stage to a temp file, then move into place with sudo.
        temp_file_path = remote.mktemp()
        teuthology.sudo_write_file(remote, temp_file_path, StringIO(data), '755')
        teuthology.move_file(remote, temp_file_path, '/boot/grub/grub.conf', True)
    else:
        # Update grub menu entry to new version.
        grub2_kernel_select_generic(remote, newversion, 'rpm')
def grub2_kernel_select_generic(remote, newversion, ostype):
    """
    Can be used on DEB and RPM. Sets which entry should be booted,
    either by menuentry index or (newer el8-style BLS) entry name.

    :param remote: remote to update
    :param newversion: kernel version string the boot entry must contain
    :param ostype: 'rpm' or 'deb' (selects the grub tool names/paths)
    """
    log.info("Updating grub on {node} to boot {version}".format(
             node=remote.shortname, version=newversion))
    if ostype == 'rpm':
        grubset = 'grub2-set-default'
        mkconfig = 'grub2-mkconfig'
        grubconfig = '/boot/grub2/grub.cfg'
    if ostype == 'deb':
        grubset = 'grub-set-default'
        grubconfig = '/boot/grub/grub.cfg'
        mkconfig = 'grub-mkconfig'
    # Regenerate the config so the freshly installed kernel shows up.
    remote.run(args=['sudo', mkconfig, '-o', grubconfig, ])
    grub2conf = teuthology.get_file(remote, grubconfig, sudo=True).decode()
    entry_num = 0
    if '\nmenuentry ' not in grub2conf:
        # okay, do the newer (el8) grub2 thing: boot entries live as
        # BLS snippets under /boot/loader/entries, selected by name.
        grub2conf = remote.sh('sudo /bin/ls /boot/loader/entries || true')
        entry = None
        for line in grub2conf.split('\n'):
            if line.endswith('.conf') and newversion in line:
                entry = line[:-5]  # drop .conf suffix
                break
    else:
        # do old menuitem counting thing: the default is the 0-based
        # index of the matching menuentry line.
        for line in grub2conf.split('\n'):
            if line.startswith('menuentry '):
                if newversion in line:
                    break
                entry_num += 1
        entry = str(entry_num)
    if entry is None:
        log.warning('Unable to update grub2 order')
    else:
        remote.run(args=['sudo', grubset, entry])
def generate_legacy_grub_entry(remote, newversion):
    """
    Generate a legacy-grub config with an entry for *newversion*,
    modeled on the first existing kernel entry in /boot/grub/grub.conf.

    This will likely need to be used for ceph kernels as well
    as legacy grub rpm distros don't have an easy way of selecting
    a kernel just via a command.

    :param remote: remote to read /boot/grub/grub.conf from
    :param newversion: kernel version string for the new entry
    :returns: list of grub.conf lines with the new entry inserted
        directly before the entry it was modeled on
    """
    grubconf = teuthology.get_file(remote,
                                   '/boot/grub/grub.conf', sudo=True).decode()
    titleline = ''
    rootline = ''
    kernelline = ''
    initline = ''
    kernelversion = ''
    linenum = 0
    titlelinenum = 0

    # Grab first kernel entry (title/root/kernel/init lines).
    # Raw strings: '\s' in a plain string is an invalid escape sequence.
    for line in grubconf.split('\n'):
        if re.match(r'^title', line):
            titleline = line
            titlelinenum = linenum
        if re.match(r'^\s+root', line):
            rootline = line
        if re.match(r'^\s+kernel', line):
            kernelline = line
            for word in line.split(' '):
                if 'vmlinuz' in word:
                    kernelversion = word.split('vmlinuz-')[-1]
        if re.match(r'^\s+initrd', line):
            initline = line
        if (kernelline != '') and (initline != ''):
            break
        else:
            linenum += 1

    # Insert new entry into grubconfnew list.  Use plain string
    # replacement rather than re.sub(): the version is not a regex
    # pattern, and its '.' characters must not match arbitrary text.
    linenum = 0
    newgrubconf = []
    for line in grubconf.split('\n'):
        line = line.rstrip('\n')
        if linenum == titlelinenum:
            newgrubconf.append(titleline.replace(kernelversion, newversion))
            newgrubconf.append(rootline.replace(kernelversion, newversion))
            newgrubconf.append(kernelline.replace(kernelversion, newversion))
            newgrubconf.append(initline.replace(kernelversion, newversion))
            newgrubconf.append('')
            newgrubconf.append(line)
        else:
            newgrubconf.append(line)
        linenum += 1
    return newgrubconf
def get_image_version(remote, path):
    """
    Get kernel image version from (rpm or deb) package.

    :param path: (rpm or deb) package path
    :raises UnsupportedPackageTypeError: if the remote is neither rpm nor deb
    :raises ValueError: if the package contains no /boot/vmlinuz-* file
        (previously this surfaced as a confusing NameError)
    """
    if remote.os.package_type == 'rpm':
        files = remote.sh(['rpm', '-qlp', path])
    elif remote.os.package_type == 'deb':
        files = remote.sh(['dpkg-deb', '-c', path])
    else:
        raise UnsupportedPackageTypeError(remote)

    version = None
    for file in files.split('\n'):
        if '/boot/vmlinuz-' in file:
            version = file.split('/boot/vmlinuz-')[1]
            break
    if version is None:
        raise ValueError("no kernel image (/boot/vmlinuz-*) found in %s" % path)
    log.debug("get_image_version: %s", version)
    return version
def get_latest_image_version_rpm(remote):
    """
    Get kernel image version of the newest kernel rpm package.
    Used for distro case.

    :returns: version string (package NVR minus the package-name prefix),
        or None if nothing matched
    """
    dist_release = remote.os.name
    kernel_pkg_name = None
    version = None
    if dist_release in ['opensuse', 'sle']:
        kernel_pkg_name = "kernel-default"
    else:
        kernel_pkg_name = "kernel"
    # get tip of package list ordered by install time
    newest_package = remote.sh(
        'rpm -q %s --last | head -n 1' % kernel_pkg_name).strip()
    # The --last output line is "<nvr> <install date...>"; scan its
    # whitespace-split tokens for the package NVR.
    for kernel in newest_package.split():
        if kernel.startswith('kernel'):
            if 'ceph' not in kernel:
                if dist_release in ['opensuse', 'sle']:
                    kernel = kernel.split()[0]
                # Strip the "<pkg name>-" prefix to get the bare version.
                version = kernel.split(str(kernel_pkg_name) + '-')[1]
    log.debug("get_latest_image_version_rpm: %s", version)
    return version
def get_latest_image_version_deb(remote, ostype):
    """
    Get kernel image version of the newest kernel deb package.
    Used for distro case.

    Round-about way to get the newest kernel uname -r compliant version string
    from the virtual package which is the newest kenel for debian/ubuntu.

    :param ostype: distro name string ('debian...' or 'ubuntu...')
    :returns: version string; '' if no Depends: line was found
    """
    remote.run(args=['sudo', 'apt-get', 'clean'])
    remote.run(args=['sudo', 'apt-get', 'update'])
    output = StringIO()
    newest = ''
    # Depend of virtual package has uname -r output in package name. Grab that.
    # Note that a dependency list may have multiple comma-separated entries,
    # but also each entry may be an alternative (pkg1 | pkg2)
    if 'debian' in ostype:
        remote.run(args=['sudo', 'apt-get', '-y', 'install',
                         'linux-image-amd64'], stdout=output)
        remote.run(args=['dpkg', '-s', 'linux-image-amd64'], stdout=output)
        for line in output.getvalue().split('\n'):
            if 'Depends:' in line:
                newest = line.split('linux-image-')[1]
        output.close()
        return newest
    # Ubuntu is a depend in a depend.
    if 'ubuntu' in ostype:
        try:
            remote.run(args=['sudo', 'DEBIAN_FRONTEND=noninteractive',
                             'apt-get', '-y', 'install',
                             'linux-image-current-generic'])
            remote.run(args=['dpkg', '-s', 'linux-image-current-generic'],
                       stdout=output)
            # Follow the first-level depend to the real image package.
            # NOTE: `output` accumulates across all runs; the final loop
            # below picks up the last Depends: line written.
            for line in output.getvalue().split('\n'):
                if 'Depends:' in line:
                    depends = line.split('Depends: ')[1]
                    remote.run(args=['sudo', 'apt-get', '-y', 'install',
                                     depends])
            remote.run(args=['dpkg', '-s', depends], stdout=output)
        except run.CommandFailedError:
            # Non precise ubuntu machines (like trusty) don't have
            # linux-image-current-generic so use linux-image-generic instead.
            remote.run(args=['sudo', 'DEBIAN_FRONTEND=noninteractive',
                             'apt-get', '-y', 'install',
                             'linux-image-generic'], stdout=output)
            remote.run(args=['dpkg', '-s', 'linux-image-generic'],
                       stdout=output)
        for line in output.getvalue().split('\n'):
            if 'Depends:' in line:
                newest = line.split('linux-image-')[1]
                if ',' in newest:
                    newest = newest.split(',')[0]
                if '|' in newest:
                    # not strictly correct, as any of the |-joined
                    # packages may satisfy the dependency
                    newest = newest.split('|')[0].strip()
        output.close()
        return newest
def get_sha1_from_pkg_name(path):
    """
    Get commit hash (min 12 max 40 chars) from (rpm or deb) package name.

    Example package names ("make bindeb-pkg" and "make binrpm-pkg"):
        linux-image-4.9.0-rc4-ceph-g156db39ecfbd_4.9.0-rc4-ceph-g156db39ecfbd-1_amd64.deb
        kernel-4.9.0_rc4_ceph_g156db39ecfbd-2.x86_64.rpm

    :param path: (rpm or deb) package path (only basename is used)
    :returns: the sha1 string, or None if the name doesn't match
    """
    basename = os.path.basename(path)
    found = re.search('[-_]ceph[-_]g([0-9a-f]{12,40})', basename)
    if found:
        sha1 = found.group(1)
    else:
        sha1 = None
    log.debug("get_sha1_from_pkg_name: %s -> %s -> %s", path, basename, sha1)
    return sha1
def task(ctx, config):
    """
    Make sure the specified kernel is installed.
    This can be a branch, tag, or sha1 of ceph-client.git or a local
    kernel package.

    To install ceph-client.git branch (default: master)::

        kernel:
          branch: testing

    To install ceph-client.git tag::

        kernel:
          tag: v3.18

    To install ceph-client.git sha1::

        kernel:
          sha1: 275dd19ea4e84c34f985ba097f9cddb539f54a50

    To install from a koji build_id::

        kernel:
          koji: 416058

    To install from a koji task_id::

        kernel:
          koji_task: 9678206

    When installing from koji you also need to set the urls for koji hub
    and the koji root in your teuthology.yaml config file. These are shown
    below with their default values::

        kojihub_url: http://koji.fedoraproject.org/kojihub
        kojiroot_url: http://kojipkgs.fedoraproject.org/packages

    When installing from a koji task_id you also need to set koji_task_url,
    which is the base url used to download rpms from koji task results::

        koji_task_url: https://kojipkgs.fedoraproject.org/work/

    To install local rpm (target should be an rpm system)::

        kernel:
          rpm: /path/to/appropriately-named.rpm

    To install local deb (target should be a deb system)::

        kernel:
          deb: /path/to/appropriately-named.deb

    For rpm: or deb: to work it should be able to figure out sha1 from
    local kernel package basename, see get_sha1_from_pkg_name(). This
    means that you can't for example install a local tag - package built
    with upstream {rpm,deb}-pkg targets won't have a sha1 in its name.

    If you want to schedule a run and use a local kernel package, you
    have to copy the package over to a box teuthology workers are
    running on and specify a path to the package on that box.

    All of the above will install a specified kernel on all targets.
    You can specify different kernels for each role or for all roles of
    a certain type (more specific roles override less specific, see
    normalize_config() for details)::

        kernel:
          client:
            tag: v3.0
          osd:
            branch: btrfs_fixes
          client.1:
            branch: more_specific
          osd.3:
            branch: master

    To wait 3 minutes for hosts to reboot (default: 300)::

        kernel:
          timeout: 180

    To enable kdb::

        kernel:
          kdb: true

    :param ctx: Context
    :param config: Configuration
    """
    if config is None:
        config = {}
    assert isinstance(config, dict), \
        "task kernel only supports a dictionary for configuration"

    overrides = ctx.config.get('overrides', {}).get('kernel', {})
    config, timeout = normalize_and_apply_overrides(ctx, config, overrides)
    validate_config(ctx, config)
    log.info('config %s, timeout %d' % (config, timeout))

    need_install = {}  # sha1 to dl, or path to rpm or deb
    need_version = {}  # utsrelease or sha1
    kdb = {}
    for role, role_config in config.items():
        # gather information about this remote
        (role_remote,) = ctx.cluster.only(role).remotes.keys()
        system_type = role_remote.os.name
        if role_config.get('rpm') or role_config.get('deb'):
            # We only care about path - deb: vs rpm: is meaningless,
            # rpm: just happens to be parsed first. Nothing is stopping
            # 'deb: /path/to/foo.rpm' and it will work provided remote's
            # os.package_type is 'rpm' and vice versa.
            path = role_config.get('rpm')
            if not path:
                path = role_config.get('deb')
            sha1 = get_sha1_from_pkg_name(path)
            assert sha1, "failed to extract commit hash from path %s" % path
            if need_to_install(ctx, role, sha1):
                need_install[role] = path
                need_version[role] = sha1
        elif role_config.get('sha1') == 'distro':
            # need_to_install_distro returns the target version, or
            # False when the distro kernel is already running.
            version = need_to_install_distro(role_remote)
            if version:
                need_install[role] = 'distro'
                need_version[role] = version
        elif role_config.get("koji") or role_config.get('koji_task'):
            # installing a kernel from koji
            build_id = role_config.get("koji")
            task_id = role_config.get("koji_task")
            if role_remote.os.package_type != "rpm":
                msg = (
                    "Installing a kernel from koji is only supported "
                    "on rpm based systems. System type is {system_type}."
                )
                msg = msg.format(system_type=system_type)
                log.error(msg)
                ctx.summary["failure_reason"] = msg
                ctx.summary["status"] = "dead"
                raise ConfigError(msg)

            # FIXME: this install should probably happen somewhere else
            # but I'm not sure where, so we'll leave it here for now.
            install_package('koji', role_remote)

            if build_id:
                # get information about this build from koji
                build_info = get_koji_build_info(build_id, role_remote, ctx)
                version = "{ver}-{rel}.x86_64".format(
                    ver=build_info["version"],
                    rel=build_info["release"]
                )
            elif task_id:
                # get information about results of this task from koji
                task_result = get_koji_task_result(task_id, role_remote, ctx)
                # this is not really 'build_info', it's a dict of information
                # about the kernel rpm from the task results, but for the sake
                # of reusing the code below I'll still call it that.
                build_info = get_koji_task_rpm_info(
                    'kernel',
                    task_result['rpms']
                )
                # add task_id so we can know later that we're installing
                # from a task and not a build.
                build_info["task_id"] = task_id
                version = build_info["version"]

            if need_to_install(ctx, role, version):
                need_install[role] = build_info
                need_version[role] = version
        else:
            # gitbuilder/shaman case - resolve branch/tag/sha1 to a build
            builder = get_builder_project()(
                "kernel",
                role_config,
                ctx=ctx,
                remote=role_remote,
            )
            sha1 = builder.sha1
            log.debug('sha1 for {role} is {sha1}'.format(role=role, sha1=sha1))
            ctx.summary['{role}-kernel-sha1'.format(role=role)] = sha1

            if need_to_install(ctx, role, sha1):
                if teuth_config.use_shaman:
                    version = builder.scm_version
                else:
                    version = builder.version
                if not version:
                    raise VersionNotFoundError(builder.base_url)
                need_install[role] = sha1
                need_version[role] = version

        # enable or disable kdb if specified, otherwise do not touch
        if role_config.get('kdb') is not None:
            kdb[role] = role_config.get('kdb')

    if need_install:
        install_firmware(ctx, need_install)
        download_kernel(ctx, need_install)
        install_and_reboot(ctx, need_install)
        wait_for_reboot(ctx, need_version, timeout)

    enable_disable_kdb(ctx, kdb)
| 36.915038 | 109 | 0.547508 |
import logging
import os
import re
import shlex
from io import StringIO
from teuthology.util.compat import urljoin
from teuthology import misc as teuthology
from teuthology.parallel import parallel
from teuthology.config import config as teuth_config
from teuthology.orchestra import run
from teuthology.exceptions import (
UnsupportedPackageTypeError,
ConfigError,
VersionNotFoundError,
)
from teuthology.packaging import (
install_package,
get_koji_build_info,
get_kojiroot_base_url,
get_koji_package_name,
get_koji_task_rpm_info,
get_koji_task_result,
get_builder_project,
)
log = logging.getLogger(__name__)
CONFIG_DEFAULT = {'branch': 'master'}
TIMEOUT_DEFAULT = 300
VERSION_KEYS = ['branch', 'tag', 'sha1', 'deb', 'rpm', 'koji', 'koji_task']
def normalize_config(ctx, config):
    """
    Expand *config* into a per-role mapping ('type.id' -> kernel config).

    If the config is empty or carries only version/kdb/flavor keys, the
    same configuration is applied to every role in the cluster.
    Otherwise entries are keyed by role: an exact 'type.id' entry wins
    over a bare 'type' entry, which is expanded to all roles of that
    type not already configured explicitly.
    """
    recognized = VERSION_KEYS + ['kdb', 'flavor']
    if not config or \
            len([k for k in config.keys() if k in recognized]) == len(config.keys()):
        # Single config for the whole cluster.
        template = config if config else CONFIG_DEFAULT
        return {role: template.copy()
                for role in teuthology.all_roles(ctx.cluster)}

    expanded = {}
    for role, role_config in config.items():
        effective = CONFIG_DEFAULT if role_config is None else role_config
        if '.' in role:
            # Fully-qualified role: take it as-is.
            expanded[role] = effective.copy()
            continue
        # Bare role type: fan out to every role of that type, unless an
        # explicit 'type.id' entry already exists.
        for id_ in teuthology.all_roles_of_type(ctx.cluster, role):
            name = '{type}.{id}'.format(type=role, id=id_)
            if name not in config:
                expanded[name] = effective.copy()
    return expanded
def normalize_and_apply_overrides(ctx, config, overrides):
    """
    Normalize *config* and *overrides*, then deep-merge overrides into
    config.  An override that specifies any version key wipes all
    version keys from the corresponding role config first, so overrides
    fully replace the kernel selection rather than mixing with it.

    :returns: (merged per-role config, timeout in seconds)
    """
    timeout = config.pop('timeout', TIMEOUT_DEFAULT)

    config = normalize_config(ctx, config)
    log.debug('normalized config %s', config)

    timeout = overrides.pop('timeout', timeout)
    if overrides:
        overrides = normalize_config(ctx, overrides)
        log.debug('normalized overrides %s', overrides)

        # An override with a version key replaces the role's version
        # selection entirely - clear the old keys before merging.
        for role, role_config in config.items():
            override_has_version = (
                role in overrides and
                any(k in overrides[role] for k in VERSION_KEYS))
            if override_has_version:
                for k in VERSION_KEYS:
                    role_config.pop(k, None)

        teuthology.deep_merge(config, overrides)
    return (config, timeout)
def validate_config(ctx, config):
    """
    Assert that all roles colocated on one host request the same kernel,
    removing each role's entry from *config* as it is checked.
    """
    for roles_for_host in ctx.cluster.remotes.values():
        host_kernel = None
        for role in roles_for_host:
            requested = config.get(role, host_kernel)
            if host_kernel is None:
                host_kernel = requested
            elif requested is not None:
                assert host_kernel == requested, \
                    "everything on the same host must use the same kernel"
            config.pop(role, None)
def need_to_install(ctx, role, version):
    """
    Check whether the kernel running on *role* already matches *version*.

    :param version: a utsrelease string (contains '.') or a commit sha1
    :returns: False if the running kernel matches, True otherwise
    """
    log.info('Checking kernel version of {role}, want "{ver}"...'.format(
             role=role, ver=version))
    uname_fp = StringIO()
    ctx.cluster.only(role).run(
        args=[
            'uname',
            '-r',
        ],
        stdout=uname_fp,
    )
    running = uname_fp.getvalue().rstrip('\n')
    uname_fp.close()
    log.debug('current kernel version is {ver} vs {want}'.format(ver=running,
                                  want=version))
    if '.' in str(version):
        # utsrelease-style version: exact string comparison.
        if running == version:
            log.debug('utsrelease strings match, do not need to install')
            return False
        return True

    # sha1-style version: compare against the -g<sha1> suffix of uname -r.
    found = re.search('[-_]g([0-9a-f]{6,40})', running)
    if not found:
        log.debug('failed to parse current kernel version')
        return True
    running_sha1 = found.group(1)
    log.debug('extracting sha1, {ver} -> {sha1}'.format(
        ver=running, sha1=running_sha1))
    # Compare the common prefix; either side may be truncated.
    m = min(len(running_sha1), len(version))
    assert m >= 6, "cur_sha1 and/or version is too short, m = %d" % m
    if running_sha1[0:m] == version[0:m]:
        log.debug('extracted sha1 matches, do not need to install')
        return False
    return True
def install_firmware(ctx, config):
    """
    Make sure an up-to-date linux-firmware is present on every role.

    On rpm systems the distro linux-firmware package is upgraded; on
    deb systems a git checkout of linux-firmware is synced into
    /lib/firmware/updates.  If any role uses a distro kernel the
    function returns immediately, skipping firmware for all roles.

    :param ctx: Context
    :param config: role -> (sha1 | path | 'distro' | koji dict) mapping
    """
    linux_firmware_git_upstream = 'git://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git'
    uri = teuth_config.linux_firmware_git_url or linux_firmware_git_upstream
    fw_dir = '/lib/firmware/updates'

    for role in config.keys():
        if isinstance(config[role], str) and config[role].find('distro') >= 0:
            log.info('Skipping firmware on distro kernel');
            return
        (role_remote,) = ctx.cluster.only(role).remotes.keys()
        package_type = role_remote.os.package_type
        if package_type == 'rpm':
            # rpm path: just upgrade the packaged firmware.
            role_remote.run(args=[
                'sudo', 'yum', 'upgrade', '-y', 'linux-firmware',
            ])
            continue
        log.info('Installing linux-firmware on {role}...'.format(role=role))
        # Recover from a previously interrupted clone (empty .git/HEAD),
        # then (re-)initialize the repo in fw_dir.
        role_remote.run(
            args=[
                'cd', fw_dir,
                run.Raw('&&'),
                'test', '-d', '.git',
                run.Raw('&&'),
                'test', '!', '-s', '.git/HEAD',
                run.Raw('&&'),
                'sudo', 'rm', '-rf', '.git',
                run.Raw(';'),
                'sudo', 'install', '-d', '-m0755', fw_dir,
                run.Raw('&&'),
                'cd', fw_dir,
                run.Raw('&&'),
                'sudo', 'git', 'init',
            ],
        )
        # Add the origin remote if it is not configured yet.
        role_remote.run(
            args=[
                'sudo', 'git', '--git-dir=%s/.git' % fw_dir, 'config',
                '--get', 'remote.origin.url', run.Raw('>/dev/null'),
                run.Raw('||'),
                'sudo', 'git', '--git-dir=%s/.git' % fw_dir,
                'remote', 'add', 'origin', uri,
            ],
        )
        # Make sure origin points at the configured uri.
        role_remote.run(
            args=[
                'sudo', 'git', '--git-dir=%s/.git' % fw_dir, 'remote',
                'set-url', 'origin', uri, run.Raw('>/dev/null')
            ]
        )
        # Sync the working tree to origin/master.
        role_remote.run(
            args=[
                'cd', fw_dir,
                run.Raw('&&'),
                'sudo', 'git', 'fetch', 'origin',
                run.Raw('&&'),
                'sudo', 'git', 'reset', '--hard', 'origin/master'
            ],
        )
def gitbuilder_pkg_name(remote):
    """
    Return the generic gitbuilder kernel package filename for the
    remote's package type.

    :raises UnsupportedPackageTypeError: for non-rpm, non-deb remotes
    """
    ptype = remote.os.package_type
    if ptype == 'rpm':
        return 'kernel.x86_64.rpm'
    if ptype == 'deb':
        return 'linux-image.deb'
    raise UnsupportedPackageTypeError(remote)
def remote_pkg_path(remote):
    """
    Path on the remote where the downloaded kernel package is staged.
    """
    filename = gitbuilder_pkg_name(remote)
    return os.path.join('/tmp', filename)
def download_kernel(ctx, config):
    """
    Stage a kernel package on each role's remote at remote_pkg_path():
    downloaded from koji, from a gitbuilder/shaman repository, or copied
    from a local path.  Roles configured as 'distro' are skipped.
    Downloads run in parallel; the function waits for all of them.

    :param ctx: Context
    :param config: role -> (sha1 | path | 'distro' | koji dict) mapping
    """
    procs = {}
    for role, src in config.items():
        needs_download = False

        if src == 'distro':
            log.debug("src is distro, skipping download");
            continue

        (role_remote,) = ctx.cluster.only(role).remotes.keys()
        if isinstance(src, dict):
            # we're downloading a kernel from koji; the src dict here
            # carries either a build id or a task id (built in task())
            if src.get("id"):
                build_id = src["id"]
                log.info("Downloading kernel with build_id {build_id} on {role}...".format(
                    build_id=build_id,
                    role=role
                ))
                needs_download = True
                baseurl = get_kojiroot_base_url(src)
                pkg_name = get_koji_package_name("kernel", src)
            elif src.get("task_id"):
                needs_download = True
                log.info("Downloading kernel with task_id {task_id} on {role}...".format(
                    task_id=src["task_id"],
                    role=role
                ))
                baseurl = src["base_url"]
                # for task results this is not a constructed package name
                # but the full name of the rpm file to download.
                pkg_name = src["rpm_name"]
        elif src.find('/') >= 0:
            # local package - src is path
            log.info('Copying kernel package {path} to {role}...'.format(
                path=src, role=role))
            role_remote.put_file(src,remote_pkg_path(role_remote))
        else:
            # gitbuilder package - src is sha1
            log.info('Downloading kernel {sha1} on {role}...'.format(
                sha1=src,
                role=role,
            ))
            needs_download = True
            builder = get_builder_project()(
                'kernel',
                {'sha1': src},
                ctx=ctx,
                remote=role_remote,
            )
            if teuth_config.use_shaman:
                # shaman repos lay packages out per-arch (rpm) or in a
                # debian pool hierarchy (deb).
                if role_remote.os.package_type == 'rpm':
                    arch = builder.arch
                    baseurl = urljoin(
                        builder.base_url,
                        '/'.join([arch, ''])
                    )
                    pkg_name = "kernel-%s.%s.rpm" % (
                        builder.version,
                        arch,
                    )
                elif role_remote.os.package_type == 'deb':
                    arch = 'amd64'  # FIXME
                    baseurl = urljoin(
                        builder.base_url,
                        '/'.join([
                            'pool', 'main', 'l',
                            'linux-%s' % builder.scm_version, ''
                        ])
                    )
                    pkg_name = 'linux-image-%s_%s_%s.deb' % (
                        builder.scm_version,
                        builder.version,
                        arch,
                    )
            else:
                baseurl = builder.base_url + "/"
                pkg_name = gitbuilder_pkg_name(role_remote)

            log.info("fetching, builder baseurl is %s", baseurl)

        if needs_download:
            # wget reads the filename on stdin (--input-file=-) and
            # resolves it against --base; runs asynchronously.
            proc = role_remote.run(
                args=[
                    'rm', '-f', remote_pkg_path(role_remote),
                    run.Raw('&&'),
                    'echo',
                    pkg_name,
                    run.Raw('|'),
                    'wget',
                    '-nv',
                    '-O',
                    remote_pkg_path(role_remote),
                    '--base={url}'.format(url=baseurl),
                    '--input-file=-',
                ],
                wait=False)
            procs[role_remote.name] = proc

    for name, proc in procs.items():
        log.debug('Waiting for download/copy to %s to complete...', name)
        proc.wait()
def _no_grub_link(in_file, remote, kernel_ver):
    """
    For systems without grub (e.g. highbank): back up /boot/<in_file>
    and symlink it to its versioned counterpart for *kernel_ver*.
    """
    current = '/boot/%s' % in_file
    backup = '%s.old' % current
    # Move the existing file aside, if present.
    remote.run(
        args=[
            'if', 'test', '-e', current, run.Raw(';'), 'then',
            'sudo', 'mv', current, backup, run.Raw(';'), 'fi',
        ],
    )
    # Point the generic name at the versioned artifact.
    remote.run(
        args=['sudo', 'ln', '-s', '%s-%s' % (in_file, kernel_ver), current],
    )
def install_latest_rh_kernel(ctx, config):
    """
    Update every remote in the cluster to the latest RH kernel, in
    parallel.  A truthy 'skip' in *config* disables the whole task.
    """
    config = config if config is not None else {}
    if config.get('skip'):
        return
    with parallel() as p:
        for remote in ctx.cluster.remotes.keys():
            p.spawn(update_rh_kernel, remote)
def update_rh_kernel(remote):
    """
    Update the kernel on one rpm-based remote via yum; if a new kernel
    was installed, reboot and reconnect.  No-op on non-rpm remotes.

    :param remote: remote to update
    """
    import time
    package_type = remote.os.package_type
    remote.run(args=['uname', '-a'])
    if package_type == 'rpm':
        update_log = remote.sh('sudo yum update -y kernel')
        log.info(update_log)
        # 'in' instead of the old `not s.find(x) == -1` double negative.
        if 'Installed' in update_log:
            log.info("Kernel updated to latest z stream on %s", remote.shortname)
            log.info("Rebooting %s", remote.shortname)
            remote.run(args=['sudo', 'shutdown', '-r', 'now'], wait=False)
            # Give the node time to actually go down before reconnecting.
            time.sleep(40)
            log.info("Reconnecting after reboot")
            remote.reconnect(timeout=300)
            remote.run(args=['uname', '-a'])
        elif 'No packages marked for update' in update_log:
            log.info("Latest version already installed on %s", remote.shortname)
def install_and_reboot(ctx, config):
    """
    Install the staged kernel package on each role's remote and reboot
    into it.  For rpm and distro kernels the grub/initrd work (and the
    reboot) is delegated to install_kernel(); the deb path is handled
    inline here, including setting the grub default.

    :param ctx: Context
    :param config: role -> (package path | 'distro' | koji info) mapping
    """
    procs = {}
    kernel_title = ''
    for role, src in config.items():
        (role_remote,) = ctx.cluster.only(role).remotes.keys()
        if isinstance(src, str) and src.find('distro') >= 0:
            log.info('Installing distro kernel on {role}...'.format(role=role))
            install_kernel(role_remote, version=src)
            continue

        log.info('Installing kernel {src} on {role}...'.format(src=src,
                                                               role=role))
        package_type = role_remote.os.package_type
        if package_type == 'rpm':
            proc = role_remote.run(
                args=[
                    'sudo',
                    'rpm',
                    '-ivh',
                    '--oldpackage',
                    '--replacefiles',
                    '--replacepkgs',
                    remote_pkg_path(role_remote),
                ])
            install_kernel(role_remote, remote_pkg_path(role_remote))
            continue

        # TODO: Refactor this into install_kernel() so that it handles all
        # cases for both rpm and deb packages.
        proc = role_remote.run(
            args=[
                # install the kernel deb
                'sudo',
                'dpkg',
                '-i',
                remote_pkg_path(role_remote),
            ],
        )

        # collect kernel image name from the .deb
        kernel_title = get_image_version(role_remote,
                                         remote_pkg_path(role_remote))
        log.info('searching for kernel {}'.format(kernel_title))

        if kernel_title.endswith("-highbank"):
            # highbank nodes boot without grub - symlink /boot directly.
            _no_grub_link('vmlinuz', role_remote, kernel_title)
            _no_grub_link('initrd.img', role_remote, kernel_title)
            proc = role_remote.run(
                args=[
                    'sudo',
                    'shutdown',
                    '-r',
                    'now',
                ],
                wait=False,
            )
            procs[role_remote.name] = proc
            continue

        # look for menuentry for our kernel, and collect any
        # submenu entries for their titles. Assume that if our
        # kernel entry appears later in the file than a submenu entry,
        # it's actually nested under that submenu.
        kernel_entries = role_remote.sh([
            'egrep',
            '(submenu|menuentry.*' + kernel_title + ').*{',
            '/boot/grub/grub.cfg'
        ]).split('\n')
        submenu_title = ''
        default_title = ''
        for l in kernel_entries:
            fields = shlex.split(l)
            if len(fields) >= 2:
                command, title = fields[:2]
                if command == 'submenu':
                    submenu_title = title + '>'
                if command == 'menuentry':
                    if title.endswith(kernel_title):
                        default_title = title
                        break
        log.info('submenu_title:{}'.format(submenu_title))
        log.info('default_title:{}'.format(default_title))

        proc = role_remote.run(
            args=[
                '/bin/echo',
                '-e',
                r'cat <<EOF\nset default="' + submenu_title + \
                default_title + r'"\nEOF\n',
                # we are not root in this shell; use sudo tee so we are
                # able to write under /etc
                run.Raw('|'),
                'sudo',
                'tee',
                '--',
                '/etc/grub.d/01_ceph_kernel.tmp~',
                run.Raw('>/dev/null'),
                run.Raw('&&'),
                'sudo',
                'chmod',
                'a+x',
                '--',
                '/etc/grub.d/01_ceph_kernel.tmp~',
                run.Raw('&&'),
                'sudo',
                'mv',
                '--',
                '/etc/grub.d/01_ceph_kernel.tmp~',
                '/etc/grub.d/01_ceph_kernel',
                # update grub again so it accepts our default
                run.Raw('&&'),
                'sudo',
                'update-grub',
                run.Raw('&&'),
                'rm',
                remote_pkg_path(role_remote),
                run.Raw('&&'),
                # work around a systemd issue, where network gets shut down
                # before ssh can close its session
                run.Raw('('),
                'sleep',
                '1',
                run.Raw('&&'),
                'sudo',
                'shutdown',
                '-r',
                'now',
                run.Raw('&'),
                run.Raw(')'),
            ],
            wait=False,
        )
        procs[role_remote.name] = proc

    for name, proc in procs.items():
        log.debug('Waiting for install on %s to complete...', name)
        proc.wait()
def enable_disable_kdb(ctx, config):
    """
    Enable kdb on remotes mapped to True in *config*, disable it on
    those mapped to False.  Remotes whose kernel lacks kdb support only
    produce a warning.

    :param ctx: Context
    :param config: role -> bool mapping
    """
    for role, enable in config.items():
        (role_remote,) = ctx.cluster.only(role).remotes.keys()
        # mira hardware exposes the console on a different serial port.
        if "mira" in role_remote.name:
            serialdev = "ttyS2"
        else:
            serialdev = "ttyS1"
        if enable:
            log.info('Enabling kdb on {role}...'.format(role=role))
            try:
                role_remote.run(
                    args=[
                        'echo', serialdev,
                        run.Raw('|'),
                        'sudo', 'tee', '/sys/module/kgdboc/parameters/kgdboc'
                        ])
            except run.CommandFailedError:
                # log.warn is a deprecated alias (removed in Python 3.13)
                log.warning('Kernel does not support kdb')
        else:
            log.info('Disabling kdb on {role}...'.format(role=role))
            # Add true pipe so command doesn't fail on kernel without kdb support.
            try:
                role_remote.run(
                    args=[
                        'echo', '',
                        run.Raw('|'),
                        'sudo', 'tee', '/sys/module/kgdboc/parameters/kgdboc',
                        run.Raw('|'),
                        'true',
                        ])
            except run.CommandFailedError:
                log.warning('Kernel does not support kdb')
def wait_for_reboot(ctx, need_install, timeout, distro=False):
    """
    Poll rebooting remotes until each one reports the expected kernel
    version, or *timeout* seconds have elapsed.

    :param ctx: Context
    :param need_install: client role -> expected version (utsrelease,
        sha1, or a value containing 'distro')
    :param timeout: seconds to keep retrying before re-raising
    :param distro: force distro-kernel checking (also auto-detected per
        client from need_install values)
    """
    import time
    # Give the reboots a head start before the first reconnect attempt.
    time.sleep(30)
    starttime = time.time()
    while need_install:
        teuthology.reconnect(ctx, timeout)
        for client in list(need_install.keys()):
            if 'distro' in str(need_install[client]):
                distro = True
            log.info('Checking client {client} for new kernel version...'.format(client=client))
            try:
                if distro:
                    (remote,) = ctx.cluster.only(client).remotes.keys()
                    assert not need_to_install_distro(remote), \
                        'failed to install new distro kernel version within timeout'
                else:
                    assert not need_to_install(ctx, client, need_install[client]), \
                        'failed to install new kernel version within timeout'
                # Satisfied - stop polling this client.
                del need_install[client]
            except Exception:
                # Connection errors and failed asserts are retried until
                # the timeout expires, then the last one is re-raised.
                log.exception("Saw exception")
                if time.time() - starttime > timeout:
                    raise
        time.sleep(1)
def get_version_of_running_kernel(remote):
    """
    Return the running kernel version (uname -r), with the '-default'
    suffix stripped on openSUSE/SLE remotes.
    """
    uname_r = remote.sh("uname -r").strip()
    if remote.os.name in ['opensuse', 'sle']:
        return re.sub(r"-default$", "", uname_r)
    return uname_r
def need_to_install_distro(remote):
    """
    Decide whether the newest distro kernel still needs to be installed
    and booted on *remote*.  As a side effect, installs (or reinstalls)
    the distro kernel package via zypper/yum/apt probing.

    :returns: False if the newest distro kernel is already installed
        and running, otherwise the newest version string (truthy).
    """
    dist_release = remote.os.name
    package_type = remote.os.package_type
    current = get_version_of_running_kernel(remote)
    log.info("Running kernel on {node}: {version}".format(
        node=remote.shortname, version=current))
    installed_version = None
    if package_type == 'rpm':
        if dist_release in ['opensuse', 'sle']:
            install_stdout = remote.sh(
                'sudo zypper --non-interactive install kernel-default'
            )
        else:
            install_stdout = remote.sh(
                'sudo yum install -y kernel'
            )
            match = re.search(
                "Package (.*) already installed",
                install_stdout, flags=re.MULTILINE)
            if 'Nothing to do' in install_stdout:
                installed_version = match.groups()[0] if match else ''
                err_mess = StringIO()
                err_mess.truncate(0)
                # Probe with a declined reinstall: yum's stderr tells us
                # whether the installed kernel is the one running.
                remote.run(args=['echo', 'no', run.Raw('|'), 'sudo', 'yum',
                                 'reinstall', 'kernel', run.Raw('||'), 'true'],
                           stderr=err_mess)
                reinstall_stderr = err_mess.getvalue()
                err_mess.close()
                if 'Skipping the running kernel' in reinstall_stderr:
                    running_version = re.search(
                        "Skipping the running kernel: (.*)",
                        reinstall_stderr, flags=re.MULTILINE).groups()[0]
                    if installed_version == running_version:
                        log.info(
                            'Newest distro kernel already installed and running')
                        return False
                else:
                    # Not running the installed kernel - reinstall for real.
                    remote.run(args=['sudo', 'yum', 'reinstall', '-y', 'kernel',
                                     run.Raw('||'), 'true'])
        newest = get_latest_image_version_rpm(remote)

    if package_type == 'deb':
        newest = get_latest_image_version_deb(remote, dist_release)

    # deb versions may use '_' where uname reports '-'; compare both.
    if current in newest or current.replace('-', '_') in newest:
        log.info('Newest distro kernel installed and running')
        return False
    log.info(
        'Not newest distro kernel. Current: {cur} Expected: {new}'.format(
            cur=current, new=newest))
    return newest
def maybe_generate_initrd_rpm(remote, path, version):
    """
    Generate initrd with mkinitrd if the hooks that should make it
    happen on their own aren't there.

    :param path: rpm package path
    :param version: kernel version to generate initrd for
        e.g. 3.18.0-rc6-ceph-00562-g79a9fa5
    """
    out = remote.sh(['rpm', '--scripts', '-qp', path])
    if 'bin/installkernel' in out or 'bin/kernel-install' in out:
        # The package scriptlets build the initrd themselves.
        return
    log.info("No installkernel or kernel-install hook in %s, "
             "will generate initrd for %s", path, version)
    remote.run(
        args=[
            'sudo',
            'mkinitrd',
            '--allow-missing',
            '-f',  # overwrite existing initrd
            '/boot/initramfs-' + version + '.img',
            version,
        ])
def install_kernel(remote, path=None, version=None):
    """Install a kernel on *remote* and trigger an asynchronous reboot into it.

    :param remote: remote to act on
    :param path: path of an already-downloaded kernel package on the remote;
        when unset, the newest distro kernel is installed/selected instead
    :param version: kernel version to boot; for rpm remotes only consulted
        when *path* is not given (``None``/'distro' mean "latest distro
        kernel")

    The reboot is issued with ``wait=False``; callers must wait for the
    remote to come back themselves.
    """
    dist_release = remote.os.name
    templ = "install_kernel(remote={remote}, path={path}, version={version})"
    log.debug(templ.format(remote=remote, path=path, version=version))
    package_type = remote.os.package_type
    if package_type == 'rpm':
        if dist_release in ['opensuse', 'sle']:
            # NOTE(review): no grub update is performed for openSUSE/SLE --
            # presumably the package install already configured the
            # bootloader; confirm.
            pass
        else:
            if path:
                # A specific package was provided: read its version and make
                # sure an initrd exists for it.
                version = get_image_version(remote, path)
                maybe_generate_initrd_rpm(remote, path, version)
            elif not version or version == 'distro':
                version = get_latest_image_version_rpm(remote)
            update_grub_rpm(remote, version)
        remote.run( args=['sudo', 'shutdown', '-r', 'now'], wait=False )
        return
    if package_type == 'deb':
        newversion = get_latest_image_version_deb(remote, dist_release)
        if 'ubuntu' in dist_release:
            grub2conf = teuthology.get_file(remote,
                '/boot/grub/grub.cfg', sudo=True).decode()
            submenu = ''
            menuentry = ''
            # Locate the menuentry for the new kernel (and the submenu that
            # contains it, if any) so grub can be pointed at it by name.
            for line in grub2conf.split('\n'):
                if 'submenu' in line:
                    submenu = line.split('submenu ')[1]
                    # Ubuntu likes to be sneaky and change formatting of
                    # grub.cfg between quotes/doublequotes between versions
                    if submenu.startswith("'"):
                        submenu = submenu.split("'")[1]
                    if submenu.startswith('"'):
                        submenu = submenu.split('"')[1]
                if 'menuentry' in line:
                    if newversion in line and 'recovery' not in line:
                        menuentry = line.split('\'')[1]
                        break
            if submenu:
                # Nested entries are addressed as "<submenu>><entry>".
                grubvalue = submenu + '>' + menuentry
            else:
                grubvalue = menuentry
            # Drop a grub.d snippet that sets the chosen entry as default.
            grubfile = 'cat <<EOF\nset default="' + grubvalue + '"\nEOF'
            teuthology.delete_file(remote, '/etc/grub.d/01_ceph_kernel', sudo=True, force=True)
            teuthology.sudo_write_file(remote, '/etc/grub.d/01_ceph_kernel', StringIO(grubfile), '755')
            log.info('Distro Kernel Version: {version}'.format(version=newversion))
            remote.run(args=['sudo', 'update-grub'])
            remote.run(args=['sudo', 'shutdown', '-r', 'now'], wait=False )
            return
        if 'debian' in dist_release:
            grub2_kernel_select_generic(remote, newversion, 'deb')
            log.info('Distro Kernel Version: {version}'.format(version=newversion))
            remote.run( args=['sudo', 'shutdown', '-r', 'now'], wait=False )
            return
def update_grub_rpm(remote, newversion):
    """Point the bootloader on an rpm-based *remote* at kernel *newversion*.

    Detects whether the host uses grub2 (``grub2-tools`` installed) or
    legacy grub and updates the corresponding configuration.
    """
    grub='grub2'
    # If grub2-tools is not installed, assume legacy grub (grub 0.x).
    try:
        remote.run(args=['sudo', 'rpm', '-qi', 'grub2-tools'])
    except Exception:
        grub = 'legacy'
    log.info('Updating Grub Version: {grub}'.format(grub=grub))
    if grub == 'legacy':
        # Rebuild /boot/grub/grub.conf with an entry for the new kernel,
        # writing it via a temp file and moving it into place.
        data = ''
        newgrub = generate_legacy_grub_entry(remote, newversion)
        for line in newgrub:
            data += line + '\n'
        temp_file_path = remote.mktemp()
        teuthology.sudo_write_file(remote, temp_file_path, StringIO(data), '755')
        teuthology.move_file(remote, temp_file_path, '/boot/grub/grub.conf', True)
    else:
        grub2_kernel_select_generic(remote, newversion, 'rpm')
def grub2_kernel_select_generic(remote, newversion, ostype):
    """Make grub2 on *remote* boot *newversion* by default.

    :param ostype: 'rpm' or 'deb'; selects the grub2 tool names and the
        location of grub.cfg.
    """
    log.info("Updating grub on {node} to boot {version}".format(
        node=remote.shortname, version=newversion))
    if ostype == 'rpm':
        grubset = 'grub2-set-default'
        mkconfig = 'grub2-mkconfig'
        grubconfig = '/boot/grub2/grub.cfg'
    if ostype == 'deb':
        grubset = 'grub-set-default'
        grubconfig = '/boot/grub/grub.cfg'
        mkconfig = 'grub-mkconfig'
    # Regenerate grub.cfg, then work out which entry matches newversion.
    remote.run(args=['sudo', mkconfig, '-o', grubconfig, ])
    grub2conf = teuthology.get_file(remote, grubconfig, sudo=True).decode()
    entry_num = 0
    if '\nmenuentry ' not in grub2conf:
        # No classic menuentries: look for per-kernel snippet files instead
        # (presumably BootLoaderSpec /boot/loader/entries -- TODO confirm);
        # the default entry is then the snippet file name minus '.conf'.
        grub2conf = remote.sh('sudo /bin/ls /boot/loader/entries || true')
        entry = None
        for line in grub2conf.split('\n'):
            if line.endswith('.conf') and newversion in line:
                entry = line[:-5]
                break
    else:
        # Classic grub.cfg: the default is the ordinal of the matching
        # menuentry.
        for line in grub2conf.split('\n'):
            if line.startswith('menuentry '):
                if newversion in line:
                    break
                entry_num += 1
        entry = str(entry_num)
    if entry is None:
        log.warning('Unable to update grub2 order')
    else:
        remote.run(args=['sudo', grubset, entry])
def generate_legacy_grub_entry(remote, newversion):
    """Build a legacy (grub 0.x) grub.conf with an entry for *newversion*.

    Reads ``/boot/grub/grub.conf`` from the remote, locates the first
    complete boot stanza (title/root/kernel/initrd), and returns the file
    contents as a list of lines with a copy of that stanza -- rewritten for
    *newversion* -- inserted immediately before the original entry.
    """
    grubconf = teuthology.get_file(remote,
        '/boot/grub/grub.conf', sudo=True).decode()
    titleline = ''
    rootline = ''
    kernelline = ''
    initline = ''
    kernelversion = ''
    linenum = 0
    titlelinenum = 0
    # First pass: capture the components of the first full boot stanza and
    # remember the index of its "title" line.  Raw strings keep '\s' a valid
    # regex escape instead of an invalid string escape.
    for line in grubconf.split('\n'):
        if re.match(r'^title', line):
            titleline = line
            titlelinenum = linenum
        if re.match(r'(^\s+)root', line):
            rootline = line
        if re.match(r'(^\s+)kernel', line):
            kernelline = line
            # The version is whatever follows "vmlinuz-" on the kernel line.
            for word in line.split(' '):
                if 'vmlinuz' in word:
                    kernelversion = word.split('vmlinuz-')[-1]
        if re.match(r'(^\s+)initrd', line):
            initline = line
        if (kernelline != '') and (initline != ''):
            break
        else:
            linenum += 1
    # re.escape() keeps dots (and any other metacharacters) in the version
    # string from being treated as regex wildcards during substitution.
    pattern = re.escape(kernelversion)
    # Second pass: emit the new entry right before the original title line,
    # keeping everything else verbatim.
    linenum = 0
    newgrubconf = []
    for line in grubconf.split('\n'):
        line = line.rstrip('\n')
        if linenum == titlelinenum:
            newgrubconf.append(re.sub(pattern, newversion, titleline))
            newgrubconf.append(re.sub(pattern, newversion, rootline))
            newgrubconf.append(re.sub(pattern, newversion, kernelline))
            newgrubconf.append(re.sub(pattern, newversion, initline))
            newgrubconf.append('')
            newgrubconf.append(line)
        else:
            newgrubconf.append(line)
        linenum += 1
    return newgrubconf
def get_image_version(remote, path):
    """Return the kernel version embedded in the package at *path*.

    Lists the files shipped by the rpm/deb and extracts the suffix of the
    ``/boot/vmlinuz-<version>`` entry.

    :raises UnsupportedPackageTypeError: if the remote is neither rpm nor deb.
    :raises RuntimeError: if the package contains no /boot/vmlinuz-* file.
    """
    if remote.os.package_type == 'rpm':
        files = remote.sh(['rpm', '-qlp', path])
    elif remote.os.package_type == 'deb':
        files = remote.sh(['dpkg-deb', '-c', path])
    else:
        raise UnsupportedPackageTypeError(remote)
    version = None
    # "entry" instead of "file" to avoid shadowing the builtin.
    for entry in files.split('\n'):
        if '/boot/vmlinuz-' in entry:
            version = entry.split('/boot/vmlinuz-')[1]
            break
    if version is None:
        # Previously this fell through to an UnboundLocalError; fail with a
        # message that identifies the offending package instead.
        raise RuntimeError(
            'no /boot/vmlinuz-* entry found in package %s' % path)
    log.debug("get_image_version: %s", version)
    return version
def get_latest_image_version_rpm(remote):
    """Return the version of the newest installed distro kernel rpm.

    Runs ``rpm -q <pkg> --last | head -n 1`` and strips the package-name
    prefix from the package token, leaving the version string.  Returns
    ``None`` if no suitable token is found.
    """
    dist_release = remote.os.name
    kernel_pkg_name = None
    version = None
    # openSUSE/SLE ship the kernel as "kernel-default"; others as "kernel".
    if dist_release in ['opensuse', 'sle']:
        kernel_pkg_name = "kernel-default"
    else:
        kernel_pkg_name = "kernel"
    newest_package = remote.sh(
        'rpm -q %s --last | head -n 1' % kernel_pkg_name).strip()
    # The line looks like "<pkg>-<version> <install timestamp...>"; scan its
    # whitespace-separated tokens for the package token, skipping any
    # ceph-built kernels.
    for kernel in newest_package.split():
        if kernel.startswith('kernel'):
            if 'ceph' not in kernel:
                if dist_release in ['opensuse', 'sle']:
                    kernel = kernel.split()[0]
                version = kernel.split(str(kernel_pkg_name) + '-')[1]
    log.debug("get_latest_image_version_rpm: %s", version)
    return version
def get_latest_image_version_deb(remote, ostype):
    """Install the newest distro kernel on a deb remote and return its version.

    Installs the distro kernel metapackage (``linux-image-amd64`` on Debian,
    ``linux-image-current-generic``/``linux-image-generic`` on Ubuntu) and
    derives the concrete kernel version from the metapackage's ``Depends:``
    line.  Returns '' if no Depends line is seen.
    """
    remote.run(args=['sudo', 'apt-get', 'clean'])
    remote.run(args=['sudo', 'apt-get', 'update'])
    output = StringIO()
    newest = ''
    if 'debian' in ostype:
        remote.run(args=['sudo', 'apt-get', '-y', 'install',
                         'linux-image-amd64'], stdout=output)
        remote.run(args=['dpkg', '-s', 'linux-image-amd64'], stdout=output)
        # The metapackage depends on the concrete linux-image-<version> pkg.
        for line in output.getvalue().split('\n'):
            if 'Depends:' in line:
                newest = line.split('linux-image-')[1]
        output.close()
        return newest
    if 'ubuntu' in ostype:
        try:
            remote.run(args=['sudo', 'DEBIAN_FRONTEND=noninteractive',
                             'apt-get', '-y', 'install',
                             'linux-image-current-generic'])
            remote.run(args=['dpkg', '-s', 'linux-image-current-generic'],
                       stdout=output)
            # Install whatever the metapackage depends on, then query the
            # last such dependency for its own Depends line.
            for line in output.getvalue().split('\n'):
                if 'Depends:' in line:
                    depends = line.split('Depends: ')[1]
                    remote.run(args=['sudo', 'apt-get', '-y', 'install',
                                     depends])
            remote.run(args=['dpkg', '-s', depends], stdout=output)
        except run.CommandFailedError:
            # linux-image-current-generic is not available on this release,
            # so fall back to the plain linux-image-generic metapackage.
            remote.run(args=['sudo', 'DEBIAN_FRONTEND=noninteractive',
                             'apt-get', '-y', 'install',
                             'linux-image-generic'], stdout=output)
            remote.run(args=['dpkg', '-s', 'linux-image-generic'],
                       stdout=output)
        for line in output.getvalue().split('\n'):
            if 'Depends:' in line:
                newest = line.split('linux-image-')[1]
        # Depends lines may list several alternatives; keep the first token.
        if ',' in newest:
            newest = newest.split(',')[0]
        if '|' in newest:
            # not strictly correct, as any of the |-joined
            # packages may satisfy the dependency
            newest = newest.split('|')[0].strip()
        output.close()
        return newest
def get_sha1_from_pkg_name(path):
    """Extract the ceph commit sha1 embedded in a package file name.

    Returns the 12-40 hex-digit hash following a "-ceph-g"/"_ceph_g"
    marker in the basename of *path*, or None if no such marker exists.
    """
    basename = os.path.basename(path)
    found = re.search('[-_]ceph[-_]g([0-9a-f]{12,40})', basename)
    if found:
        sha1 = found.group(1)
    else:
        sha1 = None
    log.debug("get_sha1_from_pkg_name: %s -> %s -> %s", path, basename, sha1)
    return sha1
def task(ctx, config):
    """
    Install and boot a kernel on the remotes covered by *config*.

    *config* maps each role to a dict selecting the kernel source: a
    local ``rpm:``/``deb:`` package path, ``sha1: distro`` for the newest
    distro kernel, a ``koji``/``koji_task`` build id, or (by default) a
    kernel built for the configured branch/sha1 via the builder project.
    A per-role ``kdb`` flag enables/disables the kernel debugger after
    boot.  Overrides from ``ctx.config['overrides']['kernel']`` are
    merged in before validation.
    """
    if config is None:
        config = {}
    assert isinstance(config, dict), \
        "task kernel only supports a dictionary for configuration"
    overrides = ctx.config.get('overrides', {}).get('kernel', {})
    config, timeout = normalize_and_apply_overrides(ctx, config, overrides)
    validate_config(ctx, config)
    log.info('config %s, timeout %d' % (config, timeout))
    need_install = {} # sha1 to dl, or path to rpm or deb
    need_version = {} # utsrelease or sha1
    kdb = {}
    for role, role_config in config.items():
        # gather information about this remote
        (role_remote,) = ctx.cluster.only(role).remotes.keys()
        system_type = role_remote.os.name
        if role_config.get('rpm') or role_config.get('deb'):
            # We only care about path - deb: vs rpm: is meaningless,
            # rpm: just happens to be parsed first.  Nothing is stopping
            # 'deb: /path/to/foo.rpm' and it will work provided remote's
            # package type actually matches the file.
            path = role_config.get('rpm')
            if not path:
                path = role_config.get('deb')
            sha1 = get_sha1_from_pkg_name(path)
            assert sha1, "failed to extract commit hash from path %s" % path
            if need_to_install(ctx, role, sha1):
                need_install[role] = path
                need_version[role] = sha1
        elif role_config.get('sha1') == 'distro':
            # need_to_install_distro returns the target version only when an
            # install is actually required (falsy otherwise).
            version = need_to_install_distro(role_remote)
            if version:
                need_install[role] = 'distro'
                need_version[role] = version
        elif role_config.get("koji") or role_config.get('koji_task'):
            # Installing a kernel from a koji build or scratch task.
            build_id = role_config.get("koji")
            task_id = role_config.get("koji_task")
            if role_remote.os.package_type != "rpm":
                msg = (
                    "Installing a kernel from koji is only supported "
                    "on rpm based systems. System type is {system_type}."
                )
                msg = msg.format(system_type=system_type)
                log.error(msg)
                ctx.summary["failure_reason"] = msg
                ctx.summary["status"] = "dead"
                raise ConfigError(msg)
            install_package('koji', role_remote)
            if build_id:
                build_info = get_koji_build_info(build_id, role_remote, ctx)
                version = "{ver}-{rel}.x86_64".format(
                    ver=build_info["version"],
                    rel=build_info["release"]
                )
            elif task_id:
                task_result = get_koji_task_result(task_id, role_remote, ctx)
                # This is rpm metadata rather than true build info, gathered
                # about the kernel rpm from the task results, but for the sake
                # of reusing the code below I'll still call it that.
                build_info = get_koji_task_rpm_info(
                    'kernel',
                    task_result['rpms']
                )
                # Record the task id so later steps know this rpm came
                # from a task and not a build.
                build_info["task_id"] = task_id
                version = build_info["version"]
            if need_to_install(ctx, role, version):
                need_install[role] = build_info
                need_version[role] = version
        else:
            # Default: resolve a kernel built for the configured branch/sha1
            # through the builder project (gitbuilder/shaman).
            builder = get_builder_project()(
                "kernel",
                role_config,
                ctx=ctx,
                remote=role_remote,
            )
            sha1 = builder.sha1
            log.debug('sha1 for {role} is {sha1}'.format(role=role, sha1=sha1))
            ctx.summary['{role}-kernel-sha1'.format(role=role)] = sha1
            if need_to_install(ctx, role, sha1):
                if teuth_config.use_shaman:
                    version = builder.scm_version
                else:
                    version = builder.version
                if not version:
                    raise VersionNotFoundError(builder.base_url)
                need_install[role] = sha1
                need_version[role] = version
        # enable or disable kdb if specified, otherwise do not touch
        if role_config.get('kdb') is not None:
            kdb[role] = role_config.get('kdb')
    if need_install:
        install_firmware(ctx, need_install)
        download_kernel(ctx, need_install)
        install_and_reboot(ctx, need_install)
        wait_for_reboot(ctx, need_version, timeout)
    enable_disable_kdb(ctx, kdb)
| true | true |
f71b18b542139a4c825daebbad8c706309282806 | 5,588 | py | Python | tianshou/policy/modelfree/discrete_sac.py | danagi/tianshou | b364f1a26f1b8528b01a445a488160ce2d910a1c | [
"MIT"
] | 1 | 2020-08-25T07:55:52.000Z | 2020-08-25T07:55:52.000Z | tianshou/policy/modelfree/discrete_sac.py | q-learning-trader/tianshou | c97aa4065ee8464bd5897bb86f1f81abd8e2cff9 | [
"MIT"
] | null | null | null | tianshou/policy/modelfree/discrete_sac.py | q-learning-trader/tianshou | c97aa4065ee8464bd5897bb86f1f81abd8e2cff9 | [
"MIT"
] | 1 | 2020-04-25T13:05:21.000Z | 2020-04-25T13:05:21.000Z | import torch
import numpy as np
from torch.distributions import Categorical
from typing import Any, Dict, Tuple, Union, Optional
from tianshou.policy import SACPolicy
from tianshou.data import Batch, ReplayBuffer, to_torch
class DiscreteSACPolicy(SACPolicy):
    """Implementation of SAC for Discrete Action Settings. arXiv:1910.07207.
    :param torch.nn.Module actor: the actor network following the rules in
        :class:`~tianshou.policy.BasePolicy`. (s -> logits)
    :param torch.optim.Optimizer actor_optim: the optimizer for actor network.
    :param torch.nn.Module critic1: the first critic network. (s -> Q(s))
    :param torch.optim.Optimizer critic1_optim: the optimizer for the first
        critic network.
    :param torch.nn.Module critic2: the second critic network. (s -> Q(s))
    :param torch.optim.Optimizer critic2_optim: the optimizer for the second
        critic network.
    :param float tau: param for soft update of the target network, defaults to
        0.005.
    :param float gamma: discount factor, in [0, 1], defaults to 0.99.
    :param (float, torch.Tensor, torch.optim.Optimizer) or float alpha: entropy
        regularization coefficient, default to 0.2.
        If a tuple (target_entropy, log_alpha, alpha_optim) is provided, then
        alpha is automatically tuned.
    :param bool reward_normalization: normalize the reward to Normal(0, 1),
        defaults to ``False``.
    :param bool ignore_done: ignore the done flag while training the policy,
        defaults to ``False``.
    .. seealso::
        Please refer to :class:`~tianshou.policy.BasePolicy` for more detailed
        explanation.
    """
    def __init__(
        self,
        actor: torch.nn.Module,
        actor_optim: torch.optim.Optimizer,
        critic1: torch.nn.Module,
        critic1_optim: torch.optim.Optimizer,
        critic2: torch.nn.Module,
        critic2_optim: torch.optim.Optimizer,
        tau: float = 0.005,
        gamma: float = 0.99,
        alpha: Union[
            float, Tuple[float, torch.Tensor, torch.optim.Optimizer]
        ] = 0.2,
        reward_normalization: bool = False,
        ignore_done: bool = False,
        estimation_step: int = 1,
        **kwargs: Any,
    ) -> None:
        # (-inf, inf) is the action-range argument of the continuous-SAC
        # parent; it is a placeholder here since actions are discrete.
        super().__init__(actor, actor_optim, critic1, critic1_optim, critic2,
                         critic2_optim, (-np.inf, np.inf), tau, gamma, alpha,
                         reward_normalization, ignore_done, estimation_step,
                         **kwargs)
        # Annotation only: narrow the attribute type inherited from SACPolicy.
        self._alpha: Union[float, torch.Tensor]
    def forward(  # type: ignore
        self,
        batch: Batch,
        state: Optional[Union[dict, Batch, np.ndarray]] = None,
        input: str = "obs",
        **kwargs: Any,
    ) -> Batch:
        """Map observations to a Categorical action distribution and sample."""
        obs = batch[input]
        logits, h = self.actor(obs, state=state, info=batch.info)
        dist = Categorical(logits=logits)
        act = dist.sample()
        return Batch(logits=logits, act=act, state=h, dist=dist)
    def _target_q(
        self, buffer: ReplayBuffer, indice: np.ndarray
    ) -> torch.Tensor:
        """Compute the entropy-regularized target Q for the sampled indices.

        The expectation over actions is taken analytically: action
        probabilities times the element-wise minimum of both target critics,
        plus an entropy bonus scaled by alpha.
        """
        batch = buffer[indice]  # batch.obs: s_{t+n}
        with torch.no_grad():
            obs_next_result = self(batch, input="obs_next")
            dist = obs_next_result.dist
            target_q = dist.probs * torch.min(
                self.critic1_old(batch.obs_next),
                self.critic2_old(batch.obs_next),
            )
            target_q = target_q.sum(dim=-1) + self._alpha * dist.entropy()
        return target_q
    def learn(self, batch: Batch, **kwargs: Any) -> Dict[str, float]:
        """Update both critics, the actor and (optionally) alpha.

        Returns a dict of scalar losses for logging.
        """
        weight = batch.pop("weight", 1.0)
        target_q = batch.returns.flatten()
        # Column vector of action indices for gather() on the critics' output.
        act = to_torch(
            batch.act[:, np.newaxis], device=target_q.device, dtype=torch.long)
        # critic 1: weighted MSE between Q(s, a) and the n-step target
        current_q1 = self.critic1(batch.obs).gather(1, act).flatten()
        td1 = current_q1 - target_q
        critic1_loss = (td1.pow(2) * weight).mean()
        self.critic1_optim.zero_grad()
        critic1_loss.backward()
        self.critic1_optim.step()
        # critic 2
        current_q2 = self.critic2(batch.obs).gather(1, act).flatten()
        td2 = current_q2 - target_q
        critic2_loss = (td2.pow(2) * weight).mean()
        self.critic2_optim.zero_grad()
        critic2_loss.backward()
        self.critic2_optim.step()
        batch.weight = (td1 + td2) / 2.0  # prio-buffer
        # actor: maximize entropy plus the expected min-critic Q
        dist = self(batch).dist
        entropy = dist.entropy()
        with torch.no_grad():
            current_q1a = self.critic1(batch.obs)
            current_q2a = self.critic2(batch.obs)
            q = torch.min(current_q1a, current_q2a)
        actor_loss = -(self._alpha * entropy
                       + (dist.probs * q).sum(dim=-1)).mean()
        self.actor_optim.zero_grad()
        actor_loss.backward()
        self.actor_optim.step()
        # Automatic temperature tuning (only when alpha was given as a tuple).
        if self._is_auto_alpha:
            log_prob = -entropy.detach() + self._target_entropy
            alpha_loss = -(self._log_alpha * log_prob).mean()
            self._alpha_optim.zero_grad()
            alpha_loss.backward()
            self._alpha_optim.step()
            self._alpha = self._log_alpha.detach().exp()
        self.sync_weight()
        result = {
            "loss/actor": actor_loss.item(),
            "loss/critic1": critic1_loss.item(),
            "loss/critic2": critic2_loss.item(),
        }
        if self._is_auto_alpha:
            result["loss/alpha"] = alpha_loss.item()
            result["alpha"] = self._alpha.item()  # type: ignore
        return result
| 37.503356 | 79 | 0.607552 | import torch
import numpy as np
from torch.distributions import Categorical
from typing import Any, Dict, Tuple, Union, Optional
from tianshou.policy import SACPolicy
from tianshou.data import Batch, ReplayBuffer, to_torch
class DiscreteSACPolicy(SACPolicy):
    """Implementation of SAC for Discrete Action Settings (arXiv:1910.07207).

    Reuses the continuous SACPolicy machinery but replaces the Gaussian
    policy with a Categorical distribution over actions and computes critic
    targets as an analytic expectation over the discrete action set.
    """
    def __init__(
        self,
        actor: torch.nn.Module,
        actor_optim: torch.optim.Optimizer,
        critic1: torch.nn.Module,
        critic1_optim: torch.optim.Optimizer,
        critic2: torch.nn.Module,
        critic2_optim: torch.optim.Optimizer,
        tau: float = 0.005,
        gamma: float = 0.99,
        alpha: Union[
            float, Tuple[float, torch.Tensor, torch.optim.Optimizer]
        ] = 0.2,
        reward_normalization: bool = False,
        ignore_done: bool = False,
        estimation_step: int = 1,
        **kwargs: Any,
    ) -> None:
        # (-inf, inf) fills the parent's action-range parameter; it is a
        # placeholder here since actions are discrete.
        super().__init__(actor, actor_optim, critic1, critic1_optim, critic2,
                         critic2_optim, (-np.inf, np.inf), tau, gamma, alpha,
                         reward_normalization, ignore_done, estimation_step,
                         **kwargs)
        # Annotation only: narrow the attribute type inherited from SACPolicy.
        self._alpha: Union[float, torch.Tensor]
    def forward(
        self,
        batch: Batch,
        state: Optional[Union[dict, Batch, np.ndarray]] = None,
        input: str = "obs",
        **kwargs: Any,
    ) -> Batch:
        """Map observations to a Categorical action distribution and sample."""
        obs = batch[input]
        logits, h = self.actor(obs, state=state, info=batch.info)
        dist = Categorical(logits=logits)
        act = dist.sample()
        return Batch(logits=logits, act=act, state=h, dist=dist)
    def _target_q(
        self, buffer: ReplayBuffer, indice: np.ndarray
    ) -> torch.Tensor:
        """Compute the entropy-regularized target Q for the sampled indices.

        The expectation over actions is taken analytically: action
        probabilities times the element-wise minimum of both target critics,
        plus an entropy bonus scaled by alpha.
        """
        batch = buffer[indice]  # batch.obs holds s_{t+n}
        with torch.no_grad():
            obs_next_result = self(batch, input="obs_next")
            dist = obs_next_result.dist
            target_q = dist.probs * torch.min(
                self.critic1_old(batch.obs_next),
                self.critic2_old(batch.obs_next),
            )
            target_q = target_q.sum(dim=-1) + self._alpha * dist.entropy()
        return target_q
    def learn(self, batch: Batch, **kwargs: Any) -> Dict[str, float]:
        """Update both critics, the actor and (optionally) alpha.

        Returns a dict of scalar losses for logging.
        """
        weight = batch.pop("weight", 1.0)
        target_q = batch.returns.flatten()
        # Column vector of action indices for gather() on the critics' output.
        act = to_torch(
            batch.act[:, np.newaxis], device=target_q.device, dtype=torch.long)
        # critic 1: weighted MSE between Q(s, a) and the n-step target
        current_q1 = self.critic1(batch.obs).gather(1, act).flatten()
        td1 = current_q1 - target_q
        critic1_loss = (td1.pow(2) * weight).mean()
        self.critic1_optim.zero_grad()
        critic1_loss.backward()
        self.critic1_optim.step()
        # critic 2
        current_q2 = self.critic2(batch.obs).gather(1, act).flatten()
        td2 = current_q2 - target_q
        critic2_loss = (td2.pow(2) * weight).mean()
        self.critic2_optim.zero_grad()
        critic2_loss.backward()
        self.critic2_optim.step()
        batch.weight = (td1 + td2) / 2.0  # feedback for a prioritized buffer
        # actor: maximize entropy plus the expected min-critic Q
        dist = self(batch).dist
        entropy = dist.entropy()
        with torch.no_grad():
            current_q1a = self.critic1(batch.obs)
            current_q2a = self.critic2(batch.obs)
            q = torch.min(current_q1a, current_q2a)
        actor_loss = -(self._alpha * entropy
                       + (dist.probs * q).sum(dim=-1)).mean()
        self.actor_optim.zero_grad()
        actor_loss.backward()
        self.actor_optim.step()
        # Automatic temperature tuning (only when alpha was given as a tuple).
        if self._is_auto_alpha:
            log_prob = -entropy.detach() + self._target_entropy
            alpha_loss = -(self._log_alpha * log_prob).mean()
            self._alpha_optim.zero_grad()
            alpha_loss.backward()
            self._alpha_optim.step()
            self._alpha = self._log_alpha.detach().exp()
        self.sync_weight()
        result = {
            "loss/actor": actor_loss.item(),
            "loss/critic1": critic1_loss.item(),
            "loss/critic2": critic2_loss.item(),
        }
        if self._is_auto_alpha:
            result["loss/alpha"] = alpha_loss.item()
            result["alpha"] = self._alpha.item()
        return result
| true | true |
f71b19a1735a916e1c2e81907fdea6d406f03d8f | 6,275 | py | Python | cis-audit.py | flokoe/cis-benchmarks-audit | 85d923cbff9ffe6cede964c3bd2f3ea513944c8d | [
"MIT"
] | null | null | null | cis-audit.py | flokoe/cis-benchmarks-audit | 85d923cbff9ffe6cede964c3bd2f3ea513944c8d | [
"MIT"
] | null | null | null | cis-audit.py | flokoe/cis-benchmarks-audit | 85d923cbff9ffe6cede964c3bd2f3ea513944c8d | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This script runs tests on the system to check for compliance against different CIS Benchmarks.
No changes are made to system files by this script. Audit only.
License: MIT
"""
from argparse import ArgumentParser
from datetime import datetime
from time import sleep
import json, subprocess, pathlib
__author__ = 'Florian Köhler'
__version__ = '0.1.0'
__license__ = 'MIT'
def parse_cli(argv=None):
    """Parse command line arguments.

    :param argv: optional list of argument strings; when ``None`` argparse
        falls back to ``sys.argv[1:]`` (its standard behaviour).  Accepting
        the list as a parameter keeps the function unit-testable while
        remaining backward compatible.
    """
    parser = ArgumentParser(description='This script runs tests on the system to check for compliance against different CIS Benchmarks. No changes are made to system files by this script. Audit only.')
    parser.add_argument('--version', action='version', version=__version__)
    parser.add_argument('--benchmark', '-b', type=pathlib.Path, required=True, metavar='FILE', help='Path to benchmark file (Required).')
    parser.add_argument('--level', '-l', type=int, choices=[1, 2], default=None, help='Run tests for the specified level only. Defaults to both.')
    parser.add_argument('--include', '-i', default=None, metavar='LIST', help='Comma separated list of tests to include.')
    parser.add_argument('--exclude', '-e', default=None, metavar='LIST', help='Comma separated list of tests to exclude.')
    return parser.parse_args(argv)
class Recommendation:
    """A single benchmark entry: a chapter heading or a runnable test."""

    def __init__(self, id, description, scored, level, type, test_input, expected_output):
        self.id = id
        self.description = description
        self.scored = scored                    # only scored tests are executed
        self.level = level                      # list of applicable CIS levels
        self.type = type                        # test-runner key, or 'chapter'
        self.test_input = test_input            # command line to execute
        self.expected_output = expected_output  # substring expected in output
        self.error = None                       # True once a run has errored
        self.passed = None                      # True/False after execution

    def match(self, level, include, exclude):
        """Return True if this recommendation matches the CLI selection.

        *level* restricts to one CIS level; *include*/*exclude* are the
        comma-separated ID lists from --include/--exclude.  (Both lists were
        previously accepted on the command line but silently ignored.)
        """
        if include is not None:
            if self.id not in [item.strip() for item in include.split(',')]:
                return False
        if exclude is not None:
            if self.id in [item.strip() for item in exclude.split(',')]:
                return False
        if not level or level in self.level:
            return True
        return False
class Benchmark:
    """Load a benchmark definition, run its scored tests and print a report.

    The constructor drives the whole workflow: it reads the JSON benchmark
    file referenced by ``args.benchmark``, wraps each entry in a
    ``Recommendation``, filters down to the scored non-chapter entries that
    match the CLI selection, executes them and prints a results table.
    """

    def __init__(self, args):
        self.start_time = datetime.now()
        self.recommendations = []
        self.filtered_tests = []
        # Lazily computed counters; see the properties below.
        self._total_number_tests = None
        self._number_passed_tests = None
        self._number_of_errors = None
        # Dispatch table mapping a recommendation's "type" to its runner.
        self.test_types = {
            "output_contains": self.test_output_contains,
        }
        print("Loading benchmark data...\n")
        with open(args.benchmark, 'r') as benchmark_file:
            benchmark_data = json.loads(benchmark_file.read())
        self.name = benchmark_data['name']
        self.version = benchmark_data['version']
        for recommendation in benchmark_data['recommendations']:
            self.add_recommendation(recommendation)
        # Only scored, non-chapter recommendations that match the CLI
        # level/include/exclude selection are actually executed.
        for recommendation in self.recommendations:
            if recommendation.type != 'chapter' and recommendation.scored:
                if recommendation.match(args.level, args.include, args.exclude):
                    self.filtered_tests.append(recommendation)
        self.execute_tests()

    def add_recommendation(self, recommendation):
        """Wrap one raw benchmark entry in a Recommendation object."""
        self.recommendations.append(Recommendation(
            recommendation['id'], recommendation['description'],
            recommendation['scored'], recommendation['level'],
            recommendation['type'], recommendation['test_input'],
            recommendation['expected_output']))

    @property
    def total_number_tests(self):
        """Number of tests selected for execution (cached)."""
        if self._total_number_tests is None:
            self._total_number_tests = len(self.filtered_tests)
        return self._total_number_tests

    @property
    def number_passed_tests(self):
        """Number of executed tests that passed (cached)."""
        if self._number_passed_tests is None:
            self._number_passed_tests = sum(
                1 for test in self.filtered_tests if test.passed)
        return self._number_passed_tests

    @property
    def number_of_errors(self):
        """Number of executed tests that raised an error (cached)."""
        if self._number_of_errors is None:
            self._number_of_errors = sum(
                1 for test in self.filtered_tests if test.error)
        return self._number_of_errors

    def execute_tests(self):
        """Run each filtered test with a progress indicator, then report."""
        print(f"0 of {self.total_number_tests} tests completed.", end="\r")
        for index, test in enumerate(self.filtered_tests, start=1):
            execute_test = self.test_types.get(test.type)
            if execute_test is None:
                # Unknown test type: record an error instead of crashing
                # (previously this called None and raised TypeError).
                test.error = True
                test.passed = False
            else:
                execute_test(test)
            if index < self.total_number_tests:
                print(f"{index} of {self.total_number_tests} tests completed.", end="\r")
            else:
                print(f"{index} of {self.total_number_tests} tests completed.\n")
        self.output_results()

    def output_results(self):
        """Print the per-test result table followed by a summary line."""
        if not self.filtered_tests:
            # max() below raises ValueError on an empty sequence.
            print("No tests matched the requested selection.")
            return
        elapsed = (datetime.now() - self.start_time).total_seconds()
        heading = f"CIS {self.name} Benchmark v{self.version} Results"
        heading_separator = '-' * len(heading)
        id_padding = len(max([str(test.id) for test in self.filtered_tests], key=len))
        desc_padding = len(max([test.description for test in self.filtered_tests], key=len))
        result_heading = 'ID'.ljust(id_padding, ' ') + ' ' + 'Description'.ljust(desc_padding, ' ') + ' Scored' + ' Level' + ' Result'
        result_separator = '--'.ljust(id_padding, ' ') + ' ' + '-----------'.ljust(desc_padding, ' ') + ' ------' + ' -----' + ' ------'
        print(heading)
        print(heading_separator)
        print(result_heading)
        print(result_separator)
        print("")
        for test in self.filtered_tests:
            print(f"{test.id.ljust(id_padding, ' ')} {test.description.ljust(desc_padding, ' ')} {'Yes ' if test.scored else 'No '} {'1, 2 ' if len(test.level) == 2 else str(test.level[0]).ljust(5, ' ')} {'Error ' if test.error else 'Pass ' if test.passed else 'Fail '}")
        print("")
        # Nothing is ever skipped by this implementation, hence the fixed 0.
        print(f"Passed {self.number_passed_tests} of {self.total_number_tests} tests in {elapsed:.2f} seconds (0 Skipped, {self.number_of_errors} Errors)")

    def test_output_contains(self, test):
        """Run the test command; pass if its output contains the expected
        substring, flag an error if the command cannot be executed."""
        command = test.test_input.split(' ')
        try:
            output = subprocess.check_output(command, universal_newlines=True)
            test.passed = test.expected_output in output
        except (OSError, subprocess.SubprocessError):
            # Missing binary or non-zero exit status: count as an error.
            # (A bare ``except`` here also swallowed KeyboardInterrupt.)
            test.error = True
            test.passed = False
if __name__ == '__main__':
    # Entry point: parse the CLI and let Benchmark drive the whole run.
    Benchmark(parse_cli())
| 38.734568 | 282 | 0.637131 |
from argparse import ArgumentParser
from datetime import datetime
from time import sleep
import json, subprocess, pathlib
__author__ = 'Florian Köhler'
__version__ = '0.1.0'
__license__ = 'MIT'
def parse_cli(argv=None):
    """Parse command line arguments.

    :param argv: optional list of argument strings; when ``None`` argparse
        falls back to ``sys.argv[1:]`` (its standard behaviour).  Accepting
        the list as a parameter keeps the function unit-testable while
        remaining backward compatible.
    """
    parser = ArgumentParser(description='This script runs tests on the system to check for compliance against different CIS Benchmarks. No changes are made to system files by this script. Audit only.')
    parser.add_argument('--version', action='version', version=__version__)
    parser.add_argument('--benchmark', '-b', type=pathlib.Path, required=True, metavar='FILE', help='Path to benchmark file (Required).')
    parser.add_argument('--level', '-l', type=int, choices=[1, 2], default=None, help='Run tests for the specified level only. Defaults to both.')
    parser.add_argument('--include', '-i', default=None, metavar='LIST', help='Comma separated list of tests to include.')
    parser.add_argument('--exclude', '-e', default=None, metavar='LIST', help='Comma separated list of tests to exclude.')
    return parser.parse_args(argv)
class Recommendation:
    """A single benchmark entry: a chapter heading or a runnable test."""

    def __init__(self, id, description, scored, level, type, test_input, expected_output):
        self.id = id
        self.description = description
        self.scored = scored                    # only scored tests are executed
        self.level = level                      # list of applicable CIS levels
        self.type = type                        # test-runner key, or 'chapter'
        self.test_input = test_input            # command line to execute
        self.expected_output = expected_output  # substring expected in output
        self.error = None                       # True once a run has errored
        self.passed = None                      # True/False after execution

    def match(self, level, include, exclude):
        """Return True if this recommendation matches the CLI selection.

        *level* restricts to one CIS level; *include*/*exclude* are the
        comma-separated ID lists from --include/--exclude.  (Both lists were
        previously accepted on the command line but silently ignored.)
        """
        if include is not None:
            if self.id not in [item.strip() for item in include.split(',')]:
                return False
        if exclude is not None:
            if self.id in [item.strip() for item in exclude.split(',')]:
                return False
        if not level or level in self.level:
            return True
        return False
class Benchmark:
    """Load a benchmark definition, run its scored tests and print a report.

    The constructor drives the whole workflow: it reads the JSON benchmark
    file referenced by ``args.benchmark``, wraps each entry in a
    ``Recommendation``, filters down to the scored non-chapter entries that
    match the CLI selection, executes them and prints a results table.
    """

    def __init__(self, args):
        self.start_time = datetime.now()
        self.recommendations = []
        self.filtered_tests = []
        # Lazily computed counters; see the properties below.
        self._total_number_tests = None
        self._number_passed_tests = None
        self._number_of_errors = None
        # Dispatch table mapping a recommendation's "type" to its runner.
        self.test_types = {
            "output_contains": self.test_output_contains,
        }
        print("Loading benchmark data...\n")
        with open(args.benchmark, 'r') as benchmark_file:
            benchmark_data = json.loads(benchmark_file.read())
        self.name = benchmark_data['name']
        self.version = benchmark_data['version']
        for recommendation in benchmark_data['recommendations']:
            self.add_recommendation(recommendation)
        # Only scored, non-chapter recommendations that match the CLI
        # level/include/exclude selection are actually executed.
        for recommendation in self.recommendations:
            if recommendation.type != 'chapter' and recommendation.scored:
                if recommendation.match(args.level, args.include, args.exclude):
                    self.filtered_tests.append(recommendation)
        self.execute_tests()

    def add_recommendation(self, recommendation):
        """Wrap one raw benchmark entry in a Recommendation object."""
        self.recommendations.append(Recommendation(
            recommendation['id'], recommendation['description'],
            recommendation['scored'], recommendation['level'],
            recommendation['type'], recommendation['test_input'],
            recommendation['expected_output']))

    @property
    def total_number_tests(self):
        """Number of tests selected for execution (cached)."""
        if self._total_number_tests is None:
            self._total_number_tests = len(self.filtered_tests)
        return self._total_number_tests

    @property
    def number_passed_tests(self):
        """Number of executed tests that passed (cached)."""
        if self._number_passed_tests is None:
            self._number_passed_tests = sum(
                1 for test in self.filtered_tests if test.passed)
        return self._number_passed_tests

    @property
    def number_of_errors(self):
        """Number of executed tests that raised an error (cached)."""
        if self._number_of_errors is None:
            self._number_of_errors = sum(
                1 for test in self.filtered_tests if test.error)
        return self._number_of_errors

    def execute_tests(self):
        """Run each filtered test with a progress indicator, then report."""
        print(f"0 of {self.total_number_tests} tests completed.", end="\r")
        for index, test in enumerate(self.filtered_tests, start=1):
            execute_test = self.test_types.get(test.type)
            if execute_test is None:
                # Unknown test type: record an error instead of crashing
                # (previously this called None and raised TypeError).
                test.error = True
                test.passed = False
            else:
                execute_test(test)
            if index < self.total_number_tests:
                print(f"{index} of {self.total_number_tests} tests completed.", end="\r")
            else:
                print(f"{index} of {self.total_number_tests} tests completed.\n")
        self.output_results()

    def output_results(self):
        """Print the per-test result table followed by a summary line."""
        if not self.filtered_tests:
            # max() below raises ValueError on an empty sequence.
            print("No tests matched the requested selection.")
            return
        elapsed = (datetime.now() - self.start_time).total_seconds()
        heading = f"CIS {self.name} Benchmark v{self.version} Results"
        heading_separator = '-' * len(heading)
        id_padding = len(max([str(test.id) for test in self.filtered_tests], key=len))
        desc_padding = len(max([test.description for test in self.filtered_tests], key=len))
        result_heading = 'ID'.ljust(id_padding, ' ') + ' ' + 'Description'.ljust(desc_padding, ' ') + ' Scored' + ' Level' + ' Result'
        result_separator = '--'.ljust(id_padding, ' ') + ' ' + '-----------'.ljust(desc_padding, ' ') + ' ------' + ' -----' + ' ------'
        print(heading)
        print(heading_separator)
        print(result_heading)
        print(result_separator)
        print("")
        for test in self.filtered_tests:
            print(f"{test.id.ljust(id_padding, ' ')} {test.description.ljust(desc_padding, ' ')} {'Yes ' if test.scored else 'No '} {'1, 2 ' if len(test.level) == 2 else str(test.level[0]).ljust(5, ' ')} {'Error ' if test.error else 'Pass ' if test.passed else 'Fail '}")
        print("")
        # Nothing is ever skipped by this implementation, hence the fixed 0.
        print(f"Passed {self.number_passed_tests} of {self.total_number_tests} tests in {elapsed:.2f} seconds (0 Skipped, {self.number_of_errors} Errors)")

    def test_output_contains(self, test):
        """Run the test command; pass if its output contains the expected
        substring, flag an error if the command cannot be executed."""
        command = test.test_input.split(' ')
        try:
            output = subprocess.check_output(command, universal_newlines=True)
            test.passed = test.expected_output in output
        except (OSError, subprocess.SubprocessError):
            # Missing binary or non-zero exit status: count as an error.
            # (A bare ``except`` here also swallowed KeyboardInterrupt.)
            test.error = True
            test.passed = False
if __name__ == '__main__':
    # Entry point: parse the CLI and let Benchmark drive the whole run.
    Benchmark(parse_cli())
| true | true |
f71b1ab7a5127da844d3de34669b1e4ea7ec03cf | 4,069 | py | Python | tests/components/smartthings/test_lock.py | pcaston/core | e74d946cef7a9d4e232ae9e0ba150d18018cfe33 | [
"Apache-2.0"
] | 1 | 2021-07-08T20:09:55.000Z | 2021-07-08T20:09:55.000Z | tests/components/smartthings/test_lock.py | pcaston/core | e74d946cef7a9d4e232ae9e0ba150d18018cfe33 | [
"Apache-2.0"
] | 47 | 2021-02-21T23:43:07.000Z | 2022-03-31T06:07:10.000Z | tests/components/smartthings/test_lock.py | OpenPeerPower/core | f673dfac9f2d0c48fa30af37b0a99df9dd6640ee | [
"Apache-2.0"
] | null | null | null | """
Test for the SmartThings lock platform.
The only mocking required is of the underlying SmartThings API object so
real HTTP calls are not initiated during testing.
"""
from pysmartthings import Attribute, Capability
from pysmartthings.device import Status
from openpeerpower.components.lock import DOMAIN as LOCK_DOMAIN
from openpeerpower.components.smartthings.const import DOMAIN, SIGNAL_SMARTTHINGS_UPDATE
from openpeerpower.config_entries import ConfigEntryState
from openpeerpower.const import STATE_UNAVAILABLE
from openpeerpower.helpers import device_registry as dr, entity_registry as er
from openpeerpower.helpers.dispatcher import async_dispatcher_send
from .conftest import setup_platform
async def test_entity_and_device_attributes(opp, device_factory):
    """Test the attributes of the entity are correct."""
    lock = device_factory(
        "Lock_1", [Capability.lock], {Attribute.lock: "unlocked"}
    )
    await setup_platform(opp, LOCK_DOMAIN, devices=[lock])

    # The entity registry entry is keyed by the SmartThings device id.
    entity_entry = er.async_get(opp).async_get("lock.lock_1")
    assert entity_entry
    assert entity_entry.unique_id == lock.device_id

    # The device registry entry mirrors the SmartThings device metadata.
    device_entry = dr.async_get(opp).async_get_device({(DOMAIN, lock.device_id)})
    assert device_entry
    assert device_entry.name == lock.label
    assert device_entry.model == lock.device_type_name
    assert device_entry.manufacturer == "Unavailable"
async def test_lock(opp, device_factory):
    """Test the lock locks successfully."""
    # Create an unlocked device carrying the extra lock attributes.
    lock = device_factory("Lock_1", [Capability.lock])
    lock.status.attributes[Attribute.lock] = Status(
        "unlocked",
        None,
        {
            "method": "Manual",
            "codeId": None,
            "codeName": "Code 1",
            "lockName": "Front Door",
            "usedCode": "Code 2",
        },
    )
    await setup_platform(opp, LOCK_DOMAIN, devices=[lock])
    # Invoke the lock service against the entity.
    await opp.services.async_call(
        LOCK_DOMAIN, "lock", {"entity_id": "lock.lock_1"}, blocking=True
    )
    # The entity state and attributes should reflect the locked status.
    state = opp.states.get("lock.lock_1")
    assert state is not None
    assert state.state == "locked"
    expected_attributes = {
        "method": "Manual",
        "lock_state": "locked",
        "code_name": "Code 1",
        "used_code": "Code 2",
        "lock_name": "Front Door",
    }
    for key, value in expected_attributes.items():
        assert state.attributes[key] == value
    # codeId was None, so it must not surface as an attribute.
    assert "code_id" not in state.attributes
async def test_unlock(opp, device_factory):
    """Test the lock unlocks successfully."""
    # Start from a locked device.
    lock = device_factory("Lock_1", [Capability.lock], {Attribute.lock: "locked"})
    await setup_platform(opp, LOCK_DOMAIN, devices=[lock])
    # Invoke the unlock service against the entity.
    await opp.services.async_call(
        LOCK_DOMAIN, "unlock", {"entity_id": "lock.lock_1"}, blocking=True
    )
    # The entity should now report the unlocked state.
    unlocked_state = opp.states.get("lock.lock_1")
    assert unlocked_state is not None
    assert unlocked_state.state == "unlocked"
async def test_update_from_signal(opp, device_factory):
    """Test the lock updates when receiving a signal."""
    # Start unlocked, then lock the underlying SmartThings device directly.
    lock = device_factory("Lock_1", [Capability.lock], {Attribute.lock: "unlocked"})
    await setup_platform(opp, LOCK_DOMAIN, devices=[lock])
    await lock.lock(True)
    # Broadcast the update signal and let the event loop settle.
    async_dispatcher_send(opp, SIGNAL_SMARTTHINGS_UPDATE, [lock.device_id])
    await opp.async_block_till_done()
    # The entity should have refreshed itself to the new device state.
    updated_state = opp.states.get("lock.lock_1")
    assert updated_state is not None
    assert updated_state.state == "locked"
async def test_unload_config_entry(opp, device_factory):
    """Test the lock is removed when the config entry is unloaded."""
    lock = device_factory("Lock_1", [Capability.lock], {Attribute.lock: "locked"})
    config_entry = await setup_platform(opp, LOCK_DOMAIN, devices=[lock])
    config_entry.state = ConfigEntryState.LOADED
    # Unloading the lock platform should make the entity unavailable.
    await opp.config_entries.async_forward_entry_unload(config_entry, "lock")
    assert opp.states.get("lock.lock_1").state == STATE_UNAVAILABLE
| 36.330357 | 88 | 0.706562 | from pysmartthings import Attribute, Capability
from pysmartthings.device import Status
from openpeerpower.components.lock import DOMAIN as LOCK_DOMAIN
from openpeerpower.components.smartthings.const import DOMAIN, SIGNAL_SMARTTHINGS_UPDATE
from openpeerpower.config_entries import ConfigEntryState
from openpeerpower.const import STATE_UNAVAILABLE
from openpeerpower.helpers import device_registry as dr, entity_registry as er
from openpeerpower.helpers.dispatcher import async_dispatcher_send
from .conftest import setup_platform
async def test_entity_and_device_attributes(opp, device_factory):
device = device_factory("Lock_1", [Capability.lock], {Attribute.lock: "unlocked"})
entity_registry = er.async_get(opp)
device_registry = dr.async_get(opp)
await setup_platform(opp, LOCK_DOMAIN, devices=[device])
entry = entity_registry.async_get("lock.lock_1")
assert entry
assert entry.unique_id == device.device_id
entry = device_registry.async_get_device({(DOMAIN, device.device_id)})
assert entry
assert entry.name == device.label
assert entry.model == device.device_type_name
assert entry.manufacturer == "Unavailable"
async def test_lock(opp, device_factory):
device = device_factory("Lock_1", [Capability.lock])
device.status.attributes[Attribute.lock] = Status(
"unlocked",
None,
{
"method": "Manual",
"codeId": None,
"codeName": "Code 1",
"lockName": "Front Door",
"usedCode": "Code 2",
},
)
await setup_platform(opp, LOCK_DOMAIN, devices=[device])
await opp.services.async_call(
LOCK_DOMAIN, "lock", {"entity_id": "lock.lock_1"}, blocking=True
)
state = opp.states.get("lock.lock_1")
assert state is not None
assert state.state == "locked"
assert state.attributes["method"] == "Manual"
assert state.attributes["lock_state"] == "locked"
assert state.attributes["code_name"] == "Code 1"
assert state.attributes["used_code"] == "Code 2"
assert state.attributes["lock_name"] == "Front Door"
assert "code_id" not in state.attributes
async def test_unlock(opp, device_factory):
device = device_factory("Lock_1", [Capability.lock], {Attribute.lock: "locked"})
await setup_platform(opp, LOCK_DOMAIN, devices=[device])
await opp.services.async_call(
LOCK_DOMAIN, "unlock", {"entity_id": "lock.lock_1"}, blocking=True
)
state = opp.states.get("lock.lock_1")
assert state is not None
assert state.state == "unlocked"
async def test_update_from_signal(opp, device_factory):
device = device_factory("Lock_1", [Capability.lock], {Attribute.lock: "unlocked"})
await setup_platform(opp, LOCK_DOMAIN, devices=[device])
await device.lock(True)
async_dispatcher_send(opp, SIGNAL_SMARTTHINGS_UPDATE, [device.device_id])
await opp.async_block_till_done()
state = opp.states.get("lock.lock_1")
assert state is not None
assert state.state == "locked"
async def test_unload_config_entry(opp, device_factory):
device = device_factory("Lock_1", [Capability.lock], {Attribute.lock: "locked"})
config_entry = await setup_platform(opp, LOCK_DOMAIN, devices=[device])
config_entry.state = ConfigEntryState.LOADED
await opp.config_entries.async_forward_entry_unload(config_entry, "lock")
assert opp.states.get("lock.lock_1").state == STATE_UNAVAILABLE
| true | true |
f71b1af96cc4062163b2c26f68ca4f8e5e16759c | 21,877 | py | Python | scripts/linters/general_purpose_linter.py | ParitoshKabra/oppia | a8945a5ff28fcbe4eaca1e22d99ed4d3e82f2dca | [
"Apache-2.0"
] | 2 | 2022-02-24T14:06:42.000Z | 2022-02-24T14:11:05.000Z | scripts/linters/general_purpose_linter.py | ParitoshKabra/oppia | a8945a5ff28fcbe4eaca1e22d99ed4d3e82f2dca | [
"Apache-2.0"
] | null | null | null | scripts/linters/general_purpose_linter.py | ParitoshKabra/oppia | a8945a5ff28fcbe4eaca1e22d99ed4d3e82f2dca | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lint checks used by all the linters."""
from __future__ import annotations
import os
import re
from . import js_ts_linter
from . import warranted_angular_security_bypasses
from .. import build
from .. import common
from .. import concurrent_task_utils
# Glob-style paths that every lint check skips: third-party code, build
# artifacts, binary assets and generated sources.
EXCLUDED_PATHS = (
    'third_party/*', 'build/*', '.git/*', '*.pyc', 'CHANGELOG',
    'integrations/*', 'integrations_dev/*', '*.svg', '*.gif', '*.png',
    '*.webp', '*.zip', '*.ico', '*.jpg', '*.min.js', 'backend_prod_files/*',
    'assets/scripts/*', 'core/domain/proto/*.py', 'core/tests/data/*',
    'core/tests/build_sources/*', '*.mp3', '*.mp4', 'node_modules/*',
    'typings/*', 'local_compiled_js/*', 'webpack_bundles/*',
    'core/tests/services_sources/*', 'core/tests/release_sources/tmp_unzip.zip',
    'scripts/linters/test_files/*', 'proto_files/*',
    'core/tests/release_sources/tmp_unzip.tar.gz',
    'core/templates/combined-tests.spec.ts',
    'core/templates/css/oppia-material.css',
    'core/templates/google-analytics.initializer.ts',
    'extensions/classifiers/proto/*', '*.rtl.css',
    '%s/*' % js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH)

# Auto-generated files, exempt from the mandatory-pattern checks.
GENERATED_FILE_PATHS = (
    'core/templates/expressions/parser.js',)

# Configuration files, exempt from the mandatory-pattern checks.
CONFIG_FILE_PATHS = (
    'core/tests/.browserstack.env.example',
    'core/tests/protractor.conf.js',
    'core/tests/karma.conf.ts',
    'core/templates/mathjaxConfig.ts',
    'assets/constants.ts',
    'assets/rich_text_components_definitions.ts',
    'webpack.config.ts',
    'webpack.dev.config.ts',
    'webpack.prod.config.ts')

# Literal substrings that must not appear in constants.ts before commit.
BAD_STRINGS_CONSTANTS = {
    '"DEV_MODE": false': {
        'message': 'Please set the DEV_MODE variable in constants.ts '
                   'to true before committing.',
        'excluded_files': ()
    },
    '"EMULATOR_MODE": false': {
        'message': 'Please set the EMULATOR_MODE variable in constants.ts '
                   'to true before committing.',
        'excluded_files': ()
    }
}

# Literal substrings that are disallowed in every linted file.
BAD_PATTERNS = {
    '\t': {
        'message': 'Please use spaces instead of tabs.',
        'excluded_files': (),
        'excluded_dirs': (
            'assets/i18n/', 'core/tests/build_sources/assets/')},
    '\r': {
        'message': 'Please make sure all files only have LF endings (no CRLF).',
        'excluded_files': (),
        'excluded_dirs': ()},
    '<<<<<<<': {
        'message': 'Please fully resolve existing merge conflicts.',
        'excluded_files': (),
        'excluded_dirs': ()},
    '>>>>>>>': {
        'message': 'Please fully resolve existing merge conflicts.',
        'excluded_files': (),
        'excluded_dirs': ()},
    'glyphicon': {
        'message': 'Please use equivalent material-icons '
                   'instead of glyphicons.',
        'excluded_files': (),
        'excluded_dirs': ()}
}

# Regular expressions that are disallowed in every linted file.
BAD_PATTERNS_REGEXP = [
    {
        'regexp': re.compile(r'TODO[^\(]*[^\)][^:]*[^A-Z]+[^\w]*$'),
        'message': 'Please link TODO comments to an issue '
                   'in the format TODO(#issuenum): XXX. ',
        'excluded_files': (),
        'excluded_dirs': ()
    }
]

# Regular expressions that MUST appear in the matching file types.
MANDATORY_PATTERNS_REGEXP = [
    {
        'regexp': re.compile(
            r'Copyright \d{4} The Oppia Authors\. All Rights Reserved\.'),
        'message': 'Please ensure this file should contain a proper '
                   'copyright notice.',
        'included_types': ('.py', '.js', '.sh', '.ts'),
        'excluded_files': GENERATED_FILE_PATHS + CONFIG_FILE_PATHS + (
            '__init__.py', ),
        'excluded_dirs': EXCLUDED_PATHS
    },
    {
        'regexp': re.compile('from __future__ import annotations'),
        'message': 'Please ensure this file should contain annotations '
                   'future import.',
        # NOTE: this must be a one-element tuple, not a bare string.
        # Consumers iterate over 'included_types' and call
        # filepath.endswith() on each element, so the plain string
        # ('.py') would be treated as the characters '.', 'p' and 'y'.
        'included_types': ('.py',),
        'excluded_files': GENERATED_FILE_PATHS + CONFIG_FILE_PATHS + (
            '__init__.py',),
        'excluded_dirs': EXCLUDED_PATHS
    }
]

# Regular expressions that MUST appear in JS/TS files.
MANDATORY_PATTERNS_JS_REGEXP = [
    {
        'regexp': re.compile(r'^\s\*\s@fileoverview\s[a-zA-Z0-9_]+'),
        'message': 'Please ensure this file should contain a file '
                   'overview i.e. a short description of the file.',
        'included_types': ('.js', '.ts'),
        'excluded_files': GENERATED_FILE_PATHS + CONFIG_FILE_PATHS,
        'excluded_dirs': EXCLUDED_PATHS
    }
]

# Per-line regexp checks applied only to HTML files.
BAD_LINE_PATTERNS_HTML_REGEXP = [
    {
        'regexp': re.compile(r'text\/ng-template'),
        'message': 'The directives must be directly referenced.',
        'excluded_files': (),
        'excluded_dirs': (
            'extensions/answer_summarizers/',
            'extensions/classifiers/',
            'extensions/objects/',
            'extensions/value_generators/')
    },
    {
        'regexp': re.compile(r'[ \t]+$'),
        'message': 'There should not be any trailing whitespaces.',
        'excluded_files': (),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'\$parent'),
        'message': 'Please do not access parent properties ' +
                   'using $parent. Use the scope object ' +
                   'for this purpose.',
        'excluded_files': (),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'\s+style\s*=\s*'),
        'message': 'Please do not use inline styling.',
        'excluded_files': (),
        'excluded_dirs': ()
    }
]

# Per-line regexp checks applied only to Python files.
BAD_PATTERNS_PYTHON_REGEXP = [
    {
        'regexp': re.compile(r'__author__'),
        'message': 'Please remove author tags from this file.',
        'excluded_files': (),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'ndb\.'),
        'message': (
            'Please use datastore_services instead of ndb, for example:\n'
            '\n'
            'datastore_services = models.Registry.import_datastore_services()\n'
            '\n'
            'class SampleModel(datastore_services.Model):\n'
            '    ...\n'),
        'excluded_files': (),
        'excluded_dirs': ('core/platform',),
    },
    {
        'regexp': re.compile(r'\Wprint\('),
        'message': 'Please do not use print statement.',
        'excluded_files': (
            'core/tests/test_utils.py',
            'core/tests/performance_framework/perf_domain.py',
            'core/tests/test_utils_test.py'),
        'excluded_dirs': ('scripts/',)
    },
    {
        'regexp': re.compile(r'# pylint:\s*disable=[A-Z][0-9]{4}'),
        'message': 'Please remove pylint exclusion if it is unnecessary, or '
                   'make it human readable with a sentence instead of an id. '
                   'The id-to-message list can be seen '
                   'here->http://pylint-messages.wikidot.com/all-codes',
        'excluded_files': (),
        'excluded_dirs': ()
    },
]

# Dispatch table from file extension to the applicable pattern list.
BAD_PATTERNS_MAP = {
    '.html': BAD_LINE_PATTERNS_HTML_REGEXP,
    '.py': BAD_PATTERNS_PYTHON_REGEXP
}
def is_filepath_excluded_for_bad_patterns_check(pattern, filepath):
    """Checks if file is excluded from the bad patterns check.

    Args:
        pattern: str. The pattern to be checked against.
        filepath: str. Path of the file.

    Returns:
        bool. Whether to exclude the given file from this
        particular pattern check.
    """
    exclusions = BAD_PATTERNS[pattern]
    # An exact file match excludes the file outright.
    if filepath in exclusions['excluded_files']:
        return True
    # Otherwise the file is excluded if it lives under an excluded directory.
    return any(
        filepath.startswith(excluded_dir)
        for excluded_dir in exclusions['excluded_dirs'])
def check_bad_pattern_in_file(filepath, file_content, pattern):
    """Detects whether the given pattern is present in the file.

    Args:
        filepath: str. Path of the file.
        file_content: list(str). Lines of the file.
        pattern: dict. Details of the pattern to be checked, with keys:
            regexp: Pattern. Compiled regular expression to match.
            message: str. Message to show if the pattern matches.
            excluded_files: tuple(str). File suffixes to be excluded from
                matching.
            excluded_dirs: tuple(str). Directory prefixes to be excluded
                from matching.

    Returns:
        tuple(bool, list(str)). A 2-tuple whose first element is a bool
        which is True if the bad pattern was found, and whose second
        element is the list of failure messages.
    """
    # Guard clauses: files under an excluded directory or matching an
    # excluded suffix are skipped entirely for this pattern.
    if any(
            filepath.startswith(excluded_dir)
            for excluded_dir in pattern['excluded_dirs']):
        return False, []
    if any(
            filepath.endswith(excluded_file)
            for excluded_file in pattern['excluded_files']):
        return False, []

    regexp = pattern['regexp']
    error_messages = []
    for line_num, line in enumerate(file_content, 1):
        # Match against the line without its trailing newline so that
        # end-of-line anchors ($) behave as expected.
        stripped_line = line[:-1] if line.endswith('\n') else line
        # Lines carrying this marker opt out of the check entirely.
        if stripped_line.endswith('disable-bad-pattern-check'):
            continue
        if regexp.search(stripped_line):
            error_messages.append('%s --> Line %s: %s' % (
                filepath, line_num, pattern['message']))
    return bool(error_messages), error_messages
def check_file_type_specific_bad_pattern(filepath, content):
    """Check the file content based on the file's extension.

    Args:
        filepath: str. Path of the file.
        content: list(str). Lines of the file.

    Returns:
        tuple(bool, int, list(str)). A 3-tuple of:
            failed: bool. True if any extension-specific bad pattern was
                found.
            total_error_count: int. The number of patterns that matched.
            error_messages: list(str). The accumulated failure messages.
    """
    error_messages = []
    total_error_count = 0
    # Pick the pattern list for this file's extension, if one exists.
    _, extension = os.path.splitext(filepath)
    patterns = BAD_PATTERNS_MAP.get(extension)
    if patterns:
        for regexp in patterns:
            pattern_failed, pattern_error_messages = (
                check_bad_pattern_in_file(filepath, content, regexp))
            error_messages.extend(pattern_error_messages)
            if pattern_failed:
                total_error_count += 1
    failed = total_error_count > 0
    return failed, total_error_count, error_messages
class GeneralPurposeLinter:
    """Manages all the common linting functions shared by every linter.

    An instance wraps a list of filepaths plus a file cache, and exposes
    one method per lint check, each returning a
    concurrent_task_utils.TaskResult.
    """

    def __init__(self, files_to_lint, file_cache):
        """Constructs a GeneralPurposeLinter object.

        Args:
            files_to_lint: list(str). A list of filepaths to lint.
            file_cache: object(FileCache). Provides thread-safe access to cached
                file content.
        """
        # Set path for node.
        # The path for node is set explicitly, since otherwise the lint
        # tests fail on CircleCI due to the TypeScript files not being
        # compilable.
        os.environ['PATH'] = '%s/bin:' % common.NODE_PATH + os.environ['PATH']
        self.files_to_lint = files_to_lint
        self.file_cache = file_cache

    @property
    def all_filepaths(self):
        """Returns all file paths."""
        return self.files_to_lint

    def _check_for_mandatory_pattern_in_file(
            self, pattern_list, filepath, failed):
        """Checks for a given mandatory pattern in a file.

        Args:
            pattern_list: list(dict). The list of the mandatory patterns list to
                be checked for in the file.
            filepath: str. The path to the file to be linted.
            failed: bool. Status of failure of the check.

        Returns:
            tuple(bool, list(str)). The failure status of the check and the
            accumulated error messages.
        """
        # This boolean list keeps track of the regex matches
        # found in the file.
        pattern_found_list = []
        error_messages = []
        try:
            file_content = self.file_cache.readlines(filepath)
        except Exception as e:
            # Re-raise with the filepath prepended so the failing file is
            # identifiable from the error alone.
            raise Exception('%s %s' % (filepath, e)) from e
        for index, regexp_to_check in enumerate(
                pattern_list):
            # A pattern applies when the file's type is included and the
            # file is not explicitly excluded.
            if (any(filepath.endswith(
                    allowed_type) for allowed_type in (
                        regexp_to_check['included_types'])) and (
                            not any(
                                filepath.endswith(
                                    pattern) for pattern in (
                                        regexp_to_check['excluded_files'] +
                                        regexp_to_check['excluded_dirs'])))):
                # Assume the pattern is missing; pop the marker again as
                # soon as any line matches it.
                pattern_found_list.append(index)
                for line in file_content:
                    if regexp_to_check['regexp'].search(line):
                        pattern_found_list.pop()
                        break
        if pattern_found_list:
            failed = True
            for pattern_found in pattern_found_list:
                error_message = ('%s --> %s' % (
                    filepath,
                    pattern_list[pattern_found]['message']))
                error_messages.append(error_message)
        return failed, error_messages

    def check_mandatory_patterns(self):
        """This function checks that all files contain the mandatory
        patterns.

        Returns:
            TaskResult. A TaskResult object representing the result of the
            lint check.
        """
        name = 'Mandatory pattern'
        error_messages = []
        failed = False
        sets_of_patterns_to_match = [
            MANDATORY_PATTERNS_REGEXP, MANDATORY_PATTERNS_JS_REGEXP]
        for filepath in self.all_filepaths:
            for pattern_list in sets_of_patterns_to_match:
                failed, mandatory_error_messages = (
                    self._check_for_mandatory_pattern_in_file(
                        pattern_list, filepath, failed))
                error_messages.extend(mandatory_error_messages)
        return concurrent_task_utils.TaskResult(
            name, failed, error_messages, error_messages)

    def check_bad_patterns(self):
        """This function is used for detecting bad patterns."""
        name = 'Bad pattern'
        total_files_checked = 0
        total_error_count = 0
        error_messages = []
        # The linter's own sources define the bad patterns as literals, so
        # they would always trigger false positives and are skipped.
        all_filepaths = [
            filepath for filepath in self.all_filepaths if not (
                filepath.endswith('general_purpose_linter.py') or (
                    filepath.endswith('general_purpose_linter_test.py')))]
        failed = False
        for filepath in all_filepaths:
            file_content = self.file_cache.readlines(filepath)
            total_files_checked += 1
            # Literal substring checks (BAD_PATTERNS).
            for pattern, error in BAD_PATTERNS.items():
                if is_filepath_excluded_for_bad_patterns_check(
                        pattern, filepath):
                    continue
                for line_num, line in enumerate(file_content):
                    if pattern in line:
                        failed = True
                        error_message = ('%s --> Line %s: %s' % (
                            filepath, line_num + 1,
                            error['message']))
                        error_messages.append(error_message)
                        total_error_count += 1
            # Regexp checks (BAD_PATTERNS_REGEXP).
            for regexp in BAD_PATTERNS_REGEXP:
                bad_pattern_check_failed, bad_pattern_error_messages = (
                    check_bad_pattern_in_file(
                        filepath, file_content, regexp))
                if bad_pattern_check_failed:
                    error_messages.extend(bad_pattern_error_messages)
                    total_error_count += 1
            # Extension-specific checks (BAD_PATTERNS_MAP).
            (
                file_type_specific_bad_pattern_failed,
                temp_count, bad_pattern_error_messages) = (
                    check_file_type_specific_bad_pattern(
                        filepath, file_content))
            # NOTE(review): bad_pattern_check_failed here holds only the
            # result for the LAST entry of BAD_PATTERNS_REGEXP; earlier
            # regexp failures are reflected via total_error_count but not
            # via this flag — confirm whether that is intended.
            failed = (
                failed or file_type_specific_bad_pattern_failed or
                bad_pattern_check_failed)
            total_error_count += temp_count
            error_messages.extend(bad_pattern_error_messages)
            # constants.ts additionally gets the BAD_STRINGS_CONSTANTS check.
            if filepath.endswith('constants.ts'):
                for pattern, constants in BAD_STRINGS_CONSTANTS.items():
                    for line in file_content:
                        if pattern in line:
                            failed = True
                            error_message = ('%s --> %s' % (
                                filepath,
                                constants['message']))
                            error_messages.append(error_message)
                            total_error_count += 1
        return concurrent_task_utils.TaskResult(
            name, failed, error_messages, error_messages)

    def check_newline_at_eof(self):
        """This function is used to detect newline at the end of file."""
        name = 'Newline at EOF'
        error_messages = []
        files_to_lint = self.all_filepaths
        failed = False
        for filepath in files_to_lint:
            file_content = self.file_cache.readlines(filepath)
            file_length = len(file_content)
            # The last line must consist of some content followed by a
            # single '\n'; the regexp rejects both a missing newline and a
            # final line that is only newlines.
            if (
                    file_length >= 1 and
                    not re.search(r'[^\n]\n', file_content[-1])):
                error_message = (
                    '%s --> There should be a single newline at the '
                    'end of file.' % filepath)
                error_messages.append(error_message)
                failed = True
        return concurrent_task_utils.TaskResult(
            name, failed, error_messages, error_messages)

    def check_disallowed_flags(self):
        """This function is used to disallow flags."""
        name = 'Disallow flags'
        disallow_flag = (
            'eslint-disable-next-line oppia/no-bypass-security-phrase')
        error_messages = []
        files_to_lint = self.all_filepaths
        failed = False
        excluded_files = (
            warranted_angular_security_bypasses
            .EXCLUDED_BYPASS_SECURITY_TRUST_FILES)
        allowed_files = ''
        for filepath in files_to_lint:
            # A file is allowed to carry the flag only if its path contains
            # one of the warranted exclusion entries.
            for excluded_file in excluded_files:
                if excluded_file in filepath:
                    allowed_files = filepath
            # Only TypeScript files (other than the allowed one) are checked.
            if not filepath.endswith('.ts') or filepath == allowed_files:
                continue
            file_content = self.file_cache.read(filepath)
            if disallow_flag in file_content:
                error_message = (
                    '%s --> Please do not use "no-bypass-security-phrase" flag.'
                    ' It is only expected to be used in files listed in'
                    ' warranted_angular_security_bypasses.py' % filepath)
                error_messages.append(error_message)
                failed = True
        return concurrent_task_utils.TaskResult(
            name, failed, error_messages, error_messages)

    def check_extra_js_files(self):
        """Checks if the changes made include extra js files in core
        or extensions folder which are not specified in
        build.JS_FILEPATHS_NOT_TO_BUILD.

        Returns:
            TaskResult. A TaskResult object representing the result of the lint
            check.
        """
        name = 'Extra JS files'
        error_messages = []
        files_to_lint = self.all_filepaths
        failed = False
        for filepath in files_to_lint:
            # protractor.js files are legitimately JS and are ignored.
            if filepath.endswith(
                    ('.js')) and filepath.startswith(
                    ('core/templates', 'extensions')) and (
                        filepath not in build.JS_FILEPATHS_NOT_TO_BUILD
                        ) and not filepath.endswith('protractor.js'):
                error_message = (
                    '%s --> Found extra .js file' % filepath)
                error_messages.append(error_message)
                failed = True
        if failed:
            err_msg = (
                'If you want the above files to be present as js files, '
                'add them to the list JS_FILEPATHS_NOT_TO_BUILD in '
                'build.py. Otherwise, rename them to .ts')
            error_messages.append(err_msg)
        return concurrent_task_utils.TaskResult(
            name, failed, error_messages, error_messages)

    def perform_all_lint_checks(self):
        """Perform all the lint checks and returns the messages returned by all
        the checks.

        Returns:
            list(TaskResult). A list of TaskResult objects representing the
            results of the lint checks.
        """
        # Nothing to lint: report a single informational, non-failing result.
        if not self.all_filepaths:
            return [
                concurrent_task_utils.TaskResult(
                    'General purpose lint', False, [],
                    ['There are no files to be checked.'])]
        task_results = [
            self.check_mandatory_patterns(), self.check_bad_patterns(),
            self.check_newline_at_eof(), self.check_extra_js_files(),
            self.check_disallowed_flags()]
        return task_results
def get_linters(files_to_lint, file_cache):
    """Creates GeneralPurposeLinter object and returns it.

    Args:
        files_to_lint: list(str). A list of filepaths to lint.
        file_cache: object(FileCache). Provides thread-safe access to cached
            file content.

    Returns:
        tuple(GeneralPurposeLinter, None). A 2-tuple of custom and third_party
        linter objects.
    """
    # These checks have no third-party counterpart, hence the None slot.
    return GeneralPurposeLinter(files_to_lint, file_cache), None
| 37.589347 | 80 | 0.581341 |
from __future__ import annotations
import os
import re
from . import js_ts_linter
from . import warranted_angular_security_bypasses
from .. import build
from .. import common
from .. import concurrent_task_utils
EXCLUDED_PATHS = (
'third_party/*', 'build/*', '.git/*', '*.pyc', 'CHANGELOG',
'integrations/*', 'integrations_dev/*', '*.svg', '*.gif', '*.png',
'*.webp', '*.zip', '*.ico', '*.jpg', '*.min.js', 'backend_prod_files/*',
'assets/scripts/*', 'core/domain/proto/*.py', 'core/tests/data/*',
'core/tests/build_sources/*', '*.mp3', '*.mp4', 'node_modules/*',
'typings/*', 'local_compiled_js/*', 'webpack_bundles/*',
'core/tests/services_sources/*', 'core/tests/release_sources/tmp_unzip.zip',
'scripts/linters/test_files/*', 'proto_files/*',
'core/tests/release_sources/tmp_unzip.tar.gz',
'core/templates/combined-tests.spec.ts',
'core/templates/css/oppia-material.css',
'core/templates/google-analytics.initializer.ts',
'extensions/classifiers/proto/*', '*.rtl.css',
'%s/*' % js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH)
GENERATED_FILE_PATHS = (
'core/templates/expressions/parser.js',)
CONFIG_FILE_PATHS = (
'core/tests/.browserstack.env.example',
'core/tests/protractor.conf.js',
'core/tests/karma.conf.ts',
'core/templates/mathjaxConfig.ts',
'assets/constants.ts',
'assets/rich_text_components_definitions.ts',
'webpack.config.ts',
'webpack.dev.config.ts',
'webpack.prod.config.ts')
BAD_STRINGS_CONSTANTS = {
'"DEV_MODE": false': {
'message': 'Please set the DEV_MODE variable in constants.ts '
'to true before committing.',
'excluded_files': ()
},
'"EMULATOR_MODE": false': {
'message': 'Please set the EMULATOR_MODE variable in constants.ts '
'to true before committing.',
'excluded_files': ()
}
}
BAD_PATTERNS = {
'\t': {
'message': 'Please use spaces instead of tabs.',
'excluded_files': (),
'excluded_dirs': (
'assets/i18n/', 'core/tests/build_sources/assets/')},
'\r': {
'message': 'Please make sure all files only have LF endings (no CRLF).',
'excluded_files': (),
'excluded_dirs': ()},
'<<<<<<<': {
'message': 'Please fully resolve existing merge conflicts.',
'excluded_files': (),
'excluded_dirs': ()},
'>>>>>>>': {
'message': 'Please fully resolve existing merge conflicts.',
'excluded_files': (),
'excluded_dirs': ()},
'glyphicon': {
'message': 'Please use equivalent material-icons '
'instead of glyphicons.',
'excluded_files': (),
'excluded_dirs': ()}
}
BAD_PATTERNS_REGEXP = [
{
'regexp': re.compile(r'TODO[^\(]*[^\)][^:]*[^A-Z]+[^\w]*$'),
'message': 'Please link TODO comments to an issue '
'in the format TODO(#issuenum): XXX. ',
'excluded_files': (),
'excluded_dirs': ()
}
]
MANDATORY_PATTERNS_REGEXP = [
{
'regexp': re.compile(
r'Copyright \d{4} The Oppia Authors\. All Rights Reserved\.'),
'message': 'Please ensure this file should contain a proper '
'copyright notice.',
'included_types': ('.py', '.js', '.sh', '.ts'),
'excluded_files': GENERATED_FILE_PATHS + CONFIG_FILE_PATHS + (
'__init__.py', ),
'excluded_dirs': EXCLUDED_PATHS
},
{
'regexp': re.compile('from __future__ import annotations'),
'message': 'Please ensure this file should contain annotations '
'future import.',
'included_types': ('.py'),
'excluded_files': GENERATED_FILE_PATHS + CONFIG_FILE_PATHS + (
'__init__.py',),
'excluded_dirs': EXCLUDED_PATHS
}
]
MANDATORY_PATTERNS_JS_REGEXP = [
{
'regexp': re.compile(r'^\s\*\s@fileoverview\s[a-zA-Z0-9_]+'),
'message': 'Please ensure this file should contain a file '
'overview i.e. a short description of the file.',
'included_types': ('.js', '.ts'),
'excluded_files': GENERATED_FILE_PATHS + CONFIG_FILE_PATHS,
'excluded_dirs': EXCLUDED_PATHS
}
]
BAD_LINE_PATTERNS_HTML_REGEXP = [
{
'regexp': re.compile(r'text\/ng-template'),
'message': 'The directives must be directly referenced.',
'excluded_files': (),
'excluded_dirs': (
'extensions/answer_summarizers/',
'extensions/classifiers/',
'extensions/objects/',
'extensions/value_generators/')
},
{
'regexp': re.compile(r'[ \t]+$'),
'message': 'There should not be any trailing whitespaces.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\$parent'),
'message': 'Please do not access parent properties ' +
'using $parent. Use the scope object ' +
'for this purpose.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\s+style\s*=\s*'),
'message': 'Please do not use inline styling.',
'excluded_files': (),
'excluded_dirs': ()
}
]
BAD_PATTERNS_PYTHON_REGEXP = [
{
'regexp': re.compile(r'__author__'),
'message': 'Please remove author tags from this file.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'ndb\.'),
'message': (
'Please use datastore_services instead of ndb, for example:\n'
'\n'
'datastore_services = models.Registry.import_datastore_services()\n'
'\n'
'class SampleModel(datastore_services.Model):\n'
' ...\n'),
'excluded_files': (),
'excluded_dirs': ('core/platform',),
},
{
'regexp': re.compile(r'\Wprint\('),
'message': 'Please do not use print statement.',
'excluded_files': (
'core/tests/test_utils.py',
'core/tests/performance_framework/perf_domain.py',
'core/tests/test_utils_test.py'),
'excluded_dirs': ('scripts/',)
},
{
'regexp': re.compile(r'# pylint:\s*disable=[A-Z][0-9]{4}'),
'message': 'Please remove pylint exclusion if it is unnecessary, or '
'make it human readable with a sentence instead of an id. '
'The id-to-message list can be seen '
'here->http://pylint-messages.wikidot.com/all-codes',
'excluded_files': (),
'excluded_dirs': ()
},
]
BAD_PATTERNS_MAP = {
'.html': BAD_LINE_PATTERNS_HTML_REGEXP,
'.py': BAD_PATTERNS_PYTHON_REGEXP
}
def is_filepath_excluded_for_bad_patterns_check(pattern, filepath):
return (any(
filepath.startswith(bad_pattern)
for bad_pattern in BAD_PATTERNS[pattern]['excluded_dirs'])
or filepath in BAD_PATTERNS[pattern]['excluded_files'])
def check_bad_pattern_in_file(filepath, file_content, pattern):
error_messages = []
failed = False
regexp = pattern['regexp']
if not (any(
filepath.startswith(excluded_dir)
for excluded_dir in pattern['excluded_dirs'])
or any(
filepath.endswith(excluded_file)
for excluded_file in pattern['excluded_files'])):
bad_pattern_count = 0
for line_num, line in enumerate(file_content, 1):
if line.endswith('\n'):
stripped_line = line[:-1]
else:
stripped_line = line
if stripped_line.endswith('disable-bad-pattern-check'):
continue
if regexp.search(stripped_line):
error_message = ('%s --> Line %s: %s' % (
filepath, line_num, pattern['message']))
error_messages.append(error_message)
bad_pattern_count += 1
if bad_pattern_count:
failed = True
return failed, error_messages
return failed, error_messages
def check_file_type_specific_bad_pattern(filepath, content):
error_messages = []
failed = False
_, extension = os.path.splitext(filepath)
pattern = BAD_PATTERNS_MAP.get(extension)
total_error_count = 0
if pattern:
for regexp in pattern:
failed, error_message = check_bad_pattern_in_file(
filepath, content, regexp)
error_messages.extend(error_message)
if failed:
total_error_count += 1
if total_error_count:
failed = True
return failed, total_error_count, error_messages
class GeneralPurposeLinter:
def __init__(self, files_to_lint, file_cache):
os.environ['PATH'] = '%s/bin:' % common.NODE_PATH + os.environ['PATH']
self.files_to_lint = files_to_lint
self.file_cache = file_cache
@property
def all_filepaths(self):
return self.files_to_lint
def _check_for_mandatory_pattern_in_file(
self, pattern_list, filepath, failed):
pattern_found_list = []
error_messages = []
try:
file_content = self.file_cache.readlines(filepath)
except Exception as e:
raise Exception('%s %s' % (filepath, e)) from e
for index, regexp_to_check in enumerate(
pattern_list):
if (any(filepath.endswith(
allowed_type) for allowed_type in (
regexp_to_check['included_types'])) and (
not any(
filepath.endswith(
pattern) for pattern in (
regexp_to_check['excluded_files'] +
regexp_to_check['excluded_dirs'])))):
pattern_found_list.append(index)
for line in file_content:
if regexp_to_check['regexp'].search(line):
pattern_found_list.pop()
break
if pattern_found_list:
failed = True
for pattern_found in pattern_found_list:
error_message = ('%s --> %s' % (
filepath,
pattern_list[pattern_found]['message']))
error_messages.append(error_message)
return failed, error_messages
def check_mandatory_patterns(self):
name = 'Mandatory pattern'
error_messages = []
failed = False
sets_of_patterns_to_match = [
MANDATORY_PATTERNS_REGEXP, MANDATORY_PATTERNS_JS_REGEXP]
for filepath in self.all_filepaths:
for pattern_list in sets_of_patterns_to_match:
failed, mandatory_error_messages = (
self._check_for_mandatory_pattern_in_file(
pattern_list, filepath, failed))
error_messages.extend(mandatory_error_messages)
return concurrent_task_utils.TaskResult(
name, failed, error_messages, error_messages)
def check_bad_patterns(self):
name = 'Bad pattern'
total_files_checked = 0
total_error_count = 0
error_messages = []
all_filepaths = [
filepath for filepath in self.all_filepaths if not (
filepath.endswith('general_purpose_linter.py') or (
filepath.endswith('general_purpose_linter_test.py')))]
failed = False
for filepath in all_filepaths:
file_content = self.file_cache.readlines(filepath)
total_files_checked += 1
for pattern, error in BAD_PATTERNS.items():
if is_filepath_excluded_for_bad_patterns_check(
pattern, filepath):
continue
for line_num, line in enumerate(file_content):
if pattern in line:
failed = True
error_message = ('%s --> Line %s: %s' % (
filepath, line_num + 1,
error['message']))
error_messages.append(error_message)
total_error_count += 1
for regexp in BAD_PATTERNS_REGEXP:
bad_pattern_check_failed, bad_pattern_error_messages = (
check_bad_pattern_in_file(
filepath, file_content, regexp))
if bad_pattern_check_failed:
error_messages.extend(bad_pattern_error_messages)
total_error_count += 1
(
file_type_specific_bad_pattern_failed,
temp_count, bad_pattern_error_messages) = (
check_file_type_specific_bad_pattern(
filepath, file_content))
failed = (
failed or file_type_specific_bad_pattern_failed or
bad_pattern_check_failed)
total_error_count += temp_count
error_messages.extend(bad_pattern_error_messages)
if filepath.endswith('constants.ts'):
for pattern, constants in BAD_STRINGS_CONSTANTS.items():
for line in file_content:
if pattern in line:
failed = True
error_message = ('%s --> %s' % (
filepath,
constants['message']))
error_messages.append(error_message)
total_error_count += 1
return concurrent_task_utils.TaskResult(
name, failed, error_messages, error_messages)
def check_newline_at_eof(self):
name = 'Newline at EOF'
error_messages = []
files_to_lint = self.all_filepaths
failed = False
for filepath in files_to_lint:
file_content = self.file_cache.readlines(filepath)
file_length = len(file_content)
if (
file_length >= 1 and
not re.search(r'[^\n]\n', file_content[-1])):
error_message = (
'%s --> There should be a single newline at the '
'end of file.' % filepath)
error_messages.append(error_message)
failed = True
return concurrent_task_utils.TaskResult(
name, failed, error_messages, error_messages)
def check_disallowed_flags(self):
name = 'Disallow flags'
disallow_flag = (
'eslint-disable-next-line oppia/no-bypass-security-phrase')
error_messages = []
files_to_lint = self.all_filepaths
failed = False
excluded_files = (
warranted_angular_security_bypasses
.EXCLUDED_BYPASS_SECURITY_TRUST_FILES)
allowed_files = ''
for filepath in files_to_lint:
for excluded_file in excluded_files:
if excluded_file in filepath:
allowed_files = filepath
if not filepath.endswith('.ts') or filepath == allowed_files:
continue
file_content = self.file_cache.read(filepath)
if disallow_flag in file_content:
error_message = (
'%s --> Please do not use "no-bypass-security-phrase" flag.'
' It is only expected to be used in files listed in'
' warranted_angular_security_bypasses.py' % filepath)
error_messages.append(error_message)
failed = True
return concurrent_task_utils.TaskResult(
name, failed, error_messages, error_messages)
def check_extra_js_files(self):
name = 'Extra JS files'
error_messages = []
files_to_lint = self.all_filepaths
failed = False
for filepath in files_to_lint:
if filepath.endswith(
('.js')) and filepath.startswith(
('core/templates', 'extensions')) and (
filepath not in build.JS_FILEPATHS_NOT_TO_BUILD
) and not filepath.endswith('protractor.js'):
error_message = (
'%s --> Found extra .js file' % filepath)
error_messages.append(error_message)
failed = True
if failed:
err_msg = (
'If you want the above files to be present as js files, '
'add them to the list JS_FILEPATHS_NOT_TO_BUILD in '
'build.py. Otherwise, rename them to .ts')
error_messages.append(err_msg)
return concurrent_task_utils.TaskResult(
name, failed, error_messages, error_messages)
def perform_all_lint_checks(self):
if not self.all_filepaths:
return [
concurrent_task_utils.TaskResult(
'General purpose lint', False, [],
['There are no files to be checked.'])]
task_results = [
self.check_mandatory_patterns(), self.check_bad_patterns(),
self.check_newline_at_eof(), self.check_extra_js_files(),
self.check_disallowed_flags()]
return task_results
def get_linters(files_to_lint, file_cache):
custom_linter = GeneralPurposeLinter(files_to_lint, file_cache)
return custom_linter, None
| true | true |
f71b1b5dacb5a5c7e4853d46d9e70c5797445611 | 1,049 | py | Python | prxgt/domain/attribute.py | praxigento/teq_test_db_schema_attrs | 20ec030dc095c644d22631491e066697203d983d | [
"MIT"
] | null | null | null | prxgt/domain/attribute.py | praxigento/teq_test_db_schema_attrs | 20ec030dc095c644d22631491e066697203d983d | [
"MIT"
] | null | null | null | prxgt/domain/attribute.py | praxigento/teq_test_db_schema_attrs | 20ec030dc095c644d22631491e066697203d983d | [
"MIT"
] | null | null | null | __author__ = 'Alex Gusev <alex@flancer64.com>'
import prxgt.const as const
from prxgt.domain.meta.attribute import Attribute as AttributeBase
class Attribute(AttributeBase):
"""
Attribute model contains data.
"""
def __init__(self, name=None, type_=None, value=None):
super(Attribute, self).__init__(name, type_)
self._value = value
return
@property
def value(self):
return self._value
@value.setter
def value(self, val):
self._value = val
@property
def meta(self):
"""
META Attribute (name and type only)
:return:
"""
return AttributeBase(self._name, self._type)
def __repr__(self):
result = super(Attribute, self).__repr__()
if (self.value is not None) and (self.type == const.ATTR_TYPE_TXT):
# [name@type='value']
result += "=" + repr(self.value[:4] + "...")
else:
# [name@text='valu...']
result += "=" + repr(self.value)
return result | 26.225 | 75 | 0.578646 | __author__ = 'Alex Gusev <alex@flancer64.com>'
import prxgt.const as const
from prxgt.domain.meta.attribute import Attribute as AttributeBase
class Attribute(AttributeBase):
def __init__(self, name=None, type_=None, value=None):
super(Attribute, self).__init__(name, type_)
self._value = value
return
@property
def value(self):
return self._value
@value.setter
def value(self, val):
self._value = val
@property
def meta(self):
return AttributeBase(self._name, self._type)
def __repr__(self):
result = super(Attribute, self).__repr__()
if (self.value is not None) and (self.type == const.ATTR_TYPE_TXT):
result += "=" + repr(self.value[:4] + "...")
else:
result += "=" + repr(self.value)
return result | true | true |
f71b1c721a28949ee40ddf327761ea6fcd2fe45b | 11,435 | py | Python | config/settings/common.py | devermaslinfy/rejot | 8c07a42a73be8422f16874684be3b46ab70b5c18 | [
"BSD-3-Clause"
] | 1 | 2020-07-23T16:21:44.000Z | 2020-07-23T16:21:44.000Z | config/settings/common.py | devermaslinfy/rejot | 8c07a42a73be8422f16874684be3b46ab70b5c18 | [
"BSD-3-Clause"
] | 7 | 2020-02-12T01:21:01.000Z | 2022-03-11T23:25:57.000Z | config/settings/common.py | devermaslinfy/rejot | 8c07a42a73be8422f16874684be3b46ab70b5c18 | [
"BSD-3-Clause"
] | 1 | 2019-07-27T10:00:01.000Z | 2019-07-27T10:00:01.000Z | # -*- coding: utf-8 -*-
"""
Django settings for roojet project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
import environ
from decimal import Decimal
ROOT_DIR = environ.Path(__file__) - 3 # (/a/b/myfile.py - 3 = /)
APPS_DIR = ROOT_DIR.path('roojet')
env = environ.Env()
environ.Env.read_env(ROOT_DIR()+'/.env')
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'crispy_forms', # Form layouts
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
'loginas', # loginas
'bootstrap3',
'djrill',
'plans',
'ordered_model',
'payments',
'django_cron',
)
# Apps specific for this project go here.
LOCAL_APPS = (
'roojet.users', # custom users app
# Your stuff: custom apps go here
'roojet.core',
'roojet.services',
'roojet.payment_roojet',
'roojet.mailerlite',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES = (
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
'sites': 'roojet.contrib.sites.migrations'
}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool("DJANGO_DEBUG", False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
("Nija",'nijap@techversantinfotech.com'),
('Scott','scott@roojet.com'),
)
SERVER_EMAIL = 'dev@swapps.co'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
# DATABASES = {
# ## Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
# 'default': env.db("DATABASE_URL", default="postgres:///roojet"),
# }
# DATABASES['default']['ATOMIC_REQUESTS'] = True
#DO not harcode. Use environment variables or deployment will fail
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'postgres',
'USER': 'postgres',
'PASSWORD': '123456',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Your stuff: custom template context processors go here
],
},
},
]
# See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
str(APPS_DIR.path('static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = 'none'
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'core:dashboard'
LOGIN_URL = 'account_login'
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
# LOGGING CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# django-plans
PLANS_CURRENCY = 'USD'
PLANS_INVOICE_ISSUER = {
"issuer_name": "Joe Doe Company",
"issuer_street": "Django street, 34",
"issuer_zipcode": "123-3444",
"issuer_city": "Djangoko",
"issuer_country": "US", # Must be a country code with 2 characters
"issuer_tax_number": "1222233334444555",
}
PLANS_TAX = Decimal('0.0')
PLANS_TAX_COUNTRY = 'US'
SEND_PLANS_EMAILS = False
# Payment settings
PAYMENT_HOST = env.str("PAYMENT_HOST", default='')
PAYMENT_USES_SSL = env.bool("PAYMENT_USES_SSL", default='')
PAYMENT_MODEL = 'payment_roojet.Payment'
# use sandbox
PAYMENT_VARIANTS = {
'default': ('payments.stripe.StripeProvider', {
'secret_key': env('STRIPE_API_KEY', default=''),
'public_key': env('STRIPE_PUBLISHABLE_KEY', default=''),
'name': 'roojet',
})
}
# Location of root django.contrib.admin URL, use {% url 'admin:index' %}
ADMIN_URL = r'^admin/'
SHOPIFY_API_KEY = env('SHOPIFY_API_KEY', default='')
SHOPIFY_SECRET = env('SHOPIFY_SECRET', default='')
SHOPIFY_URL = 'myshopify.com'
SHOPIFY_AUTHORIZE_SUFIX = '/admin/oauth/authorize'
SHOPIFY_SCOPES = ['read_products', 'read_orders', 'write_products']
MOE_URL = env('MOE_URL', default='')
CODESHIP_API_KEY = env("CODESHIP_API_KEY", default='')
MANDRILL_API_URL = env("MANDRILL_API_URL", default='')
MANDRILL_API_KEY = env('MANDRILL_API_KEY', default='')
DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL', default='')
ENOUGH_DATA = env.int('ENOUGH_DATA', default=5)
ACCOUNT_ADAPTER = 'roojet.users.adapter.AccountAdapter'
CRON_CLASSES = [
"roojet.core.cron.MyCronJob",
]
| 33.338192 | 98 | 0.61784 |
from __future__ import absolute_import, unicode_literals
import environ
from decimal import Decimal
ROOT_DIR = environ.Path(__file__) - 3
APPS_DIR = ROOT_DIR.path('roojet')
env = environ.Env()
environ.Env.read_env(ROOT_DIR()+'/.env')
DJANGO_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'crispy_forms',
'allauth',
'allauth.account',
'allauth.socialaccount',
'loginas',
'bootstrap3',
'djrill',
'plans',
'ordered_model',
'payments',
'django_cron',
)
LOCAL_APPS = (
'roojet.users',
'roojet.core',
'roojet.services',
'roojet.payment_roojet',
'roojet.mailerlite',
)
= DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
MIGRATION_MODULES = {
'sites': 'roojet.contrib.sites.migrations'
}
= env.bool("DJANGO_DEBUG", False)
(APPS_DIR.path('fixtures')),
)
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
= (
("Nija",'nijap@techversantinfotech.com'),
('Scott','scott@roojet.com'),
)
SERVER_EMAIL = 'dev@swapps.co'
= ADMINS
'PASSWORD': '123456',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
TIME_ZONE = 'UTC'
= 'en-us'
= 1
= True
= True
= True
= [
{
mplate.backends.django.DjangoTemplates',
': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
ebug': DEBUG,
ders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
sors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
E_PACK = 'bootstrap3'
= str(ROOT_DIR('staticfiles'))
= '/static/'
(APPS_DIR.path('static')),
)
= (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
= str(APPS_DIR('media'))
= '/media/'
ROOT_URLCONF = 'config.urls'
= 'config.wsgi.application'
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = 'none'
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'core:dashboard'
LOGIN_URL = 'account_login'
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
GGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
PLANS_CURRENCY = 'USD'
PLANS_INVOICE_ISSUER = {
"issuer_name": "Joe Doe Company",
"issuer_street": "Django street, 34",
"issuer_zipcode": "123-3444",
"issuer_city": "Djangoko",
"issuer_country": "US",
"issuer_tax_number": "1222233334444555",
}
PLANS_TAX = Decimal('0.0')
PLANS_TAX_COUNTRY = 'US'
SEND_PLANS_EMAILS = False
PAYMENT_HOST = env.str("PAYMENT_HOST", default='')
PAYMENT_USES_SSL = env.bool("PAYMENT_USES_SSL", default='')
PAYMENT_MODEL = 'payment_roojet.Payment'
PAYMENT_VARIANTS = {
'default': ('payments.stripe.StripeProvider', {
'secret_key': env('STRIPE_API_KEY', default=''),
'public_key': env('STRIPE_PUBLISHABLE_KEY', default=''),
'name': 'roojet',
})
}
ADMIN_URL = r'^admin/'
SHOPIFY_API_KEY = env('SHOPIFY_API_KEY', default='')
SHOPIFY_SECRET = env('SHOPIFY_SECRET', default='')
SHOPIFY_URL = 'myshopify.com'
SHOPIFY_AUTHORIZE_SUFIX = '/admin/oauth/authorize'
SHOPIFY_SCOPES = ['read_products', 'read_orders', 'write_products']
MOE_URL = env('MOE_URL', default='')
CODESHIP_API_KEY = env("CODESHIP_API_KEY", default='')
MANDRILL_API_URL = env("MANDRILL_API_URL", default='')
MANDRILL_API_KEY = env('MANDRILL_API_KEY', default='')
DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL', default='')
ENOUGH_DATA = env.int('ENOUGH_DATA', default=5)
ACCOUNT_ADAPTER = 'roojet.users.adapter.AccountAdapter'
CRON_CLASSES = [
"roojet.core.cron.MyCronJob",
]
| true | true |
f71b1dc503a1e22e10ad5b8a916812053338361e | 3,578 | py | Python | appStore/tests.py | Rubensrvsc/Store | ab3a3aea6518b76d9d5de9c1b9e90236def8954d | [
"MIT"
] | null | null | null | appStore/tests.py | Rubensrvsc/Store | ab3a3aea6518b76d9d5de9c1b9e90236def8954d | [
"MIT"
] | null | null | null | appStore/tests.py | Rubensrvsc/Store | ab3a3aea6518b76d9d5de9c1b9e90236def8954d | [
"MIT"
] | null | null | null | from django.test import TestCase
from rest_framework.test import APIRequestFactory, APIClient
from .models import *
from rest_framework import status
# Create your tests here.
class ProductCategoryTest(TestCase):
@classmethod
def setUpTestData(cls):
ProductCategory.objects.create(name_product_category="Infantil")
ProductCategory.objects.create(name_product_category="Adulto")
ProductCategory.objects.create(name_product_category="Idoso")
ProductType.objects.create(name_product_type="camisa")
ProductType.objects.create(name_product_type="calça")
Size.objects.create(size="GG")
Size.objects.create(size="G")
size = Size.objects.get(id=1)
category_product = ProductCategory.objects.get(id=1)
type_product = ProductType.objects.get(id=1)
size_two = Size.objects.get(id=2)
category_product_two = ProductCategory.objects.get(id=2)
type_product_two = ProductType.objects.get(id=2)
Product.objects.create(
name_product="camisa 1",
price=100,
color="red",
product_category = category_product,
product_size = size,
product_type = type_product
)
Product.objects.create(
name_product="camisa 2",
price=150,
color="green",
product_category = category_product_two,
product_size = size_two,
product_type = type_product_two
)
Product.objects.create(
name_product="camisa 3",
price=190,
color="yellow",
product_category = category_product_two,
product_size = size_two,
product_type = type_product_two
)
Product.objects.create(
name_product="camisa 4",
price=220,
color="magento",
product_category = category_product,
product_size = size,
product_type = type_product
)
def test_create_product_category(self):
factory = APIClient()
product_category = {"name_product_category": "infantil"}
request = factory.post('/createcategory',product_category)
self.assertEquals(request.status_code,status.HTTP_201_CREATED)
def test_list_product_category(self):
factory = APIClient()
products_categories = ProductCategory.objects.all().count()
request = factory.get("/productcategories")
self.assertEquals(products_categories,3)
def test_create_product(self):
factory = APIClient()
size = Size.objects.get(id=1)
category_product = ProductCategory.objects.get(id=1)
type_product = ProductType.objects.get(id=1)
product = {'name_product':"camisa 1",
'price':100,
'color':"red",
'product_category' : category_product.id,
'size': size.id,
'product_type': type_product.id
}
response = factory.post('/create_product',product)
self.assertEquals(response.status_code,status.HTTP_301_MOVED_PERMANENTLY)
def test_get_product(self):
factory = APIClient()
response = factory.get('/products')
self.assertEquals(response.status_code,status.HTTP_200_OK)
def test_search_price_products(self):
factory = APIClient()
response = factory.get('/searchpriceproduct/80/170')
self.assertEquals(response.status_code,status.HTTP_200_OK)
| 31.663717 | 81 | 0.630799 | from django.test import TestCase
from rest_framework.test import APIRequestFactory, APIClient
from .models import *
from rest_framework import status
class ProductCategoryTest(TestCase):
@classmethod
def setUpTestData(cls):
ProductCategory.objects.create(name_product_category="Infantil")
ProductCategory.objects.create(name_product_category="Adulto")
ProductCategory.objects.create(name_product_category="Idoso")
ProductType.objects.create(name_product_type="camisa")
ProductType.objects.create(name_product_type="calça")
Size.objects.create(size="GG")
Size.objects.create(size="G")
size = Size.objects.get(id=1)
category_product = ProductCategory.objects.get(id=1)
type_product = ProductType.objects.get(id=1)
size_two = Size.objects.get(id=2)
category_product_two = ProductCategory.objects.get(id=2)
type_product_two = ProductType.objects.get(id=2)
Product.objects.create(
name_product="camisa 1",
price=100,
color="red",
product_category = category_product,
product_size = size,
product_type = type_product
)
Product.objects.create(
name_product="camisa 2",
price=150,
color="green",
product_category = category_product_two,
product_size = size_two,
product_type = type_product_two
)
Product.objects.create(
name_product="camisa 3",
price=190,
color="yellow",
product_category = category_product_two,
product_size = size_two,
product_type = type_product_two
)
Product.objects.create(
name_product="camisa 4",
price=220,
color="magento",
product_category = category_product,
product_size = size,
product_type = type_product
)
def test_create_product_category(self):
factory = APIClient()
product_category = {"name_product_category": "infantil"}
request = factory.post('/createcategory',product_category)
self.assertEquals(request.status_code,status.HTTP_201_CREATED)
def test_list_product_category(self):
factory = APIClient()
products_categories = ProductCategory.objects.all().count()
request = factory.get("/productcategories")
self.assertEquals(products_categories,3)
def test_create_product(self):
factory = APIClient()
size = Size.objects.get(id=1)
category_product = ProductCategory.objects.get(id=1)
type_product = ProductType.objects.get(id=1)
product = {'name_product':"camisa 1",
'price':100,
'color':"red",
'product_category' : category_product.id,
'size': size.id,
'product_type': type_product.id
}
response = factory.post('/create_product',product)
self.assertEquals(response.status_code,status.HTTP_301_MOVED_PERMANENTLY)
def test_get_product(self):
factory = APIClient()
response = factory.get('/products')
self.assertEquals(response.status_code,status.HTTP_200_OK)
def test_search_price_products(self):
factory = APIClient()
response = factory.get('/searchpriceproduct/80/170')
self.assertEquals(response.status_code,status.HTTP_200_OK)
| true | true |
f71b1e2fd58d30c28062d37e9f1ae54392548686 | 6,411 | py | Python | src/datadog_api_client/v1/model/synthetics_trigger_ci_tests_response.py | DataDog/datadog-api-client-python | de2fc57dbde9acf4b8c8eef94ac29911227a62a2 | [
"Apache-2.0"
] | 32 | 2021-01-07T15:09:56.000Z | 2022-01-30T05:49:23.000Z | src/datadog_api_client/v1/model/synthetics_trigger_ci_tests_response.py | DataDog/datadog-api-client-python | de2fc57dbde9acf4b8c8eef94ac29911227a62a2 | [
"Apache-2.0"
] | 228 | 2020-09-03T14:03:54.000Z | 2022-03-31T20:16:12.000Z | src/datadog_api_client/v1/model/synthetics_trigger_ci_tests_response.py | DataDog/datadog-api-client-python | de2fc57dbde9acf4b8c8eef94ac29911227a62a2 | [
"Apache-2.0"
] | 12 | 2020-09-15T21:36:03.000Z | 2022-03-31T17:13:17.000Z | # Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from datadog_api_client.v1.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
)
def lazy_import():
from datadog_api_client.v1.model.synthetics_trigger_ci_test_location import SyntheticsTriggerCITestLocation
from datadog_api_client.v1.model.synthetics_trigger_ci_test_run_result import SyntheticsTriggerCITestRunResult
globals()["SyntheticsTriggerCITestLocation"] = SyntheticsTriggerCITestLocation
globals()["SyntheticsTriggerCITestRunResult"] = SyntheticsTriggerCITestRunResult
class SyntheticsTriggerCITestsResponse(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
"batch_id": (str,), # noqa: E501
"locations": ([SyntheticsTriggerCITestLocation],), # noqa: E501
"results": ([SyntheticsTriggerCITestRunResult],), # noqa: E501
"triggered_check_ids": ([str],), # noqa: E501
}
discriminator = None
attribute_map = {
"batch_id": "batch_id", # noqa: E501
"locations": "locations", # noqa: E501
"results": "results", # noqa: E501
"triggered_check_ids": "triggered_check_ids", # noqa: E501
}
read_only_vars = {}
_composed_schemas = {}
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""SyntheticsTriggerCITestsResponse - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
batch_id (str): The public ID of the batch triggered.. [optional] # noqa: E501
locations ([SyntheticsTriggerCITestLocation]): List of Synthetics locations.. [optional] # noqa: E501
results ([SyntheticsTriggerCITestRunResult]): Information about the tests runs.. [optional] # noqa: E501
triggered_check_ids ([str]): The public IDs of the Synthetics test triggered.. [optional] # noqa: E501
"""
super().__init__(kwargs)
self._check_pos_args(args)
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""Helper creating a new instance from a response."""
self = super(SyntheticsTriggerCITestsResponse, cls)._from_openapi_data(kwargs)
self._check_pos_args(args)
return self
| 43.910959 | 117 | 0.61644 |
from datadog_api_client.v1.model_utils import (
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
)
def lazy_import():
from datadog_api_client.v1.model.synthetics_trigger_ci_test_location import SyntheticsTriggerCITestLocation
from datadog_api_client.v1.model.synthetics_trigger_ci_test_run_result import SyntheticsTriggerCITestRunResult
globals()["SyntheticsTriggerCITestLocation"] = SyntheticsTriggerCITestLocation
globals()["SyntheticsTriggerCITestRunResult"] = SyntheticsTriggerCITestRunResult
class SyntheticsTriggerCITestsResponse(ModelNormal):
    """Response model for triggering Synthetics CI tests.

    Auto-generated OpenAPI model; the class-level attributes below follow
    the generator's ModelNormal contract and should not be hand-edited.
    """
    # Allowed enum values per attribute path (none for this model).
    allowed_values = {}
    # Per-attribute validations (max_length, regex, ...) -- none here.
    validations = {}
    # No additional (undeclared) properties are accepted.
    additional_properties_type = None
    _nullable = False
    @cached_property
    def openapi_types():
        """Return the attribute-name -> accepted-types mapping.

        Defined without `self` on purpose: it must run after the class is
        loaded (a model may reference other model types), and lazy_import()
        is called first so the referenced classes exist in globals().
        """
        lazy_import()
        return {
            "batch_id": (str,),
            "locations": ([SyntheticsTriggerCITestLocation],),
            "results": ([SyntheticsTriggerCITestRunResult],),
            "triggered_check_ids": ([str],),
        }
    discriminator = None
    # Python attribute name -> JSON key in the API payload.
    attribute_map = {
        "batch_id": "batch_id",
        "locations": "locations",
        "results": "results",
        "triggered_check_ids": "triggered_check_ids",
    }
    read_only_vars = {}
    _composed_schemas = {}
    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):
        """Build the model from keyword arguments (all fields optional)."""
        super().__init__(kwargs)
        self._check_pos_args(args)
    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):
        """Helper creating a new instance from a deserialized response."""
        self = super(SyntheticsTriggerCITestsResponse, cls)._from_openapi_data(kwargs)
        self._check_pos_args(args)
        return self
| true | true |
f71b1e406ddf2cb9954d1454378cd13f4b53e7f4 | 1,801 | py | Python | nodes/wind.py | JavierRefuerzo/WeatherFlow | 5ea3669edfd7449797cef3184689d6c540383cde | [
"MIT"
] | 4 | 2018-08-26T02:40:09.000Z | 2020-06-21T22:59:04.000Z | nodes/wind.py | JavierRefuerzo/WeatherFlow | 5ea3669edfd7449797cef3184689d6c540383cde | [
"MIT"
] | 9 | 2018-04-20T15:37:46.000Z | 2020-07-15T20:22:31.000Z | nodes/wind.py | JavierRefuerzo/WeatherFlow | 5ea3669edfd7449797cef3184689d6c540383cde | [
"MIT"
] | 2 | 2018-09-16T23:13:39.000Z | 2021-12-20T16:35:20.000Z | #!/usr/bin/env python3
"""
Polyglot v2 node server for WeatherFlow Weather Station data.
Copyright (c) 2018,2019 Robert Paauwe
"""
import polyinterface
import sys
import time
import datetime
import urllib3
import json
import socket
import math
import threading
LOGGER = polyinterface.LOGGER
class WindNode(polyinterface.Node):
    """Polyglot node exposing WeatherFlow wind data (speed, direction,
    gust, gust direction, lull) as ISY drivers."""
    id = 'wind'
    hint = [1, 11, 4, 0]
    units = 'metric'
    drivers = [
        {'driver': 'ST', 'value': 0, 'uom': 32},   # wind speed
        {'driver': 'GV0', 'value': 0, 'uom': 76},  # direction
        {'driver': 'GV1', 'value': 0, 'uom': 32},  # gust speed
        {'driver': 'GV2', 'value': 0, 'uom': 76},  # gust direction
        {'driver': 'GV3', 'value': 0, 'uom': 32},  # lull speed
    ]

    def SetUnits(self, u):
        """Remember the requested unit system and update the speed UOMs
        (drivers 0, 2 and 4) plus the node profile id accordingly."""
        self.units = u
        speed_uom = {'kph': 32, 'ms': 40, 'mph': 48}
        node_id = {'kph': 'wind', 'ms': 'wind', 'mph': 'windUS'}
        if u in speed_uom:
            for idx in (0, 2, 4):
                self.drivers[idx]['uom'] = speed_uom[u]
            self.id = node_id[u]

    def setDriver(self, driver, value):
        """Convert speed drivers to MPH when that unit system is active,
        then forward to the base class (always report, always force)."""
        if driver in ('ST', 'GV1', 'GV3') and self.units == 'mph':
            value = round(value / 1.609344, 2)  # km/h -> mph
        super(WindNode, self).setDriver(driver, value, report=True, force=True)

    def update(self, ws, wd, wg, wl):
        """Push one full set of wind readings: speed, direction, gust,
        gust direction (same as direction), lull."""
        for driver, value in (('ST', ws), ('GV0', wd), ('GV1', wg),
                              ('GV2', wd), ('GV3', wl)):
            self.setDriver(driver, value)
| 29.52459 | 79 | 0.513048 |
import polyinterface
import sys
import time
import datetime
import urllib3
import json
import socket
import math
import threading
LOGGER = polyinterface.LOGGER
class WindNode(polyinterface.Node):
id = 'wind'
hint = [1,11,4,0]
units = 'metric'
drivers = [
{'driver': 'ST', 'value': 0, 'uom': 32},
{'driver': 'GV0', 'value': 0, 'uom': 76},
{'driver': 'GV1', 'value': 0, 'uom': 32},
{'driver': 'GV2', 'value': 0, 'uom': 76},
{'driver': 'GV3', 'value': 0, 'uom': 32}
]
def SetUnits(self, u):
self.units = u
if (u == 'kph'):
self.drivers[0]['uom'] = 32
self.drivers[2]['uom'] = 32
self.drivers[4]['uom'] = 32
self.id = 'wind'
if (u == 'ms'):
self.drivers[0]['uom'] = 40
self.drivers[2]['uom'] = 40
self.drivers[4]['uom'] = 40
self.id = 'wind'
elif (u == 'mph'):
self.drivers[0]['uom'] = 48
self.drivers[2]['uom'] = 48
self.drivers[4]['uom'] = 48
self.id = 'windUS'
def setDriver(self, driver, value):
if (driver == 'ST' or driver == 'GV1' or driver == 'GV3'):
if (self.units == 'mph'):
value = round(value / 1.609344, 2)
super(WindNode, self).setDriver(driver, value, report=True, force=True)
def update(self, ws, wd, wg, wl):
self.setDriver('ST', ws)
self.setDriver('GV0', wd)
self.setDriver('GV1', wg)
self.setDriver('GV2', wd)
self.setDriver('GV3', wl)
| true | true |
f71b1e9aa887b542b88ff076fa75931ec6dc85c7 | 2,532 | py | Python | cohesity_management_sdk/models/smb_active_file_path.py | nick6655/management-sdk-python | 88e792cb83e5c24a22af495b220c145d0c45841d | [
"Apache-2.0"
] | 18 | 2019-09-24T17:35:53.000Z | 2022-03-25T08:08:47.000Z | cohesity_management_sdk/models/smb_active_file_path.py | nick6655/management-sdk-python | 88e792cb83e5c24a22af495b220c145d0c45841d | [
"Apache-2.0"
] | 18 | 2019-03-29T19:32:29.000Z | 2022-01-03T23:16:45.000Z | cohesity_management_sdk/models/smb_active_file_path.py | nick6655/management-sdk-python | 88e792cb83e5c24a22af495b220c145d0c45841d | [
"Apache-2.0"
] | 16 | 2019-02-27T06:54:12.000Z | 2021-11-16T18:10:24.000Z | # -*- coding: utf-8 -*-
# Copyright 2021 Cohesity Inc.
import cohesity_management_sdk.models.smb_active_session
class SmbActiveFilePath(object):
    """Implementation of the 'SmbActiveFilePath' model.

    Specifies a file path in an SMB view that has active sessions and opens.

    Attributes:
        active_sessions (list of SmbActiveSession): Sessions in which the
            file is currently open.
        file_path (string): File path within the view.
        view_id (long|int): Id of the View assigned by the Cohesity
            Cluster. Either view_name or view_id must be specified.
        view_name (string): Name of the View.
    """

    # Mapping from Model property names to API (JSON) property names.
    _names = {
        "active_sessions": 'activeSessions',
        "file_path": 'filePath',
        "view_id": 'viewId',
        "view_name": 'viewName'
    }

    def __init__(self,
                 active_sessions=None,
                 file_path=None,
                 view_id=None,
                 view_name=None):
        """Constructor for the SmbActiveFilePath class"""
        self.active_sessions = active_sessions
        self.file_path = file_path
        self.view_id = view_id
        self.view_name = view_name

    @classmethod
    def from_dictionary(cls,
                        dictionary):
        """Creates an instance of this model from a dictionary.

        Args:
            dictionary (dict): API response payload; keys must match the
                property names in the API description.

        Returns:
            SmbActiveFilePath: an instance of this class, or None when the
            input dictionary is None.
        """
        if dictionary is None:
            return None

        # Extract variables from the dictionary.
        # Fix: compare against None with `is not` (PEP 8), and build the
        # session list with a comprehension instead of a manual loop.
        active_sessions = None
        if dictionary.get('activeSessions') is not None:
            active_sessions = [
                cohesity_management_sdk.models.smb_active_session.SmbActiveSession.from_dictionary(structure)
                for structure in dictionary['activeSessions']
            ]
        file_path = dictionary.get('filePath')
        view_id = dictionary.get('viewId')
        view_name = dictionary.get('viewName')

        # Return an object of this model.
        return cls(active_sessions,
                   file_path,
                   view_id,
                   view_name)
| 32.461538 | 133 | 0.625197 |
import cohesity_management_sdk.models.smb_active_session
class SmbActiveFilePath(object):
_names = {
"active_sessions":'activeSessions',
"file_path":'filePath',
"view_id":'viewId',
"view_name":'viewName'
}
def __init__(self,
active_sessions=None,
file_path=None,
view_id=None,
view_name=None):
self.active_sessions = active_sessions
self.file_path = file_path
self.view_id = view_id
self.view_name = view_name
@classmethod
def from_dictionary(cls,
dictionary):
if dictionary is None:
return None
active_sessions = None
if dictionary.get('activeSessions') != None:
active_sessions = list()
for structure in dictionary.get('activeSessions'):
active_sessions.append(cohesity_management_sdk.models.smb_active_session.SmbActiveSession.from_dictionary(structure))
file_path = dictionary.get('filePath')
view_id = dictionary.get('viewId')
view_name = dictionary.get('viewName')
return cls(active_sessions,
file_path,
view_id,
view_name)
| true | true |
f71b206d98b3712aa33781c5090859c94fbf8680 | 373 | py | Python | source/Objects/XTRA_Solvers_Class.py | afarahi/XTRA | 6550b216264abaa3ed705835aca0981f2934e069 | [
"MIT"
] | 2 | 2018-11-01T12:38:56.000Z | 2019-10-22T07:02:54.000Z | source/Objects/XTRA_Solvers_Class.py | afarahi/XTRA | 6550b216264abaa3ed705835aca0981f2934e069 | [
"MIT"
] | null | null | null | source/Objects/XTRA_Solvers_Class.py | afarahi/XTRA | 6550b216264abaa3ed705835aca0981f2934e069 | [
"MIT"
] | null | null | null | class Solvers_Class():
def __init__(self):
pass
def ProperDistanceTabulate(self, Input_Param, z_max):
from Distance_Solver import Proper_Distance_Tabulate
Proper_Distance_Tabulate(Input_Param, z_max)
def LxTxSolver(self, Halos):
from LxTx_Solver import LxTx_Solver
solver = LxTx_Solver()
solver.solve(Halos)
| 23.3125 | 60 | 0.691689 | class Solvers_Class():
def __init__(self):
pass
def ProperDistanceTabulate(self, Input_Param, z_max):
from Distance_Solver import Proper_Distance_Tabulate
Proper_Distance_Tabulate(Input_Param, z_max)
def LxTxSolver(self, Halos):
from LxTx_Solver import LxTx_Solver
solver = LxTx_Solver()
solver.solve(Halos)
| true | true |
f71b20c6a58525d0ad6e5a5b0ad92dbbdf9f5849 | 1,599 | py | Python | user/tests.py | Vr3n/django_react_cart_system | f6d2572b640f711ff9c7020641051e3f92c3dd59 | [
"MIT"
] | null | null | null | user/tests.py | Vr3n/django_react_cart_system | f6d2572b640f711ff9c7020641051e3f92c3dd59 | [
"MIT"
] | 3 | 2021-06-18T15:13:46.000Z | 2021-06-18T18:24:43.000Z | user/tests.py | Vr3n/django_react_cart_system | f6d2572b640f711ff9c7020641051e3f92c3dd59 | [
"MIT"
] | null | null | null | from django.contrib.auth import get_user_model
from django.test import TestCase
# Create your tests here.
class UserManagersTests(TestCase):
    """Unit tests for the custom user manager's create_user/create_superuser."""

    def test_create_user(self):
        """A regular user gets the expected flags; bad arguments raise."""
        User = get_user_model()
        user = User.objects.create_user(
            email="normal@user.com", password="testing@123")
        self.assertEqual(user.email, 'normal@user.com')
        self.assertTrue(user.is_active)
        self.assertFalse(user.is_staff)
        self.assertFalse(user.is_superuser)
        # NOTE(review): email-only user models usually assert that
        # `username` is None; asserting IsNotNone only "passes" here when
        # the attribute is missing entirely (AttributeError swallowed
        # below). Confirm the intended check.
        try:
            self.assertIsNotNone(user.username)
            self.assertIsNotNone(user.email)
        except AttributeError:
            pass
        with self.assertRaises(TypeError):
            User.objects.create_user()
        with self.assertRaises(TypeError):
            User.objects.create_user(email='')
        with self.assertRaises(ValueError):
            User.objects.create_user(email='', password="testing@123")
    def test_create_superuser(self):
        """A superuser gets the staff/superuser flags; bad arguments raise."""
        User = get_user_model()
        admin = User.objects.create_superuser(
            email="admin@user.com", password="testing@123")
        self.assertEqual(admin.email, 'admin@user.com')
        self.assertTrue(admin.is_active)
        self.assertTrue(admin.is_staff)
        self.assertTrue(admin.is_superuser)
        try:
            self.assertIsNotNone(admin.username)
            self.assertIsNotNone(admin.email)
        except AttributeError:
            pass
        # NOTE(review): this ValueError is triggered by email='' -- the
        # is_superuser=False kwarg is never what raises; presumably the
        # intent was create_superuser(..., is_superuser=False). Confirm.
        with self.assertRaises(ValueError):
            User.objects.create_user(
                email='', password="testing@123", is_superuser=False)
| 34.76087 | 70 | 0.642276 | from django.contrib.auth import get_user_model
from django.test import TestCase
class UserManagersTests(TestCase):
def test_create_user(self):
User = get_user_model()
user = User.objects.create_user(
email="normal@user.com", password="testing@123")
self.assertEqual(user.email, 'normal@user.com')
self.assertTrue(user.is_active)
self.assertFalse(user.is_staff)
self.assertFalse(user.is_superuser)
try:
self.assertIsNotNone(user.username)
self.assertIsNotNone(user.email)
except AttributeError:
pass
with self.assertRaises(TypeError):
User.objects.create_user()
with self.assertRaises(TypeError):
User.objects.create_user(email='')
with self.assertRaises(ValueError):
User.objects.create_user(email='', password="testing@123")
def test_create_superuser(self):
User = get_user_model()
admin = User.objects.create_superuser(
email="admin@user.com", password="testing@123")
self.assertEqual(admin.email, 'admin@user.com')
self.assertTrue(admin.is_active)
self.assertTrue(admin.is_staff)
self.assertTrue(admin.is_superuser)
try:
self.assertIsNotNone(admin.username)
self.assertIsNotNone(admin.email)
except AttributeError:
pass
with self.assertRaises(ValueError):
User.objects.create_user(
email='', password="testing@123", is_superuser=False)
| true | true |
f71b21a5dd538deb30896fbf0e23be55c42a7ec4 | 22 | py | Python | tsu/__init__.py | KiraDank/tsu | 7a2c6508daf6d797e8371acf3b473d0f92cb11c3 | [
"MIT"
] | 251 | 2019-08-18T17:19:19.000Z | 2022-03-31T20:38:20.000Z | tsu/__init__.py | KiraDank/tsu | 7a2c6508daf6d797e8371acf3b473d0f92cb11c3 | [
"MIT"
] | 37 | 2019-09-05T08:03:00.000Z | 2022-01-27T12:49:50.000Z | tsu/__init__.py | KiraDank/tsu | 7a2c6508daf6d797e8371acf3b473d0f92cb11c3 | [
"MIT"
] | 20 | 2019-09-01T15:29:56.000Z | 2022-01-27T03:03:18.000Z | __version__ = '3.1.2'
| 11 | 21 | 0.636364 | __version__ = '3.1.2'
| true | true |
f71b21c32e28c3f04700fe8235f89c2fac3de98b | 3,420 | py | Python | ays_api/app.py | Jumpscale/g8cockpit | 5e9ede183f77fec4adff9cd038567173b68677f0 | [
"Apache-2.0"
] | null | null | null | ays_api/app.py | Jumpscale/g8cockpit | 5e9ede183f77fec4adff9cd038567173b68677f0 | [
"Apache-2.0"
] | 332 | 2016-05-24T10:51:45.000Z | 2021-09-08T12:28:50.000Z | ays_api/app.py | Jumpscale/g8cockpit | 5e9ede183f77fec4adff9cd038567173b68677f0 | [
"Apache-2.0"
] | 1 | 2016-08-02T07:52:49.000Z | 2016-08-02T07:52:49.000Z | from flask import Flask, send_from_directory, make_response, request, send_file, jsonify
import werkzeug.exceptions
from jose import jwt, exceptions
import wtforms_json
from .ays import ays_api
from .oauth import oauth_api
from .webhooks import webhooks_api
from .cockpit import cockpit_api
from JumpScale import j
app = Flask(__name__)
app.config["WTF_CSRF_ENABLED"] = False
wtforms_json.init()
logger = j.logger.get('j.cockpit.api')
def init_blueprints():
    """Attach the JWT guard (production only) and register all blueprints."""
    production = app.config.get('production', True)
    if production:
        # Protect the authenticated APIs with the JWT before-request check.
        print('JWT middleware enable')
        for protected in (ays_api, cockpit_api):
            protected.before_request(process_jwt_token)
    for blueprint in (ays_api, oauth_api, webhooks_api, cockpit_api):
        app.register_blueprint(blueprint)
def process_jwt_token():
    """Before-request guard validating the caller's itsyou.online JWT.

    The credential is read from the 'jwt' cookie or, failing that, the
    Authorization header; either way it must look like "Bearer <token>".
    Returns None (request proceeds) when the JWT belongs to the configured
    organization or to a member of it; otherwise returns a 401 response.
    """
    authorization = request.cookies.get(
        'jwt',
        request.headers.get(
            'Authorization',
            None
        ))
    if authorization is None:
        # No credential supplied at all.
        response = make_response('Not JWT token')
        response.status_code = 401
        return response
    msg = ""
    ss = authorization.split(' ', 1)
    if len(ss) != 2:
        msg = "Unauthorized"
    else:
        # NOTE(review): `type` shadows the builtin; rename when convenient.
        type, token = ss[0], ss[1]
        if type.lower() == 'bearer':
            try:
                # NOTE(review): the accepted algorithm is taken from the
                # (unverified) token header; consider pinning the expected
                # algorithm to rule out alg-confusion attacks.
                headers = jwt.get_unverified_header(token)
                payload = jwt.decode(
                    token,
                    app.config['oauth'].get('jwt_key'),
                    algorithms=[headers['alg']],
                    audience=app.config['oauth']['organization'],
                    issuer='itsyouonline')
                # case JWT is for an organization
                if 'globalid' in payload and payload['globalid'] == app.config['oauth'].get('organization'):
                    return
                # case JWT is for a user
                if 'scope' in payload and 'user:memberof:%s' % app.config[
                    'oauth'].get('organization') in payload['scope']:
                    return
                msg = 'Unauthorized'
            except exceptions.ExpiredSignatureError as e:
                msg = 'Your JWT has expired'
            except exceptions.JOSEError as e:
                msg = 'JWT Error: %s' % str(e)
            except Exception as e:
                msg = 'Unexpected error : %s' % str(e)
        else:
            msg = 'Your JWT is invalid'
    logger.error(msg)
    response = make_response(msg)
    response.status_code = 401
    return response
@app.route('/apidocs/<path:path>')
def send_js(path):
    """Serve a file from the bundled apidocs directory."""
    docs_root = j.sal.fs.joinPaths(j.sal.fs.getParent(__file__), 'apidocs')
    return send_from_directory(docs_root, path)
@app.route('/', methods=['GET'])
def home():
    """Serve the single-page application entry point."""
    index_path = j.sal.fs.joinPaths(j.sal.fs.getParent(__file__), 'index.html')
    return send_file(index_path)
# NOTE(review): these handlers previously all shared the name
# `handle_bad_request`, so each `def` shadowed the one before it at module
# level (Flask had already registered each function, but linters flag the
# redefinition and only the last name stayed importable -- and that last
# name is defined below, untouched, so the module interface is preserved).
# Also note: the werkzeug.exceptions.HTTPException handler below reads
# `e.msg`, but HTTPException exposes `description`, not `msg` -- confirm
# and fix it separately.
@app.errorhandler(j.exceptions.NotFound)
def handle_js_not_found(e):
    """Map JumpScale NotFound errors to a JSON 404."""
    return jsonify(error=e.msg), 404
@app.errorhandler(j.exceptions.AYSNotFound)
def handle_ays_not_found(e):
    """Map missing-AYS errors to a JSON 404."""
    return jsonify(error=e.msg), 404
@app.errorhandler(j.exceptions.Timeout)
def handle_js_timeout(e):
    """Map JumpScale timeouts to a JSON 408."""
    return jsonify(error=e.msg), 408
@app.errorhandler(j.exceptions.BaseJSException)
def handle_js_exception(e):
    """Catch-all for other JumpScale exceptions: JSON 500."""
    return jsonify(error=e.msg), 500
@app.errorhandler(werkzeug.exceptions.HTTPException)
def handle_bad_request(e):
return jsonify(error=e.msg), e.code | 28.264463 | 108 | 0.625146 | from flask import Flask, send_from_directory, make_response, request, send_file, jsonify
import werkzeug.exceptions
from jose import jwt, exceptions
import wtforms_json
from .ays import ays_api
from .oauth import oauth_api
from .webhooks import webhooks_api
from .cockpit import cockpit_api
from JumpScale import j
app = Flask(__name__)
app.config["WTF_CSRF_ENABLED"] = False
wtforms_json.init()
logger = j.logger.get('j.cockpit.api')
def init_blueprints():
if app.config.get('production',True):
print('JWT middleware enable')
ays_api.before_request(process_jwt_token)
cockpit_api.before_request(process_jwt_token)
app.register_blueprint(ays_api)
app.register_blueprint(oauth_api)
app.register_blueprint(webhooks_api)
app.register_blueprint(cockpit_api)
def process_jwt_token():
authorization = request.cookies.get(
'jwt',
request.headers.get(
'Authorization',
None
))
if authorization is None:
response = make_response('Not JWT token')
response.status_code = 401
return response
msg = ""
ss = authorization.split(' ', 1)
if len(ss) != 2:
msg = "Unauthorized"
else:
type, token = ss[0], ss[1]
if type.lower() == 'bearer':
try:
headers = jwt.get_unverified_header(token)
payload = jwt.decode(
token,
app.config['oauth'].get('jwt_key'),
algorithms=[headers['alg']],
audience=app.config['oauth']['organization'],
issuer='itsyouonline')
if 'globalid' in payload and payload['globalid'] == app.config['oauth'].get('organization'):
return
if 'scope' in payload and 'user:memberof:%s' % app.config[
'oauth'].get('organization') in payload['scope']:
return
msg = 'Unauthorized'
except exceptions.ExpiredSignatureError as e:
msg = 'Your JWT has expired'
except exceptions.JOSEError as e:
msg = 'JWT Error: %s' % str(e)
except Exception as e:
msg = 'Unexpected error : %s' % str(e)
else:
msg = 'Your JWT is invalid'
logger.error(msg)
response = make_response(msg)
response.status_code = 401
return response
@app.route('/apidocs/<path:path>')
def send_js(path):
root = j.sal.fs.joinPaths(j.sal.fs.getParent(__file__), 'apidocs')
return send_from_directory(root, path)
@app.route('/', methods=['GET'])
def home():
path = j.sal.fs.joinPaths(j.sal.fs.getParent(__file__), 'index.html')
return send_file(path)
@app.errorhandler(j.exceptions.NotFound)
def handle_bad_request(e):
return jsonify(error=e.msg), 404
@app.errorhandler(j.exceptions.AYSNotFound)
def handle_bad_request(e):
return jsonify(error=e.msg), 404
@app.errorhandler(j.exceptions.Timeout)
def handle_bad_request(e):
return jsonify(error=e.msg), 408
@app.errorhandler(j.exceptions.BaseJSException)
def handle_bad_request(e):
return jsonify(error=e.msg), 500
@app.errorhandler(werkzeug.exceptions.HTTPException)
def handle_bad_request(e):
return jsonify(error=e.msg), e.code | true | true |
f71b21ee28dbbf48f0569576807de49f2234149c | 3,475 | py | Python | include/Node.py | Yperidis/DAGFLOW | 84ea30eea480e095dc8e24f80ca84a733170a365 | [
"MIT"
] | null | null | null | include/Node.py | Yperidis/DAGFLOW | 84ea30eea480e095dc8e24f80ca84a733170a365 | [
"MIT"
] | null | null | null | include/Node.py | Yperidis/DAGFLOW | 84ea30eea480e095dc8e24f80ca84a733170a365 | [
"MIT"
] | null | null | null | import random
import networkx as nx
class Node:
    """A node in a command-hierarchy tree.

    Attributes (set in __init__):
        ID: integer id, ascending from 0 to (number of nodes - 1).
        message: integer payload representing an initial message.
        parent: the parent Node, or None for the root.
        highercmnd: value of knowing what superiors know (1 .. height-1).
        lev: depth of the node in the tree.
        children_list: direct children, filled via MakeChildren().
        cmndlineup: ancestor ids, filled by CommandLineUp().
        cmndnodes: the node's whole subtree, filled by CommandedNodes().
        ancestor_list / descendant_list: direct ancestors / descendants.
    """

    def __init__(self, ID=None, message=None, highercmnd=None, lev=None,
                 parent=None, childNo=None, children_list=None,
                 cmndlineup=None, cmndnodes=None, ancestor_list=None,
                 descendant_list=None):
        # NOTE: childNo and the *_list/cmnd* keyword arguments are accepted
        # for signature compatibility but deliberately ignored; every node
        # starts with its own fresh, empty lists.
        self.ID = ID
        self.message = message
        self.parent = parent
        self.highercmnd = highercmnd
        self.lev = lev
        self.children_list = []
        self.cmndlineup = []
        self.cmndnodes = []
        self.ancestor_list = []
        self.descendant_list = []

    def MakeChildren(self, child):
        """Register `child` (a Node) as a direct child of this node."""
        self.children_list.append(child)

    def HasChildren(self):
        """Return True when this node has at least one child."""
        return bool(self.children_list)

    def MakeDescendants(self, descendant):
        """Register `descendant` (a Node) as a direct descendant."""
        self.descendant_list.append(descendant)

    def children(self):
        """Return the list of direct children."""
        return self.children_list

    def parent(self):
        # NOTE: unreachable in practice -- the instance attribute `parent`
        # assigned in __init__ shadows this method. Kept for compatibility.
        return self.parent

    def CommandLineUp(self, l):
        """Append the ids of up to `l` ancestors to self.cmndlineup.

        With l=1 the node records only its parent (the old comment claimed
        l=0 did, but range(0) is empty); with l >= tree height it records
        every ancestor up to the root. Returns self.cmndlineup.
        """
        temp = self.parent
        for _ in range(l):
            if temp is None:
                break  # reached (and passed) the root
            self.cmndlineup.append(temp.ID)
            temp = temp.parent
        return self.cmndlineup

    def CommandedNodes(self):
        """Collect this node's whole subtree (the node itself excluded)
        into self.cmndnodes, in breadth-first order.

        Bug fix: the previous version aliased children_list
        (`self.cmndnodes = self.children_list`), so appending descendants
        during the traversal also appended them to children_list,
        corrupting the tree. A shallow copy is taken instead.
        """
        self.cmndnodes = list(self.children_list)
        ii = 0  # ATTENTION! CHANGE TO A FOR LOOP IF THE GRAPH IS NOT A TREE ANYMORE
        while ii < len(self.cmndnodes):
            self.cmndnodes.extend(self.cmndnodes[ii].children_list)
            ii += 1
def Efficiency(self, levin, levcur):
'''
A function to compute the exponential drop in efficiency with level of
a node substitution from another in its subtree.
levin: Int>=0. The substitute's initial level.
levcur: Int>=0. The level on which the candidate node is called to substitute.
'''
self.q = 1/2.**(levin-levcur) | 39.942529 | 122 | 0.606906 | import random
import networkx as nx
class Node:
    # A node in a command-hierarchy tree. ID ascends from 0; parent is a
    # Node (None for the root); lev is the depth in the tree.
    def __init__(self, ID=None, message=None, highercmnd=None, lev=None,
                 parent=None, childNo=None, children_list=None,
                 cmndlineup=None, cmndnodes=None, ancestor_list=None,
                 descendant_list=None):
        # childNo and the *_list/cmnd* keyword arguments are accepted but
        # ignored; every node starts with fresh, empty lists.
        self.ID = ID
        self.message = message
        self.parent = parent
        self.highercmnd = highercmnd
        self.lev = lev
        self.children_list = []
        self.cmndlineup = []
        self.cmndnodes = []
        self.ancestor_list = []
        self.descendant_list = [] # a list of the node's direct descendants
    def MakeChildren(self, child):
        # Register `child` (a Node) as a direct child of this node.
        self.children_list.append(child)
    def HasChildren(self):
        # True when this node has at least one child.
        if len(self.children_list) != 0:
            return True
        else:
            return False
    def MakeDescendants(self, descendant):
        # Register `descendant` (a Node) as a direct descendant.
        self.descendant_list.append(descendant)
    def children(self):
        return self.children_list
    def parent(self):
        # NOTE(review): unreachable -- the instance attribute `parent` set
        # in __init__ shadows this method.
        return self.parent
    def CommandLineUp(self, l):
        # Append the ids of up to `l` ancestors (parent first) to
        # self.cmndlineup and return it; stops early at the root.
        temp = self.parent
        for i in range(l):
            if temp is not None:
                self.cmndlineup.append(temp.ID)
                temp = temp.parent
            else:
                break
        return self.cmndlineup
    def CommandedNodes(self):
        # Collect the node's whole subtree (excluding itself), BFS order.
        # NOTE(review): `self.cmndnodes = self.children_list` aliases the
        # children list, so the appends below also mutate children_list --
        # likely a bug; a copy (list(self.children_list)) looks intended.
        self.cmndnodes = self.children_list
        ncmndnodes = len(self.cmndnodes)
        ii = 0
        while ii < ncmndnodes:
            child = self.cmndnodes[ii]
            for childchild in child.children_list:
                self.cmndnodes.append(childchild)
            ncmndnodes = len(self.cmndnodes)
            ii += 1
def Efficiency(self, levin, levcur):
self.q = 1/2.**(levin-levcur) | true | true |
f71b22fcc5f5f5eba2d54b6f00ae8b7fb89c6a76 | 2,334 | py | Python | cltk/utils/frequency.py | Akash-Pardasani/cltk | 2a430e9407452b06f44847202ebce8446007d96b | [
"MIT"
] | null | null | null | cltk/utils/frequency.py | Akash-Pardasani/cltk | 2a430e9407452b06f44847202ebce8446007d96b | [
"MIT"
] | null | null | null | cltk/utils/frequency.py | Akash-Pardasani/cltk | 2a430e9407452b06f44847202ebce8446007d96b | [
"MIT"
] | 1 | 2019-06-16T06:41:47.000Z | 2019-06-16T06:41:47.000Z | """This module's main class reads a text corpus and assembles a list of n
most common words."""
__author__ = 'Kyle P. Johnson <kyle@kyle-p-johnson.com>'
__license__ = 'MIT License. See LICENSE.'
from cltk.corpus.utils.formatter import assemble_tlg_author_filepaths
from cltk.corpus.utils.formatter import assemble_phi5_author_filepaths
from cltk.corpus.utils.formatter import tlg_plaintext_cleanup
from cltk.corpus.utils.formatter import phi5_plaintext_cleanup
from cltk.utils.cltk_logger import logger
from collections import Counter
from nltk.tokenize.punkt import PunktLanguageVars
class Frequency:
    """Methods for making word frequency lists."""

    def __init__(self):
        """Set up the Punkt tokenizer and the punctuation tokens to drop."""
        self.punkt = PunktLanguageVars()
        self.punctuation = [',', '.', ';', ':', '"', "'", '?', '-', '!', '*', '[', ']', '{', '}']

    def counter_from_str(self, string):
        """Build a word-frequency Counter from a single string.

        Punctuation characters are stripped before tokenization.
        """
        cleaned = ''.join(ch for ch in string if ch not in self.punctuation)
        tokens = self.punkt.word_tokenize(cleaned)
        return Counter(tokens)

    def counter_from_corpus(self, corpus):
        """Build a word-frequency Counter from the 'phi5' or 'tlg' corpus.

        Bug fix: _assemble_corpus_string() is a generator of per-file
        strings; it was previously passed to counter_from_str() directly,
        so the character-level punctuation filter compared whole files
        against single punctuation characters and never removed anything.
        The texts are now joined into one string first.

        TODO: Make this count iteratively, not all at once.
        """
        assert corpus in ['phi5', 'tlg'], \
            "Corpus '{0}' not available. Choose from 'phi5' or 'tlg'.".format(corpus)
        all_text = ''.join(self._assemble_corpus_string(corpus=corpus))
        return self.counter_from_str(all_text)

    def _assemble_corpus_string(self, corpus):
        """Yield the cleaned, lower-cased contents of each author file."""
        if corpus == 'phi5':
            filepaths = assemble_phi5_author_filepaths()
            file_cleaner = phi5_plaintext_cleanup
        elif corpus == 'tlg':
            filepaths = assemble_tlg_author_filepaths()
            file_cleaner = tlg_plaintext_cleanup
        for filepath in filepaths:
            # Close each file before yielding so handles don't pile up
            # while the consumer iterates lazily.
            with open(filepath) as file_open:
                file_read = file_open.read().lower()
            yield file_cleaner(file_read)
| 39.559322 | 97 | 0.670094 |
__author__ = 'Kyle P. Johnson <kyle@kyle-p-johnson.com>'
__license__ = 'MIT License. See LICENSE.'
from cltk.corpus.utils.formatter import assemble_tlg_author_filepaths
from cltk.corpus.utils.formatter import assemble_phi5_author_filepaths
from cltk.corpus.utils.formatter import tlg_plaintext_cleanup
from cltk.corpus.utils.formatter import phi5_plaintext_cleanup
from cltk.utils.cltk_logger import logger
from collections import Counter
from nltk.tokenize.punkt import PunktLanguageVars
class Frequency:
    # Methods for making word frequency lists.
    def __init__(self):
        # Punkt tokenizer plus the punctuation tokens to strip.
        self.punkt = PunktLanguageVars()
        self.punctuation = [',', '.', ';', ':', '"', "'", '?', '-', '!', '*', '[', ']', '{', '}']
    def counter_from_str(self, string):
        # Build a word-frequency Counter from one string; punctuation
        # characters are removed before tokenization.
        string_list = [chars for chars in string if chars not in self.punctuation]
        string_joined = ''.join(string_list)
        tokens = self.punkt.word_tokenize(string_joined)
        return Counter(tokens)
    def counter_from_corpus(self, corpus):
        # Build a frequency list from the 'phi5' or 'tlg' corpus.
        # NOTE(review): _assemble_corpus_string() is a generator of whole
        # per-file strings, so counter_from_str's per-character punctuation
        # filter compares entire files against single characters and never
        # strips anything -- the texts should be joined into one string
        # before the call. Confirm and fix.
        assert corpus in ['phi5', 'tlg'], \
            "Corpus '{0}' not available. Choose from 'phi5' or 'tlg'.".format(corpus)
        all_strings = self._assemble_corpus_string(corpus=corpus)
        return self.counter_from_str(all_strings)
    def _assemble_corpus_string(self, corpus):
        # Yield the cleaned, lower-cased contents of each author file.
        if corpus == 'phi5':
            filepaths = assemble_phi5_author_filepaths()
            file_cleaner = phi5_plaintext_cleanup
        elif corpus == 'tlg':
            filepaths = assemble_tlg_author_filepaths()
            file_cleaner = tlg_plaintext_cleanup
        for filepath in filepaths:
            with open(filepath) as file_open:
                file_read = file_open.read().lower()
                file_clean = file_cleaner(file_read)
                yield file_clean
| true | true |
f71b22feb4672df4c2b53105a50c79c3c5c48548 | 3,225 | py | Python | Midterm/server.py | eebbk-zhou/Web-Design-1 | 98703942dbb58e15c38a07de551a78398e2a7ab1 | [
"Unlicense"
] | null | null | null | Midterm/server.py | eebbk-zhou/Web-Design-1 | 98703942dbb58e15c38a07de551a78398e2a7ab1 | [
"Unlicense"
] | null | null | null | Midterm/server.py | eebbk-zhou/Web-Design-1 | 98703942dbb58e15c38a07de551a78398e2a7ab1 | [
"Unlicense"
] | null | null | null | from bottle import route, get, post
from bottle import run, debug
from bottle import request, response, redirect, template
from bottle import static_file
import dataset
import json
from bottle import default_app
#http://localhost:8090
@route("/")
def get_midterm():
    """Home page: render every row of the todo table with the Midterm template."""
    db = dataset.connect('sqlite:///todo_list.db')
    rows = db.get_table('todo').find()
    return template("Midterm", items=[dict(row) for row in rows])
@route(r"/static/png/<filename:re:.*\.png>")
@route(r"/image/<filename:re:.*\.png>")
def get_picture(filename="the_boat.png"):
    """Serve the requested PNG from the static directory.

    Fixes: both wildcard patterns were missing their closing '>', and the
    handler accepted no `filename` argument, so a (repaired) route match
    would have raised a TypeError. The previously hard-coded image remains
    the default. Raw strings avoid the invalid '\\.' escape warning.
    """
    return static_file(filename=filename, root="static", mimetype="image/png")
@route("/static/<filename:path>")
def get_static(filename):
    """Serve any file beneath the static directory."""
    return static_file(filename, root="static")
@route("/delete/<id>")
def get_delete(id):
    """Delete the row with the given id, then render the confirmation page."""
    id = int(id)
    try:
        table = dataset.connect('sqlite:///todo_list.db').get_table('todo')
        print(f"We need to delete id# {id}...")
        table.delete(id=id)
    except Exception as e:
        response.status = "409 Bad Request:" + str(e)
        return
    return template("deleted", id=id)
@get("/insert")
def get_insert():
    """Show the blank insert-course form."""
    return template("insert")
@post("/insert")
def post_insert():
    """Insert a new course row from the submitted form, then go home.

    Any failure (including missing form fields) is reported as a 409.
    """
    course_number = request.forms.get('course_number')
    print("course_number=", course_number)
    course_name = request.forms.get('course_name')
    try:
        table = dataset.connect('sqlite:///todo_list.db').get_table('todo')
        table.insert({
            'course_number': course_number.strip(),
            'course_name': course_name.strip(),
            'done': 1,
        })
    except Exception as e:
        response.status = "409 Bad Request:" + str(e)
        return
    return redirect('/')
@get("/edit/<id>")
def get_edit(id):
try:
todo_list_db = dataset.connect('sqlite:///todo_list.db')
todo_table = todo_list_db.get_table('todo')
items = list(todo_table.find(id=id))
if len(items) != 1:
response.status="404 Not Found:"+str(id)
return
items = [ dict(x) for x in items ]
print(items)
print(items[0])
except Exception as e:
print(e)
response.status="409 Bad Request:"+str(e)
return
return template("edit", item=items[0]) # put something here
@post("/edit")
def post_edit():
id = request.forms.get('id')
id = int(id)
course_number = request.forms.get('course_number')
course_name = request.forms.get('course_name')
print("course_number=", course_number)
try:
todo_list_db = dataset.connect('sqlite:///todo_list.db')
todo_table = todo_list_db.get_table('todo')
todo_table.update({
'id' : id,
'course_number' : course_number.strip(),
'course_name' : course_name.strip()
}, ['id'])
except Exception as e:
response.status="409 Bad Request:"+str(e)
return
return redirect('/')
if __name__ == "__main__":
debug(True)
run(host="localhost", port=8090)
else:
application = default_app() | 26.434426 | 84 | 0.622636 | from bottle import route, get, post
from bottle import run, debug
from bottle import request, response, redirect, template
from bottle import static_file
import dataset
import json
from bottle import default_app
@route("/")
def get_midterm():
todo_list_db = dataset.connect('sqlite:///todo_list.db')
todo_table = todo_list_db.get_table('todo')
items = todo_table.find()
items = [ dict(x) for x in list(items) ]
return template("Midterm", items=items)
@route("/static/png/<filename:re:.*\.png")
@route("/image/<filename:re:.*\.png")
def get_picture():
return static_file(filename="the_boat.png", root="static", mimetype="image/png")
@route("/static/<filename:path>")
def get_static(filename):
return static_file(filename=filename, root="static")
@route("/delete/<id>")
def get_delete(id):
id = int(id)
try:
todo_list_db = dataset.connect('sqlite:///todo_list.db')
todo_table = todo_list_db.get_table('todo')
print(f"We need to delete id# {id}...")
todo_table.delete(id=id)
except Exception as e:
response.status="409 Bad Request:"+str(e)
return
return template("deleted", id=id)
@get("/insert")
def get_insert():
return template("insert")
@post("/insert")
def post_insert():
course_number = request.forms.get('course_number')
print("course_number=", course_number)
course_name = request.forms.get('course_name')
try:
todo_list_db = dataset.connect('sqlite:///todo_list.db')
todo_table = todo_list_db.get_table('todo')
todo_table.insert({
'course_number' : course_number.strip(),
'course_name' : course_name.strip(),
'done' : 1
})
except Exception as e:
response.status="409 Bad Request:"+str(e)
return
return redirect('/')
@get("/edit/<id>")
def get_edit(id):
try:
todo_list_db = dataset.connect('sqlite:///todo_list.db')
todo_table = todo_list_db.get_table('todo')
items = list(todo_table.find(id=id))
if len(items) != 1:
response.status="404 Not Found:"+str(id)
return
items = [ dict(x) for x in items ]
print(items)
print(items[0])
except Exception as e:
print(e)
response.status="409 Bad Request:"+str(e)
return
return template("edit", item=items[0])
@post("/edit")
def post_edit():
id = request.forms.get('id')
id = int(id)
course_number = request.forms.get('course_number')
course_name = request.forms.get('course_name')
print("course_number=", course_number)
try:
todo_list_db = dataset.connect('sqlite:///todo_list.db')
todo_table = todo_list_db.get_table('todo')
todo_table.update({
'id' : id,
'course_number' : course_number.strip(),
'course_name' : course_name.strip()
}, ['id'])
except Exception as e:
response.status="409 Bad Request:"+str(e)
return
return redirect('/')
if __name__ == "__main__":
debug(True)
run(host="localhost", port=8090)
else:
application = default_app() | true | true |
f71b23bfc71f48b0e7ac2b5989a80a9a6d09b17b | 4,803 | py | Python | CA/ca.py | PIR3-Internet/server | 181962e392be47a39848f3a88703163a140b3d3a | [
"MIT"
] | null | null | null | CA/ca.py | PIR3-Internet/server | 181962e392be47a39848f3a88703163a140b3d3a | [
"MIT"
] | null | null | null | CA/ca.py | PIR3-Internet/server | 181962e392be47a39848f3a88703163a140b3d3a | [
"MIT"
] | null | null | null | import ssl
import socket
import OpenSSL
import sqlite3
import signal
from functools import wraps
from numpy.core.numeric import count_nonzero
import requests
from multiprocessing import Process, Value
TIMEOUT = Value('i', 5)
cMax = Value('i', 2)
ca_num = Value('i', 0)
class TimeoutException(Exception):
pass
def deadline(timeout, *args):
def decorate(f):
def handler(signum, frame):
raise TimeoutException() #when the signal have been handle raise the exception
@wraps(timeout, *args)
def new_f(*args):
signal.signal(signal.SIGALRM, handler) #link the SIGALARM signal to the handler
signal.alarm(timeout) #create an alarm of timeout second
res = f(*args)
signal.alarm(0) #reinitiate the alarm
return res
return new_f
return decorate
@deadline(TIMEOUT.value)
def get_certificate(host, port=443, timeout=10):
context = ssl.create_default_context()
context.set_ciphers('DEFAULT:@SECLEVEL=1')
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
conn = socket.create_connection((host, port))
sock = context.wrap_socket(conn, server_hostname=host)
sock.settimeout(timeout)
try:
der_cert = sock.getpeercert(True)
finally:
sock.close()
return ssl.DER_cert_to_PEM_cert(der_cert)
@deadline(60)
def url_direct(user):
user = 'http://' + user
user = requests.get(user).url.split('/')[2]
return user
@deadline(60)
def url_with_header(user):
user = 'http://' + user
user = requests.head(user).headers['location'].split('/')[2]
return user
def get_url(user, counter, error):
try:
user = url_direct(user)
except TimeoutException:
print(" Impossible to get url (TimeoutException) from ", user)
cur.execute("INSERT INTO errors VALUES (?, ?, ?)", (user, user.split('.')[len(user.split('.'))-1], error))
counter = cMax.value-1
except:
try:
user = url_with_header(user)
except TimeoutException:
print(" Impossible to get url (TimeoutException) from ", user)
cur.execute("INSERT INTO errors VALUES (?, ?, ?)", (user, user.split('.')[len(user.split('.'))-1], error))
counter = cMax.value-1
except:
print(" Impossible to get url from ", user)
cur.execute("INSERT INTO errors VALUES (?, ?, ?)", (user, user.split('.')[len(user.split('.'))-1], error))
counter = cMax.value-1
return user, counter
def processus(user):
counter = 0
ok = False
while ok == False:
try:
certificate = get_certificate(user)
x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, certificate)
provider = x509.get_issuer().organizationName
cur.execute("INSERT INTO ca VALUES (?, ?, ?)", (user, provider, ca_num.value))
print(user, ": ", provider)
ok = True
except TimeoutException as e:
if (counter == cMax.value-1):
if (TIMEOUT.value != 60):
TIMEOUT.value = 60
counter -= counter
else:
cur.execute("INSERT INTO errors VALUES (?, ?, ?)", (user, user.split('.')[len(user.split('.'))-1], repr(e)))
else:
user, counter = get_url(user, counter, repr(e))
print(" ", repr(e), user)
ok = False
counter += 1
except Exception as e:
if (counter == cMax.value-1):
cur.execute("INSERT INTO errors VALUES (?, ?, ?)", (user, user.split('.')[len(user.split('.'))-1], repr(e)))
else:
user, counter = get_url(user, counter, repr(e))
print(" ", repr(e), user)
ok = False
counter += 1
finally:
con.commit()
ca_num.value += 1
if counter == cMax.value:
ok = True
con = sqlite3.connect('ca-providers.db')
cur = con.cursor()
try:
cur.execute("CREATE TABLE ca (ca_user, ca_provider, ca_num)")
except sqlite3.OperationalError:
cur.execute("DELETE FROM ca")
try:
cur.execute("CREATE TABLE errors (user, extension, error)")
except sqlite3.OperationalError:
cur.execute("DELETE FROM errors")
con.commit()
debut = 0
with open("list1m2020.csv", "r") as f:
for line in f:
user = line.split()[0]
p = Process(target=processus, args=(user,))
p.start()
p.join()
if (TIMEOUT.value != 5):
TIMEOUT.value = 5
con.close() | 29.466258 | 128 | 0.564022 | import ssl
import socket
import OpenSSL
import sqlite3
import signal
from functools import wraps
from numpy.core.numeric import count_nonzero
import requests
from multiprocessing import Process, Value
TIMEOUT = Value('i', 5)
cMax = Value('i', 2)
ca_num = Value('i', 0)
class TimeoutException(Exception):
pass
def deadline(timeout, *args):
def decorate(f):
def handler(signum, frame):
raise TimeoutException()
@wraps(timeout, *args)
def new_f(*args):
signal.signal(signal.SIGALRM, handler)
signal.alarm(timeout)
res = f(*args)
signal.alarm(0)
return res
return new_f
return decorate
@deadline(TIMEOUT.value)
def get_certificate(host, port=443, timeout=10):
context = ssl.create_default_context()
context.set_ciphers('DEFAULT:@SECLEVEL=1')
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
conn = socket.create_connection((host, port))
sock = context.wrap_socket(conn, server_hostname=host)
sock.settimeout(timeout)
try:
der_cert = sock.getpeercert(True)
finally:
sock.close()
return ssl.DER_cert_to_PEM_cert(der_cert)
@deadline(60)
def url_direct(user):
user = 'http://' + user
user = requests.get(user).url.split('/')[2]
return user
@deadline(60)
def url_with_header(user):
user = 'http://' + user
user = requests.head(user).headers['location'].split('/')[2]
return user
def get_url(user, counter, error):
try:
user = url_direct(user)
except TimeoutException:
print(" Impossible to get url (TimeoutException) from ", user)
cur.execute("INSERT INTO errors VALUES (?, ?, ?)", (user, user.split('.')[len(user.split('.'))-1], error))
counter = cMax.value-1
except:
try:
user = url_with_header(user)
except TimeoutException:
print(" Impossible to get url (TimeoutException) from ", user)
cur.execute("INSERT INTO errors VALUES (?, ?, ?)", (user, user.split('.')[len(user.split('.'))-1], error))
counter = cMax.value-1
except:
print(" Impossible to get url from ", user)
cur.execute("INSERT INTO errors VALUES (?, ?, ?)", (user, user.split('.')[len(user.split('.'))-1], error))
counter = cMax.value-1
return user, counter
def processus(user):
counter = 0
ok = False
while ok == False:
try:
certificate = get_certificate(user)
x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, certificate)
provider = x509.get_issuer().organizationName
cur.execute("INSERT INTO ca VALUES (?, ?, ?)", (user, provider, ca_num.value))
print(user, ": ", provider)
ok = True
except TimeoutException as e:
if (counter == cMax.value-1):
if (TIMEOUT.value != 60):
TIMEOUT.value = 60
counter -= counter
else:
cur.execute("INSERT INTO errors VALUES (?, ?, ?)", (user, user.split('.')[len(user.split('.'))-1], repr(e)))
else:
user, counter = get_url(user, counter, repr(e))
print(" ", repr(e), user)
ok = False
counter += 1
except Exception as e:
if (counter == cMax.value-1):
cur.execute("INSERT INTO errors VALUES (?, ?, ?)", (user, user.split('.')[len(user.split('.'))-1], repr(e)))
else:
user, counter = get_url(user, counter, repr(e))
print(" ", repr(e), user)
ok = False
counter += 1
finally:
con.commit()
ca_num.value += 1
if counter == cMax.value:
ok = True
con = sqlite3.connect('ca-providers.db')
cur = con.cursor()
try:
cur.execute("CREATE TABLE ca (ca_user, ca_provider, ca_num)")
except sqlite3.OperationalError:
cur.execute("DELETE FROM ca")
try:
cur.execute("CREATE TABLE errors (user, extension, error)")
except sqlite3.OperationalError:
cur.execute("DELETE FROM errors")
con.commit()
debut = 0
with open("list1m2020.csv", "r") as f:
for line in f:
user = line.split()[0]
p = Process(target=processus, args=(user,))
p.start()
p.join()
if (TIMEOUT.value != 5):
TIMEOUT.value = 5
con.close() | true | true |
f71b2498aa5fdb11b49d58a54f14653f45df1cb1 | 10,087 | py | Python | docs/sphinx/conf.py | sdss/lvmscraper | 5b169487963fd06000c0a593993bb3c2c9418951 | [
"BSD-3-Clause"
] | null | null | null | docs/sphinx/conf.py | sdss/lvmscraper | 5b169487963fd06000c0a593993bb3c2c9418951 | [
"BSD-3-Clause"
] | null | null | null | docs/sphinx/conf.py | sdss/lvmscraper | 5b169487963fd06000c0a593993bb3c2c9418951 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# type: ignore
import os
from pkg_resources import parse_version
try:
from cluplus import __version__
except ModuleNotFoundError:
from sdsstools import get_package_version
__version__ = get_package_version(__file__, 'sdss-cluplus') or 'dev'
# Are we building in RTD?
on_rtd = os.environ.get('READTHEDOCS') == 'True'
# Sphinx template selected in cookiecutter and whether to use releases
sphinx_template = 'sphinx-bootstrap'
use_releases = 'no'
if sphinx_template == 'sphinx-bootstrap':
import sphinx_bootstrap_theme
# Importing matplotlib here with agg to prevent tkinter error in readthedocs
# import matplotlib
# matplotlib.use('agg')
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.autosummary',
'sphinx.ext.todo', 'sphinx.ext.viewcode', 'sphinx.ext.mathjax',
'sphinx.ext.intersphinx']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']
# source_suffix = '.rst'
# source_parsers = {
# '.md': 'recommonmark.parser.CommonMarkParser',
# }
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'cluplus'
copyright = '{0}, {1}'.format('2021', 'Florian Briegel')
author = 'Florian Briegel'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# The short X.Y version.
version = parse_version(__version__).base_version
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'py:obj'
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# Intersphinx mappings
intersphinx_mapping = {'python': ('https://docs.python.org/', None),
'astropy': ('http://docs.astropy.org/en/latest', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None)}
autodoc_mock_imports = ['_tkinter']
autodoc_member_order = 'groupwise'
napoleon_use_rtype = False
napoleon_use_ivar = True
rst_epilog = f"""
.. |numpy_array| replace:: Numpy array
.. |HDUList| replace:: :class:`~astropy.io.fits.HDUList`
.. |cluplus_version| replace:: {__version__}
"""
# -- Options for HTML output ----------------------------------------------
html_css_files = []
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
if sphinx_template == 'sphinx-bootstrap':
html_theme = 'bootstrap'
html_sidebars = {}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
# Navigation bar title. (Default: ``project`` value)
'navbar_title': "SDSS: {0}".format(project),
# Tab name for entire site. (Default: "Site")
'navbar_site_name': "Site",
# A list of tuples containing pages or urls to link to.
# Valid tuples should be in the following forms:
# (name, page) # a link to a page
# (name, "/aa/bb", 1) # a link to an arbitrary relative url
# (name, "http://example.com", True) # arbitrary absolute url
# Note the "1" or "True" value above as the third argument to indicate
# an arbitrary url.
'navbar_links': [
],
# Render the next and previous page links in navbar. (Default: true)
'navbar_sidebarrel': False,
# Render the current pages TOC in the navbar. (Default: true)
'navbar_pagenav': False,
# Tab name for the current pages TOC. (Default: "Page")
'navbar_pagenav_name': "Page",
# Global TOC depth for "site" navbar tab. (Default: 1)
# Switching to -1 shows all levels.
'globaltoc_depth': 2,
# Include hidden TOCs in Site navbar?
#
# Note: If this is "false", you cannot have mixed ``:hidden:`` and
# non-hidden ``toctree`` directives in the same page, or else the build
# will break.
#
# Values: "true" (default) or "false"
'globaltoc_includehidden': "true",
# HTML navbar class (Default: "navbar") to attach to <div> element.
# For black navbar, do "navbar navbar-inverse"
'navbar_class': "navbar",
# Fix navigation bar to top of page?
# Values: "true" (default) or "false"
'navbar_fixed_top': "true",
# Location of link to source.
# Options are "nav" (default), "footer" or anything else to exclude.
'source_link_position': "",
# Bootswatch (http://bootswatch.com/) theme.
#
# Options are nothing (default) or the name of a valid theme
# such as "amelia" or "cosmo".
'bootswatch_theme': "paper",
# Choose Bootstrap version.
# Values: "3" (default) or "2" (in quotes)
'bootstrap_version': "3",
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
html_logo = '_static/sdssv_logo_small.png'
html_css_files += ["custom_bootstrap.css"]
html_sidebars = {'**': ['localtoc.html']}
elif sphinx_template == 'alabaster':
html_theme = 'alabaster'
html_theme_options = {
'logo': 'sdssv_logo.png',
'github_user': 'sdss',
'github_repo': project,
'github_button': True,
'github_type': 'star',
'sidebar_collapse': True,
'page_width': '80%'
}
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
]
}
html_css_files += ["custom.css"]
html_favicon = './_static/favicon_sdssv.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# See https://github.com/rtfd/readthedocs.org/issues/1776 for why we do this
if on_rtd:
html_static_path = []
else:
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = '{0}pdoc'.format('cluplus')
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, '{0}.tex'.format(project), u'{0} Documentation'.format(project),
author, 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'cluplus', u'{0} Documentation'.format(project),
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, project, u'{0} Documentation'.format(project),
author, project, 'One line description of project.',
'Miscellaneous'),
]
if use_releases == 'yes':
extensions += ['sdsstools.releases']
releases_github_path = 'wasndas/cluplus'
releases_document_name = ['CHANGELOG']
releases_unstable_prehistory = True
| 30.847095 | 84 | 0.652027 |
import os
from pkg_resources import parse_version
try:
from cluplus import __version__
except ModuleNotFoundError:
from sdsstools import get_package_version
__version__ = get_package_version(__file__, 'sdss-cluplus') or 'dev'
on_rtd = os.environ.get('READTHEDOCS') == 'True'
sphinx_template = 'sphinx-bootstrap'
use_releases = 'no'
if sphinx_template == 'sphinx-bootstrap':
import sphinx_bootstrap_theme
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.autosummary',
'sphinx.ext.todo', 'sphinx.ext.viewcode', 'sphinx.ext.mathjax',
'sphinx.ext.intersphinx']
templates_path = ['_templates']
source_suffix = ['.rst', '.md']
master_doc = 'index'
project = 'cluplus'
copyright = '{0}, {1}'.format('2021', 'Florian Briegel')
author = 'Florian Briegel'
# |version| and |release|, also used in various other places throughout the
# built documents.
# The short X.Y version.
version = parse_version(__version__).base_version
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'py:obj'
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# Intersphinx mappings
intersphinx_mapping = {'python': ('https://docs.python.org/', None),
'astropy': ('http://docs.astropy.org/en/latest', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None)}
autodoc_mock_imports = ['_tkinter']
autodoc_member_order = 'groupwise'
napoleon_use_rtype = False
napoleon_use_ivar = True
rst_epilog = f"""
.. |numpy_array| replace:: Numpy array
.. |HDUList| replace:: :class:`~astropy.io.fits.HDUList`
.. |cluplus_version| replace:: {__version__}
"""
# -- Options for HTML output ----------------------------------------------
html_css_files = []
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
if sphinx_template == 'sphinx-bootstrap':
html_theme = 'bootstrap'
html_sidebars = {}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
# Navigation bar title. (Default: ``project`` value)
'navbar_title': "SDSS: {0}".format(project),
# Tab name for entire site. (Default: "Site")
'navbar_site_name': "Site",
# A list of tuples containing pages or urls to link to.
# Valid tuples should be in the following forms:
# (name, page) # a link to a page
# (name, "/aa/bb", 1) # a link to an arbitrary relative url
# (name, "http://example.com", True) # arbitrary absolute url
# Note the "1" or "True" value above as the third argument to indicate
# an arbitrary url.
'navbar_links': [
],
# Render the next and previous page links in navbar. (Default: true)
'navbar_sidebarrel': False,
# Render the current pages TOC in the navbar. (Default: true)
'navbar_pagenav': False,
# Tab name for the current pages TOC. (Default: "Page")
'navbar_pagenav_name': "Page",
# Global TOC depth for "site" navbar tab. (Default: 1)
# Switching to -1 shows all levels.
'globaltoc_depth': 2,
# Include hidden TOCs in Site navbar?
#
# Note: If this is "false", you cannot have mixed ``:hidden:`` and
# non-hidden ``toctree`` directives in the same page, or else the build
# will break.
#
# Values: "true" (default) or "false"
'globaltoc_includehidden': "true",
# HTML navbar class (Default: "navbar") to attach to <div> element.
# For black navbar, do "navbar navbar-inverse"
'navbar_class': "navbar",
# Fix navigation bar to top of page?
# Values: "true" (default) or "false"
'navbar_fixed_top': "true",
# Location of link to source.
# Options are "nav" (default), "footer" or anything else to exclude.
'source_link_position': "",
# Bootswatch (http://bootswatch.com/) theme.
#
# Options are nothing (default) or the name of a valid theme
# such as "amelia" or "cosmo".
'bootswatch_theme': "paper",
# Choose Bootstrap version.
# Values: "3" (default) or "2" (in quotes)
'bootstrap_version': "3",
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
html_logo = '_static/sdssv_logo_small.png'
html_css_files += ["custom_bootstrap.css"]
html_sidebars = {'**': ['localtoc.html']}
elif sphinx_template == 'alabaster':
html_theme = 'alabaster'
html_theme_options = {
'logo': 'sdssv_logo.png',
'github_user': 'sdss',
'github_repo': project,
'github_button': True,
'github_type': 'star',
'sidebar_collapse': True,
'page_width': '80%'
}
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
]
}
html_css_files += ["custom.css"]
html_favicon = './_static/favicon_sdssv.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# See https://github.com/rtfd/readthedocs.org/issues/1776 for why we do this
if on_rtd:
html_static_path = []
else:
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = '{0}pdoc'.format('cluplus')
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, '{0}.tex'.format(project), u'{0} Documentation'.format(project),
author, 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'cluplus', u'{0} Documentation'.format(project),
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, project, u'{0} Documentation'.format(project),
author, project, 'One line description of project.',
'Miscellaneous'),
]
if use_releases == 'yes':
extensions += ['sdsstools.releases']
releases_github_path = 'wasndas/cluplus'
releases_document_name = ['CHANGELOG']
releases_unstable_prehistory = True
| true | true |
f71b252375625601941b43c73fbf023daa436c67 | 304 | py | Python | src/pygame_setup.py | sheepy0125/some-platformer-game | 5b623c7ffeb7d1e0ba8bf1d75bc37b1798f31379 | [
"MIT"
] | null | null | null | src/pygame_setup.py | sheepy0125/some-platformer-game | 5b623c7ffeb7d1e0ba8bf1d75bc37b1798f31379 | [
"MIT"
] | null | null | null | src/pygame_setup.py | sheepy0125/some-platformer-game | 5b623c7ffeb7d1e0ba8bf1d75bc37b1798f31379 | [
"MIT"
] | null | null | null | """
Pygame setup for Some Platformer Game
Created by sheepy0125
08/10/2021
"""
import pygame
SCREEN_SIZE = (500, 500)
SCROLL_OFFSET = (SCREEN_SIZE[0] // 2, SCREEN_SIZE[1] // 2)
screen = pygame.display.set_mode(SCREEN_SIZE)
pygame.display.set_caption("Some Platformer Game")
clock = pygame.time.Clock()
| 21.714286 | 58 | 0.746711 |
import pygame
SCREEN_SIZE = (500, 500)
SCROLL_OFFSET = (SCREEN_SIZE[0] // 2, SCREEN_SIZE[1] // 2)
screen = pygame.display.set_mode(SCREEN_SIZE)
pygame.display.set_caption("Some Platformer Game")
clock = pygame.time.Clock()
| true | true |
f71b256176da366107fb45bcef939ee05c453375 | 13,883 | py | Python | nexmo/__init__.py | cook-health/messaging | 1a827b97d9af6e56d55c362b29dd79a6cb373f88 | [
"MIT"
] | null | null | null | nexmo/__init__.py | cook-health/messaging | 1a827b97d9af6e56d55c362b29dd79a6cb373f88 | [
"MIT"
] | 2 | 2018-03-14T10:42:37.000Z | 2018-03-14T11:01:31.000Z | nexmo/__init__.py | Seliniux777/Nexmo-nexmo-python | d1d60e8068b1cb23f12507a6ec1cd500285890b5 | [
"MIT"
] | 1 | 2020-10-18T09:41:15.000Z | 2020-10-18T09:41:15.000Z | from platform import python_version
import hashlib
import hmac
import jwt
import os
import requests
import sys
import time
from uuid import uuid4
import warnings
# Text-like types differ across interpreters: on Python 3 values may be
# ``str`` or ``bytes``; on Python 2 they may be ``unicode`` or ``str``.
# The else branch is only evaluated on Python 2, where ``unicode`` exists.
string_types = (str, bytes) if sys.version_info[0] == 3 else (unicode, str)  # noqa: F821

__version__ = '2.0.0'
class Error(Exception):
    """Base class for all errors raised by this client library."""
class ClientError(Error):
    """Error attributed to the client's request."""
class ServerError(Error):
    """Error reported by the remote server."""
class AuthenticationError(ClientError):
    """Client error raised when authentication fails."""
class Client():
def __init__(self, **kwargs):
self.api_key = kwargs.get('key', None) or os.environ.get('NEXMO_API_KEY', None)
self.api_secret = kwargs.get('secret', None) or os.environ.get('NEXMO_API_SECRET', None)
self.signature_secret = kwargs.get('signature_secret', None) or os.environ.get('NEXMO_SIGNATURE_SECRET', None)
self.signature_method = kwargs.get('signature_method', None) or os.environ.get('NEXMO_SIGNATURE_METHOD', None)
if self.signature_method == 'md5':
self.signature_method = hashlib.md5
elif self.signature_method == 'sha1':
self.signature_method = hashlib.sha1
elif self.signature_method == 'sha256':
self.signature_method = hashlib.sha256
elif self.signature_method == 'sha512':
self.signature_method = hashlib.sha512
self.application_id = kwargs.get('application_id', None)
self.private_key = kwargs.get('private_key', None)
if isinstance(self.private_key, string_types) and '\n' not in self.private_key:
with open(self.private_key, 'rb') as key_file:
self.private_key = key_file.read()
self.host = 'rest.nexmo.com'
self.api_host = 'api.nexmo.com'
user_agent = 'nexmo-python/{0}/{1}'.format(__version__, python_version())
if 'app_name' in kwargs and 'app_version' in kwargs:
user_agent += '/{0}/{1}'.format(kwargs['app_name'], kwargs['app_version'])
self.headers = {'User-Agent': user_agent}
self.auth_params = {}
def auth(self, params=None, **kwargs):
self.auth_params = params or kwargs
def send_message(self, params):
return self.post(self.host, '/sms/json', params)
def get_balance(self):
return self.get(self.host, '/account/get-balance')
def get_country_pricing(self, country_code):
return self.get(self.host, '/account/get-pricing/outbound', {'country': country_code})
def get_prefix_pricing(self, prefix):
return self.get(self.host, '/account/get-prefix-pricing/outbound', {'prefix': prefix})
def get_sms_pricing(self, number):
return self.get(self.host, '/account/get-phone-pricing/outbound/sms', {'phone': number})
def get_voice_pricing(self, number):
return self.get(self.host, '/account/get-phone-pricing/outbound/voice', {'phone': number})
def update_settings(self, params=None, **kwargs):
return self.post(self.host, '/account/settings', params or kwargs)
def topup(self, params=None, **kwargs):
return self.post(self.host, '/account/top-up', params or kwargs)
def get_account_numbers(self, params=None, **kwargs):
return self.get(self.host, '/account/numbers', params or kwargs)
def get_available_numbers(self, country_code, params=None, **kwargs):
return self.get(self.host, '/number/search', dict(params or kwargs, country=country_code))
def buy_number(self, params=None, **kwargs):
return self.post(self.host, '/number/buy', params or kwargs)
def cancel_number(self, params=None, **kwargs):
return self.post(self.host, '/number/cancel', params or kwargs)
def update_number(self, params=None, **kwargs):
return self.post(self.host, '/number/update', params or kwargs)
def get_message(self, message_id):
return self.get(self.host, '/search/message', {'id': message_id})
def get_message_rejections(self, params=None, **kwargs):
return self.get(self.host, '/search/rejections', params or kwargs)
def search_messages(self, params=None, **kwargs):
return self.get(self.host, '/search/messages', params or kwargs)
def send_ussd_push_message(self, params=None, **kwargs):
return self.post(self.host, '/ussd/json', params or kwargs)
def send_ussd_prompt_message(self, params=None, **kwargs):
return self.post(self.host, '/ussd-prompt/json', params or kwargs)
def send_2fa_message(self, params=None, **kwargs):
return self.post(self.host, '/sc/us/2fa/json', params or kwargs)
def send_event_alert_message(self, params=None, **kwargs):
return self.post(self.host, '/sc/us/alert/json', params or kwargs)
def send_marketing_message(self, params=None, **kwargs):
return self.post(self.host, '/sc/us/marketing/json', params or kwargs)
def get_event_alert_numbers(self):
return self.get(self.host, '/sc/us/alert/opt-in/query/json')
def resubscribe_event_alert_number(self, params=None, **kwargs):
return self.post(self.host, '/sc/us/alert/opt-in/manage/json', params or kwargs)
def initiate_call(self, params=None, **kwargs):
return self.post(self.host, '/call/json', params or kwargs)
def initiate_tts_call(self, params=None, **kwargs):
return self.post(self.api_host, '/tts/json', params or kwargs)
def initiate_tts_prompt_call(self, params=None, **kwargs):
return self.post(self.api_host, '/tts-prompt/json', params or kwargs)
def start_verification(self, params=None, **kwargs):
return self.post(self.api_host, '/verify/json', params or kwargs)
def send_verification_request(self, params=None, **kwargs):
warnings.warn('nexmo.Client#send_verification_request is deprecated (use #start_verification instead)',
DeprecationWarning, stacklevel=2)
return self.post(self.api_host, '/verify/json', params or kwargs)
def check_verification(self, request_id, params=None, **kwargs):
return self.post(self.api_host, '/verify/check/json', dict(params or kwargs, request_id=request_id))
def check_verification_request(self, params=None, **kwargs):
warnings.warn('nexmo.Client#check_verification_request is deprecated (use #check_verification instead)',
DeprecationWarning, stacklevel=2)
return self.post(self.api_host, '/verify/check/json', params or kwargs)
def get_verification(self, request_id):
return self.get(self.api_host, '/verify/search/json', {'request_id': request_id})
def get_verification_request(self, request_id):
warnings.warn('nexmo.Client#get_verification_request is deprecated (use #get_verification instead)',
DeprecationWarning, stacklevel=2)
return self.get(self.api_host, '/verify/search/json', {'request_id': request_id})
def cancel_verification(self, request_id):
return self.post(self.api_host, '/verify/control/json', {'request_id': request_id, 'cmd': 'cancel'})
def trigger_next_verification_event(self, request_id):
return self.post(self.api_host, '/verify/control/json', {'request_id': request_id, 'cmd': 'trigger_next_event'})
def control_verification_request(self, params=None, **kwargs):
warnings.warn('nexmo.Client#control_verification_request is deprecated', DeprecationWarning, stacklevel=2)
return self.post(self.api_host, '/verify/control/json', params or kwargs)
def get_basic_number_insight(self, params=None, **kwargs):
return self.get(self.api_host, '/ni/basic/json', params or kwargs)
def get_standard_number_insight(self, params=None, **kwargs):
return self.get(self.api_host, '/ni/standard/json', params or kwargs)
def get_number_insight(self, params=None, **kwargs):
warnings.warn('nexmo.Client#get_number_insight is deprecated (use #get_standard_number_insight instead)',
DeprecationWarning, stacklevel=2)
return self.get(self.api_host, '/number/lookup/json', params or kwargs)
def get_advanced_number_insight(self, params=None, **kwargs):
return self.get(self.api_host, '/ni/advanced/json', params or kwargs)
def request_number_insight(self, params=None, **kwargs):
return self.post(self.host, '/ni/json', params or kwargs)
def get_applications(self, params=None, **kwargs):
return self.get(self.api_host, '/v1/applications', params or kwargs)
def get_application(self, application_id):
return self.get(self.api_host, '/v1/applications/' + application_id)
def create_application(self, params=None, **kwargs):
return self.post(self.api_host, '/v1/applications', params or kwargs)
def update_application(self, application_id, params=None, **kwargs):
return self.put(self.api_host, '/v1/applications/' + application_id, params or kwargs)
def delete_application(self, application_id):
return self.delete(self.api_host, '/v1/applications/' + application_id)
def create_call(self, params=None, **kwargs):
return self.__post('/v1/calls', params or kwargs)
def get_calls(self, params=None, **kwargs):
return self.__get('/v1/calls', params or kwargs)
def get_call(self, uuid):
return self.__get('/v1/calls/' + uuid)
def update_call(self, uuid, params=None, **kwargs):
return self.__put('/v1/calls/' + uuid, params or kwargs)
def send_audio(self, uuid, params=None, **kwargs):
return self.__put('/v1/calls/' + uuid + '/stream', params or kwargs)
def stop_audio(self, uuid):
return self.__delete('/v1/calls/' + uuid + '/stream')
def send_speech(self, uuid, params=None, **kwargs):
return self.__put('/v1/calls/' + uuid + '/talk', params or kwargs)
def stop_speech(self, uuid):
return self.__delete('/v1/calls/' + uuid + '/talk')
def send_dtmf(self, uuid, params=None, **kwargs):
return self.__put('/v1/calls/' + uuid + '/dtmf', params or kwargs)
def check_signature(self, params):
params = dict(params)
signature = params.pop('sig', '').lower()
return hmac.compare_digest(signature, self.signature(params))
def signature(self, params):
if self.signature_method:
hasher = hmac.new(self.signature_secret.encode(), digestmod=self.signature_method)
else:
hasher = hashlib.md5()
# Add timestamp if not already present
if not params.get("timestamp"):
params["timestamp"] = int(time.time())
for key in sorted(params):
value = params[key]
if isinstance(value, str):
value = value.replace('&', '_').replace('=', '_')
hasher.update('&{0}={1}'.format(key, value).encode('utf-8'))
if self.signature_method is None:
hasher.update(self.signature_secret.encode())
return hasher.hexdigest()
def get(self, host, request_uri, params=None):
uri = 'https://' + host + request_uri
params = dict(params or {}, api_key=self.api_key, api_secret=self.api_secret)
return self.parse(host, requests.get(uri, params=params, headers=self.headers))
def post(self, host, request_uri, params):
uri = 'https://' + host + request_uri
params = dict(params, api_key=self.api_key, api_secret=self.api_secret)
return self.parse(host, requests.post(uri, data=params, headers=self.headers))
def put(self, host, request_uri, params):
uri = 'https://' + host + request_uri
params = dict(params, api_key=self.api_key, api_secret=self.api_secret)
return self.parse(host, requests.put(uri, json=params, headers=self.headers))
def delete(self, host, request_uri):
uri = 'https://' + host + request_uri
params = dict(api_key=self.api_key, api_secret=self.api_secret)
return self.parse(host, requests.delete(uri, params=params, headers=self.headers))
def parse(self, host, response):
if response.status_code == 401:
raise AuthenticationError
elif response.status_code == 204:
return None
elif 200 <= response.status_code < 300:
return response.json()
elif 400 <= response.status_code < 500:
message = "{code} response from {host}".format(code=response.status_code, host=host)
raise ClientError(message)
elif 500 <= response.status_code < 600:
message = "{code} response from {host}".format(code=response.status_code, host=host)
raise ServerError(message)
def __get(self, request_uri, params=None):
uri = 'https://' + self.api_host + request_uri
return self.parse(self.api_host, requests.get(uri, params=params or {}, headers=self.__headers()))
def __post(self, request_uri, params):
uri = 'https://' + self.api_host + request_uri
return self.parse(self.api_host, requests.post(uri, json=params, headers=self.__headers()))
def __put(self, request_uri, params):
uri = 'https://' + self.api_host + request_uri
return self.parse(self.api_host, requests.put(uri, json=params, headers=self.__headers()))
def __delete(self, request_uri):
uri = 'https://' + self.api_host + request_uri
return self.parse(self.api_host, requests.delete(uri, headers=self.__headers()))
def __headers(self):
iat = int(time.time())
payload = dict(self.auth_params)
payload.setdefault('application_id', self.application_id)
payload.setdefault('iat', iat)
payload.setdefault('exp', iat + 60)
payload.setdefault('jti', str(uuid4()))
token = jwt.encode(payload, self.private_key, algorithm='RS256')
return dict(self.headers, Authorization=b'Bearer ' + token)
| 38.457064 | 120 | 0.666859 | from platform import python_version
import hashlib
import hmac
import jwt
import os
import requests
import sys
import time
from uuid import uuid4
import warnings
if sys.version_info[0] == 3:
string_types = (str, bytes)
else:
string_types = (unicode, str)
__version__ = '2.0.0'
class Error(Exception):
pass
class ClientError(Error):
pass
class ServerError(Error):
pass
class AuthenticationError(ClientError):
pass
class Client():
def __init__(self, **kwargs):
self.api_key = kwargs.get('key', None) or os.environ.get('NEXMO_API_KEY', None)
self.api_secret = kwargs.get('secret', None) or os.environ.get('NEXMO_API_SECRET', None)
self.signature_secret = kwargs.get('signature_secret', None) or os.environ.get('NEXMO_SIGNATURE_SECRET', None)
self.signature_method = kwargs.get('signature_method', None) or os.environ.get('NEXMO_SIGNATURE_METHOD', None)
if self.signature_method == 'md5':
self.signature_method = hashlib.md5
elif self.signature_method == 'sha1':
self.signature_method = hashlib.sha1
elif self.signature_method == 'sha256':
self.signature_method = hashlib.sha256
elif self.signature_method == 'sha512':
self.signature_method = hashlib.sha512
self.application_id = kwargs.get('application_id', None)
self.private_key = kwargs.get('private_key', None)
if isinstance(self.private_key, string_types) and '\n' not in self.private_key:
with open(self.private_key, 'rb') as key_file:
self.private_key = key_file.read()
self.host = 'rest.nexmo.com'
self.api_host = 'api.nexmo.com'
user_agent = 'nexmo-python/{0}/{1}'.format(__version__, python_version())
if 'app_name' in kwargs and 'app_version' in kwargs:
user_agent += '/{0}/{1}'.format(kwargs['app_name'], kwargs['app_version'])
self.headers = {'User-Agent': user_agent}
self.auth_params = {}
def auth(self, params=None, **kwargs):
self.auth_params = params or kwargs
def send_message(self, params):
return self.post(self.host, '/sms/json', params)
def get_balance(self):
return self.get(self.host, '/account/get-balance')
def get_country_pricing(self, country_code):
return self.get(self.host, '/account/get-pricing/outbound', {'country': country_code})
def get_prefix_pricing(self, prefix):
return self.get(self.host, '/account/get-prefix-pricing/outbound', {'prefix': prefix})
def get_sms_pricing(self, number):
return self.get(self.host, '/account/get-phone-pricing/outbound/sms', {'phone': number})
def get_voice_pricing(self, number):
return self.get(self.host, '/account/get-phone-pricing/outbound/voice', {'phone': number})
def update_settings(self, params=None, **kwargs):
return self.post(self.host, '/account/settings', params or kwargs)
def topup(self, params=None, **kwargs):
return self.post(self.host, '/account/top-up', params or kwargs)
def get_account_numbers(self, params=None, **kwargs):
return self.get(self.host, '/account/numbers', params or kwargs)
def get_available_numbers(self, country_code, params=None, **kwargs):
return self.get(self.host, '/number/search', dict(params or kwargs, country=country_code))
def buy_number(self, params=None, **kwargs):
return self.post(self.host, '/number/buy', params or kwargs)
def cancel_number(self, params=None, **kwargs):
return self.post(self.host, '/number/cancel', params or kwargs)
def update_number(self, params=None, **kwargs):
return self.post(self.host, '/number/update', params or kwargs)
def get_message(self, message_id):
return self.get(self.host, '/search/message', {'id': message_id})
def get_message_rejections(self, params=None, **kwargs):
return self.get(self.host, '/search/rejections', params or kwargs)
def search_messages(self, params=None, **kwargs):
return self.get(self.host, '/search/messages', params or kwargs)
def send_ussd_push_message(self, params=None, **kwargs):
return self.post(self.host, '/ussd/json', params or kwargs)
def send_ussd_prompt_message(self, params=None, **kwargs):
return self.post(self.host, '/ussd-prompt/json', params or kwargs)
def send_2fa_message(self, params=None, **kwargs):
return self.post(self.host, '/sc/us/2fa/json', params or kwargs)
def send_event_alert_message(self, params=None, **kwargs):
return self.post(self.host, '/sc/us/alert/json', params or kwargs)
def send_marketing_message(self, params=None, **kwargs):
return self.post(self.host, '/sc/us/marketing/json', params or kwargs)
def get_event_alert_numbers(self):
return self.get(self.host, '/sc/us/alert/opt-in/query/json')
def resubscribe_event_alert_number(self, params=None, **kwargs):
return self.post(self.host, '/sc/us/alert/opt-in/manage/json', params or kwargs)
def initiate_call(self, params=None, **kwargs):
return self.post(self.host, '/call/json', params or kwargs)
def initiate_tts_call(self, params=None, **kwargs):
return self.post(self.api_host, '/tts/json', params or kwargs)
def initiate_tts_prompt_call(self, params=None, **kwargs):
return self.post(self.api_host, '/tts-prompt/json', params or kwargs)
def start_verification(self, params=None, **kwargs):
return self.post(self.api_host, '/verify/json', params or kwargs)
def send_verification_request(self, params=None, **kwargs):
warnings.warn('nexmo.Client#send_verification_request is deprecated (use #start_verification instead)',
DeprecationWarning, stacklevel=2)
return self.post(self.api_host, '/verify/json', params or kwargs)
def check_verification(self, request_id, params=None, **kwargs):
return self.post(self.api_host, '/verify/check/json', dict(params or kwargs, request_id=request_id))
def check_verification_request(self, params=None, **kwargs):
warnings.warn('nexmo.Client#check_verification_request is deprecated (use #check_verification instead)',
DeprecationWarning, stacklevel=2)
return self.post(self.api_host, '/verify/check/json', params or kwargs)
def get_verification(self, request_id):
return self.get(self.api_host, '/verify/search/json', {'request_id': request_id})
def get_verification_request(self, request_id):
warnings.warn('nexmo.Client#get_verification_request is deprecated (use #get_verification instead)',
DeprecationWarning, stacklevel=2)
return self.get(self.api_host, '/verify/search/json', {'request_id': request_id})
def cancel_verification(self, request_id):
return self.post(self.api_host, '/verify/control/json', {'request_id': request_id, 'cmd': 'cancel'})
def trigger_next_verification_event(self, request_id):
return self.post(self.api_host, '/verify/control/json', {'request_id': request_id, 'cmd': 'trigger_next_event'})
def control_verification_request(self, params=None, **kwargs):
warnings.warn('nexmo.Client#control_verification_request is deprecated', DeprecationWarning, stacklevel=2)
return self.post(self.api_host, '/verify/control/json', params or kwargs)
def get_basic_number_insight(self, params=None, **kwargs):
return self.get(self.api_host, '/ni/basic/json', params or kwargs)
def get_standard_number_insight(self, params=None, **kwargs):
return self.get(self.api_host, '/ni/standard/json', params or kwargs)
def get_number_insight(self, params=None, **kwargs):
warnings.warn('nexmo.Client#get_number_insight is deprecated (use #get_standard_number_insight instead)',
DeprecationWarning, stacklevel=2)
return self.get(self.api_host, '/number/lookup/json', params or kwargs)
def get_advanced_number_insight(self, params=None, **kwargs):
return self.get(self.api_host, '/ni/advanced/json', params or kwargs)
def request_number_insight(self, params=None, **kwargs):
return self.post(self.host, '/ni/json', params or kwargs)
def get_applications(self, params=None, **kwargs):
return self.get(self.api_host, '/v1/applications', params or kwargs)
def get_application(self, application_id):
return self.get(self.api_host, '/v1/applications/' + application_id)
def create_application(self, params=None, **kwargs):
return self.post(self.api_host, '/v1/applications', params or kwargs)
def update_application(self, application_id, params=None, **kwargs):
return self.put(self.api_host, '/v1/applications/' + application_id, params or kwargs)
def delete_application(self, application_id):
return self.delete(self.api_host, '/v1/applications/' + application_id)
def create_call(self, params=None, **kwargs):
return self.__post('/v1/calls', params or kwargs)
def get_calls(self, params=None, **kwargs):
return self.__get('/v1/calls', params or kwargs)
def get_call(self, uuid):
return self.__get('/v1/calls/' + uuid)
def update_call(self, uuid, params=None, **kwargs):
return self.__put('/v1/calls/' + uuid, params or kwargs)
def send_audio(self, uuid, params=None, **kwargs):
return self.__put('/v1/calls/' + uuid + '/stream', params or kwargs)
def stop_audio(self, uuid):
return self.__delete('/v1/calls/' + uuid + '/stream')
def send_speech(self, uuid, params=None, **kwargs):
return self.__put('/v1/calls/' + uuid + '/talk', params or kwargs)
def stop_speech(self, uuid):
return self.__delete('/v1/calls/' + uuid + '/talk')
def send_dtmf(self, uuid, params=None, **kwargs):
return self.__put('/v1/calls/' + uuid + '/dtmf', params or kwargs)
def check_signature(self, params):
params = dict(params)
signature = params.pop('sig', '').lower()
return hmac.compare_digest(signature, self.signature(params))
def signature(self, params):
if self.signature_method:
hasher = hmac.new(self.signature_secret.encode(), digestmod=self.signature_method)
else:
hasher = hashlib.md5()
if not params.get("timestamp"):
params["timestamp"] = int(time.time())
for key in sorted(params):
value = params[key]
if isinstance(value, str):
value = value.replace('&', '_').replace('=', '_')
hasher.update('&{0}={1}'.format(key, value).encode('utf-8'))
if self.signature_method is None:
hasher.update(self.signature_secret.encode())
return hasher.hexdigest()
def get(self, host, request_uri, params=None):
uri = 'https://' + host + request_uri
params = dict(params or {}, api_key=self.api_key, api_secret=self.api_secret)
return self.parse(host, requests.get(uri, params=params, headers=self.headers))
def post(self, host, request_uri, params):
uri = 'https://' + host + request_uri
params = dict(params, api_key=self.api_key, api_secret=self.api_secret)
return self.parse(host, requests.post(uri, data=params, headers=self.headers))
def put(self, host, request_uri, params):
uri = 'https://' + host + request_uri
params = dict(params, api_key=self.api_key, api_secret=self.api_secret)
return self.parse(host, requests.put(uri, json=params, headers=self.headers))
def delete(self, host, request_uri):
uri = 'https://' + host + request_uri
params = dict(api_key=self.api_key, api_secret=self.api_secret)
return self.parse(host, requests.delete(uri, params=params, headers=self.headers))
def parse(self, host, response):
if response.status_code == 401:
raise AuthenticationError
elif response.status_code == 204:
return None
elif 200 <= response.status_code < 300:
return response.json()
elif 400 <= response.status_code < 500:
message = "{code} response from {host}".format(code=response.status_code, host=host)
raise ClientError(message)
elif 500 <= response.status_code < 600:
message = "{code} response from {host}".format(code=response.status_code, host=host)
raise ServerError(message)
def __get(self, request_uri, params=None):
uri = 'https://' + self.api_host + request_uri
return self.parse(self.api_host, requests.get(uri, params=params or {}, headers=self.__headers()))
def __post(self, request_uri, params):
uri = 'https://' + self.api_host + request_uri
return self.parse(self.api_host, requests.post(uri, json=params, headers=self.__headers()))
def __put(self, request_uri, params):
uri = 'https://' + self.api_host + request_uri
return self.parse(self.api_host, requests.put(uri, json=params, headers=self.__headers()))
def __delete(self, request_uri):
uri = 'https://' + self.api_host + request_uri
return self.parse(self.api_host, requests.delete(uri, headers=self.__headers()))
def __headers(self):
iat = int(time.time())
payload = dict(self.auth_params)
payload.setdefault('application_id', self.application_id)
payload.setdefault('iat', iat)
payload.setdefault('exp', iat + 60)
payload.setdefault('jti', str(uuid4()))
token = jwt.encode(payload, self.private_key, algorithm='RS256')
return dict(self.headers, Authorization=b'Bearer ' + token)
| true | true |
f71b26b2f58e18b2342f9b9601e14fbabdb77fb3 | 2,930 | py | Python | src/python/pants/util/process_handler.py | revl/pants | 8ad83e4ca80c095d44efceafd8b41e575da39c65 | [
"Apache-2.0"
] | 1 | 2021-05-05T18:58:28.000Z | 2021-05-05T18:58:28.000Z | src/python/pants/util/process_handler.py | revl/pants | 8ad83e4ca80c095d44efceafd8b41e575da39c65 | [
"Apache-2.0"
] | null | null | null | src/python/pants/util/process_handler.py | revl/pants | 8ad83e4ca80c095d44efceafd8b41e575da39c65 | [
"Apache-2.0"
] | 3 | 2020-06-30T08:28:13.000Z | 2021-07-28T09:35:57.000Z | # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import io
import multiprocessing
import subprocess
import sys
from abc import ABC, abstractmethod
from typing import Optional
class ProcessHandler(ABC):
"""An abstraction of process handling calls using the same interface as subprocess.Popen.
See SubprocessProcessHandler below for an example.
"""
@abstractmethod
def wait(self, timeout: Optional[float] = None) -> int:
"""Wait for the underlying process to terminate.
:param timeout: The time to wait for the process to terminate in fractional seconds. Wait
forever by default.
:returns: The process exit code is it has terminated.
:raises: :class:`subprocess.TimeoutExpired`
"""
@abstractmethod
def kill(self) -> None:
pass
@abstractmethod
def terminate(self) -> None:
pass
@abstractmethod
def poll(self) -> int:
pass
class SubprocessProcessHandler(ProcessHandler):
"""A `ProcessHandler` that delegates directly to a subprocess.Popen object."""
def __init__(self, process: subprocess.Popen) -> None:
self._process = process
def wait(self, timeout: Optional[float] = None) -> int:
return self._process.wait(timeout=timeout)
def kill(self) -> None:
self._process.kill()
def terminate(self) -> None:
self._process.terminate()
def poll(self) -> int:
return self._process.poll()
def communicate_teeing_stdout_and_stderr(self, stdin=None):
"""Just like subprocess.communicate, but tees stdout and stderr to both sys.std{out,err} and
a buffer. Only operates on stdout/stderr if the Popen call send them to subprocess.PIPE.
:param stdin: A string to send to the stdin of the subprocess.
:return: (stdout, stderr) as strings.
"""
if stdin is not None and self._process.stdin is not None:
self._process.stdin.write(stdin)
def fork_tee(infile, outfile):
if infile is None:
return lambda: None
queue = multiprocessing.Queue()
process = multiprocessing.Process(target=_tee, args=(infile, outfile, queue.put))
process.start()
def join_and_get_output():
process.join()
return queue.get()
return join_and_get_output
stdout = fork_tee(self._process.stdout, sys.stdout)
stderr = fork_tee(self._process.stderr, sys.stderr)
self._process.wait()
return stdout(), stderr()
def _tee(infile, outfile, return_function):
accumulator = io.BytesIO()
for line in iter(infile.readline, b""):
accumulator.write(line)
outfile.buffer.write(line)
infile.close()
return_function(accumulator.getvalue())
| 29.897959 | 100 | 0.650853 |
import io
import multiprocessing
import subprocess
import sys
from abc import ABC, abstractmethod
from typing import Optional
class ProcessHandler(ABC):
@abstractmethod
def wait(self, timeout: Optional[float] = None) -> int:
@abstractmethod
def kill(self) -> None:
pass
@abstractmethod
def terminate(self) -> None:
pass
@abstractmethod
def poll(self) -> int:
pass
class SubprocessProcessHandler(ProcessHandler):
def __init__(self, process: subprocess.Popen) -> None:
self._process = process
def wait(self, timeout: Optional[float] = None) -> int:
return self._process.wait(timeout=timeout)
def kill(self) -> None:
self._process.kill()
def terminate(self) -> None:
self._process.terminate()
def poll(self) -> int:
return self._process.poll()
def communicate_teeing_stdout_and_stderr(self, stdin=None):
if stdin is not None and self._process.stdin is not None:
self._process.stdin.write(stdin)
def fork_tee(infile, outfile):
if infile is None:
return lambda: None
queue = multiprocessing.Queue()
process = multiprocessing.Process(target=_tee, args=(infile, outfile, queue.put))
process.start()
def join_and_get_output():
process.join()
return queue.get()
return join_and_get_output
stdout = fork_tee(self._process.stdout, sys.stdout)
stderr = fork_tee(self._process.stderr, sys.stderr)
self._process.wait()
return stdout(), stderr()
def _tee(infile, outfile, return_function):
accumulator = io.BytesIO()
for line in iter(infile.readline, b""):
accumulator.write(line)
outfile.buffer.write(line)
infile.close()
return_function(accumulator.getvalue())
| true | true |
f71b2735ba192a6cc12f568c018d29bbdfde9f83 | 3,035 | py | Python | code/evaluate_DSTC2.py | avinashsai/GCN-SeA | 26968d8a71269581f7400293064314b5a18b5748 | [
"Apache-2.0"
] | 12 | 2019-09-06T03:48:36.000Z | 2021-04-12T08:36:45.000Z | code/evaluate_DSTC2.py | avinashsai/GCN-SeA | 26968d8a71269581f7400293064314b5a18b5748 | [
"Apache-2.0"
] | null | null | null | code/evaluate_DSTC2.py | avinashsai/GCN-SeA | 26968d8a71269581f7400293064314b5a18b5748 | [
"Apache-2.0"
] | 9 | 2019-06-30T07:23:01.000Z | 2020-10-16T10:05:37.000Z | from metrics import bleu, rouge
import argparse
def get_args():
'''
Parse input arguments:
preds_path: The directory in which labels and predictions files are dumped after inference
config_id: The config id mentioned in the labels and predictions filenames
'''
parser = argparse.ArgumentParser()
parser.add_argument("--preds_path")
parser.add_argument("--kb_path")
parser.add_argument("--config_id")
args = parser.parse_args()
return args
def read_results(path,num):
with open(path+"/labels"+str(num)+".txt","r") as fp:
l=fp.readlines()
with open(path+"/predictions"+str(num)+".txt","r") as fp:
p=fp.readlines()
return p,l
def exact_match(p,l):
c=0
for i1,i in enumerate(l):
if p[i1]==l[i1]:
c+=1
print("Per-Resp Acc: ",c/len(l))
def moses_bl_rouge(p,l):
bl = bleu.moses_multi_bleu(p,l)
x = rouge.rouge(p,l)
print('BLEU: %f\nROUGE1-F: %f\nROUGE1-P: %f\nROUGE1-R: %f\nROUGE2-F: %f\nROUGE2-P: %f\nROUGE2-R: %f\nROUGEL-F: %f\nROUGEL-P: %f\nROUGEL-R: %f'%(bl,x['rouge_1/f_score'],x['rouge_1/p_score'],x['rouge_1/r_score'],x['rouge_2/f_score'],
x['rouge_2/p_score'],x['rouge_2/r_score'],x['rouge_l/f_score'],x['rouge_l/p_score'],x['rouge_l/r_score']))
def micro_compute_prf(gold, pred, global_entity_list):
TP, FP, FN = 0, 0, 0
if len(gold)!= 0:
count = 1
for g in gold:
if g in pred:
TP += 1
else:
FN += 1
for p in set(pred):
if p in global_entity_list:
if p not in gold:
FP += 1
else:
count = 0
return TP,FP,FN,count
def ent_f1(preds,labels,kb_path):
with open(kb_path,'r') as fp:
kb=fp.readlines()
ent=[]
for i in kb:
triples = i.split(' ')
ent.append(triples[1].strip())
ent.append(triples[3].strip())
ent = set(ent)
ent_list = sorted(ent)
mic_pred=0
les=[]
all_TP=0
all_FP=0
all_FN=0
for i in range(len(labels)):
l = labels[i].strip().split()
le=[]
for j in l:
if j in ent_list:
le.append(j)
les.append(le)
p = preds[i].strip().split()
tp,fp,fn,c = micro_compute_prf(le,p,ent_list)
all_TP+=tp
all_FP+=fp
all_FN+=fn
mic_pred+=c
mic_prec = all_TP/float(all_TP+all_FP)
mic_rec = all_TP/float(all_TP + all_FN)
mic_f1=2 * mic_prec * mic_rec / float(mic_prec + mic_rec)
print("Entity-F1:",mic_f1)
if __name__=='__main__':
args = get_args()
result_path = args.preds_path
kb_path = args.kb_path
config_id = args.config_id
print(config_id,"\n")
preds,labels = read_results(result_path,config_id)
exact_match(preds,labels)
moses_bl_rouge(preds,labels)
ent_f1(preds,labels,kb_path)
| 28.632075 | 235 | 0.559143 | from metrics import bleu, rouge
import argparse
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--preds_path")
parser.add_argument("--kb_path")
parser.add_argument("--config_id")
args = parser.parse_args()
return args
def read_results(path,num):
with open(path+"/labels"+str(num)+".txt","r") as fp:
l=fp.readlines()
with open(path+"/predictions"+str(num)+".txt","r") as fp:
p=fp.readlines()
return p,l
def exact_match(p,l):
c=0
for i1,i in enumerate(l):
if p[i1]==l[i1]:
c+=1
print("Per-Resp Acc: ",c/len(l))
def moses_bl_rouge(p,l):
bl = bleu.moses_multi_bleu(p,l)
x = rouge.rouge(p,l)
print('BLEU: %f\nROUGE1-F: %f\nROUGE1-P: %f\nROUGE1-R: %f\nROUGE2-F: %f\nROUGE2-P: %f\nROUGE2-R: %f\nROUGEL-F: %f\nROUGEL-P: %f\nROUGEL-R: %f'%(bl,x['rouge_1/f_score'],x['rouge_1/p_score'],x['rouge_1/r_score'],x['rouge_2/f_score'],
x['rouge_2/p_score'],x['rouge_2/r_score'],x['rouge_l/f_score'],x['rouge_l/p_score'],x['rouge_l/r_score']))
def micro_compute_prf(gold, pred, global_entity_list):
TP, FP, FN = 0, 0, 0
if len(gold)!= 0:
count = 1
for g in gold:
if g in pred:
TP += 1
else:
FN += 1
for p in set(pred):
if p in global_entity_list:
if p not in gold:
FP += 1
else:
count = 0
return TP,FP,FN,count
def ent_f1(preds,labels,kb_path):
with open(kb_path,'r') as fp:
kb=fp.readlines()
ent=[]
for i in kb:
triples = i.split(' ')
ent.append(triples[1].strip())
ent.append(triples[3].strip())
ent = set(ent)
ent_list = sorted(ent)
mic_pred=0
les=[]
all_TP=0
all_FP=0
all_FN=0
for i in range(len(labels)):
l = labels[i].strip().split()
le=[]
for j in l:
if j in ent_list:
le.append(j)
les.append(le)
p = preds[i].strip().split()
tp,fp,fn,c = micro_compute_prf(le,p,ent_list)
all_TP+=tp
all_FP+=fp
all_FN+=fn
mic_pred+=c
mic_prec = all_TP/float(all_TP+all_FP)
mic_rec = all_TP/float(all_TP + all_FN)
mic_f1=2 * mic_prec * mic_rec / float(mic_prec + mic_rec)
print("Entity-F1:",mic_f1)
if __name__=='__main__':
args = get_args()
result_path = args.preds_path
kb_path = args.kb_path
config_id = args.config_id
print(config_id,"\n")
preds,labels = read_results(result_path,config_id)
exact_match(preds,labels)
moses_bl_rouge(preds,labels)
ent_f1(preds,labels,kb_path)
| true | true |
f71b27f6c3b0d480c4f0a1707e1a22e41cb11bbd | 7,941 | py | Python | venv/lib/python3.8/site-packages/django/middleware/cache.py | Joshua-Barawa/My-Photos | adcaea48149c6b31e9559b045709d538d0b749bc | [
"PostgreSQL",
"Unlicense"
] | 1 | 2022-03-16T14:43:26.000Z | 2022-03-16T14:43:26.000Z | venv/lib/python3.8/site-packages/django/middleware/cache.py | Joshua-Barawa/My-Photos | adcaea48149c6b31e9559b045709d538d0b749bc | [
"PostgreSQL",
"Unlicense"
] | null | null | null | venv/lib/python3.8/site-packages/django/middleware/cache.py | Joshua-Barawa/My-Photos | adcaea48149c6b31e9559b045709d538d0b749bc | [
"PostgreSQL",
"Unlicense"
] | 4 | 2022-03-12T10:17:00.000Z | 2022-03-26T08:40:43.000Z | """
Cache middleware. If enabled, each Django-powered page will be cached based on
URL. The canonical way to enable cache middleware is to set
``UpdateCacheMiddleware`` as your first piece of middleware, and
``FetchFromCacheMiddleware`` as the last::
MIDDLEWARE = [
'django.middleware.cache.UpdateCacheMiddleware',
...
'django.middleware.cache.FetchFromCacheMiddleware'
]
This is counter-intuitive, but correct: ``UpdateCacheMiddleware`` needs to run
last during the response phase, which processes middleware bottom-up;
``FetchFromCacheMiddleware`` needs to run last during the request phase, which
processes middleware top-down.
The single-class ``CacheMiddleware`` can be used for some simple sites.
However, if any other piece of middleware needs to affect the cache key, you'll
need to use the two-part ``UpdateCacheMiddleware`` and
``FetchFromCacheMiddleware``. This'll most often happen when you're using
Django's ``LocaleMiddleware``.
More details about how the caching works:
* Only GET or HEAD-requests with status code 200 are cached.
* The number of seconds each page is stored for is set by the "max-age" section
of the response's "Cache-Control" header, falling back to the
CACHE_MIDDLEWARE_SECONDS setting if the section was not found.
* This middleware expects that a HEAD request is answered with the same response
headers exactly like the corresponding GET request.
* When a hit occurs, a shallow copy of the original response object is returned
from process_request.
* Pages will be cached based on the contents of the request headers listed in
the response's "Vary" header.
* This middleware also sets ETag, Last-Modified, Expires and Cache-Control
headers on the response object.
"""
from django.conf import settings
from django.core.cache import DEFAULT_CACHE_ALIAS, caches
from django.utils.cache import (
get_cache_key,
get_max_age,
has_vary_header,
learn_cache_key,
patch_response_headers,
)
from django.utils.deprecation import MiddlewareMixin
class UpdateCacheMiddleware(MiddlewareMixin):
"""
Response-phase cache middleware that updates the cache if the response is
cacheable.
Must be used as part of the two-part update/fetch cache middleware.
UpdateCacheMiddleware must be the first piece of middleware in MIDDLEWARE
so that it'll get called last during the response phase.
"""
def __init__(self, get_response):
super().__init__(get_response)
self.cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
self.page_timeout = None
self.key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
self.cache_alias = settings.CACHE_MIDDLEWARE_ALIAS
self.cache = caches[self.cache_alias]
def _should_update_cache(self, request, response):
return hasattr(request, "_cache_update_cache") and request._cache_update_cache
def process_response(self, request, response):
"""Set the cache, if needed."""
if not self._should_update_cache(request, response):
# We don't need to update the cache, just return.
return response
if response.streaming or response.status_code not in (200, 304):
return response
# Don't cache responses that set a user-specific (and maybe security
# sensitive) cookie in response to a cookie-less request.
if (
not request.COOKIES
and response.cookies
and has_vary_header(response, "Cookie")
):
return response
# Don't cache a response with 'Cache-Control: private'
if "private" in response.get("Cache-Control", ()):
return response
# Page timeout takes precedence over the "max-age" and the default
# cache timeout.
timeout = self.page_timeout
if timeout is None:
# The timeout from the "max-age" section of the "Cache-Control"
# header takes precedence over the default cache timeout.
timeout = get_max_age(response)
if timeout is None:
timeout = self.cache_timeout
elif timeout == 0:
# max-age was set to 0, don't cache.
return response
patch_response_headers(response, timeout)
if timeout and response.status_code == 200:
cache_key = learn_cache_key(
request, response, timeout, self.key_prefix, cache=self.cache
)
if hasattr(response, "render") and callable(response.render):
response.add_post_render_callback(
lambda r: self.cache.set(cache_key, r, timeout)
)
else:
self.cache.set(cache_key, response, timeout)
return response
class FetchFromCacheMiddleware(MiddlewareMixin):
"""
Request-phase cache middleware that fetches a page from the cache.
Must be used as part of the two-part update/fetch cache middleware.
FetchFromCacheMiddleware must be the last piece of middleware in MIDDLEWARE
so that it'll get called last during the request phase.
"""
def __init__(self, get_response):
super().__init__(get_response)
self.key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
self.cache_alias = settings.CACHE_MIDDLEWARE_ALIAS
self.cache = caches[self.cache_alias]
def process_request(self, request):
"""
Check whether the page is already cached and return the cached
version if available.
"""
if request.method not in ("GET", "HEAD"):
request._cache_update_cache = False
return None # Don't bother checking the cache.
# try and get the cached GET response
cache_key = get_cache_key(request, self.key_prefix, "GET", cache=self.cache)
if cache_key is None:
request._cache_update_cache = True
return None # No cache information available, need to rebuild.
response = self.cache.get(cache_key)
# if it wasn't found and we are looking for a HEAD, try looking just for that
if response is None and request.method == "HEAD":
cache_key = get_cache_key(
request, self.key_prefix, "HEAD", cache=self.cache
)
response = self.cache.get(cache_key)
if response is None:
request._cache_update_cache = True
return None # No cache information available, need to rebuild.
# hit, return cached response
request._cache_update_cache = False
return response
class CacheMiddleware(UpdateCacheMiddleware, FetchFromCacheMiddleware):
"""
Cache middleware that provides basic behavior for many simple sites.
Also used as the hook point for the cache decorator, which is generated
using the decorator-from-middleware utility.
"""
def __init__(self, get_response, cache_timeout=None, page_timeout=None, **kwargs):
super().__init__(get_response)
# We need to differentiate between "provided, but using default value",
# and "not provided". If the value is provided using a default, then
# we fall back to system defaults. If it is not provided at all,
# we need to use middleware defaults.
try:
key_prefix = kwargs["key_prefix"]
if key_prefix is None:
key_prefix = ""
self.key_prefix = key_prefix
except KeyError:
pass
try:
cache_alias = kwargs["cache_alias"]
if cache_alias is None:
cache_alias = DEFAULT_CACHE_ALIAS
self.cache_alias = cache_alias
self.cache = caches[self.cache_alias]
except KeyError:
pass
if cache_timeout is not None:
self.cache_timeout = cache_timeout
self.page_timeout = page_timeout
| 38.362319 | 86 | 0.673467 |
from django.conf import settings
from django.core.cache import DEFAULT_CACHE_ALIAS, caches
from django.utils.cache import (
get_cache_key,
get_max_age,
has_vary_header,
learn_cache_key,
patch_response_headers,
)
from django.utils.deprecation import MiddlewareMixin
class UpdateCacheMiddleware(MiddlewareMixin):
def __init__(self, get_response):
super().__init__(get_response)
self.cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
self.page_timeout = None
self.key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
self.cache_alias = settings.CACHE_MIDDLEWARE_ALIAS
self.cache = caches[self.cache_alias]
def _should_update_cache(self, request, response):
return hasattr(request, "_cache_update_cache") and request._cache_update_cache
def process_response(self, request, response):
if not self._should_update_cache(request, response):
return response
if response.streaming or response.status_code not in (200, 304):
return response
# Don't cache responses that set a user-specific (and maybe security
if (
not request.COOKIES
and response.cookies
and has_vary_header(response, "Cookie")
):
return response
if "private" in response.get("Cache-Control", ()):
return response
# Page timeout takes precedence over the "max-age" and the default
# cache timeout.
timeout = self.page_timeout
if timeout is None:
# The timeout from the "max-age" section of the "Cache-Control"
# header takes precedence over the default cache timeout.
timeout = get_max_age(response)
if timeout is None:
timeout = self.cache_timeout
elif timeout == 0:
# max-age was set to 0, don't cache.
return response
patch_response_headers(response, timeout)
if timeout and response.status_code == 200:
cache_key = learn_cache_key(
request, response, timeout, self.key_prefix, cache=self.cache
)
if hasattr(response, "render") and callable(response.render):
response.add_post_render_callback(
lambda r: self.cache.set(cache_key, r, timeout)
)
else:
self.cache.set(cache_key, response, timeout)
return response
class FetchFromCacheMiddleware(MiddlewareMixin):
def __init__(self, get_response):
super().__init__(get_response)
self.key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
self.cache_alias = settings.CACHE_MIDDLEWARE_ALIAS
self.cache = caches[self.cache_alias]
def process_request(self, request):
if request.method not in ("GET", "HEAD"):
request._cache_update_cache = False
return None
# try and get the cached GET response
cache_key = get_cache_key(request, self.key_prefix, "GET", cache=self.cache)
if cache_key is None:
request._cache_update_cache = True
return None # No cache information available, need to rebuild.
response = self.cache.get(cache_key)
# if it wasn't found and we are looking for a HEAD, try looking just for that
if response is None and request.method == "HEAD":
cache_key = get_cache_key(
request, self.key_prefix, "HEAD", cache=self.cache
)
response = self.cache.get(cache_key)
if response is None:
request._cache_update_cache = True
return None
request._cache_update_cache = False
return response
class CacheMiddleware(UpdateCacheMiddleware, FetchFromCacheMiddleware):
def __init__(self, get_response, cache_timeout=None, page_timeout=None, **kwargs):
super().__init__(get_response)
try:
key_prefix = kwargs["key_prefix"]
if key_prefix is None:
key_prefix = ""
self.key_prefix = key_prefix
except KeyError:
pass
try:
cache_alias = kwargs["cache_alias"]
if cache_alias is None:
cache_alias = DEFAULT_CACHE_ALIAS
self.cache_alias = cache_alias
self.cache = caches[self.cache_alias]
except KeyError:
pass
if cache_timeout is not None:
self.cache_timeout = cache_timeout
self.page_timeout = page_timeout
| true | true |
f71b286c1207f4d6dae4a96c65379f266e26d4b1 | 7,686 | py | Python | newrelic/hooks/framework_grpc.py | odidev/newrelic-python-agent | e6c4ddc158ab694dd7ff6bd75e54077d736674f1 | [
"Apache-2.0"
] | null | null | null | newrelic/hooks/framework_grpc.py | odidev/newrelic-python-agent | e6c4ddc158ab694dd7ff6bd75e54077d736674f1 | [
"Apache-2.0"
] | null | null | null | newrelic/hooks/framework_grpc.py | odidev/newrelic-python-agent | e6c4ddc158ab694dd7ff6bd75e54077d736674f1 | [
"Apache-2.0"
] | null | null | null | # Copyright 2010 New Relic, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import time
from newrelic.api.external_trace import ExternalTrace
from newrelic.api.web_transaction import WebTransactionWrapper
from newrelic.api.transaction import current_transaction
from newrelic.api.time_trace import record_exception
from newrelic.common.object_wrapper import wrap_function_wrapper
from newrelic.common.object_names import callable_name
def _get_uri_method(instance, *args, **kwargs):
target = instance._channel.target().decode('utf-8')
method = instance._method.decode('utf-8').lstrip('/')
uri = 'grpc://%s/%s' % (target, method)
return (uri, method)
def _prepare_request(
transaction, guid, request,
timeout=None, metadata=None, *args, **kwargs):
metadata = metadata and list(metadata) or []
dt_metadata = transaction._create_distributed_trace_data_with_guid(guid)
metadata.extend(
transaction._generate_distributed_trace_headers(dt_metadata)
)
args = (request, timeout, metadata) + args
return args, kwargs
def _prepare_request_stream(
transaction, guid, request_iterator, *args, **kwargs):
return _prepare_request(
transaction, guid, request_iterator, *args, **kwargs)
def wrap_call(module, object_path, prepare):
def _call_wrapper(wrapped, instance, args, kwargs):
transaction = current_transaction()
if transaction is None:
return wrapped(*args, **kwargs)
uri, method = _get_uri_method(instance)
with ExternalTrace('gRPC', uri, method):
args, kwargs = prepare(transaction, None, *args, **kwargs)
return wrapped(*args, **kwargs)
wrap_function_wrapper(module, object_path, _call_wrapper)
def wrap_future(module, object_path, prepare):
def _future_wrapper(wrapped, instance, args, kwargs):
transaction = current_transaction()
if transaction is None:
return wrapped(*args, **kwargs)
guid = '%016x' % random.getrandbits(64)
uri, method = _get_uri_method(instance)
args, kwargs = prepare(transaction, guid, *args, **kwargs)
future = wrapped(*args, **kwargs)
future._nr_guid = guid
future._nr_args = ('gRPC', uri, method)
future._nr_start_time = time.time()
# In non-streaming responses, result is typically called instead of
# using the iterator. In streaming calls, the iterator is typically
# used.
return future
wrap_function_wrapper(module, object_path, _future_wrapper)
def wrap_next(_wrapped, _instance, _args, _kwargs):
_nr_args = getattr(_instance, '_nr_args', None)
if not _nr_args:
return _wrapped(*_args, **_kwargs)
try:
return _wrapped(*_args, **_kwargs)
except Exception:
delattr(_instance, '_nr_args')
_nr_start_time = getattr(_instance, '_nr_start_time', 0.0)
_nr_guid = getattr(_instance, '_nr_guid', None)
with ExternalTrace(*_nr_args) as t:
t.start_time = _nr_start_time or t.start_time
t.guid = _nr_guid or t.guid
raise
def wrap_result(_wrapped, _instance, _args, _kwargs):
_nr_args = getattr(_instance, '_nr_args', None)
if not _nr_args:
return _wrapped(*_args, **_kwargs)
delattr(_instance, '_nr_args')
_nr_start_time = getattr(_instance, '_nr_start_time', 0.0)
_nr_guid = getattr(_instance, '_nr_guid', None)
try:
result = _wrapped(*_args, **_kwargs)
except Exception:
with ExternalTrace(*_nr_args) as t:
t.start_time = _nr_start_time or t.start_time
t.guid = _nr_guid or t.guid
raise
else:
with ExternalTrace(*_nr_args) as t:
t.start_time = _nr_start_time or t.start_time
t.guid = _nr_guid or t.guid
return result
def _bind_transaction_args(rpc_event, state, behavior, *args, **kwargs):
return rpc_event, behavior
def grpc_web_transaction(wrapped, instance, args, kwargs):
rpc_event, behavior = _bind_transaction_args(*args, **kwargs)
behavior_name = callable_name(behavior)
call_details = (
getattr(rpc_event, 'call_details', None) or
getattr(rpc_event, 'request_call_details', None))
metadata = (
getattr(rpc_event, 'invocation_metadata', None) or
getattr(rpc_event, 'request_metadata', None))
host = port = None
if call_details:
try:
host, port = call_details.host.split(b':', 1)
except Exception:
pass
request_path = call_details.method
return WebTransactionWrapper(
wrapped,
name=behavior_name,
request_path=request_path,
host=host,
port=port,
headers=metadata)(*args, **kwargs)
def _trailing_metadata(state, *args, **kwargs):
return state.trailing_metadata
def _nr_wrap_status_code(wrapped, instance, args, kwargs):
status_code = wrapped(*args, **kwargs)
response_headers = _trailing_metadata(*args, **kwargs)
transaction = current_transaction()
if transaction:
transaction.process_response(status_code, response_headers)
return status_code
def _nr_wrap_abort(wrapped, instance, args, kwargs):
record_exception()
return wrapped(*args, **kwargs)
def instrument_grpc__channel(module):
wrap_call(module, '_UnaryUnaryMultiCallable.__call__',
_prepare_request)
wrap_call(module, '_UnaryUnaryMultiCallable.with_call',
_prepare_request)
wrap_future(module, '_UnaryUnaryMultiCallable.future',
_prepare_request)
wrap_future(module, '_UnaryStreamMultiCallable.__call__',
_prepare_request)
wrap_call(module, '_StreamUnaryMultiCallable.__call__',
_prepare_request_stream)
wrap_call(module, '_StreamUnaryMultiCallable.with_call',
_prepare_request_stream)
wrap_future(module, '_StreamUnaryMultiCallable.future',
_prepare_request_stream)
wrap_future(module, '_StreamStreamMultiCallable.__call__',
_prepare_request_stream)
if hasattr(module, '_MultiThreadedRendezvous'):
wrap_function_wrapper(module, '_MultiThreadedRendezvous.result',
wrap_result)
wrap_function_wrapper(module, '_MultiThreadedRendezvous._next',
wrap_next)
else:
wrap_function_wrapper(module, '_Rendezvous.result',
wrap_result)
wrap_function_wrapper(module, '_Rendezvous._next',
wrap_next)
wrap_function_wrapper(module, '_Rendezvous.cancel',
wrap_result)
def instrument_grpc_server(module):
wrap_function_wrapper(module, '_unary_response_in_pool',
grpc_web_transaction)
wrap_function_wrapper(module, '_stream_response_in_pool',
grpc_web_transaction)
wrap_function_wrapper(module, '_completion_code',
_nr_wrap_status_code)
wrap_function_wrapper(module, '_abortion_code',
_nr_wrap_status_code)
wrap_function_wrapper(module, '_abort',
_nr_wrap_abort)
| 33.710526 | 76 | 0.683971 |
import random
import time
from newrelic.api.external_trace import ExternalTrace
from newrelic.api.web_transaction import WebTransactionWrapper
from newrelic.api.transaction import current_transaction
from newrelic.api.time_trace import record_exception
from newrelic.common.object_wrapper import wrap_function_wrapper
from newrelic.common.object_names import callable_name
def _get_uri_method(instance, *args, **kwargs):
target = instance._channel.target().decode('utf-8')
method = instance._method.decode('utf-8').lstrip('/')
uri = 'grpc://%s/%s' % (target, method)
return (uri, method)
def _prepare_request(
transaction, guid, request,
timeout=None, metadata=None, *args, **kwargs):
metadata = metadata and list(metadata) or []
dt_metadata = transaction._create_distributed_trace_data_with_guid(guid)
metadata.extend(
transaction._generate_distributed_trace_headers(dt_metadata)
)
args = (request, timeout, metadata) + args
return args, kwargs
def _prepare_request_stream(
transaction, guid, request_iterator, *args, **kwargs):
return _prepare_request(
transaction, guid, request_iterator, *args, **kwargs)
def wrap_call(module, object_path, prepare):
def _call_wrapper(wrapped, instance, args, kwargs):
transaction = current_transaction()
if transaction is None:
return wrapped(*args, **kwargs)
uri, method = _get_uri_method(instance)
with ExternalTrace('gRPC', uri, method):
args, kwargs = prepare(transaction, None, *args, **kwargs)
return wrapped(*args, **kwargs)
wrap_function_wrapper(module, object_path, _call_wrapper)
def wrap_future(module, object_path, prepare):
def _future_wrapper(wrapped, instance, args, kwargs):
transaction = current_transaction()
if transaction is None:
return wrapped(*args, **kwargs)
guid = '%016x' % random.getrandbits(64)
uri, method = _get_uri_method(instance)
args, kwargs = prepare(transaction, guid, *args, **kwargs)
future = wrapped(*args, **kwargs)
future._nr_guid = guid
future._nr_args = ('gRPC', uri, method)
future._nr_start_time = time.time()
return future
wrap_function_wrapper(module, object_path, _future_wrapper)
def wrap_next(_wrapped, _instance, _args, _kwargs):
_nr_args = getattr(_instance, '_nr_args', None)
if not _nr_args:
return _wrapped(*_args, **_kwargs)
try:
return _wrapped(*_args, **_kwargs)
except Exception:
delattr(_instance, '_nr_args')
_nr_start_time = getattr(_instance, '_nr_start_time', 0.0)
_nr_guid = getattr(_instance, '_nr_guid', None)
with ExternalTrace(*_nr_args) as t:
t.start_time = _nr_start_time or t.start_time
t.guid = _nr_guid or t.guid
raise
def wrap_result(_wrapped, _instance, _args, _kwargs):
_nr_args = getattr(_instance, '_nr_args', None)
if not _nr_args:
return _wrapped(*_args, **_kwargs)
delattr(_instance, '_nr_args')
_nr_start_time = getattr(_instance, '_nr_start_time', 0.0)
_nr_guid = getattr(_instance, '_nr_guid', None)
try:
result = _wrapped(*_args, **_kwargs)
except Exception:
with ExternalTrace(*_nr_args) as t:
t.start_time = _nr_start_time or t.start_time
t.guid = _nr_guid or t.guid
raise
else:
with ExternalTrace(*_nr_args) as t:
t.start_time = _nr_start_time or t.start_time
t.guid = _nr_guid or t.guid
return result
def _bind_transaction_args(rpc_event, state, behavior, *args, **kwargs):
return rpc_event, behavior
def grpc_web_transaction(wrapped, instance, args, kwargs):
rpc_event, behavior = _bind_transaction_args(*args, **kwargs)
behavior_name = callable_name(behavior)
call_details = (
getattr(rpc_event, 'call_details', None) or
getattr(rpc_event, 'request_call_details', None))
metadata = (
getattr(rpc_event, 'invocation_metadata', None) or
getattr(rpc_event, 'request_metadata', None))
host = port = None
if call_details:
try:
host, port = call_details.host.split(b':', 1)
except Exception:
pass
request_path = call_details.method
return WebTransactionWrapper(
wrapped,
name=behavior_name,
request_path=request_path,
host=host,
port=port,
headers=metadata)(*args, **kwargs)
def _trailing_metadata(state, *args, **kwargs):
return state.trailing_metadata
def _nr_wrap_status_code(wrapped, instance, args, kwargs):
status_code = wrapped(*args, **kwargs)
response_headers = _trailing_metadata(*args, **kwargs)
transaction = current_transaction()
if transaction:
transaction.process_response(status_code, response_headers)
return status_code
def _nr_wrap_abort(wrapped, instance, args, kwargs):
record_exception()
return wrapped(*args, **kwargs)
def instrument_grpc__channel(module):
wrap_call(module, '_UnaryUnaryMultiCallable.__call__',
_prepare_request)
wrap_call(module, '_UnaryUnaryMultiCallable.with_call',
_prepare_request)
wrap_future(module, '_UnaryUnaryMultiCallable.future',
_prepare_request)
wrap_future(module, '_UnaryStreamMultiCallable.__call__',
_prepare_request)
wrap_call(module, '_StreamUnaryMultiCallable.__call__',
_prepare_request_stream)
wrap_call(module, '_StreamUnaryMultiCallable.with_call',
_prepare_request_stream)
wrap_future(module, '_StreamUnaryMultiCallable.future',
_prepare_request_stream)
wrap_future(module, '_StreamStreamMultiCallable.__call__',
_prepare_request_stream)
if hasattr(module, '_MultiThreadedRendezvous'):
wrap_function_wrapper(module, '_MultiThreadedRendezvous.result',
wrap_result)
wrap_function_wrapper(module, '_MultiThreadedRendezvous._next',
wrap_next)
else:
wrap_function_wrapper(module, '_Rendezvous.result',
wrap_result)
wrap_function_wrapper(module, '_Rendezvous._next',
wrap_next)
wrap_function_wrapper(module, '_Rendezvous.cancel',
wrap_result)
def instrument_grpc_server(module):
wrap_function_wrapper(module, '_unary_response_in_pool',
grpc_web_transaction)
wrap_function_wrapper(module, '_stream_response_in_pool',
grpc_web_transaction)
wrap_function_wrapper(module, '_completion_code',
_nr_wrap_status_code)
wrap_function_wrapper(module, '_abortion_code',
_nr_wrap_status_code)
wrap_function_wrapper(module, '_abort',
_nr_wrap_abort)
| true | true |
f71b28ebefb77cb9a3e1c49a1442eb967f6d40ea | 2,038 | py | Python | pytorch_lightning/plugins/training_type/sharded_spawn.py | peblair/pytorch-lightning | e676ff96b16224331297dbd0e5ecd5cf364965b8 | [
"Apache-2.0"
] | 1 | 2021-02-12T04:15:31.000Z | 2021-02-12T04:15:31.000Z | pytorch_lightning/plugins/training_type/sharded_spawn.py | peblair/pytorch-lightning | e676ff96b16224331297dbd0e5ecd5cf364965b8 | [
"Apache-2.0"
] | null | null | null | pytorch_lightning/plugins/training_type/sharded_spawn.py | peblair/pytorch-lightning | e676ff96b16224331297dbd0e5ecd5cf364965b8 | [
"Apache-2.0"
] | null | null | null | from typing import Optional
from pytorch_lightning.core.optimizer import is_lightning_optimizer
from pytorch_lightning.plugins.training_type.ddp_spawn import DDPSpawnPlugin
from pytorch_lightning.utilities import _FAIRSCALE_AVAILABLE, rank_zero_only
if _FAIRSCALE_AVAILABLE:
from fairscale.optim import OSS
from pytorch_lightning.overrides.fairscale import LightningShardedDataParallel
class DDPSpawnShardedPlugin(DDPSpawnPlugin):
def configure_ddp(self):
self._wrap_optimizers()
self._model = LightningShardedDataParallel(
self.model, sharded_optimizer=self.lightning_module.trainer.optimizers
)
def _reinit_optimizers_with_oss(self):
optimizers = self.lightning_module.trainer.optimizers
for x, optimizer in enumerate(optimizers):
if is_lightning_optimizer(optimizer):
optimizer = optimizer._optimizer
if not isinstance(optimizer, OSS):
optim_class = type(optimizer)
zero_optimizer = OSS(params=optimizer.param_groups, optim=optim_class, **optimizer.defaults)
optimizers[x] = zero_optimizer
del optimizer
trainer = self.lightning_module.trainer
trainer.optimizers = trainer.convert_to_lightning_optimizers(optimizers)
def _wrap_optimizers(self):
trainer = self.model.trainer
if trainer.testing:
return
self._reinit_optimizers_with_oss()
def optimizer_state(self, optimizer: 'OSS') -> Optional[dict]:
if is_lightning_optimizer(optimizer):
optimizer = optimizer._optimizer
if isinstance(optimizer, OSS):
optimizer.consolidate_state_dict()
return self._optim_state_dict(optimizer)
@rank_zero_only
def _optim_state_dict(self, optimizer):
"""
Retrieves state dict only on rank 0, which contains the entire optimizer state after calling
:meth:`consolidate_state_dict`.
"""
return optimizer.state_dict()
| 37.054545 | 108 | 0.707066 | from typing import Optional
from pytorch_lightning.core.optimizer import is_lightning_optimizer
from pytorch_lightning.plugins.training_type.ddp_spawn import DDPSpawnPlugin
from pytorch_lightning.utilities import _FAIRSCALE_AVAILABLE, rank_zero_only
if _FAIRSCALE_AVAILABLE:
from fairscale.optim import OSS
from pytorch_lightning.overrides.fairscale import LightningShardedDataParallel
class DDPSpawnShardedPlugin(DDPSpawnPlugin):
def configure_ddp(self):
self._wrap_optimizers()
self._model = LightningShardedDataParallel(
self.model, sharded_optimizer=self.lightning_module.trainer.optimizers
)
def _reinit_optimizers_with_oss(self):
optimizers = self.lightning_module.trainer.optimizers
for x, optimizer in enumerate(optimizers):
if is_lightning_optimizer(optimizer):
optimizer = optimizer._optimizer
if not isinstance(optimizer, OSS):
optim_class = type(optimizer)
zero_optimizer = OSS(params=optimizer.param_groups, optim=optim_class, **optimizer.defaults)
optimizers[x] = zero_optimizer
del optimizer
trainer = self.lightning_module.trainer
trainer.optimizers = trainer.convert_to_lightning_optimizers(optimizers)
def _wrap_optimizers(self):
trainer = self.model.trainer
if trainer.testing:
return
self._reinit_optimizers_with_oss()
def optimizer_state(self, optimizer: 'OSS') -> Optional[dict]:
if is_lightning_optimizer(optimizer):
optimizer = optimizer._optimizer
if isinstance(optimizer, OSS):
optimizer.consolidate_state_dict()
return self._optim_state_dict(optimizer)
@rank_zero_only
def _optim_state_dict(self, optimizer):
return optimizer.state_dict()
| true | true |
f71b296b1a35ac64d40aa0c9ca07717a2e1e1b1b | 17,693 | py | Python | models/pages.py | tobiassernhede/multi_user_blog | c657c5dacdab7b04cf226f75a085e8ac5a1d54a2 | [
"MIT"
] | null | null | null | models/pages.py | tobiassernhede/multi_user_blog | c657c5dacdab7b04cf226f75a085e8ac5a1d54a2 | [
"MIT"
] | null | null | null | models/pages.py | tobiassernhede/multi_user_blog | c657c5dacdab7b04cf226f75a085e8ac5a1d54a2 | [
"MIT"
] | null | null | null | # Split up the pages functionality in separate file to make the code
# easier to read
import os
import re
import webapp2
import jinja2
import json
from google.appengine.ext import ndb
from google.appengine.api import images
# Importing local .py files
from models.users import User, users_key, make_secure_val, check_secure_val
from models.posts import Post, blog_key
from models.comments import Comment, comment_key
from models.likes import Likes
template_dir = os.path.join(os.path.dirname(__file__), '..', 'templates')
jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir),
autoescape=True)
class Handler(webapp2.RequestHandler):
def write(self, *a, **kw):
self.response.out.write(*a, **kw)
def render_str(self, template, **params):
params['user'] = self.user
t = jinja_env.get_template(template)
return t.render(params)
def render(self, template, **kw):
self.write(self.render_str(template, **kw))
def set_secure_cookie(self, name, val):
cookie_val = make_secure_val(val)
self.response.headers.add_header(
'Set-Cookie',
'%s=%s; Path=/' % (name, cookie_val))
def read_secure_cookie(self, name):
cookie_val = self.request.cookies.get(name)
return cookie_val and check_secure_val(cookie_val)
def login(self, user):
self.set_secure_cookie('user_id', str(user.key.id()))
def logout(self):
self.response.headers.add_header('Set-Cookie', 'user_id=; Path=/')
def initialize(self, *a, **kw):
webapp2.RequestHandler.initialize(self, *a, **kw)
uid = self.read_secure_cookie('user_id')
self.user = uid and User.by_id(int(uid))
class MainPage(Handler):
def get(self):
# Running a post query for frontpage sorted by created date
posts = Post.query()
posts = posts.order(-Post.created)
posts = posts.fetch()
self.render('index.html', posts=posts)
#### User Pages ####
USER_RE = re.compile(r"^[a-zA-Z0-9_-]{3,20}$")
def valid_username(username):
return username and USER_RE.match(username)
PASS_RE = re.compile(r"^.{3,20}$")
def valid_password(password):
return password and PASS_RE.match(password)
EMAIL_RE = re.compile(r'^[\S]+@[\S]+\.[\S]+$')
def valid_email(email):
return not email or EMAIL_RE.match(email)
class RegisterPage(Handler):
def get(self):
if self.user:
self.redirect("/profile/" + self.user.name)
else:
self.render("register.html")
def post(self):
have_error = False
self.username = self.request.get('username')
self.password = self.request.get('password')
self.verify = self.request.get('verify')
self.email = self.request.get('email')
self.description = self.request.get('description')
self.profile_img = self.request.get('profile_img')
self.error_msg = [] # setting up a list of potential error messages
self.params = dict(username=self.username,
email=self.email, description=self.description)
if not valid_username(self.username):
self.error_msg.extend(["That's not a valid username."])
have_error = True
if not valid_password(self.password):
self.error_msg.extend(["That wasn't a valid password."])
have_error = True
elif self.password != self.verify:
self.error_msg.extend(["Your passwords didn't match."])
have_error = True
if not valid_email(self.email):
self.serror_msg.extend(["That's not a valid email."])
have_error = True
if have_error:
# add error_msg to params dict if have error
self.params['error_msg'] = self.error_msg
self.render('register.html', **self.params)
else:
self.done()
def done(self):
# make sure the user doesn't already exist
u = User.by_name(self.username)
if u:
self.error_msg.extend(["That user already exists."])
self.params['error_msg'] = self.error_msg
self.render('register.html', **self.params)
else:
u = User.register(
self.username, self.password, self.email,
self.description, self.profile_img)
u.put()
self.login(u)
self.redirect('/profile/' + u.name)
class ProfilePage(Handler):
def get(self, user_profile): # fetching the username from the uri
# get the profile page of the user by name
current_user_profile = User.by_name(user_profile)
if not current_user_profile:
self.response.set_status(404)
self.render("404.html")
return
# run a query of all the posts this user has made
posts = Post.query()
posts = posts.filter(Post.user_id == current_user_profile.key.id())
posts = posts.order(-Post.created)
posts = posts.fetch()
self.render(
'profile.html', user_profile=current_user_profile, posts=posts)
class EditProfilePage(Handler):
def get(self):
# Can only edit your own profile and you must be logged in so just
# checks if user is logged in
if self.user:
user = User.by_id(int(self.user.key.id()))
self.render("edit-profile.html", user=user)
else:
self.redirect("/login")
def post(self):
if not self.user:
self.redirect("/login")
return
have_error = False
self.username = self.request.get('username')
self.password = self.request.get('password')
self.verify = self.request.get('verify')
self.email = self.request.get('email')
self.description = self.request.get('description')
self.profile_img = self.request.get('profile_img')
self.delete_profile_img = self.request.get('delete_profile_img')
self.user_id = self.user.key.id()
self.error_msg = []
self.params = dict(username=self.username,
email=self.email, description=self.description)
if not valid_username(self.username):
self.error_msg.extend(["That's not a valid username."])
have_error = True
if self.password:
if not valid_password(self.password):
self.error_msg.extend(["That wasn't a valid password."])
have_error = True
elif self.password != self.verify:
self.error_msg.extend(["Your passwords didn't match."])
have_error = True
if not valid_email(self.email):
self.serror_msg.extend(["That's not a valid email."])
have_error = True
if have_error:
self.params['error_msg'] = self.error_msg
self.render('register.html', **self.params)
else:
self.done()
def done(self):
# make sure the user doesn't already exist
u = User.by_name(self.username)
if u and not self.username == self.user.name:
self.error_msg.extend(["That user already exists."])
self.params['error_msg'] = self.error_msg
self.render('register.html', **self.params)
else:
user_update = User.update(self.username, self.password, self.email,
self.description, self.profile_img,
self.delete_profile_img, self.user_id)
self.redirect('/profile/' + self.user.name)
class LoginPage(Handler):
def get(self):
self.render('login.html')
def post(self):
username = self.request.get('username')
password = self.request.get('password')
u = User.login(username, password)
if u:
self.login(u)
self.redirect('/blog')
else:
# simplified the error message handling as there is only one error
# message possible
msg = 'Invalid login'
self.render('login.html', error=msg)
class Logout(Handler):
def get(self):
self.logout() # Call logout function of parent class Handler
self.redirect('/') # Redirect to frontpage on logout
#### Blog Pages ####
class BlogPage(Handler):
def get(self):
# if you try to reach /blog then redirect to frontpage.
self.redirect('/')
class PostPage(Handler):
def get(self, post_id): # get the post_id from the uri
key = ndb.Key('Post', int(post_id), parent=blog_key())
post = key.get()
# used for comments if there is an error in the comment form
# See CommentPost class
comment_error = self.request.get('comment_error')
if not post:
# If there is no post then show a 404 as users can delete their own
# posts
self.response.set_status(404)
self.render("404.html")
return
# Collect the comments that belongs to this post
comments = Comment.by_post_id(int(post_id))
# For smarter rendering in the template I put the comments in a list
# with a dict inside
comment_output = []
# loop through each comment and create the dict for each comment
for comment in comments:
user_name = User.username_by_id(int(comment.user_id))
if not user_name:
user_name = "Deleted User"
comment = dict(content=comment.content, created=comment.created,
user_name=user_name, comment_id=comment.key.id())
comment_output.append(comment)
author = User.by_id(post.user_id)
self.render(
"post.html", post=post, author=author,
comment_output=comment_output, comment_error=comment_error)
class CreatePostPage(Handler):
def get(self):
if self.user:
self.render('create-post.html')
else:
self.redirect('/login')
def post(self):
if not self.user:
self.redirect('/login')
subject = self.request.get('subject')
content = self.request.get('content')
user_id = self.user.key.id()
featured_img = self.request.get('featured_img')
# error handling is done inside the Post.create class found in posts.py
post = Post.create(subject, content, featured_img, user_id)
if post.has_error:
params = dict(
subject=subject, content=content, error_msg=post.error_msg)
self.render('create-post.html', **params)
else:
self.redirect('/blog/%s' % str(post.p.key.id()))
class EditPost(Handler):
def get(self, post_id):
if not self.user:
self.redirect('/login')
return
post = Post.by_id(int(post_id))
if post.user_id == self.user.key.id():
self.render("edit-post.html", post=post)
else:
self.redirect('/blog/' + post_id)
def post(self, post_id):
if not self.user:
self.redirect('/login')
return
subject = self.request.get('subject')
content = self.request.get('content')
# Possible to delete featured image so added one extra var
# for edit post
delete_featured_img = self.request.get('delete_featured_img')
featured_img = self.request.get('featured_img')
# error handling is done inside the Post.update class found in posts.py
post = Post.update(
int(post_id), subject, content, featured_img, delete_featured_img)
if post.has_error:
# If errors show the form again with the error messages
params = dict(
subject=subject, content=content, error_msg=post.error_msg)
self.render('edit-post.html', **params)
else:
# Else redirect to the updated post
self.redirect('/blog/%s' % str(post.p.key.id()))
class DeletePost(Handler):
def get(self, post_id):
if not self.user: # check if user is logged in
self.redirect('/login')
return
post = Post.by_id(int(post_id))
# check if user the same as the author
if post.user_id == self.user.key.id():
post.key.delete()
self.redirect('/profile/' + self.user.name)
else:
self.redirect('/blog/' + post_id)
class DeleteComment(Handler):
def get(self, comment_id, post_id):
if not self.user: # check if user is logged in
self.redirect('/login')
return
self.write(comment_id)
comment = Comment.get_by_id(int(comment_id), parent=comment_key())
if not comment:
self.redirect('/blog/' + post_id + '#comments-list')
return
# check if user is the same as the author
if comment.user_id == self.user.key.id():
comment.key.delete()
self.redirect('/blog/' + post_id + '#comments-list')
else:
self.redirect('/blog/' + post_id + '#comments-list')
class CommentPost(Handler):
def post(self, post_id):
if not self.user:
self.rediret('/login')
return
content = self.request.get('comment')
user_id = self.user.key.id()
comment = Comment.create(content, post_id, user_id)
if comment.has_error:
self.redirect(
"/blog/" + post_id + "?comment_error=true#commentform")
# redirect to PostPage class where the error messsage is handled
else:
self.redirect('/blog/%s#%s' % (str(post_id), "comments-list"))
class LikePost(Handler):
def get(self, post_id):
key = ndb.Key('Post', int(post_id), parent=blog_key())
post = key.get()
error = dict()
response = None
logged_in = False
if post:
author_id = post.user_id
if self.user:
logged_in = True
if author_id == self.user.key.id():
error['has_error'] = True
error['error_msg'] = "Can't like your own post"
else:
add_like = Likes.add_like(int(post_id), self.user.key.id())
response = add_like.response
else:
error['has_error'] = True
error['error_msg'] = "No post found"
self.write(
json.dumps(({'logged_in': logged_in,
'response': response, 'error': error})))
class MissingPage(Handler):
def get(self):
# If a user tries to write a url that doesn't exist fallback is a 404
# template
self.response.set_status(404)
self.render("404.html")
return
class RouteProfile(Handler):
def get(self):
# If a user tries to visit just /profile rediret to frontpage
self.redirect('/')
class Image(Handler):
""" Class for image handling.
There are two types of images; Featured Image (featured_img) for posts
and Profile Images (profile_img) for profile.
This is used to create a unique url for each image and make it possible to serve images.
"""
def get(self):
img_id = self.request.get('id')
img_type = self.request.get('type')
if img_id.isdigit():
if img_type == 'featured_img':
img_key = ndb.Key('Post', int(img_id), parent=blog_key())
elif img_type == "profile_img":
img_key = ndb.Key('User', int(img_id), parent=users_key())
if img_key:
img = img_key.get()
if img_type == "featured_img":
if img.featured_img:
self.response.headers['Content-Type'] = 'image/png'
self.response.out.write(img.featured_img)
return
elif img_type == "profile_img":
if img.profile_img:
self.response.headers['Content-Type'] = 'image/png'
self.response.out.write(img.profile_img)
return
self.response.set_status(404)
self.render("404.html")
appLoader = webapp2.WSGIApplication([('/', MainPage),
('/register', RegisterPage),
('/login', LoginPage),
('/logout', Logout),
('/profile', RouteProfile),
('/profile/(\w+)', ProfilePage),
('/edit-profile', EditProfilePage),
('/create-post', CreatePostPage),
('/blog/([0-9]+)/edit', EditPost),
('/blog/([0-9]+)/delete', DeletePost),
('/comment/([0-9]+)', CommentPost),
('/blog/([0-9]+)/like', LikePost),
('/comment/([0-9]+)/([0-9]+)/delete',
DeleteComment),
('/blog', BlogPage),
('/blog/([0-9]+)', PostPage),
('/img', Image),
('/.*', MissingPage)
],
debug=True)
| 33.50947 | 92 | 0.562652 |
import os
import re
import webapp2
import jinja2
import json
from google.appengine.ext import ndb
from google.appengine.api import images
from models.users import User, users_key, make_secure_val, check_secure_val
from models.posts import Post, blog_key
from models.comments import Comment, comment_key
from models.likes import Likes
template_dir = os.path.join(os.path.dirname(__file__), '..', 'templates')
jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir),
autoescape=True)
class Handler(webapp2.RequestHandler):
def write(self, *a, **kw):
self.response.out.write(*a, **kw)
def render_str(self, template, **params):
params['user'] = self.user
t = jinja_env.get_template(template)
return t.render(params)
def render(self, template, **kw):
self.write(self.render_str(template, **kw))
def set_secure_cookie(self, name, val):
cookie_val = make_secure_val(val)
self.response.headers.add_header(
'Set-Cookie',
'%s=%s; Path=/' % (name, cookie_val))
def read_secure_cookie(self, name):
cookie_val = self.request.cookies.get(name)
return cookie_val and check_secure_val(cookie_val)
def login(self, user):
self.set_secure_cookie('user_id', str(user.key.id()))
def logout(self):
self.response.headers.add_header('Set-Cookie', 'user_id=; Path=/')
def initialize(self, *a, **kw):
webapp2.RequestHandler.initialize(self, *a, **kw)
uid = self.read_secure_cookie('user_id')
self.user = uid and User.by_id(int(uid))
class MainPage(Handler):
def get(self):
posts = Post.query()
posts = posts.order(-Post.created)
posts = posts.fetch()
self.render('index.html', posts=posts)
rname(username):
return username and USER_RE.match(username)
PASS_RE = re.compile(r"^.{3,20}$")
def valid_password(password):
return password and PASS_RE.match(password)
EMAIL_RE = re.compile(r'^[\S]+@[\S]+\.[\S]+$')
def valid_email(email):
return not email or EMAIL_RE.match(email)
class RegisterPage(Handler):
def get(self):
if self.user:
self.redirect("/profile/" + self.user.name)
else:
self.render("register.html")
def post(self):
have_error = False
self.username = self.request.get('username')
self.password = self.request.get('password')
self.verify = self.request.get('verify')
self.email = self.request.get('email')
self.description = self.request.get('description')
self.profile_img = self.request.get('profile_img')
self.error_msg = []
self.params = dict(username=self.username,
email=self.email, description=self.description)
if not valid_username(self.username):
self.error_msg.extend(["That's not a valid username."])
have_error = True
if not valid_password(self.password):
self.error_msg.extend(["That wasn't a valid password."])
have_error = True
elif self.password != self.verify:
self.error_msg.extend(["Your passwords didn't match."])
have_error = True
if not valid_email(self.email):
self.serror_msg.extend(["That's not a valid email."])
have_error = True
if have_error:
self.params['error_msg'] = self.error_msg
self.render('register.html', **self.params)
else:
self.done()
def done(self):
u = User.by_name(self.username)
if u:
self.error_msg.extend(["That user already exists."])
self.params['error_msg'] = self.error_msg
self.render('register.html', **self.params)
else:
u = User.register(
self.username, self.password, self.email,
self.description, self.profile_img)
u.put()
self.login(u)
self.redirect('/profile/' + u.name)
class ProfilePage(Handler):
def get(self, user_profile): # fetching the username from the uri
# get the profile page of the user by name
current_user_profile = User.by_name(user_profile)
if not current_user_profile:
self.response.set_status(404)
self.render("404.html")
return
# run a query of all the posts this user has made
posts = Post.query()
posts = posts.filter(Post.user_id == current_user_profile.key.id())
posts = posts.order(-Post.created)
posts = posts.fetch()
self.render(
'profile.html', user_profile=current_user_profile, posts=posts)
class EditProfilePage(Handler):
def get(self):
# Can only edit your own profile and you must be logged in so just
# checks if user is logged in
if self.user:
user = User.by_id(int(self.user.key.id()))
self.render("edit-profile.html", user=user)
else:
self.redirect("/login")
def post(self):
if not self.user:
self.redirect("/login")
return
have_error = False
self.username = self.request.get('username')
self.password = self.request.get('password')
self.verify = self.request.get('verify')
self.email = self.request.get('email')
self.description = self.request.get('description')
self.profile_img = self.request.get('profile_img')
self.delete_profile_img = self.request.get('delete_profile_img')
self.user_id = self.user.key.id()
self.error_msg = []
self.params = dict(username=self.username,
email=self.email, description=self.description)
if not valid_username(self.username):
self.error_msg.extend(["That's not a valid username."])
have_error = True
if self.password:
if not valid_password(self.password):
self.error_msg.extend(["That wasn't a valid password."])
have_error = True
elif self.password != self.verify:
self.error_msg.extend(["Your passwords didn't match."])
have_error = True
if not valid_email(self.email):
self.serror_msg.extend(["That's not a valid email."])
have_error = True
if have_error:
self.params['error_msg'] = self.error_msg
self.render('register.html', **self.params)
else:
self.done()
def done(self):
# make sure the user doesn't already exist
u = User.by_name(self.username)
if u and not self.username == self.user.name:
self.error_msg.extend(["That user already exists."])
self.params['error_msg'] = self.error_msg
self.render('register.html', **self.params)
else:
user_update = User.update(self.username, self.password, self.email,
self.description, self.profile_img,
self.delete_profile_img, self.user_id)
self.redirect('/profile/' + self.user.name)
class LoginPage(Handler):
def get(self):
self.render('login.html')
def post(self):
username = self.request.get('username')
password = self.request.get('password')
u = User.login(username, password)
if u:
self.login(u)
self.redirect('/blog')
else:
msg = 'Invalid login'
self.render('login.html', error=msg)
class Logout(Handler):
def get(self):
self.logout()
self.redirect('/')
self.redirect('/')
class PostPage(Handler):
def get(self, post_id):
key = ndb.Key('Post', int(post_id), parent=blog_key())
post = key.get()
comment_error = self.request.get('comment_error')
if not post:
self.response.set_status(404)
self.render("404.html")
return
comments = Comment.by_post_id(int(post_id))
comment_output = []
for comment in comments:
user_name = User.username_by_id(int(comment.user_id))
if not user_name:
user_name = "Deleted User"
comment = dict(content=comment.content, created=comment.created,
user_name=user_name, comment_id=comment.key.id())
comment_output.append(comment)
author = User.by_id(post.user_id)
self.render(
"post.html", post=post, author=author,
comment_output=comment_output, comment_error=comment_error)
class CreatePostPage(Handler):
def get(self):
if self.user:
self.render('create-post.html')
else:
self.redirect('/login')
def post(self):
if not self.user:
self.redirect('/login')
subject = self.request.get('subject')
content = self.request.get('content')
user_id = self.user.key.id()
featured_img = self.request.get('featured_img')
post = Post.create(subject, content, featured_img, user_id)
if post.has_error:
params = dict(
subject=subject, content=content, error_msg=post.error_msg)
self.render('create-post.html', **params)
else:
self.redirect('/blog/%s' % str(post.p.key.id()))
class EditPost(Handler):
def get(self, post_id):
if not self.user:
self.redirect('/login')
return
post = Post.by_id(int(post_id))
if post.user_id == self.user.key.id():
self.render("edit-post.html", post=post)
else:
self.redirect('/blog/' + post_id)
def post(self, post_id):
if not self.user:
self.redirect('/login')
return
subject = self.request.get('subject')
content = self.request.get('content')
delete_featured_img = self.request.get('delete_featured_img')
featured_img = self.request.get('featured_img')
post = Post.update(
int(post_id), subject, content, featured_img, delete_featured_img)
if post.has_error:
params = dict(
subject=subject, content=content, error_msg=post.error_msg)
self.render('edit-post.html', **params)
else:
self.redirect('/blog/%s' % str(post.p.key.id()))
class DeletePost(Handler):
def get(self, post_id):
if not self.user:
self.redirect('/login')
return
post = Post.by_id(int(post_id))
if post.user_id == self.user.key.id():
post.key.delete()
self.redirect('/profile/' + self.user.name)
else:
self.redirect('/blog/' + post_id)
class DeleteComment(Handler):
def get(self, comment_id, post_id):
if not self.user:
self.redirect('/login')
return
self.write(comment_id)
comment = Comment.get_by_id(int(comment_id), parent=comment_key())
if not comment:
self.redirect('/blog/' + post_id + '#comments-list')
return
if comment.user_id == self.user.key.id():
comment.key.delete()
self.redirect('/blog/' + post_id + '#comments-list')
else:
self.redirect('/blog/' + post_id + '#comments-list')
class CommentPost(Handler):
def post(self, post_id):
if not self.user:
self.rediret('/login')
return
content = self.request.get('comment')
user_id = self.user.key.id()
comment = Comment.create(content, post_id, user_id)
if comment.has_error:
self.redirect(
"/blog/" + post_id + "?comment_error=true#commentform")
else:
self.redirect('/blog/%s#%s' % (str(post_id), "comments-list"))
class LikePost(Handler):
def get(self, post_id):
key = ndb.Key('Post', int(post_id), parent=blog_key())
post = key.get()
error = dict()
response = None
logged_in = False
if post:
author_id = post.user_id
if self.user:
logged_in = True
if author_id == self.user.key.id():
error['has_error'] = True
error['error_msg'] = "Can't like your own post"
else:
add_like = Likes.add_like(int(post_id), self.user.key.id())
response = add_like.response
else:
error['has_error'] = True
error['error_msg'] = "No post found"
self.write(
json.dumps(({'logged_in': logged_in,
'response': response, 'error': error})))
class MissingPage(Handler):
def get(self):
# If a user tries to write a url that doesn't exist fallback is a 404
self.response.set_status(404)
self.render("404.html")
return
class RouteProfile(Handler):
def get(self):
self.redirect('/')
class Image(Handler):
def get(self):
img_id = self.request.get('id')
img_type = self.request.get('type')
if img_id.isdigit():
if img_type == 'featured_img':
img_key = ndb.Key('Post', int(img_id), parent=blog_key())
elif img_type == "profile_img":
img_key = ndb.Key('User', int(img_id), parent=users_key())
if img_key:
img = img_key.get()
if img_type == "featured_img":
if img.featured_img:
self.response.headers['Content-Type'] = 'image/png'
self.response.out.write(img.featured_img)
return
elif img_type == "profile_img":
if img.profile_img:
self.response.headers['Content-Type'] = 'image/png'
self.response.out.write(img.profile_img)
return
self.response.set_status(404)
self.render("404.html")
appLoader = webapp2.WSGIApplication([('/', MainPage),
('/register', RegisterPage),
('/login', LoginPage),
('/logout', Logout),
('/profile', RouteProfile),
('/profile/(\w+)', ProfilePage),
('/edit-profile', EditProfilePage),
('/create-post', CreatePostPage),
('/blog/([0-9]+)/edit', EditPost),
('/blog/([0-9]+)/delete', DeletePost),
('/comment/([0-9]+)', CommentPost),
('/blog/([0-9]+)/like', LikePost),
('/comment/([0-9]+)/([0-9]+)/delete',
DeleteComment),
('/blog', BlogPage),
('/blog/([0-9]+)', PostPage),
('/img', Image),
('/.*', MissingPage)
],
debug=True)
| true | true |
f71b2aad75e30594e61025ad33be2a2c17932235 | 2,792 | py | Python | reinvent_models/link_invent/networks/encoder_decoder.py | GT4SD/-reinvent_models | e1cf00d1b24fe5f39354e34829adc25460da84e2 | [
"MIT"
] | null | null | null | reinvent_models/link_invent/networks/encoder_decoder.py | GT4SD/-reinvent_models | e1cf00d1b24fe5f39354e34829adc25460da84e2 | [
"MIT"
] | 1 | 2022-03-07T12:18:00.000Z | 2022-03-07T12:18:00.000Z | reinvent_models/link_invent/networks/encoder_decoder.py | GT4SD/reinvent_models | e1cf00d1b24fe5f39354e34829adc25460da84e2 | [
"MIT"
] | null | null | null | """
Implementation of a network using an Encoder-Decoder architecture.
"""
import torch.nn as tnn
from torch import Tensor
from reinvent_models.link_invent.networks.decoder import Decoder
from reinvent_models.link_invent.networks.encoder import Encoder
class EncoderDecoder(tnn.Module):
"""
An encoder-decoder that combines input with generated targets.
"""
def __init__(self, encoder_params: dict, decoder_params: dict):
super(EncoderDecoder, self).__init__()
self._encoder = Encoder(**encoder_params)
self._decoder = Decoder(**decoder_params)
def forward(self, encoder_seqs: Tensor, encoder_seq_lengths: Tensor, decoder_seqs: Tensor,
decoder_seq_lengths: Tensor):
"""
Performs the forward pass.
:param encoder_seqs: A tensor with the output sequences (batch, seq_d, dim).
:param encoder_seq_lengths: A list with the length of each input sequence.
:param decoder_seqs: A tensor with the encoded input input sequences (batch, seq_e, dim).
:param decoder_seq_lengths: The lengths of the decoder sequences.
:return : The output logits as a tensor (batch, seq_d, dim).
"""
encoder_padded_seqs, hidden_states = self.forward_encoder(encoder_seqs, encoder_seq_lengths)
logits, _, _ = self.forward_decoder(decoder_seqs, decoder_seq_lengths, encoder_padded_seqs, hidden_states)
return logits
def forward_encoder(self, padded_seqs: Tensor, seq_lengths: Tensor):
"""
Does a forward pass only of the encoder.
:param padded_seqs: The data to feed the encoder.
:param seq_lengths: The length of each sequence in the batch.
:return : Returns a tuple with (encoded_seqs, hidden_states)
"""
return self._encoder(padded_seqs, seq_lengths)
def forward_decoder(self, padded_seqs: Tensor, seq_lengths: Tensor, encoder_padded_seqs: Tensor,
hidden_states: Tensor):
"""
Does a forward pass only of the decoder.
:param hidden_states: The hidden states from the encoder.
:param padded_seqs: The data to feed to the decoder.
:param seq_lengths: The length of each sequence in the batch.
:return : Returns the logits and the hidden state for each element of the sequence passed.
"""
return self._decoder(padded_seqs, seq_lengths, encoder_padded_seqs, hidden_states)
def get_params(self):
"""
Obtains the params for the network.
:return : A dict with the params.
"""
return {
"encoder_params": self._encoder.get_params(),
"decoder_params": self._decoder.get_params()
}
| 42.30303 | 115 | 0.666189 |
import torch.nn as tnn
from torch import Tensor
from reinvent_models.link_invent.networks.decoder import Decoder
from reinvent_models.link_invent.networks.encoder import Encoder
class EncoderDecoder(tnn.Module):
def __init__(self, encoder_params: dict, decoder_params: dict):
super(EncoderDecoder, self).__init__()
self._encoder = Encoder(**encoder_params)
self._decoder = Decoder(**decoder_params)
def forward(self, encoder_seqs: Tensor, encoder_seq_lengths: Tensor, decoder_seqs: Tensor,
decoder_seq_lengths: Tensor):
encoder_padded_seqs, hidden_states = self.forward_encoder(encoder_seqs, encoder_seq_lengths)
logits, _, _ = self.forward_decoder(decoder_seqs, decoder_seq_lengths, encoder_padded_seqs, hidden_states)
return logits
def forward_encoder(self, padded_seqs: Tensor, seq_lengths: Tensor):
return self._encoder(padded_seqs, seq_lengths)
def forward_decoder(self, padded_seqs: Tensor, seq_lengths: Tensor, encoder_padded_seqs: Tensor,
hidden_states: Tensor):
return self._decoder(padded_seqs, seq_lengths, encoder_padded_seqs, hidden_states)
def get_params(self):
return {
"encoder_params": self._encoder.get_params(),
"decoder_params": self._decoder.get_params()
}
| true | true |
f71b2accb33c9e4fb30a401746d3041c3b953b26 | 11,528 | py | Python | second/mayank_scripts/infer_ros_melodic_pretained_same_frame.py | mayanks888/second.pytorch | 02d37885a543ee46516648dcab7db8f5d677a179 | [
"MIT"
] | null | null | null | second/mayank_scripts/infer_ros_melodic_pretained_same_frame.py | mayanks888/second.pytorch | 02d37885a543ee46516648dcab7db8f5d677a179 | [
"MIT"
] | null | null | null | second/mayank_scripts/infer_ros_melodic_pretained_same_frame.py | mayanks888/second.pytorch | 02d37885a543ee46516648dcab7db8f5d677a179 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# ROS node libs
import time
import numpy as np
import rospy
import torch
# from geometry_msgs.msg import Quaternion, Pose, Point, Vector3
from pyquaternion import Quaternion
from google.protobuf import text_format
from sensor_msgs.msg import PointCloud2
from std_msgs.msg import Header, ColorRGBA
# from cv_bridge import CvBridge, CvBridgeError
from visualization_msgs.msg import Marker, MarkerArray
from second.protos import pipeline_pb2
# from second.utils import simplevis
from second.pytorch.train import build_network
from second.utils import config_tool
from std_msgs.msg import Int16, Float32MultiArray
from jsk_recognition_msgs.msg import BoundingBox, BoundingBoxArray
# import ros_numpy
# GPU settings: Select GPUs to use. Comment it out to let the system decide
# os.environ["CUDA_VISIBLE_DEVICES"]="0"
class ros_tensorflow_obj():
def __init__(self):
# ## Initial msg
rospy.loginfo(' ## Starting ROS interface ##')
# ## Load a (frozen) Tensorflow model into memory.
print("ready to process----------------------------------------------------------")
####################################################################################333
# config_path = "../configs/nuscenes/all.pp.largea.config"
# config_path = "/home/mayank_sati/codebase/python/lidar/second.pytorch/second/configs/pointpillars/car/xyres_28.config"
config_path = "/home/mayank_sati/codebase/python/lidar/second.pytorch/second/configs/pointpillars/car/xyres_24.config"
config = pipeline_pb2.TrainEvalPipelineConfig()
with open(config_path, "r") as f:
proto_str = f.read()
text_format.Merge(proto_str, config)
input_cfg = config.eval_input_reader
model_cfg = config.model.second
# config_tool.change_detection_range(model_cfg, [-50, -50, 50, 50])
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# ckpt_path = "../checkpoint/voxelnet-140670.tckpt"
ckpt_path="/home/mayank_sati/Downloads/pretrained_models_v1.5/pp_model_for_nuscenes_pretrain/voxelnet-296960.tckpt"
net = build_network(model_cfg).to(device).eval()
net.load_state_dict(torch.load(ckpt_path))
target_assigner = net.target_assigner
self.voxel_generator = net.voxel_generator
class_names = target_assigner.classes
grid_size = self.voxel_generator.grid_size
feature_map_size = grid_size[:2] // config_tool.get_downsample_factor(model_cfg)
feature_map_size = [*feature_map_size, 1][::-1]
anchors = target_assigner.generate_anchors(feature_map_size)["anchors"]
anchors = torch.tensor(anchors, dtype=torch.float32, device=device)
anchors = anchors.view(1, -1, 7)
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
feature_map_size = [1, 50, 50]
ret = target_assigner.generate_anchors(feature_map_size)
class_names = target_assigner.classes
anchors_dict = target_assigner.generate_anchors_dict(feature_map_size)
anchors_list = []
for k, v in anchors_dict.items():
anchors_list.append(v["anchors"])
# anchors = ret["anchors"]
anchors = np.concatenate(anchors_list, axis=0)
anchors = anchors.reshape([-1, target_assigner.box_ndim])
assert np.allclose(anchors, ret["anchors"].reshape(-1, target_assigner.box_ndim))
matched_thresholds = ret["matched_thresholds"]
unmatched_thresholds = ret["unmatched_thresholds"]
# anchors_bv = box_np_ops.rbbox2d_to_near_bbox(anchors[:, [0, 1, 3, 4, 6]])
anchors_bv = 2
anchor_cache = {
"anchors": anchors,
"anchors_bv": anchors_bv,
"matched_thresholds": matched_thresholds,
"unmatched_thresholds": unmatched_thresholds,
"anchors_dict": anchors_dict,
}
anchors = torch.tensor(anchors, dtype=torch.float32, device=device)
self.anchors = anchors.view(1, -1, 7)
self.net = net
self.device = device
##########################################################################################
# self.marker_publisher = rospy.Publisher('visualization_marker', MarkerArray, queue_size=5)
self.pcl_publisher = rospy.Publisher('result_pcl', PointCloud2, queue_size=1)
############
# [print(n.name) for n in tf.get_default_graph().as_graph_def().node]
# ROS environment setup
# ## Define subscribers
self.subscribers_def()
# ## Define publishers
self.publishers_def()
self.now = rospy.Time.now()
# Define subscribers
def subscribers_def(self):
# subs_topic = '/kitti/velo/pointcloud'
#subs_topic = '/apollo/sensor/velodyne64/compensator/PointCloud2'
# subs_topic = '/velodyne64_points'
# subs_topic = '/apollo/sensor/velodyne64/PointCloud2'
# subs_topic = '/points_raw'
# subs_topic = '/livox/lidar'
# subs_topic = '/apollo/sensor/velodyne32C/compensator/PointCloud2'
subs_topic = '/lidar_top'
self._sub = rospy.Subscriber(subs_topic, PointCloud2, self.lidar_callback, queue_size=10, buff_size=2 ** 24)
# mydata = rospy.Subscriber( subs_topic , PointCloud2, self.lidar_callback, queue_size=1, buff_size=2**24)
# print(mydata)
# self._sub = rospy.Subscriber( subs_topic , Image, self.lidar_callback, queue_size=1, buff_size=100)
# Define publishers
    def publishers_def(self):
        """Create the two detection output publishers.

        ``pc_bbox_topic`` carries a flat ``Float32MultiArray`` of box
        parameters for downstream consumers; ``Detections`` carries jsk
        ``BoundingBoxArray`` messages for visualization.
        """
        self._pub = rospy.Publisher('pc_bbox_topic', Float32MultiArray, queue_size=1)
        self.pub_arr_bbox = rospy.Publisher("Detections", BoundingBoxArray, queue_size=1)
# Camera image callback
def lidar_callback(self, point_cl_msg):
arr_bbox = BoundingBoxArray()
############################################################################3
# lidar = np.fromstring(point_cl_msg.data, dtype=np.float32)
# points = lidar.reshape(-1, 4)
# print('gotit"')
# pc = ros_numpy.numpify(point_cl_msg)
# points = np.zeros((pc.shape[0], 4))
# points[:, 0] = pc['x']
# points[:, 1] = pc['y']
# points[:, 2] = pc['z']
# points[:, 3] = pc['intensity']
# points[:, 3] /= 255
#########################################################333
lidar = np.fromstring(point_cl_msg.data, dtype=np.float32)
points = lidar.reshape(-1, 4)
points[:, 3] /= 255
#######################################################################
res = self.voxel_generator.generate(points, max_voxels=30000)
voxels = res["voxels"]
coords = res["coordinates"]
num_points = res["num_points_per_voxel"]
num_voxels = np.array([voxels.shape[0]], dtype=np.int64)
# print("voxel_generator_time",(time.time() - t)*1000)
###############################################################
# print(voxels.shape)
# add batch idx to coords
coords = np.pad(coords, ((0, 0), (1, 0)), mode='constant', constant_values=0)
voxels = torch.tensor(voxels, dtype=torch.float32, device=self.device)
coords = torch.tensor(coords, dtype=torch.int32, device=self.device)
num_points = torch.tensor(num_points, dtype=torch.int32, device=self.device)
# print("conversion time",(time.time() - t)*1000)
example = {"anchors": self.anchors, "voxels": voxels, "num_points": num_points, "coordinates": coords, }
t2 = time.time()
pred = self.net(example)[0]
# print(pred)
# print("prediction",(time.time() - t2)*1000)
# print("total_time",(time.time() - t)*1000)
boxes_lidar = pred["box3d_lidar"].detach().cpu().numpy()
scores_lidar = pred["scores"].detach().cpu().numpy()
labels_lidar = pred["label_preds"].detach().cpu().numpy()
##############################3333
threshold = 0.2
keep = np.where((scores_lidar >= threshold))[0]
scores_lidar = scores_lidar[keep]
print(scores_lidar)
boxes_lidar = boxes_lidar[keep]
labels_lidar = labels_lidar[keep]
# sco
# print(scores_lidar)
################################################################################
# self.show_text_in_rviz_mullti_cube(boxes_lidar,point_cl_msg)
# self.show_text_in_rviz_mullti_sphere(boxes_lidar,point_cl_msg)
##################################################################################
# apollo integration
# numboxes = np.squeeze(scores_lidar)
numboxes = len(scores_lidar)
tl_bbox = Float32MultiArray()
iLen = boxes_lidar.shape[0]
lidar_bbox = Float32MultiArray()
print('Processing no of object:', iLen)
if (numboxes) >= 1:
tmp = -np.ones(10 * (numboxes) + 1)
for i in range(0, int(numboxes)):
try:
score = float((scores_lidar)[i])
if (boxes_lidar.shape[0]) == 1:
bboxes = [float(v) for v in (boxes_lidar)[i]]
else:
bboxes = [float(v) for v in np.squeeze(boxes_lidar)[i]]
tmp[0] = numboxes
tmp[10 * i + 1] = score
tmp[10 * i + 2] = bboxes[0]
tmp[10 * i + 3] = bboxes[1]
tmp[10 * i + 4] = bboxes[2]
tmp[10 * i + 5] = bboxes[3]
tmp[10 * i + 6] = bboxes[4]
tmp[10 * i + 7] = bboxes[5]
tmp[10 * i + 8] = bboxes[6]
tmp[10 * i + 9] = 0
tmp[10 * i + 10] = 0
bbox = BoundingBox()
# bbox.header.frame_id = point_cl_msg.header.frame_id
# bbox.header.frame_id = 'livox_frame'
bbox.header.frame_id = 'lidar_top'
q = Quaternion(axis=(0, 0, 1), radians=-1.0 * float(boxes_lidar[i][6]))
bbox.pose.orientation.x = q.x
bbox.pose.orientation.y = q.y
bbox.pose.orientation.z = q.z
bbox.pose.orientation.w = q.w
bbox.pose.position.x = float(boxes_lidar[i][0])
bbox.pose.position.y = float(boxes_lidar[i][1])
bbox.pose.position.z = float(boxes_lidar[i][2])
bbox.dimensions.x = float(boxes_lidar[i][3])
bbox.dimensions.y = float(boxes_lidar[i][4])
bbox.dimensions.z = float(boxes_lidar[i][5])
arr_bbox.boxes.append(bbox)
except:
print("I am here")
# here data for publishing
tl_bbox.data = tmp
self._pub.publish(tl_bbox)
arr_bbox.header.frame_id = point_cl_msg.header.frame_id
self.pub_arr_bbox.publish(arr_bbox)
point_cl_msg.header.frame_id = point_cl_msg.header.frame_id
self.pcl_publisher.publish(point_cl_msg)
arr_bbox.boxes.clear()
    def spin(self):
        """Block the calling thread, servicing ROS callbacks until shutdown."""
        rospy.spin()
def main():
    """Entry point: start the detection node and spin until shutdown."""
    rospy.init_node('LIDAR_NODE', anonymous=True)
    # Keep a reference so the node (and its subscribers) stays alive
    # for the whole spin.
    node = ros_tensorflow_obj()
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print("Shutting down")


if __name__ == '__main__':
    main()
| 45.207843 | 128 | 0.567661 |
import time
import numpy as np
import rospy
import torch
from pyquaternion import Quaternion
from google.protobuf import text_format
from sensor_msgs.msg import PointCloud2
from std_msgs.msg import Header, ColorRGBA
from visualization_msgs.msg import Marker, MarkerArray
from second.protos import pipeline_pb2
from second.pytorch.train import build_network
from second.utils import config_tool
from std_msgs.msg import Int16, Float32MultiArray
from jsk_recognition_msgs.msg import BoundingBox, BoundingBoxArray
class ros_tensorflow_obj():
def __init__(self):
# Starting ROS interface ##')
| true | true |
f71b2b5262d128663739d9f88003925845a959b1 | 27,166 | py | Python | colour/corresponding/datasets/breneman1987.py | MaxSchambach/colour | 3f3685d616fda4be58cec20bc1e16194805d7e2d | [
"BSD-3-Clause"
] | null | null | null | colour/corresponding/datasets/breneman1987.py | MaxSchambach/colour | 3f3685d616fda4be58cec20bc1e16194805d7e2d | [
"BSD-3-Clause"
] | null | null | null | colour/corresponding/datasets/breneman1987.py | MaxSchambach/colour | 3f3685d616fda4be58cec20bc1e16194805d7e2d | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Breneman Corresponding Chromaticities Dataset
=============================================
Defines *Breneman (1987)* results for corresponding chromaticities experiments.
See Also
--------
`Corresponding Chromaticities Prediction Jupyter Notebook
<http://nbviewer.jupyter.org/github/colour-science/colour-notebooks/\
blob/master/notebooks/corresponding/prediction.ipynb>`_
References
----------
- :cite:`Breneman1987b` : Breneman, E. J. (1987). Corresponding
chromaticities for different states of adaptation to complex visual fields.
Journal of the Optical Society of America A, 4(6), 1115.
doi:10.1364/JOSAA.4.001115
"""
from __future__ import division, unicode_literals
import numpy as np
from collections import namedtuple
from colour.utilities.documentation import DocstringDict
# Package metadata (colour-science house style).
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2019 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-developers@colour-science.org'
__status__ = 'Production'
# Public API: the two record types, the twelve experiment tables and the
# two aggregate mappings defined below.
__all__ = [
    'BrenemanExperimentResult', 'PrimariesChromaticityCoordinates',
    'BRENEMAN_EXPERIMENT_1_RESULTS', 'BRENEMAN_EXPERIMENT_2_RESULTS',
    'BRENEMAN_EXPERIMENT_3_RESULTS', 'BRENEMAN_EXPERIMENT_4_RESULTS',
    'BRENEMAN_EXPERIMENT_5_RESULTS', 'BRENEMAN_EXPERIMENT_6_RESULTS',
    'BRENEMAN_EXPERIMENT_7_RESULTS', 'BRENEMAN_EXPERIMENT_10_RESULTS',
    'BRENEMAN_EXPERIMENT_8_RESULTS', 'BRENEMAN_EXPERIMENT_9_RESULTS',
    'BRENEMAN_EXPERIMENT_11_RESULTS', 'BRENEMAN_EXPERIMENT_12_RESULTS',
    'BRENEMAN_EXPERIMENTS_PRIMARIES_CHROMATICITIES', 'BRENEMAN_EXPERIMENTS'
]
class BrenemanExperimentResult(
        namedtuple('BrenemanExperimentResult',
                   ('name', 'uv_t', 'uv_m', 's_uv', 'd_uv_i', 'd_uv_g'))):
    """
    Single corresponding-colour result from a *Breneman (1987)* experiment.

    Parameters
    ----------
    name : unicode
        Test colour name.
    uv_t : numeric
        Chromaticity coordinates :math:`uv_t^p` of test colour.
    uv_m : array_like, (2,)
        Chromaticity coordinates :math:`uv_m^p` of matching colour.
    s_uv : array_like, (2,), optional
        Interobserver variation (:math:`x10^3`) :math:`\\sigma_uv^p`.
    d_uv_i : array_like, (2,), optional
        Deviation of individual linear transformation (:math:`x10^3`)
        :math:`\\delta_uv_i^p`.
    d_uv_g : array_like, (2,), optional
        Deviation of individual linear transformation (:math:`x10^3`)
        :math:`\\delta_uv_g^p`.
    """

    def __new__(cls, name, uv_t, uv_m, s_uv=None, d_uv_i=None, d_uv_g=None):
        """
        Return a new
        :class:`colour.corresponding.datasets.corresponding_chromaticities.\
BrenemanExperimentResult` instance, coercing every numeric field to
        :class:`numpy.ndarray` (a ``None`` default becomes a 0-d object
        array, matching the historical behaviour).
        """

        coerced = [np.array(field)
                   for field in (uv_t, uv_m, s_uv, d_uv_i, d_uv_g)]

        return super(BrenemanExperimentResult, cls).__new__(
            cls, name, *coerced)
class PrimariesChromaticityCoordinates(
        namedtuple(
            'PrimariesChromaticityCoordinates',
            ('experiment', 'illuminants', 'Y', 'P_uvp', 'D_uvp', 'T_uvp'))):
    """
    Chromaticity coordinates of the primaries used in a given experiment.

    Parameters
    ----------
    experiment : integer
        Experiment number.
    illuminants : array_like, (2,)
        Names of the two illuminants used in the experiment.
    Y : numeric
        White luminance :math:`Y` in :math:`cd/m^2`.
    P_uvp : numeric
        Chromaticity coordinates :math:`uv^p` of primary :math:`P`.
    D_uvp : numeric
        Chromaticity coordinates :math:`uv^p` of primary :math:`D`.
    T_uvp : numeric
        Chromaticity coordinates :math:`uv^p` of primary :math:`T`.
    """

    def __new__(cls,
                experiment,
                illuminants,
                Y,
                P_uvp=None,
                D_uvp=None,
                T_uvp=None):
        """
        Return a new
        :class:`colour.corresponding.datasets.corresponding_chromaticities.\
PrimariesChromaticityCoordinates` instance; every field except
        *experiment* is coerced to :class:`numpy.ndarray`.
        """

        arrays = tuple(
            np.array(value)
            for value in (illuminants, Y, P_uvp, D_uvp, T_uvp))

        return super(PrimariesChromaticityCoordinates, cls).__new__(
            cls, experiment, *arrays)
# yapf: disable
BRENEMAN_EXPERIMENT_1_RESULTS = (
BrenemanExperimentResult(
'Illuminant',
(0.259, 0.526), (0.200, 0.475)),
BrenemanExperimentResult(
'Gray',
(0.259, 0.524), (0.199, 0.487), (4, 4), (2, 3), (0, 0)),
BrenemanExperimentResult(
'Red',
(0.459, 0.522), (0.420, 0.509), (19, 4), (-10, -7), (-19, -3)),
BrenemanExperimentResult(
'Skin',
(0.307, 0.526), (0.249, 0.497), (7, 4), (-1, 1), (-6, -1)),
BrenemanExperimentResult(
'Orange',
(0.360, 0.544), (0.302, 0.548), (12, 1), (1, -2), (-7, -6)),
BrenemanExperimentResult(
'Brown',
(0.350, 0.541), (0.290, 0.537), (11, 4), (3, 0), (-5, -3)),
BrenemanExperimentResult(
'Yellow',
(0.318, 0.550), (0.257, 0.554), (8, 2), (0, 2), (-5, -5)),
BrenemanExperimentResult(
'Foliage',
(0.258, 0.542), (0.192, 0.529), (4, 6), (3, 2), (3, -6)),
BrenemanExperimentResult(
'Green',
(0.193, 0.542), (0.129, 0.521), (7, 5), (3, 2), (9, -7)),
BrenemanExperimentResult(
'Blue-green',
(0.180, 0.516), (0.133, 0.469), (4, 6), (-3, -2), (2, -5)),
BrenemanExperimentResult(
'Blue',
(0.186, 0.445), (0.158, 0.340), (13, 33), (2, 7), (1, 13)),
BrenemanExperimentResult(
'Sky',
(0.226, 0.491), (0.178, 0.426), (3, 14), (1, -3), (0, -1)),
BrenemanExperimentResult(
'Purple',
(0.278, 0.456), (0.231, 0.365), (4, 25), (0, 2), (-5, 7)))
# yapf: enable
"""
*Breneman (1987)* experiment 1 results.
BRENEMAN_EXPERIMENT_1_RESULTS : tuple
Notes
-----
- Illuminants : *A*, *D65*
- White Luminance : 1500 :math:`cd/m^2`
- Observers Count : 7
"""
# yapf: disable
BRENEMAN_EXPERIMENT_2_RESULTS = (
BrenemanExperimentResult(
'Illuminant',
(0.222, 0.521), (0.204, 0.479)),
BrenemanExperimentResult(
'Gray',
(0.227, 0.517), (0.207, 0.486), (2, 5), (-1, 0), (0, 0)),
BrenemanExperimentResult(
'Red',
(0.464, 0.520), (0.449, 0.511), (22, 3), (-8, -8), (-7, -2)),
BrenemanExperimentResult(
'Skin',
(0.286, 0.526), (0.263, 0.505), (7, 2), (0, -1), (0, -1)),
BrenemanExperimentResult(
'Orange',
(0.348, 0.546), (0.322, 0.545), (13, 3), (3, -1), (3, -2)),
BrenemanExperimentResult(
'Brown',
(0.340, 0.543), (0.316, 0.537), (11, 3), (1, 1), (0, 0)),
BrenemanExperimentResult(
'Yellow',
(0.288, 0.554), (0.265, 0.553), (5, 2), (-2, 2), (-1, -2)),
BrenemanExperimentResult(
'Foliage',
(0.244, 0.547), (0.221, 0.538), (4, 3), (-2, 1), (0, -3)),
BrenemanExperimentResult(
'Green',
(0.156, 0.548), (0.135, 0.532), (4, 3), (-1, 3), (3, -4)),
BrenemanExperimentResult(
'Blue-green',
(0.159, 0.511), (0.145, 0.472), (9, 7), (-1, 2), (2, 1)),
BrenemanExperimentResult(
'Blue',
(0.160, 0.406), (0.163, 0.331), (23, 31), (2, -3), (-1, 3)),
BrenemanExperimentResult(
'Sky',
(0.190, 0.481), (0.176, 0.431), (5, 24), (2, -2), (2, 0)),
BrenemanExperimentResult(
'Purple',
(0.258, 0.431), (0.244, 0.349), (4, 19), (-3, 13), (-4, 19)))
# yapf: enable
"""
*Breneman (1987)* experiment 2 results.
BRENEMAN_EXPERIMENT_2_RESULTS : tuple
Notes
-----
- Illuminants : *Projector*, *D55*
- White Luminance : 1500 :math:`cd/m^2`
- Observers Count : 7
"""
# yapf: disable
BRENEMAN_EXPERIMENT_3_RESULTS = (
BrenemanExperimentResult(
'Illuminant',
(0.223, 0.521), (0.206, 0.478)),
BrenemanExperimentResult(
'Gray',
(0.228, 0.517), (0.211, 0.494), (1, 3), (0, 2), (0, 0)),
BrenemanExperimentResult(
'Red',
(0.462, 0.519), (0.448, 0.505), (11, 4), (-3, 6), (-4, 6)),
BrenemanExperimentResult(
'Skin',
(0.285, 0.524), (0.267, 0.507), (6, 3), (-1, 1), (-2, 1)),
BrenemanExperimentResult(
'Orange',
(0.346, 0.546), (0.325, 0.541), (11, 3), (1, -2), (2, 3)),
BrenemanExperimentResult(
'Brown',
(0.338, 0.543), (0.321, 0.532), (9, 6), (-3, 2), (-3, 7)),
BrenemanExperimentResult(
'Yellow',
(0.287, 0.554), (0.267, 0.548), (4, 5), (1, -2), (0, 5)),
BrenemanExperimentResult(
'Foliage',
(0.244, 0.547), (0.226, 0.531), (3, 6), (-1, 3), (-2, 8)),
BrenemanExperimentResult(
'Green',
(0.157, 0.548), (0.141, 0.528), (9, 6), (2, 2), (0, 6)),
BrenemanExperimentResult(
'Blue-green',
(0.160, 0.510), (0.151, 0.486), (8, 5), (-2, -1), (-2, -5)),
BrenemanExperimentResult(
'Blue',
(0.162, 0.407), (0.158, 0.375), (6, 7), (1, -6), (4, -23)),
BrenemanExperimentResult(
'Sky',
(0.191, 0.482), (0.179, 0.452), (4, 5), (0, 1), (1, -7)),
BrenemanExperimentResult(
'Purple',
(0.258, 0.432), (0.238, 0.396), (4, 8), (5, 3), (4, -11)))
# yapf: enable
"""
*Breneman (1987)* experiment 3 results.
BRENEMAN_EXPERIMENT_3_RESULTS : tuple
Notes
-----
- Illuminants : *Projector*, *D55*
- White Luminance : 75 :math:`cd/m^2`
- Observers Count : 7
"""
# yapf: disable
BRENEMAN_EXPERIMENT_4_RESULTS = (
BrenemanExperimentResult(
'Illuminant',
(0.258, 0.523), (0.199, 0.467)),
BrenemanExperimentResult(
'Gray',
(0.257, 0.524), (0.205, 0.495), (2, 2), (0, 4), (0, 0)),
BrenemanExperimentResult(
'Red',
(0.460, 0.521), (0.416, 0.501), (11, 6), (-6, 4), (-6, 9)),
BrenemanExperimentResult(
'Skin',
(0.308, 0.526), (0.253, 0.503), (7, 3), (-1, 1), (-1, 0)),
BrenemanExperimentResult(
'Orange',
(0.360, 0.544), (0.303, 0.541), (14, 5), (1, -4), (1, 2)),
BrenemanExperimentResult(
'Brown',
(0.350, 0.541), (0.296, 0.527), (11, 7), (-2, 4), (-3, 9)),
BrenemanExperimentResult(
'Yellow',
(0.317, 0.550), (0.260, 0.547), (9, 5), (1, -3), (0, 3)),
BrenemanExperimentResult(
'Foliage',
(0.258, 0.543), (0.203, 0.520), (4, 6), (0, 8), (0, 9)),
BrenemanExperimentResult(
'Green',
(0.193, 0.543), (0.142, 0.516), (6, 9), (3, 8), (2, 6)),
BrenemanExperimentResult(
'Blue-green',
(0.180, 0.516), (0.140, 0.484), (9, 5), (-2, -1), (-1, -9)),
BrenemanExperimentResult(
'Blue',
(0.185, 0.445), (0.151, 0.394), (8, 10), (2, -8), (8, -24)),
BrenemanExperimentResult(
'Sky',
(0.225, 0.490), (0.180, 0.448), (4, 8), (1, -1), (3, -11)),
BrenemanExperimentResult(
'Purple',
(0.278, 0.455), (0.229, 0.388), (6, 14), (1, 12), (3, 0)))
# yapf: enable
"""
*Breneman (1987)* experiment 4 results.
BRENEMAN_EXPERIMENT_4_RESULTS : tuple
Notes
-----
- Illuminants : *A*, *D65*
- White Luminance : 75 :math:`cd/m^2`
- Observers Count : 7
"""
# yapf: disable
BRENEMAN_EXPERIMENT_5_RESULTS = (
BrenemanExperimentResult(
'Gray',
(0.028, 0.480), (0.212, 0.491), (2, 2)),
BrenemanExperimentResult(
'Red',
(0.449, 0.512), (0.408, 0.514), (11, 5)),
BrenemanExperimentResult(
'Skin',
(0.269, 0.505), (0.262, 0.511), (4, 2)),
BrenemanExperimentResult(
'Orange',
(0.331, 0.548), (0.303, 0.545), (4, 3)),
BrenemanExperimentResult(
'Brown',
(0.322, 0.541), (0.303, 0.538), (4, 4)),
BrenemanExperimentResult(
'Yellow',
(0.268, 0.555), (0.264, 0.550), (3, 2)),
BrenemanExperimentResult(
'Foliage',
(0.224, 0.538), (0.227, 0.535), (3, 3)),
BrenemanExperimentResult(
'Green',
(0.134, 0.531), (0.159, 0.530), (9, 3)),
BrenemanExperimentResult(
'Blue-green',
(0.145, 0.474), (0.165, 0.490), (8, 3)),
BrenemanExperimentResult(
'Blue',
(0.163, 0.329), (0.173, 0.378), (7, 12)),
BrenemanExperimentResult(
'Sky',
(0.179, 0.438), (0.189, 0.462), (5, 4)),
BrenemanExperimentResult(
'Purple',
(0.245, 0.364), (0.239, 0.401), (4, 16)))
# yapf: enable
"""
*Breneman (1987)* experiment 5 results.
BRENEMAN_EXPERIMENT_5_RESULTS : tuple
Notes
-----
- Effective White Levels : 130 and 2120 :math:`cd/m^2`
- Observers Count : 7
"""
# yapf: disable
BRENEMAN_EXPERIMENT_6_RESULTS = (
BrenemanExperimentResult(
'Illuminant',
(0.257, 0.525), (0.201, 0.482)),
BrenemanExperimentResult(
'Gray',
(0.267, 0.521), (0.207, 0.485), (5, 3), (-1, 0), (0, 0)),
BrenemanExperimentResult(
'Red',
(0.457, 0.521), (0.398, 0.516), (9, 4), (-2, -5), (1, -9)),
BrenemanExperimentResult(
'Skin',
(0.316, 0.526), (0.253, 0.503), (5, 3), (-3, -2), (-1, -3)),
BrenemanExperimentResult(
'Orange',
(0.358, 0.545), (0.287, 0.550), (7, 3), (3, 0), (7, -6)),
BrenemanExperimentResult(
'Brown',
(0.350, 0.541), (0.282, 0.540), (6, 3), (-1, 0), (2, -5)),
BrenemanExperimentResult(
'Yellow',
(0.318, 0.551), (0.249, 0.556), (7, 2), (-1, 1), (2, -5)),
BrenemanExperimentResult(
'Foliage',
(0.256, 0.547), (0.188, 0.537), (5, 4), (3, 1), (4, -2)),
BrenemanExperimentResult(
'Green',
(0.193, 0.542), (0.133, 0.520), (13, 3), (5, -2), (5, -4)),
BrenemanExperimentResult(
'Blue-green',
(0.180, 0.516), (0.137, 0.466), (12, 10), (0, 0), (-2, 2)),
BrenemanExperimentResult(
'Blue',
(0.186, 0.445), (0.156, 0.353), (12, 45), (6, 1), (2, 6)),
BrenemanExperimentResult(
'Sky',
(0.225, 0.492), (0.178, 0.428), (6, 14), (1, -1), (-1, 3)),
BrenemanExperimentResult(
'Purple',
(0.276, 0.456), (0.227, 0.369), (6, 27), (-2, 4), (-3, 9)))
# yapf: enable
"""
*Breneman (1987)* experiment 6 results.
BRENEMAN_EXPERIMENT_6_RESULTS : tuple
Notes
-----
- Illuminants : *A*, *D55*
- White Luminance : 11100 :math:`cd/m^2`
- Observers Count : 8
"""
# yapf: disable
BRENEMAN_EXPERIMENT_7_RESULTS = (
BrenemanExperimentResult(
'Gray',
(0.208, 0.481), (0.211, 0.486), (2, 3)),
BrenemanExperimentResult(
'Red',
(0.448, 0.512), (0.409, 0.516), (9, 2)),
BrenemanExperimentResult(
'Skin',
(0.269, 0.505), (0.256, 0.506), (4, 3)),
BrenemanExperimentResult(
'Orange',
(0.331, 0.549), (0.305, 0.547), (5, 4)),
BrenemanExperimentResult(
'Brown',
(0.322, 0.541), (0.301, 0.539), (5, 2)),
BrenemanExperimentResult(
'Yellow',
(0.268, 0.555), (0.257, 0.552), (3, 4)),
BrenemanExperimentResult(
'Foliage',
(0.225, 0.538), (0.222, 0.536), (3, 2)),
BrenemanExperimentResult(
'Green',
(0.135, 0.531), (0.153, 0.529), (8, 2)),
BrenemanExperimentResult(
'Blue-green',
(0.145, 0.475), (0.160, 0.484), (3, 5)),
BrenemanExperimentResult(
'Blue',
(0.163, 0.331), (0.171, 0.379), (4, 11)),
BrenemanExperimentResult(
'Sky',
(0.179, 0.438), (0.187, 0.452), (4, 7)),
BrenemanExperimentResult(
'Purple',
(0.245, 0.365), (0.240, 0.398), (4, 10)))
# yapf: enable
"""
*Breneman (1987)* experiment 7 results.
BRENEMAN_EXPERIMENT_7_RESULTS : tuple
Notes
-----
- Effective White Levels : 850 and 11100 :math:`cd/m^2`
- Observers Count : 8
"""
# yapf: disable
BRENEMAN_EXPERIMENT_8_RESULTS = (
BrenemanExperimentResult(
'Illuminant',
(0.258, 0.524), (0.195, 0.469)),
BrenemanExperimentResult(
'Gray',
(0.257, 0.525), (0.200, 0.494), (2, 3), (1, 2), (0, 0)),
BrenemanExperimentResult(
'Red',
(0.458, 0.522), (0.410, 0.508), (12, 4), (-3, 5), (-7, 2)),
BrenemanExperimentResult(
'Skin',
(0.308, 0.526), (0.249, 0.502), (6, 2), (-1, 1), (-3, -1)),
BrenemanExperimentResult(
'Orange',
(0.359, 0.545), (0.299, 0.545), (12, 4), (0, -2), (-3, 0)),
BrenemanExperimentResult(
'Brown',
(0.349, 0.540), (0.289, 0.532), (10, 4), (0, 1), (-2, 2)),
BrenemanExperimentResult(
'Yellow',
(0.317, 0.550), (0.256, 0.549), (9, 5), (0, -3), (-3, 1)),
BrenemanExperimentResult(
'Foliage',
(0.260, 0.545), (0.198, 0.529), (5, 5), (3, 1), (0, 3)),
BrenemanExperimentResult(
'Green',
(0.193, 0.543), (0.137, 0.520), (9, 5), (3, 0), (2, 1)),
BrenemanExperimentResult(
'Blue-green',
(0.182, 0.516), (0.139, 0.477), (9, 4), (-3, 0), (-2, -4)),
BrenemanExperimentResult(
'Blue',
(0.184, 0.444), (0.150, 0.387), (5, 11), (3, -10), (6, -22)),
BrenemanExperimentResult(
'Sky',
(0.224, 0.489), (0.177, 0.439), (5, 6), (1, 1), (1, -7)),
BrenemanExperimentResult(
'Purple',
(0.277, 0.454), (0.226, 0.389), (4, 10), (1, 4), (1, -8)))
# yapf: enable
"""
*Breneman (1987)* experiment 8 results.
BRENEMAN_EXPERIMENT_8_RESULTS : tuple
Notes
-----
- Illuminants : *A*, *D65*
- White Luminance : 350 :math:`cd/m^2`
- Observers Count : 8
"""
# yapf: disable
BRENEMAN_EXPERIMENT_9_RESULTS = (
BrenemanExperimentResult(
'Illuminant',
(0.254, 0.525), (0.195, 0.465)),
BrenemanExperimentResult(
'Gray',
(0.256, 0.524), (0.207, 0.496), (4, 6), (3, 2), (0, 0)),
BrenemanExperimentResult(
'Red',
(0.459, 0.521), (0.415, 0.489), (20, 14), (2, 12), (-2, 21)),
BrenemanExperimentResult(
'Skin',
(0.307, 0.525), (0.261, 0.500), (7, 7), (0, 1), (-5, 2)),
BrenemanExperimentResult(
'Orange',
(0.359, 0.545), (0.313, 0.532), (7, 5), (-2, -3), (-6, 13)),
BrenemanExperimentResult(
'Brown',
(0.349, 0.540), (0.302, 0.510), (11, 15), (0, 12), (-5, 24)),
BrenemanExperimentResult(
'Yellow',
(0.317, 0.550), (0.268, 0.538), (7, 10), (1, -4), (-4, 12)),
BrenemanExperimentResult(
'Foliage',
(0.259, 0.544), (0.212, 0.510), (10, 11), (0, 14), (-4, 22)),
BrenemanExperimentResult(
'Green',
(0.193, 0.542), (0.150, 0.506), (6, 10), (-1, 13), (-2, 15)),
BrenemanExperimentResult(
'Blue-green',
(0.181, 0.517), (0.144, 0.487), (9, 6), (-3, 0), (-1, -9)),
BrenemanExperimentResult(
'Blue',
(0.184, 0.444), (0.155, 0.407), (4, 11), (-2, -6), (6, -36)),
BrenemanExperimentResult(
'Sky',
(0.225, 0.490), (0.183, 0.458), (5, 8), (1, -3), (2, -19)),
BrenemanExperimentResult(
'Purple',
(0.276, 0.454), (0.233, 0.404), (7, 12), (2, 9), (0, -16)),
BrenemanExperimentResult(
'(Gray)h',
(0.256, 0.525), (0.208, 0.498)),
BrenemanExperimentResult(
'(Red)h',
(0.456, 0.521), (0.416, 0.501), (15, 7), None, (-6, -9)),
BrenemanExperimentResult(
'(Brown)h',
(0.349, 0.539), (0.306, 0.526), (11, 8), None, (-8, 7)),
BrenemanExperimentResult(
'(Foliage)h',
(0.260, 0.545), (0.213, 0.528), (7, 9), None, (-4, 5)),
BrenemanExperimentResult(
'(Green)h',
(0.193, 0.543), (0.149, 0.525), (10, 8), None, (-1, -1)),
BrenemanExperimentResult(
'(Blue)h',
(0.184, 0.444), (0.156, 0.419), (7, 8), None, (4, -45)),
BrenemanExperimentResult(
'(Purple)h',
(0.277, 0.456), (0.236, 0.422), (6, 11), None, (-2, -29)))
# yapf: enable
"""
*Breneman (1987)* experiment 9 results.
BRENEMAN_EXPERIMENT_9_RESULTS : tuple
Notes
-----
- Illuminants : *A*, *D65*
- White Luminance : 15 :math:`cd/m^2`
- Observers Count : 8
- The colors indicated by (.)h are the darker colors presented at the higher
luminescence level of the lighter colors.
"""
# yapf: disable
BRENEMAN_EXPERIMENT_10_RESULTS = (
BrenemanExperimentResult(
'Gray',
(0.208, 0.482), (0.213, 0.494), (3, 3)),
BrenemanExperimentResult(
'Red',
(0.447, 0.512), (0.411, 0.506), (15, 7)),
BrenemanExperimentResult(
'Skin',
(0.269, 0.505), (0.269, 0.511), (4, 3)),
BrenemanExperimentResult(
'Orange',
(0.331, 0.549), (0.315, 0.536), (7, 8)),
BrenemanExperimentResult(
'Brown',
(0.323, 0.542), (0.310, 0.526), (6, 8)),
BrenemanExperimentResult(
'Yellow',
(0.268, 0.556), (0.268, 0.541), (3, 6)),
BrenemanExperimentResult(
'Foliage',
(0.226, 0.538), (0.230, 0.525), (4, 8)),
BrenemanExperimentResult(
'Green',
(0.135, 0.531), (0.158, 0.524), (6, 3)),
BrenemanExperimentResult(
'Blue-green',
(0.145, 0.476), (0.161, 0.491), (4, 4)),
BrenemanExperimentResult(
'Blue',
(0.163, 0.330), (0.171, 0.377), (6, 19)),
BrenemanExperimentResult(
'Sky',
(0.179, 0.439), (0.187, 0.465), (5, 5)),
BrenemanExperimentResult(
'Purple',
(0.245, 0.366), (0.240, 0.402), (3, 12)))
# yapf: enable
"""
*Breneman (1987)* experiment 10 results.
BRENEMAN_EXPERIMENT_10_RESULTS : tuple
Notes
-----
- Effective White Levels : 15 and 270 :math:`cd/m^2`
- Observers Count : 7
"""
# yapf: disable
BRENEMAN_EXPERIMENT_11_RESULTS = (
BrenemanExperimentResult(
'Illuminant',
(0.208, 0.482), (0.174, 0.520)),
BrenemanExperimentResult(
'Gray',
(0.209, 0.483), (0.176, 0.513), (3, 4), (2, 2), (0, 0)),
BrenemanExperimentResult(
'Red',
(0.450, 0.512), (0.419, 0.524), (10, 2), (3, 2), (8, -1)),
BrenemanExperimentResult(
'Skin',
(0.268, 0.506), (0.240, 0.528), (6, 2), (-4, 0), (-3, 0)),
BrenemanExperimentResult(
'Orange',
(0.331, 0.547), (0.293, 0.553), (6, 2), (3, -1), (5, 1)),
BrenemanExperimentResult(
'Brown',
(0.323, 0.542), (0.290, 0.552), (5, 2), (-1, -3), (0, -1)),
BrenemanExperimentResult(
'Yellow',
(0.266, 0.549), (0.236, 0.557), (4, 2), (-3, -2), (-4, 2)),
BrenemanExperimentResult(
'Foliage',
(0.227, 0.538), (0.194, 0.552), (4, 2), (2, -3), (-1, 1)),
BrenemanExperimentResult(
'Green',
(0.146, 0.534), (0.118, 0.551), (8, 3), (4, -2), (-6, 3)),
BrenemanExperimentResult(
'Blue-green',
(0.160, 0.475), (0.130, 0.513), (9, 4), (1, -1), (-4, -3)),
BrenemanExperimentResult(
'Blue',
(0.177, 0.340), (0.133, 0.427), (6, 14), (4, -17), (11, -29)),
BrenemanExperimentResult(
'Sky',
(0.179, 0.438), (0.146, 0.482), (6, 10), (1, 4), (0, -1)),
BrenemanExperimentResult(
'Purple',
(0.245, 0.366), (0.216, 0.419), (4, 13), (-3, 8), (4, -2)))
# yapf: enable
"""
*Breneman (1987)* experiment 1 results.
BRENEMAN_EXPERIMENT_11_RESULTS : tuple
Notes
-----
- Illuminants : *green*, *D65*
- White Luminance : 1560 :math:`cd/m^2`
- Observers Count : 7
"""
# yapf: disable
BRENEMAN_EXPERIMENT_12_RESULTS = (
BrenemanExperimentResult(
'Illuminant',
(0.205, 0.482), (0.174, 0.519)),
BrenemanExperimentResult(
'Gray',
(0.208, 0.482), (0.181, 0.507), (4, 3), (0, 1), (0, 0)),
BrenemanExperimentResult(
'Red',
(0.451, 0.512), (0.422, 0.526), (20, 3), (0, -5), (10, -5)),
BrenemanExperimentResult(
'Skin',
(0.268, 0.506), (0.244, 0.525), (5, 2), (-6, 0), (-2, -1)),
BrenemanExperimentResult(
'Orange',
(0.331, 0.548), (0.292, 0.553), (10, 2), (5, 2), (11, 1)),
BrenemanExperimentResult(
'Brown',
(0.324, 0.542), (0.286, 0.554), (8, 1), (5, -3), (10, -4)),
BrenemanExperimentResult(
'Yellow',
(0.266, 0.548), (0.238, 0.558), (6, 2), (-3, -1), (-1, -2)),
BrenemanExperimentResult(
'Foliage',
(0.227, 0.538), (0.196, 0.555), (6, 3), (3, -4), (2, -5)),
BrenemanExperimentResult(
'Green',
(0.145, 0.534), (0.124, 0.551), (8, 6), (1, -1), (-8, -1)),
BrenemanExperimentResult(
'Blue-green',
(0.160, 0.474), (0.135, 0.505), (5, 2), (1, -1), (-4, -3)),
BrenemanExperimentResult(
'Blue',
(0.178, 0.339), (0.149, 0.392), (4, 20), (-1, -5), (3, -7)),
BrenemanExperimentResult(
'Sky',
(0.179, 0.440), (0.150, 0.473), (4, 8), (3, 2), (2, 0)),
BrenemanExperimentResult(
'Purple',
(0.246, 0.366), (0.222, 0.404), (5, 15), (-4, 2), (4, 2)))
# yapf: enable
"""
*Breneman (1987)* experiment 12 results.
BRENEMAN_EXPERIMENT_12_RESULTS : tuple
Notes
-----
- Illuminants : *D55*, *green*
- White Luminance : 75 :math:`cd/m^2`
- Observers Count : 7
"""
# yapf: disable
# NOTE: experiments 5, 7 and 10 have no entry here: per the docstrings
# above, those runs varied the effective white luminance rather than the
# illuminant pair, so no distinct primaries set is tabulated for them.
BRENEMAN_EXPERIMENTS_PRIMARIES_CHROMATICITIES = DocstringDict({
    1: PrimariesChromaticityCoordinates(
        1, ('A', 'D65'), 1500,
        (0.671, 0.519), (-0.586, 0.627), (0.253, 0.016)),
    2: PrimariesChromaticityCoordinates(
        2, ('Projector', 'D55'), 1500,
        (0.675, 0.523), (-0.466, 0.617), (0.255, 0.018)),
    3: PrimariesChromaticityCoordinates(
        3, ('Projector', 'D55'), 75,
        (0.664, 0.510), (-0.256, 0.729), (0.244, 0.003)),
    4: PrimariesChromaticityCoordinates(
        4, ('A', 'D65'), 75,
        (0.674, 0.524), (-0.172, 0.628), (0.218, -0.026)),
    6: PrimariesChromaticityCoordinates(
        6, ('A', 'D55'), 11100,
        (0.659, 0.506), (-0.141, 0.615), (0.249, 0.009)),
    8: PrimariesChromaticityCoordinates(
        8, ('A', 'D65'), 350,
        (0.659, 0.505), (-0.246, 0.672), (0.235, -0.006)),
    9: PrimariesChromaticityCoordinates(
        9, ('A', 'D65'), 15,
        (0.693, 0.546), (-0.446, 0.773), (0.221, -0.023)),
    11: PrimariesChromaticityCoordinates(
        11, ('D55', 'green'), 1560,
        (0.680, 0.529), (0.018, 0.576), (0.307, 0.080)),
    12: PrimariesChromaticityCoordinates(
        12, ('D55', 'green'), 75,
        (0.661, 0.505), (0.039, 0.598), (0.345, 0.127))})
# yapf: enable
BRENEMAN_EXPERIMENTS_PRIMARIES_CHROMATICITIES.__doc__ = """
*Breneman (1987)* experiments primaries chromaticities.
References
----------
:cite:`Breneman1987b`
BRENEMAN_EXPERIMENTS_PRIMARIES_CHROMATICITIES : dict
"""
# Mapping of experiment number (1-12) to its tuple of
# ``BrenemanExperimentResult`` records defined above.
BRENEMAN_EXPERIMENTS = DocstringDict({
    1: BRENEMAN_EXPERIMENT_1_RESULTS,
    2: BRENEMAN_EXPERIMENT_2_RESULTS,
    3: BRENEMAN_EXPERIMENT_3_RESULTS,
    4: BRENEMAN_EXPERIMENT_4_RESULTS,
    5: BRENEMAN_EXPERIMENT_5_RESULTS,
    6: BRENEMAN_EXPERIMENT_6_RESULTS,
    7: BRENEMAN_EXPERIMENT_7_RESULTS,
    8: BRENEMAN_EXPERIMENT_8_RESULTS,
    9: BRENEMAN_EXPERIMENT_9_RESULTS,
    10: BRENEMAN_EXPERIMENT_10_RESULTS,
    11: BRENEMAN_EXPERIMENT_11_RESULTS,
    12: BRENEMAN_EXPERIMENT_12_RESULTS
})
BRENEMAN_EXPERIMENTS.__doc__ = """
*Breneman (1987)* experiments.
References
----------
:cite:`Breneman1987b`
BRENEMAN_EXPERIMENTS : dict
"""
| 32.035377 | 79 | 0.536406 |
from __future__ import division, unicode_literals
import numpy as np
from collections import namedtuple
from colour.utilities.documentation import DocstringDict
# Package metadata (colour-science house style).
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2019 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-developers@colour-science.org'
__status__ = 'Production'
# Public API: the two record types, the twelve experiment tables and the
# two aggregate mappings defined below.
__all__ = [
    'BrenemanExperimentResult', 'PrimariesChromaticityCoordinates',
    'BRENEMAN_EXPERIMENT_1_RESULTS', 'BRENEMAN_EXPERIMENT_2_RESULTS',
    'BRENEMAN_EXPERIMENT_3_RESULTS', 'BRENEMAN_EXPERIMENT_4_RESULTS',
    'BRENEMAN_EXPERIMENT_5_RESULTS', 'BRENEMAN_EXPERIMENT_6_RESULTS',
    'BRENEMAN_EXPERIMENT_7_RESULTS', 'BRENEMAN_EXPERIMENT_10_RESULTS',
    'BRENEMAN_EXPERIMENT_8_RESULTS', 'BRENEMAN_EXPERIMENT_9_RESULTS',
    'BRENEMAN_EXPERIMENT_11_RESULTS', 'BRENEMAN_EXPERIMENT_12_RESULTS',
    'BRENEMAN_EXPERIMENTS_PRIMARIES_CHROMATICITIES', 'BRENEMAN_EXPERIMENTS'
]
class BrenemanExperimentResult(
        namedtuple('BrenemanExperimentResult',
                   ('name', 'uv_t', 'uv_m', 's_uv', 'd_uv_i', 'd_uv_g'))):
    """
    Single *Breneman (1987)* experiment result: test and matching colour
    chromaticities plus optional variation / deviation statistics.
    """

    def __new__(cls, name, uv_t, uv_m, s_uv=None, d_uv_i=None, d_uv_g=None):
        """
        Coerce every numeric field to ``ndarray`` (``None`` defaults become
        0-d object arrays) before building the namedtuple.
        """
        return super(BrenemanExperimentResult, cls).__new__(
            cls, name, np.array(uv_t), np.array(uv_m), np.array(s_uv),
            np.array(d_uv_i), np.array(d_uv_g))
class PrimariesChromaticityCoordinates(
        namedtuple(
            'PrimariesChromaticityCoordinates',
            ('experiment', 'illuminants', 'Y', 'P_uvp', 'D_uvp', 'T_uvp'))):
    """
    Chromaticity coordinates of the primaries used in a given experiment.
    """

    def __new__(cls,
                experiment,
                illuminants,
                Y,
                P_uvp=None,
                D_uvp=None,
                T_uvp=None):
        """
        Coerce every field except *experiment* to ``ndarray`` before
        building the namedtuple.
        """
        return super(PrimariesChromaticityCoordinates, cls).__new__(
            cls, experiment, np.array(illuminants), np.array(Y),
            np.array(P_uvp), np.array(D_uvp), np.array(T_uvp))
BRENEMAN_EXPERIMENT_1_RESULTS = (
BrenemanExperimentResult(
'Illuminant',
(0.259, 0.526), (0.200, 0.475)),
BrenemanExperimentResult(
'Gray',
(0.259, 0.524), (0.199, 0.487), (4, 4), (2, 3), (0, 0)),
BrenemanExperimentResult(
'Red',
(0.459, 0.522), (0.420, 0.509), (19, 4), (-10, -7), (-19, -3)),
BrenemanExperimentResult(
'Skin',
(0.307, 0.526), (0.249, 0.497), (7, 4), (-1, 1), (-6, -1)),
BrenemanExperimentResult(
'Orange',
(0.360, 0.544), (0.302, 0.548), (12, 1), (1, -2), (-7, -6)),
BrenemanExperimentResult(
'Brown',
(0.350, 0.541), (0.290, 0.537), (11, 4), (3, 0), (-5, -3)),
BrenemanExperimentResult(
'Yellow',
(0.318, 0.550), (0.257, 0.554), (8, 2), (0, 2), (-5, -5)),
BrenemanExperimentResult(
'Foliage',
(0.258, 0.542), (0.192, 0.529), (4, 6), (3, 2), (3, -6)),
BrenemanExperimentResult(
'Green',
(0.193, 0.542), (0.129, 0.521), (7, 5), (3, 2), (9, -7)),
BrenemanExperimentResult(
'Blue-green',
(0.180, 0.516), (0.133, 0.469), (4, 6), (-3, -2), (2, -5)),
BrenemanExperimentResult(
'Blue',
(0.186, 0.445), (0.158, 0.340), (13, 33), (2, 7), (1, 13)),
BrenemanExperimentResult(
'Sky',
(0.226, 0.491), (0.178, 0.426), (3, 14), (1, -3), (0, -1)),
BrenemanExperimentResult(
'Purple',
(0.278, 0.456), (0.231, 0.365), (4, 25), (0, 2), (-5, 7)))
BRENEMAN_EXPERIMENT_2_RESULTS = (
BrenemanExperimentResult(
'Illuminant',
(0.222, 0.521), (0.204, 0.479)),
BrenemanExperimentResult(
'Gray',
(0.227, 0.517), (0.207, 0.486), (2, 5), (-1, 0), (0, 0)),
BrenemanExperimentResult(
'Red',
(0.464, 0.520), (0.449, 0.511), (22, 3), (-8, -8), (-7, -2)),
BrenemanExperimentResult(
'Skin',
(0.286, 0.526), (0.263, 0.505), (7, 2), (0, -1), (0, -1)),
BrenemanExperimentResult(
'Orange',
(0.348, 0.546), (0.322, 0.545), (13, 3), (3, -1), (3, -2)),
BrenemanExperimentResult(
'Brown',
(0.340, 0.543), (0.316, 0.537), (11, 3), (1, 1), (0, 0)),
BrenemanExperimentResult(
'Yellow',
(0.288, 0.554), (0.265, 0.553), (5, 2), (-2, 2), (-1, -2)),
BrenemanExperimentResult(
'Foliage',
(0.244, 0.547), (0.221, 0.538), (4, 3), (-2, 1), (0, -3)),
BrenemanExperimentResult(
'Green',
(0.156, 0.548), (0.135, 0.532), (4, 3), (-1, 3), (3, -4)),
BrenemanExperimentResult(
'Blue-green',
(0.159, 0.511), (0.145, 0.472), (9, 7), (-1, 2), (2, 1)),
BrenemanExperimentResult(
'Blue',
(0.160, 0.406), (0.163, 0.331), (23, 31), (2, -3), (-1, 3)),
BrenemanExperimentResult(
'Sky',
(0.190, 0.481), (0.176, 0.431), (5, 24), (2, -2), (2, 0)),
BrenemanExperimentResult(
'Purple',
(0.258, 0.431), (0.244, 0.349), (4, 19), (-3, 13), (-4, 19)))
BRENEMAN_EXPERIMENT_3_RESULTS = (
BrenemanExperimentResult(
'Illuminant',
(0.223, 0.521), (0.206, 0.478)),
BrenemanExperimentResult(
'Gray',
(0.228, 0.517), (0.211, 0.494), (1, 3), (0, 2), (0, 0)),
BrenemanExperimentResult(
'Red',
(0.462, 0.519), (0.448, 0.505), (11, 4), (-3, 6), (-4, 6)),
BrenemanExperimentResult(
'Skin',
(0.285, 0.524), (0.267, 0.507), (6, 3), (-1, 1), (-2, 1)),
BrenemanExperimentResult(
'Orange',
(0.346, 0.546), (0.325, 0.541), (11, 3), (1, -2), (2, 3)),
BrenemanExperimentResult(
'Brown',
(0.338, 0.543), (0.321, 0.532), (9, 6), (-3, 2), (-3, 7)),
BrenemanExperimentResult(
'Yellow',
(0.287, 0.554), (0.267, 0.548), (4, 5), (1, -2), (0, 5)),
BrenemanExperimentResult(
'Foliage',
(0.244, 0.547), (0.226, 0.531), (3, 6), (-1, 3), (-2, 8)),
BrenemanExperimentResult(
'Green',
(0.157, 0.548), (0.141, 0.528), (9, 6), (2, 2), (0, 6)),
BrenemanExperimentResult(
'Blue-green',
(0.160, 0.510), (0.151, 0.486), (8, 5), (-2, -1), (-2, -5)),
BrenemanExperimentResult(
'Blue',
(0.162, 0.407), (0.158, 0.375), (6, 7), (1, -6), (4, -23)),
BrenemanExperimentResult(
'Sky',
(0.191, 0.482), (0.179, 0.452), (4, 5), (0, 1), (1, -7)),
BrenemanExperimentResult(
'Purple',
(0.258, 0.432), (0.238, 0.396), (4, 8), (5, 3), (4, -11)))
BRENEMAN_EXPERIMENT_4_RESULTS = (
BrenemanExperimentResult(
'Illuminant',
(0.258, 0.523), (0.199, 0.467)),
BrenemanExperimentResult(
'Gray',
(0.257, 0.524), (0.205, 0.495), (2, 2), (0, 4), (0, 0)),
BrenemanExperimentResult(
'Red',
(0.460, 0.521), (0.416, 0.501), (11, 6), (-6, 4), (-6, 9)),
BrenemanExperimentResult(
'Skin',
(0.308, 0.526), (0.253, 0.503), (7, 3), (-1, 1), (-1, 0)),
BrenemanExperimentResult(
'Orange',
(0.360, 0.544), (0.303, 0.541), (14, 5), (1, -4), (1, 2)),
BrenemanExperimentResult(
'Brown',
(0.350, 0.541), (0.296, 0.527), (11, 7), (-2, 4), (-3, 9)),
BrenemanExperimentResult(
'Yellow',
(0.317, 0.550), (0.260, 0.547), (9, 5), (1, -3), (0, 3)),
BrenemanExperimentResult(
'Foliage',
(0.258, 0.543), (0.203, 0.520), (4, 6), (0, 8), (0, 9)),
BrenemanExperimentResult(
'Green',
(0.193, 0.543), (0.142, 0.516), (6, 9), (3, 8), (2, 6)),
BrenemanExperimentResult(
'Blue-green',
(0.180, 0.516), (0.140, 0.484), (9, 5), (-2, -1), (-1, -9)),
BrenemanExperimentResult(
'Blue',
(0.185, 0.445), (0.151, 0.394), (8, 10), (2, -8), (8, -24)),
BrenemanExperimentResult(
'Sky',
(0.225, 0.490), (0.180, 0.448), (4, 8), (1, -1), (3, -11)),
BrenemanExperimentResult(
'Purple',
(0.278, 0.455), (0.229, 0.388), (6, 14), (1, 12), (3, 0)))
BRENEMAN_EXPERIMENT_5_RESULTS = (
BrenemanExperimentResult(
'Gray',
(0.028, 0.480), (0.212, 0.491), (2, 2)),
BrenemanExperimentResult(
'Red',
(0.449, 0.512), (0.408, 0.514), (11, 5)),
BrenemanExperimentResult(
'Skin',
(0.269, 0.505), (0.262, 0.511), (4, 2)),
BrenemanExperimentResult(
'Orange',
(0.331, 0.548), (0.303, 0.545), (4, 3)),
BrenemanExperimentResult(
'Brown',
(0.322, 0.541), (0.303, 0.538), (4, 4)),
BrenemanExperimentResult(
'Yellow',
(0.268, 0.555), (0.264, 0.550), (3, 2)),
BrenemanExperimentResult(
'Foliage',
(0.224, 0.538), (0.227, 0.535), (3, 3)),
BrenemanExperimentResult(
'Green',
(0.134, 0.531), (0.159, 0.530), (9, 3)),
BrenemanExperimentResult(
'Blue-green',
(0.145, 0.474), (0.165, 0.490), (8, 3)),
BrenemanExperimentResult(
'Blue',
(0.163, 0.329), (0.173, 0.378), (7, 12)),
BrenemanExperimentResult(
'Sky',
(0.179, 0.438), (0.189, 0.462), (5, 4)),
BrenemanExperimentResult(
'Purple',
(0.245, 0.364), (0.239, 0.401), (4, 16)))
BRENEMAN_EXPERIMENT_6_RESULTS = (
BrenemanExperimentResult(
'Illuminant',
(0.257, 0.525), (0.201, 0.482)),
BrenemanExperimentResult(
'Gray',
(0.267, 0.521), (0.207, 0.485), (5, 3), (-1, 0), (0, 0)),
BrenemanExperimentResult(
'Red',
(0.457, 0.521), (0.398, 0.516), (9, 4), (-2, -5), (1, -9)),
BrenemanExperimentResult(
'Skin',
(0.316, 0.526), (0.253, 0.503), (5, 3), (-3, -2), (-1, -3)),
BrenemanExperimentResult(
'Orange',
(0.358, 0.545), (0.287, 0.550), (7, 3), (3, 0), (7, -6)),
BrenemanExperimentResult(
'Brown',
(0.350, 0.541), (0.282, 0.540), (6, 3), (-1, 0), (2, -5)),
BrenemanExperimentResult(
'Yellow',
(0.318, 0.551), (0.249, 0.556), (7, 2), (-1, 1), (2, -5)),
BrenemanExperimentResult(
'Foliage',
(0.256, 0.547), (0.188, 0.537), (5, 4), (3, 1), (4, -2)),
BrenemanExperimentResult(
'Green',
(0.193, 0.542), (0.133, 0.520), (13, 3), (5, -2), (5, -4)),
BrenemanExperimentResult(
'Blue-green',
(0.180, 0.516), (0.137, 0.466), (12, 10), (0, 0), (-2, 2)),
BrenemanExperimentResult(
'Blue',
(0.186, 0.445), (0.156, 0.353), (12, 45), (6, 1), (2, 6)),
BrenemanExperimentResult(
'Sky',
(0.225, 0.492), (0.178, 0.428), (6, 14), (1, -1), (-1, 3)),
BrenemanExperimentResult(
'Purple',
(0.276, 0.456), (0.227, 0.369), (6, 27), (-2, 4), (-3, 9)))
BRENEMAN_EXPERIMENT_7_RESULTS = (
BrenemanExperimentResult(
'Gray',
(0.208, 0.481), (0.211, 0.486), (2, 3)),
BrenemanExperimentResult(
'Red',
(0.448, 0.512), (0.409, 0.516), (9, 2)),
BrenemanExperimentResult(
'Skin',
(0.269, 0.505), (0.256, 0.506), (4, 3)),
BrenemanExperimentResult(
'Orange',
(0.331, 0.549), (0.305, 0.547), (5, 4)),
BrenemanExperimentResult(
'Brown',
(0.322, 0.541), (0.301, 0.539), (5, 2)),
BrenemanExperimentResult(
'Yellow',
(0.268, 0.555), (0.257, 0.552), (3, 4)),
BrenemanExperimentResult(
'Foliage',
(0.225, 0.538), (0.222, 0.536), (3, 2)),
BrenemanExperimentResult(
'Green',
(0.135, 0.531), (0.153, 0.529), (8, 2)),
BrenemanExperimentResult(
'Blue-green',
(0.145, 0.475), (0.160, 0.484), (3, 5)),
BrenemanExperimentResult(
'Blue',
(0.163, 0.331), (0.171, 0.379), (4, 11)),
BrenemanExperimentResult(
'Sky',
(0.179, 0.438), (0.187, 0.452), (4, 7)),
BrenemanExperimentResult(
'Purple',
(0.245, 0.365), (0.240, 0.398), (4, 10)))
BRENEMAN_EXPERIMENT_8_RESULTS = (
BrenemanExperimentResult(
'Illuminant',
(0.258, 0.524), (0.195, 0.469)),
BrenemanExperimentResult(
'Gray',
(0.257, 0.525), (0.200, 0.494), (2, 3), (1, 2), (0, 0)),
BrenemanExperimentResult(
'Red',
(0.458, 0.522), (0.410, 0.508), (12, 4), (-3, 5), (-7, 2)),
BrenemanExperimentResult(
'Skin',
(0.308, 0.526), (0.249, 0.502), (6, 2), (-1, 1), (-3, -1)),
BrenemanExperimentResult(
'Orange',
(0.359, 0.545), (0.299, 0.545), (12, 4), (0, -2), (-3, 0)),
BrenemanExperimentResult(
'Brown',
(0.349, 0.540), (0.289, 0.532), (10, 4), (0, 1), (-2, 2)),
BrenemanExperimentResult(
'Yellow',
(0.317, 0.550), (0.256, 0.549), (9, 5), (0, -3), (-3, 1)),
BrenemanExperimentResult(
'Foliage',
(0.260, 0.545), (0.198, 0.529), (5, 5), (3, 1), (0, 3)),
BrenemanExperimentResult(
'Green',
(0.193, 0.543), (0.137, 0.520), (9, 5), (3, 0), (2, 1)),
BrenemanExperimentResult(
'Blue-green',
(0.182, 0.516), (0.139, 0.477), (9, 4), (-3, 0), (-2, -4)),
BrenemanExperimentResult(
'Blue',
(0.184, 0.444), (0.150, 0.387), (5, 11), (3, -10), (6, -22)),
BrenemanExperimentResult(
'Sky',
(0.224, 0.489), (0.177, 0.439), (5, 6), (1, 1), (1, -7)),
BrenemanExperimentResult(
'Purple',
(0.277, 0.454), (0.226, 0.389), (4, 10), (1, 4), (1, -8)))
BRENEMAN_EXPERIMENT_9_RESULTS = (
BrenemanExperimentResult(
'Illuminant',
(0.254, 0.525), (0.195, 0.465)),
BrenemanExperimentResult(
'Gray',
(0.256, 0.524), (0.207, 0.496), (4, 6), (3, 2), (0, 0)),
BrenemanExperimentResult(
'Red',
(0.459, 0.521), (0.415, 0.489), (20, 14), (2, 12), (-2, 21)),
BrenemanExperimentResult(
'Skin',
(0.307, 0.525), (0.261, 0.500), (7, 7), (0, 1), (-5, 2)),
BrenemanExperimentResult(
'Orange',
(0.359, 0.545), (0.313, 0.532), (7, 5), (-2, -3), (-6, 13)),
BrenemanExperimentResult(
'Brown',
(0.349, 0.540), (0.302, 0.510), (11, 15), (0, 12), (-5, 24)),
BrenemanExperimentResult(
'Yellow',
(0.317, 0.550), (0.268, 0.538), (7, 10), (1, -4), (-4, 12)),
BrenemanExperimentResult(
'Foliage',
(0.259, 0.544), (0.212, 0.510), (10, 11), (0, 14), (-4, 22)),
BrenemanExperimentResult(
'Green',
(0.193, 0.542), (0.150, 0.506), (6, 10), (-1, 13), (-2, 15)),
BrenemanExperimentResult(
'Blue-green',
(0.181, 0.517), (0.144, 0.487), (9, 6), (-3, 0), (-1, -9)),
BrenemanExperimentResult(
'Blue',
(0.184, 0.444), (0.155, 0.407), (4, 11), (-2, -6), (6, -36)),
BrenemanExperimentResult(
'Sky',
(0.225, 0.490), (0.183, 0.458), (5, 8), (1, -3), (2, -19)),
BrenemanExperimentResult(
'Purple',
(0.276, 0.454), (0.233, 0.404), (7, 12), (2, 9), (0, -16)),
BrenemanExperimentResult(
'(Gray)h',
(0.256, 0.525), (0.208, 0.498)),
BrenemanExperimentResult(
'(Red)h',
(0.456, 0.521), (0.416, 0.501), (15, 7), None, (-6, -9)),
BrenemanExperimentResult(
'(Brown)h',
(0.349, 0.539), (0.306, 0.526), (11, 8), None, (-8, 7)),
BrenemanExperimentResult(
'(Foliage)h',
(0.260, 0.545), (0.213, 0.528), (7, 9), None, (-4, 5)),
BrenemanExperimentResult(
'(Green)h',
(0.193, 0.543), (0.149, 0.525), (10, 8), None, (-1, -1)),
BrenemanExperimentResult(
'(Blue)h',
(0.184, 0.444), (0.156, 0.419), (7, 8), None, (4, -45)),
BrenemanExperimentResult(
'(Purple)h',
(0.277, 0.456), (0.236, 0.422), (6, 11), None, (-2, -29)))
BRENEMAN_EXPERIMENT_10_RESULTS = (
BrenemanExperimentResult(
'Gray',
(0.208, 0.482), (0.213, 0.494), (3, 3)),
BrenemanExperimentResult(
'Red',
(0.447, 0.512), (0.411, 0.506), (15, 7)),
BrenemanExperimentResult(
'Skin',
(0.269, 0.505), (0.269, 0.511), (4, 3)),
BrenemanExperimentResult(
'Orange',
(0.331, 0.549), (0.315, 0.536), (7, 8)),
BrenemanExperimentResult(
'Brown',
(0.323, 0.542), (0.310, 0.526), (6, 8)),
BrenemanExperimentResult(
'Yellow',
(0.268, 0.556), (0.268, 0.541), (3, 6)),
BrenemanExperimentResult(
'Foliage',
(0.226, 0.538), (0.230, 0.525), (4, 8)),
BrenemanExperimentResult(
'Green',
(0.135, 0.531), (0.158, 0.524), (6, 3)),
BrenemanExperimentResult(
'Blue-green',
(0.145, 0.476), (0.161, 0.491), (4, 4)),
BrenemanExperimentResult(
'Blue',
(0.163, 0.330), (0.171, 0.377), (6, 19)),
BrenemanExperimentResult(
'Sky',
(0.179, 0.439), (0.187, 0.465), (5, 5)),
BrenemanExperimentResult(
'Purple',
(0.245, 0.366), (0.240, 0.402), (3, 12)))
BRENEMAN_EXPERIMENT_11_RESULTS = (
BrenemanExperimentResult(
'Illuminant',
(0.208, 0.482), (0.174, 0.520)),
BrenemanExperimentResult(
'Gray',
(0.209, 0.483), (0.176, 0.513), (3, 4), (2, 2), (0, 0)),
BrenemanExperimentResult(
'Red',
(0.450, 0.512), (0.419, 0.524), (10, 2), (3, 2), (8, -1)),
BrenemanExperimentResult(
'Skin',
(0.268, 0.506), (0.240, 0.528), (6, 2), (-4, 0), (-3, 0)),
BrenemanExperimentResult(
'Orange',
(0.331, 0.547), (0.293, 0.553), (6, 2), (3, -1), (5, 1)),
BrenemanExperimentResult(
'Brown',
(0.323, 0.542), (0.290, 0.552), (5, 2), (-1, -3), (0, -1)),
BrenemanExperimentResult(
'Yellow',
(0.266, 0.549), (0.236, 0.557), (4, 2), (-3, -2), (-4, 2)),
BrenemanExperimentResult(
'Foliage',
(0.227, 0.538), (0.194, 0.552), (4, 2), (2, -3), (-1, 1)),
BrenemanExperimentResult(
'Green',
(0.146, 0.534), (0.118, 0.551), (8, 3), (4, -2), (-6, 3)),
BrenemanExperimentResult(
'Blue-green',
(0.160, 0.475), (0.130, 0.513), (9, 4), (1, -1), (-4, -3)),
BrenemanExperimentResult(
'Blue',
(0.177, 0.340), (0.133, 0.427), (6, 14), (4, -17), (11, -29)),
BrenemanExperimentResult(
'Sky',
(0.179, 0.438), (0.146, 0.482), (6, 10), (1, 4), (0, -1)),
BrenemanExperimentResult(
'Purple',
(0.245, 0.366), (0.216, 0.419), (4, 13), (-3, 8), (4, -2)))
BRENEMAN_EXPERIMENT_12_RESULTS = (
BrenemanExperimentResult(
'Illuminant',
(0.205, 0.482), (0.174, 0.519)),
BrenemanExperimentResult(
'Gray',
(0.208, 0.482), (0.181, 0.507), (4, 3), (0, 1), (0, 0)),
BrenemanExperimentResult(
'Red',
(0.451, 0.512), (0.422, 0.526), (20, 3), (0, -5), (10, -5)),
BrenemanExperimentResult(
'Skin',
(0.268, 0.506), (0.244, 0.525), (5, 2), (-6, 0), (-2, -1)),
BrenemanExperimentResult(
'Orange',
(0.331, 0.548), (0.292, 0.553), (10, 2), (5, 2), (11, 1)),
BrenemanExperimentResult(
'Brown',
(0.324, 0.542), (0.286, 0.554), (8, 1), (5, -3), (10, -4)),
BrenemanExperimentResult(
'Yellow',
(0.266, 0.548), (0.238, 0.558), (6, 2), (-3, -1), (-1, -2)),
BrenemanExperimentResult(
'Foliage',
(0.227, 0.538), (0.196, 0.555), (6, 3), (3, -4), (2, -5)),
BrenemanExperimentResult(
'Green',
(0.145, 0.534), (0.124, 0.551), (8, 6), (1, -1), (-8, -1)),
BrenemanExperimentResult(
'Blue-green',
(0.160, 0.474), (0.135, 0.505), (5, 2), (1, -1), (-4, -3)),
BrenemanExperimentResult(
'Blue',
(0.178, 0.339), (0.149, 0.392), (4, 20), (-1, -5), (3, -7)),
BrenemanExperimentResult(
'Sky',
(0.179, 0.440), (0.150, 0.473), (4, 8), (3, 2), (2, 0)),
BrenemanExperimentResult(
'Purple',
(0.246, 0.366), (0.222, 0.404), (5, 15), (-4, 2), (4, 2)))
BRENEMAN_EXPERIMENTS_PRIMARIES_CHROMATICITIES = DocstringDict({
1: PrimariesChromaticityCoordinates(
1, ('A', 'D65'), 1500,
(0.671, 0.519), (-0.586, 0.627), (0.253, 0.016)),
2: PrimariesChromaticityCoordinates(
2, ('Projector', 'D55'), 1500,
(0.675, 0.523), (-0.466, 0.617), (0.255, 0.018)),
3: PrimariesChromaticityCoordinates(
3, ('Projector', 'D55'), 75,
(0.664, 0.510), (-0.256, 0.729), (0.244, 0.003)),
4: PrimariesChromaticityCoordinates(
4, ('A', 'D65'), 75,
(0.674, 0.524), (-0.172, 0.628), (0.218, -0.026)),
6: PrimariesChromaticityCoordinates(
6, ('A', 'D55'), 11100,
(0.659, 0.506), (-0.141, 0.615), (0.249, 0.009)),
8: PrimariesChromaticityCoordinates(
8, ('A', 'D65'), 350,
(0.659, 0.505), (-0.246, 0.672), (0.235, -0.006)),
9: PrimariesChromaticityCoordinates(
9, ('A', 'D65'), 15,
(0.693, 0.546), (-0.446, 0.773), (0.221, -0.023)),
11: PrimariesChromaticityCoordinates(
11, ('D55', 'green'), 1560,
(0.680, 0.529), (0.018, 0.576), (0.307, 0.080)),
12: PrimariesChromaticityCoordinates(
12, ('D55', 'green'), 75,
(0.661, 0.505), (0.039, 0.598), (0.345, 0.127))})
BRENEMAN_EXPERIMENTS_PRIMARIES_CHROMATICITIES.__doc__ = """
*Breneman (1987)* experiments primaries chromaticities.
References
----------
:cite:`Breneman1987b`
BRENEMAN_EXPERIMENTS_PRIMARIES_CHROMATICITIES : dict
"""
BRENEMAN_EXPERIMENTS = DocstringDict({
1: BRENEMAN_EXPERIMENT_1_RESULTS,
2: BRENEMAN_EXPERIMENT_2_RESULTS,
3: BRENEMAN_EXPERIMENT_3_RESULTS,
4: BRENEMAN_EXPERIMENT_4_RESULTS,
5: BRENEMAN_EXPERIMENT_5_RESULTS,
6: BRENEMAN_EXPERIMENT_6_RESULTS,
7: BRENEMAN_EXPERIMENT_7_RESULTS,
8: BRENEMAN_EXPERIMENT_8_RESULTS,
9: BRENEMAN_EXPERIMENT_9_RESULTS,
10: BRENEMAN_EXPERIMENT_10_RESULTS,
11: BRENEMAN_EXPERIMENT_11_RESULTS,
12: BRENEMAN_EXPERIMENT_12_RESULTS
})
BRENEMAN_EXPERIMENTS.__doc__ = """
*Breneman (1987)* experiments.
References
----------
:cite:`Breneman1987b`
BRENEMAN_EXPERIMENTS : dict
"""
| true | true |
f71b2b58505f1a73cc43c49801a8cae13c3f8a26 | 43 | py | Python | src/Application/PythonScriptModule/proto/state_2.py | antont/tundra | 5c9b0a3957071f08ab425dff701cdbb34f9e1868 | [
"Apache-2.0"
] | 1 | 2018-04-02T15:38:10.000Z | 2018-04-02T15:38:10.000Z | src/Application/PythonScriptModule/proto/state_2.py | antont/tundra | 5c9b0a3957071f08ab425dff701cdbb34f9e1868 | [
"Apache-2.0"
] | null | null | null | src/Application/PythonScriptModule/proto/state_2.py | antont/tundra | 5c9b0a3957071f08ab425dff701cdbb34f9e1868 | [
"Apache-2.0"
] | 1 | 2021-09-04T12:37:34.000Z | 2021-09-04T12:37:34.000Z | import state
def change():
state.x = 2 | 10.75 | 15 | 0.627907 | import state
def change():
state.x = 2 | true | true |
f71b2c8b5ead15b27ff28d5dc1c80528e9c46c18 | 2,755 | py | Python | profiler/torchmodules/torchlogger/activation_gradient_logger.py | NestLakerJasonLIN/pipedream | f50827f2e28cbdbd82a4ea686c0498272b1460d6 | [
"MIT"
] | 273 | 2019-08-31T14:12:11.000Z | 2022-03-05T13:34:25.000Z | profiler/torchmodules/torchlogger/activation_gradient_logger.py | albertsh10/pipedream | cad624f79a71f44ba79099f0c38321347b13e5c2 | [
"MIT"
] | 67 | 2019-09-19T15:36:59.000Z | 2022-01-13T09:11:54.000Z | profiler/torchmodules/torchlogger/activation_gradient_logger.py | albertsh10/pipedream | cad624f79a71f44ba79099f0c38321347b13e5c2 | [
"MIT"
] | 100 | 2019-09-16T20:59:14.000Z | 2022-03-23T12:56:56.000Z | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import pickle
import torch
class ActivationAndGradientLogger:
def __init__(self, directory):
self.directory = directory
try:
os.mkdir(self.directory)
except:
pass
self.iteration = 0
self.forward_counter = 0
self.backward_counter = 0
def reset_counters(self):
self.forward_counter = 0
self.backward_counter = 0
def hook_modules(self, module, iteration):
self.iteration = iteration
sub_directory = os.path.join(self.directory, str(iteration))
try:
os.mkdir(sub_directory)
except:
pass
self.hook_modules_helper(module, sub_directory)
def hook_modules_helper(self, module, sub_directory):
sub_modules = module.__dict__['_modules']
for name, sub_module in sub_modules.items():
if sub_module is None or isinstance(sub_module, torch.nn.Module) is False:
break
sub_sub_modules = sub_module.__dict__['_modules']
if len(sub_sub_modules) > 0:
# Recursively visit this module's descendants.
self.hook_modules_helper(sub_module, sub_directory)
else:
def forward_hook(*args):
activation = args[2]
filename = os.path.join(sub_directory, 'activations.%d.pkl' % self.forward_counter)
with open(filename, 'wb') as f:
torch.save(activation, f)
self.forward_counter += 1
def backward_hook(*args):
gradient = args[2]
filename = os.path.join(sub_directory, 'gradients.%d.pkl' % self.backward_counter)
with open(filename, 'wb') as f:
torch.save(gradient, f)
self.backward_counter += 1
sub_module.register_forward_hook(forward_hook)
sub_module.register_backward_hook(backward_hook)
def unhook_modules(self, module):
self.unhook_modules_helper(module)
self.reset_counters()
def unhook_modules_helper(self, module):
sub_modules = module.__dict__['_modules']
for name, sub_module in sub_modules.items():
if sub_module is None or isinstance(sub_module, torch.nn.Module) is False:
break
sub_sub_modules = sub_module.__dict__['_modules']
if len(sub_sub_modules) > 0:
# Recursively visit this module's descendants.
self.unhook_modules_helper(sub_module)
else:
sub_module.reset_hooks()
| 34.873418 | 103 | 0.591652 |
import os
import pickle
import torch
class ActivationAndGradientLogger:
def __init__(self, directory):
self.directory = directory
try:
os.mkdir(self.directory)
except:
pass
self.iteration = 0
self.forward_counter = 0
self.backward_counter = 0
def reset_counters(self):
self.forward_counter = 0
self.backward_counter = 0
def hook_modules(self, module, iteration):
self.iteration = iteration
sub_directory = os.path.join(self.directory, str(iteration))
try:
os.mkdir(sub_directory)
except:
pass
self.hook_modules_helper(module, sub_directory)
def hook_modules_helper(self, module, sub_directory):
sub_modules = module.__dict__['_modules']
for name, sub_module in sub_modules.items():
if sub_module is None or isinstance(sub_module, torch.nn.Module) is False:
break
sub_sub_modules = sub_module.__dict__['_modules']
if len(sub_sub_modules) > 0:
self.hook_modules_helper(sub_module, sub_directory)
else:
def forward_hook(*args):
activation = args[2]
filename = os.path.join(sub_directory, 'activations.%d.pkl' % self.forward_counter)
with open(filename, 'wb') as f:
torch.save(activation, f)
self.forward_counter += 1
def backward_hook(*args):
gradient = args[2]
filename = os.path.join(sub_directory, 'gradients.%d.pkl' % self.backward_counter)
with open(filename, 'wb') as f:
torch.save(gradient, f)
self.backward_counter += 1
sub_module.register_forward_hook(forward_hook)
sub_module.register_backward_hook(backward_hook)
def unhook_modules(self, module):
self.unhook_modules_helper(module)
self.reset_counters()
def unhook_modules_helper(self, module):
sub_modules = module.__dict__['_modules']
for name, sub_module in sub_modules.items():
if sub_module is None or isinstance(sub_module, torch.nn.Module) is False:
break
sub_sub_modules = sub_module.__dict__['_modules']
if len(sub_sub_modules) > 0:
# Recursively visit this module's descendants.
self.unhook_modules_helper(sub_module)
else:
sub_module.reset_hooks()
| true | true |
f71b2c90cb12b8290d45257d9a8169e55982187d | 4,207 | py | Python | twine/commands/check.py | chadwhawkins/twine | bd1d8b0f3ffdae9b91672d075d58cf635aa0e0f6 | [
"Apache-2.0"
] | null | null | null | twine/commands/check.py | chadwhawkins/twine | bd1d8b0f3ffdae9b91672d075d58cf635aa0e0f6 | [
"Apache-2.0"
] | null | null | null | twine/commands/check.py | chadwhawkins/twine | bd1d8b0f3ffdae9b91672d075d58cf635aa0e0f6 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Dustin Ingram
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import argparse
import cgi
import re
import sys
try:
from StringIO import StringIO
except ImportError:
from _io import StringIO
import readme_renderer.markdown
import readme_renderer.rst
import readme_renderer.txt
from twine.commands import _find_dists
from twine.package import PackageFile
_RENDERERS = {
None: readme_renderer.rst, # Default if description_content_type is None
"text/plain": readme_renderer.txt,
"text/x-rst": readme_renderer.rst,
"text/markdown": readme_renderer.markdown,
}
# Regular expression used to capture and reformat doctuils warnings into
# something that a human can understand. This is loosely borrowed from
# Sphinx: https://github.com/sphinx-doc/sphinx/blob
# /c35eb6fade7a3b4a6de4183d1dd4196f04a5edaf/sphinx/util/docutils.py#L199
_REPORT_RE = re.compile(
r"^<string>:(?P<line>(?:\d+)?): "
r"\((?P<level>DEBUG|INFO|WARNING|ERROR|SEVERE)/(\d+)?\) "
r"(?P<message>.*)",
re.DOTALL | re.MULTILINE,
)
class _WarningStream(object):
def __init__(self):
self.output = StringIO()
def write(self, text):
matched = _REPORT_RE.search(text)
if not matched:
self.output.write(text)
return
self.output.write(
"line {line}: {level_text}: {message}\n".format(
level_text=matched.group("level").capitalize(),
line=matched.group("line"),
message=matched.group("message").rstrip("\r\n"),
)
)
def __str__(self):
return self.output.getvalue()
def check(dists, output_stream=sys.stdout):
uploads = [i for i in _find_dists(dists) if not i.endswith(".asc")]
stream = _WarningStream()
failure = False
for filename in uploads:
output_stream.write("Checking distribution %s: " % filename)
package = PackageFile.from_filename(filename, comment=None)
metadata = package.metadata_dictionary()
description = metadata["description"]
description_content_type = metadata["description_content_type"]
if description_content_type is None:
output_stream.write(
'warning: `long_description_content_type` missing. '
'defaulting to `text/x-rst`.\n'
)
description_content_type = 'text/x-rst'
content_type, params = cgi.parse_header(description_content_type)
renderer = _RENDERERS.get(content_type, _RENDERERS[None])
if description in {None, 'UNKNOWN\n\n\n'}:
output_stream.write('warning: `long_description` missing.\n')
output_stream.write("Passed\n")
else:
if renderer.render(description, stream=stream, **params) is None:
failure = True
output_stream.write("Failed\n")
output_stream.write(
"The project's long_description has invalid markup which "
"will not be rendered on PyPI. The following syntax "
"errors were detected:\n%s" % stream
)
else:
output_stream.write("Passed\n")
return failure
def main(args):
parser = argparse.ArgumentParser(prog="twine check")
parser.add_argument(
"dists",
nargs="+",
metavar="dist",
help="The distribution files to check, usually dist/*",
)
args = parser.parse_args(args)
# Call the check function with the arguments from the command line
return check(args.dists)
| 32.114504 | 78 | 0.659853 |
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import argparse
import cgi
import re
import sys
try:
from StringIO import StringIO
except ImportError:
from _io import StringIO
import readme_renderer.markdown
import readme_renderer.rst
import readme_renderer.txt
from twine.commands import _find_dists
from twine.package import PackageFile
_RENDERERS = {
None: readme_renderer.rst,
"text/plain": readme_renderer.txt,
"text/x-rst": readme_renderer.rst,
"text/markdown": readme_renderer.markdown,
}
ORT_RE = re.compile(
r"^<string>:(?P<line>(?:\d+)?): "
r"\((?P<level>DEBUG|INFO|WARNING|ERROR|SEVERE)/(\d+)?\) "
r"(?P<message>.*)",
re.DOTALL | re.MULTILINE,
)
class _WarningStream(object):
def __init__(self):
self.output = StringIO()
def write(self, text):
matched = _REPORT_RE.search(text)
if not matched:
self.output.write(text)
return
self.output.write(
"line {line}: {level_text}: {message}\n".format(
level_text=matched.group("level").capitalize(),
line=matched.group("line"),
message=matched.group("message").rstrip("\r\n"),
)
)
def __str__(self):
return self.output.getvalue()
def check(dists, output_stream=sys.stdout):
uploads = [i for i in _find_dists(dists) if not i.endswith(".asc")]
stream = _WarningStream()
failure = False
for filename in uploads:
output_stream.write("Checking distribution %s: " % filename)
package = PackageFile.from_filename(filename, comment=None)
metadata = package.metadata_dictionary()
description = metadata["description"]
description_content_type = metadata["description_content_type"]
if description_content_type is None:
output_stream.write(
'warning: `long_description_content_type` missing. '
'defaulting to `text/x-rst`.\n'
)
description_content_type = 'text/x-rst'
content_type, params = cgi.parse_header(description_content_type)
renderer = _RENDERERS.get(content_type, _RENDERERS[None])
if description in {None, 'UNKNOWN\n\n\n'}:
output_stream.write('warning: `long_description` missing.\n')
output_stream.write("Passed\n")
else:
if renderer.render(description, stream=stream, **params) is None:
failure = True
output_stream.write("Failed\n")
output_stream.write(
"The project's long_description has invalid markup which "
"will not be rendered on PyPI. The following syntax "
"errors were detected:\n%s" % stream
)
else:
output_stream.write("Passed\n")
return failure
def main(args):
parser = argparse.ArgumentParser(prog="twine check")
parser.add_argument(
"dists",
nargs="+",
metavar="dist",
help="The distribution files to check, usually dist/*",
)
args = parser.parse_args(args)
# Call the check function with the arguments from the command line
return check(args.dists)
| true | true |
f71b2d261e9d06c923fdc731f3a69eb747347726 | 11,627 | py | Python | bioactive_lab.py | PriyamvadaKumar/AWS_BioActive_Classification | b6a4413618586712ca4dc196f2dfaa3ceca804fb | [
"MIT"
] | 1 | 2021-06-04T02:46:37.000Z | 2021-06-04T02:46:37.000Z | bioactive_lab.py | PriyamvadaKumar/AWS_BioActive_classification | b6a4413618586712ca4dc196f2dfaa3ceca804fb | [
"MIT"
] | null | null | null | bioactive_lab.py | PriyamvadaKumar/AWS_BioActive_classification | b6a4413618586712ca4dc196f2dfaa3ceca804fb | [
"MIT"
] | null | null | null | import os, sys
dirpath = os.getcwd()
sys.path.insert(0, dirpath + '/goal_tether_functions')
sys.path.insert(0, dirpath + '/predictive_modelers')
sys.path.insert(0, dirpath + '/predictive_modelers/assessment_resources')
sys.path.insert(0, dirpath + '/active_learners')
sys.path.insert(0, dirpath + '/data_acquisition')
sys.path.insert(0, dirpath + '/diagnostics')
from createCampaign_battleship import main as createCampaign
# from createImageCampaign_Bria import main as createCampaign
from runCampaign2 import main as runCampaign
from database import *
import outputManager
import time
import boto3
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.cluster import KMeans
# Part 1 Plotting Function
def plot_simulation_accuracy(acc, title, mul_accuracy=False):
    """Plot model accuracy per iteration on a new matplotlib figure.

    Parameters
    ----------
    acc : sequence
        If ``mul_accuracy`` is False, a single series of accuracy values,
        one per iteration.  If True, a sequence of three accuracy series
        ordered (full space, forward modeling, prediction only).
    title : str
        Title shown above the axes.
    mul_accuracy : bool, optional
        Whether ``acc`` holds the three labelled series (default False).

    Notes
    -----
    Calls ``plt.show()``, which blocks until the figure window is closed.
    """
    fig, ax = plt.subplots()
    ax.set_ylabel("Accuracy (%)")
    ax.set_xlabel("Iterations")
    ax.set_title(title)
    if mul_accuracy:
        ax.plot(np.arange(len(acc[0])), acc[0], label="Full Space")
        ax.plot(np.arange(len(acc[1])), acc[1], label="Forward Modeling")
        ax.plot(np.arange(len(acc[2])), acc[2], label="Prediction Only")
        # Legend only makes sense for the labelled multi-series case;
        # calling ax.legend() with no labelled artists (the else branch)
        # emits a matplotlib "no handles with labels" warning.
        ax.legend()
    else:
        ax.plot(np.arange(len(acc)), acc)
    plt.show()
def average_arrays(mat, n_points=25):
    """Average a list of accuracy curves element-wise.

    ARGUMENTS:
     - mat:      list of numeric sequences (possibly of different lengths)
     - n_points: number of points to produce (default 25, as before)
    RETURNS:
     - list of n_points averages; rows shorter than a given index simply
       contribute 0 at that index (the divisor stays len(mat), as in the
       original implementation)
    """
    averaged = []
    for i in range(n_points):
        total = 0
        for row in mat:
            # Fix: the original test was `len(row) < i`, which let
            # i == len(row) through and raised IndexError on row[i].
            if i >= len(row):
                continue
            total += row[i]
        averaged.append(total / len(mat))
    return averaged
# --- data preparation: fetch CSV, cluster features, build ground-truth grid ---
wd =os.getcwd()
print("Current Working Directory: ", wd)
print()
# Download the raw dataset from S3 if it is not cached locally.
# (`path` presumably comes from the `from database import *` star import —
# TODO confirm it is os.path.)
if path.exists("data/data.csv") is False:
    print("Retrieving Data from S3")
    # read data from S3
    s3 = boto3.resource('s3')
    s3.Bucket('whatyouknowaboutmybucket').download_file('data.csv', wd + '/data/data.csv')
# Second existence check: best-effort wait if the file still is not there.
# NOTE(review): this sleeps but does not re-download — confirm intent.
if path.exists("data/data.csv") is False:
    print("Retrieving Data from S3")
    time.sleep(5)
# Load the CSV: columns 4+ are features, column 2 holds the class label.
data = pd.read_csv("data/data.csv").dropna().to_numpy()
features = data[:, 4:]
labels = data[:, 2]
l = LabelEncoder()
labels = l.fit_transform(labels)
print(l.classes_)
# Cluster the first 1500 feature vectors into 5 groups; score the rest
# against the fitted centroids.
s = KMeans(n_clusters=5)
# s.decision_function(features[:1000])
s.fit_transform(features[:1500])
print(s.score(features[1500:]))
d = np.zeros((20,20))
# create groundTruth
# Columns 0 and 1 appear to be 1-based grid coordinates; each cell of the
# 20x20 grid stores the predicted cluster id for that position — TODO confirm.
for i in range(len(data)):
    if data[i][0] - 1 >= len(d) or data[i][1] >= len(d[0]):
        continue
    d[data[i][0]-1][data[i][1]-1] = s.predict(features[i].reshape(1,-1))
print(d)
np.savetxt('data_acquisition/project.txt', d)
print(labels)
# exit()
'''
campaign = createCampaign()
runCampaign(campaign)
acc = [np.array(campaign.accuracy_full), np.array(campaign.accuracy_forwardModeling),
       np.array(campaign.accuracy_onlyPredictions)]
plot_simulation_accuracy(acc, "Model Accuracies for a Single Simulation", mul_accuracy=True)
'''
# Part 2 of Assignment - 2 independent variables (0-20) and 1 dependent variable (0-10) for 20 simulations
# Run a single campaign against the ground-truth grid generated above and
# plot all three accuracy curves (full space / forward modeling / prediction).
acc = []
for i in range(1):
    campaign = createCampaign()
    campaign.randoseed = 2
    # campaign.ESS.iVars = [('int', 0, 9), ('int', 0, 9)]
    # campaign.ESS.dVars = [('int', 0, 2)]
    campaign.groundtruthData = 'data_acquisition/project.txt'
    campaign.simsFlag = True
    runCampaign(campaign)
    acc = [campaign.accuracy_full, campaign.accuracy_forwardModeling, campaign.accuracy_onlyPredictions]
    # acc = average_arrays(acc)
plot_simulation_accuracy(acc, "Three Accuracies for the Experimental Space", mul_accuracy=True)
# Part 3 of Assignment -
# acc1, acc2, acc3, acc4 = [], [], [], []
# for i in range(5):
# campaign = createCampaign()
# campaign.ESS.high_homogeneity = True
# campaign.ESS.h_num = 2
# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]
# campaign.ESS.dVars = [('int', 0, 2)]
# campaign.ESS.dimarr = [20,20]
# runCampaign(campaign)
# acc = campaign.accuracy_onlyPredictions
# acc1.append(acc)
#
# for i in range(5):
# campaign = createCampaign()
# campaign.ESS.low_homogeneity = True
# campaign.ESS.h_num = 2
# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]
# campaign.ESS.dVars = [('int', 0, 2)]
# runCampaign(campaign)
# acc = campaign.accuracy_onlyPredictions
# acc2.append(acc)
#
# for i in range(5):
# campaign = createCampaign()
# campaign.ESS.high_homogeneity = True
# campaign.ESS.h_num = 10
# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]
# campaign.ESS.dVars = [('int', 0, 9)]
# campaign.ESS.dimarr = [20,20]
# runCampaign(campaign)
# acc = campaign.accuracy_onlyPredictions
# acc3.append(acc)
#
# for i in range(5):
# campaign = createCampaign()
# campaign.ESS.low_homogeneity = True
# campaign.ESS.h_num = 10
# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]
# campaign.ESS.dVars = [('int', 0, 9)]
# campaign.ESS.dimarr = [20,20]
# runCampaign(campaign)
# acc = campaign.accuracy_onlyPredictions
# acc4.append(acc)
#
# acc1, acc2, acc3, acc4 = average_arrays(acc1), average_arrays(acc2), average_arrays(acc3), average_arrays(acc4)
#
# plt.plot([i+1 for i in range(len(acc1))], acc1, label="H-2", color="blue")
# plt.plot([i+1 for i in range(len(acc2))], acc2, label="L-2", color="green")
# plt.plot([i+1 for i in range(len(acc3))], acc3, label="H-10", color="red")
# plt.plot([i+1 for i in range(len(acc4))], acc4, label="L-10", color="black")
# plt.ylabel("Accuracy (%)")
# plt.xlabel("Iterations")
# plt.title("Different Homogeneity within Experimental Spaces")
# plt.legend()
# plt.show()
# Part 4 of Assignment -
# acc1, acc2, acc3, acc4 = [], [], [], []
# for i in range(1):
# campaign = createCampaign()
# campaign.ESS.low_homogeneity = True
# campaign.ESS.error = True
# campaign.ESS.error = 0
# campaign.randoseed= 45
# campaign.ESS.h_num = 10
# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]
# campaign.ESS.dVars = [('int', 0, 9)]
# campaign.ESS.dimarr = [20, 20]
# runCampaign(campaign)
# print(campaign.groundTruth)
# acc = campaign.accuracy_onlyPredictions
# acc1.append(acc)
#
#
# for i in range(1):
# campaign = createCampaign()
# campaign.ESS.low_homogeneity = True
# campaign.ESS.error = True
# campaign.randoseed = 1
# campaign.ESS.error = 0.1
# campaign.ESS.h_num = 10
# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]
# campaign.ESS.dVars = [('int', 0, 9)]
# campaign.ESS.dimarr = [20, 20]
# runCampaign(campaign)
# print(campaign.groundTruth)
# acc = campaign.accuracy_onlyPredictions
# acc2.append(acc)
#
# for i in range(1):
# campaign = createCampaign()
# campaign.ESS.low_homogeneity = True
# campaign.ESS.error = True
# campaign.ESS.error = 0.5
# campaign.randoseed = 2
# campaign.ESS.h_num = 10
# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]
# campaign.ESS.dVars = [('int', 0, 9)]
# campaign.ESS.dimarr = [20, 20]
# runCampaign(campaign)
# print(campaign.groundTruth)
# acc = campaign.accuracy_onlyPredictions
# acc3.append(acc)
#
# for i in range(1):
# campaign = createCampaign()
# campaign.ESS.low_homogeneity = True
# campaign.ESS.error = True
# campaign.ESS.error = 1.0
# campaign.randoseed=3
# campaign.ESS.h_num = 10
# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]
# campaign.ESS.dVars = [('int', 0, 9)]
# campaign.ESS.dimarr = [20, 20]
# runCampaign(campaign)
# print(campaign.groundTruth)
# acc = campaign.accuracy_onlyPredictions
# acc4.append(acc)
#
# acc1, acc2, acc3, acc4 = average_arrays(acc1), average_arrays(acc2), average_arrays(acc3), average_arrays(acc4)
#
# plt.plot([i+1 for i in range(len(acc1))], acc1, label="0.0", color="blue")
# plt.plot([i+1 for i in range(len(acc2))], acc2, label="0.1", color="green")
# plt.plot([i+1 for i in range(len(acc3))], acc3, label="0.5", color="red")
# plt.plot([i+1 for i in range(len(acc4))], acc4, label="1.0", color="black")
# plt.ylabel("Accuracy (%)")
# plt.xlabel("Iterations")
# plt.title("Different Error Rates within Experimental Spaces")
# plt.legend()
# plt.show()
# for i in range(1):
# campaign = createCampaign()
# campaign.ESS.low_homogeneity = True
# campaign.ESS.error = True
# campaign.ESS.error = 0
# campaign.randoseed = 53
# campaign.ESS.h_num = 10
# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]
# campaign.ESS.dVars = [('int', 0, 9)]
# campaign.ESS.dimarr = [20, 20]
# runCampaign(campaign)
# print(campaign.groundTruth)
# acc1 = campaign.accuracy_onlyPredictions
#
#
# for i in range(1):
# campaign = createCampaign()
# campaign.ESS.low_homogeneity = True
# campaign.ESS.error = True
# campaign.ESS.error = 0
# campaign.randoseed = 39
# campaign.ESS.h_num = 10
# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]
# campaign.ESS.dVars = [('int', 0, 9)]
# campaign.ESS.dimarr = [20, 20]
# runCampaign(campaign)
# print(campaign.groundTruth)
# acc2 = campaign.accuracy_onlyPredictions
#
#
# for i in range(1):
# campaign = createCampaign()
# campaign.ESS.low_homogeneity = True
# campaign.ESS.error = True
# campaign.ESS.error = 0.1
# campaign.randoseed = 32
# campaign.ESS.h_num = 10
# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]
# campaign.ESS.dVars = [('int', 0, 9)]
# campaign.ESS.dimarr = [20, 20]
# runCampaign(campaign)
# print(campaign.groundTruth)
# acc3 = campaign.accuracy_onlyPredictions
#
# for i in range(1):
# campaign = createCampaign()
# campaign.ESS.low_homogeneity = True
# campaign.ESS.error = True
# campaign.ESS.error = 0.1
# campaign.randoseed = 17
# campaign.ESS.h_num = 10
# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]
# campaign.ESS.dVars = [('int', 0, 9)]
# campaign.ESS.dimarr = [20, 20]
# runCampaign(campaign)
# print(campaign.groundTruth)
# acc4 = campaign.accuracy_onlyPredictions
#
# for i in range(1):
# campaign = createCampaign()
# campaign.ESS.low_homogeneity = True
# campaign.ESS.error = True
# campaign.ESS.error = 0.5
# campaign.randoseed = 3
# campaign.ESS.h_num = 10
# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]
# campaign.ESS.dVars = [('int', 0, 9)]
# campaign.ESS.dimarr = [20, 20]
# runCampaign(campaign)
# print(campaign.groundTruth)
# acc5 = campaign.accuracy_onlyPredictions
#
# for i in range(1):
# campaign = createCampaign()
# campaign.ESS.low_homogeneity = True
# campaign.ESS.error = True
# campaign.ESS.error = 0.5
# campaign.randoseed = 15
# campaign.ESS.h_num = 10
# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]
# campaign.ESS.dVars = [('int', 0, 9)]
# campaign.ESS.dimarr = [20, 20]
# runCampaign(campaign)
# print(campaign.groundTruth)
# acc6 = campaign.accuracy_onlyPredictions
#
#
# plt.plot([i+1 for i in range(len(acc1))], acc1, label="0.0 - B", color="blue")
# plt.plot([i+1 for i in range(len(acc2))], acc2, label="0.0 - N", color="green")
# plt.plot([i+1 for i in range(len(acc3))], acc3, label="0.1 - B", color="red")
# plt.plot([i+1 for i in range(len(acc4))], acc4, label="0.1 - N", color="black")
# plt.plot([i+1 for i in range(len(acc5))], acc5, label="0.5 - B", color="yellow")
# plt.plot([i+1 for i in range(len(acc6))], acc6, label="0.5 - N", color="cyan")
# plt.ylabel("Accuracy (%)")
# plt.xlabel("Iterations")
# plt.title("Different Categorical Models within Experimental Spaces")
# plt.legend()
# plt.show()
| 32.387187 | 113 | 0.637224 | import os, sys
dirpath = os.getcwd()
sys.path.insert(0, dirpath + '/goal_tether_functions')
sys.path.insert(0, dirpath + '/predictive_modelers')
sys.path.insert(0, dirpath + '/predictive_modelers/assessment_resources')
sys.path.insert(0, dirpath + '/active_learners')
sys.path.insert(0, dirpath + '/data_acquisition')
sys.path.insert(0, dirpath + '/diagnostics')
from createCampaign_battleship import main as createCampaign
from runCampaign2 import main as runCampaign
from database import *
import outputManager
import time
import boto3
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.cluster import KMeans
def plot_simulation_accuracy(acc, title, mul_accuracy=False):
fig, ax = plt.subplots()
ax.set_ylabel("Accuracy (%)")
ax.set_xlabel("Iterations")
ax.set_title(title)
if mul_accuracy:
ax.plot(np.arange(len(acc[0])), acc[0], label="Full Space")
ax.plot(np.arange(len(acc[1])), acc[1], label="Forward Modeling")
ax.plot(np.arange(len(acc[2])), acc[2], label="Prediction Only")
else:
ax.plot(np.arange(len(acc)), acc)
ax.legend()
plt.show()
def average_arrays(mat):
array = []
for i in range(25):
avg = 0
for m in range(len(mat)):
if len(mat[m]) < i:
continue
avg += mat[m][i]
avg = avg/len(mat)
array.append(avg)
return array
wd =os.getcwd()
print("Current Working Directory: ", wd)
print()
if path.exists("data/data.csv") is False:
print("Retrieving Data from S3")
s3 = boto3.resource('s3')
s3.Bucket('whatyouknowaboutmybucket').download_file('data.csv', wd + '/data/data.csv')
if path.exists("data/data.csv") is False:
print("Retrieving Data from S3")
time.sleep(5)
data = pd.read_csv("data/data.csv").dropna().to_numpy()
features = data[:, 4:]
labels = data[:, 2]
l = LabelEncoder()
labels = l.fit_transform(labels)
print(l.classes_)
s = KMeans(n_clusters=5)
s.fit_transform(features[:1500])
print(s.score(features[1500:]))
d = np.zeros((20,20))
for i in range(len(data)):
if data[i][0] - 1 >= len(d) or data[i][1] >= len(d[0]):
continue
d[data[i][0]-1][data[i][1]-1] = s.predict(features[i].reshape(1,-1))
print(d)
np.savetxt('data_acquisition/project.txt', d)
print(labels)
acc = []
for i in range(1):
campaign = createCampaign()
campaign.randoseed = 2
campaign.groundtruthData = 'data_acquisition/project.txt'
campaign.simsFlag = True
runCampaign(campaign)
acc = [campaign.accuracy_full, campaign.accuracy_forwardModeling, campaign.accuracy_onlyPredictions]
plot_simulation_accuracy(acc, "Three Accuracies for the Experimental Space", mul_accuracy=True)
| true | true |
f71b2dd96ed4ebc42bacf10ebdd08e56bd022192 | 1,747 | py | Python | s3-bucket-cleaner/clean_s3_bucket.py | artsy/opstools | 889b08c6b741dfeac0c32b0a4d96d0f9f3bbc0e7 | [
"MIT"
] | null | null | null | s3-bucket-cleaner/clean_s3_bucket.py | artsy/opstools | 889b08c6b741dfeac0c32b0a4d96d0f9f3bbc0e7 | [
"MIT"
] | null | null | null | s3-bucket-cleaner/clean_s3_bucket.py | artsy/opstools | 889b08c6b741dfeac0c32b0a4d96d0f9f3bbc0e7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Sourced from https://gist.github.com/seventhskye/0cc7b2804252975d36dca047ab7729e9 with some modifications
import os
import boto3
def main():
    """Delete every object version and delete marker under S3_PREFIX in
    the S3_BUCKET bucket (i.e. empty part or all of a versioned bucket).

    Environment variables:
        S3_BUCKET: name of the bucket to clean (required)
        S3_PREFIX: key prefix to restrict deletion to (optional; empty
                   string deletes the entire bucket contents)
    """
    client = boto3.client('s3')
    Bucket = os.environ.get('S3_BUCKET')
    Prefix = os.environ.get('S3_PREFIX', '')  # leave blank to delete the entire contents
    MaxKeys = 1000
    KeyMarker = None
    if Bucket is None:
        print("Environment variable S3_BUCKET must be set!")
        return

    def _delete_batch(items):
        # Delete one page of versions/markers; each item needs Key + VersionId.
        objects = [{'VersionId': it['VersionId'], 'Key': it['Key']}
                   for it in items]
        response = client.delete_objects(Bucket=Bucket,
                                         Delete={'Objects': objects})
        for deleted in response.get('Deleted', []):
            print("Deleted %s" % deleted['Key'])

    IsTruncated = True
    while IsTruncated:
        kwargs = dict(Bucket=Bucket, MaxKeys=MaxKeys, Prefix=Prefix)
        if KeyMarker:
            kwargs['KeyMarker'] = KeyMarker
        version_list = client.list_object_versions(**kwargs)
        # 'Versions' / 'DeleteMarkers' are simply absent when a page has
        # none of them. The original bare `except: pass` also hid genuine
        # delete failures; we now only tolerate the missing keys.
        if 'Versions' in version_list:
            _delete_batch(version_list['Versions'])
        if 'DeleteMarkers' in version_list:
            _delete_batch(version_list['DeleteMarkers'])
        IsTruncated = version_list['IsTruncated']
        if 'NextKeyMarker' in version_list:
            KeyMarker = version_list['NextKeyMarker']
if __name__ == '__main__':
main()
| 28.639344 | 107 | 0.627934 |
import os
import boto3
def main():
client = boto3.client('s3')
Bucket = os.environ.get('S3_BUCKET')
Prefix = os.environ.get('S3_PREFIX', '')
IsTruncated = True
MaxKeys = 1000
KeyMarker = None
if Bucket is None:
print("Environment variable S3_BUCKET must be set!")
return
while IsTruncated == True:
if not KeyMarker:
version_list = client.list_object_versions(
Bucket=Bucket,
MaxKeys=MaxKeys,
Prefix=Prefix)
else:
version_list = client.list_object_versions(
Bucket=Bucket,
MaxKeys=MaxKeys,
Prefix=Prefix,
KeyMarker=KeyMarker)
try:
objects = []
versions = version_list['Versions']
for v in versions:
objects.append({'VersionId':v['VersionId'],'Key': v['Key']})
response = client.delete_objects(Bucket=Bucket,Delete={'Objects':objects})
for item in response['Deleted']:
print("Deleted %s" % item['Key'])
except:
pass
try:
objects = []
delete_markers = version_list['DeleteMarkers']
for d in delete_markers:
objects.append({'VersionId':d['VersionId'],'Key': d['Key']})
response = client.delete_objects(Bucket=Bucket,Delete={'Objects':objects})
for item in response['Deleted']:
print("Deleted %s" % item['Key'])
except:
pass
IsTruncated = version_list['IsTruncated']
if 'NextKeyMarker' in version_list:
KeyMarker = version_list['NextKeyMarker']
if __name__ == '__main__':
main()
| true | true |
f71b2e2da61209cbca5bf4e3278259b97ab94ddd | 135 | py | Python | scripts/migrate_piwik/settings/local-dist.py | DanielSBrown/osf.io | 98dda2ac237377197acacce78274bc0a4ce8f303 | [
"Apache-2.0"
] | null | null | null | scripts/migrate_piwik/settings/local-dist.py | DanielSBrown/osf.io | 98dda2ac237377197acacce78274bc0a4ce8f303 | [
"Apache-2.0"
] | null | null | null | scripts/migrate_piwik/settings/local-dist.py | DanielSBrown/osf.io | 98dda2ac237377197acacce78274bc0a4ce8f303 | [
"Apache-2.0"
] | null | null | null | PIWIK_DB_HOST = 'localhost'
PIWIK_DB_PORT = 3336
PIWIK_DB_USER = 'root'
PIWIK_DB_PASSWORD = 'changeme'
PIWIK_DB_NAME = 'piwik_staging'
| 22.5 | 31 | 0.792593 | PIWIK_DB_HOST = 'localhost'
PIWIK_DB_PORT = 3336
PIWIK_DB_USER = 'root'
PIWIK_DB_PASSWORD = 'changeme'
PIWIK_DB_NAME = 'piwik_staging'
| true | true |
f71b2e305d3d87fbaf19ce272d3f1ec378c3ef49 | 1,017 | py | Python | tenants/monkey/db/migrations/recorder.py | bugkiwi/django-tenants-mysql | bc008bde01f5dbbd0e85bcacfa48db2ee8347e50 | [
"MIT"
] | 2 | 2017-08-01T10:29:00.000Z | 2022-03-05T12:51:43.000Z | tenants/monkey/db/migrations/recorder.py | bugkiwi/django-tenants-mysql | bc008bde01f5dbbd0e85bcacfa48db2ee8347e50 | [
"MIT"
] | 1 | 2018-08-07T13:40:38.000Z | 2018-08-07T13:40:38.000Z | tenants/monkey/db/migrations/recorder.py | bugkiwi/django-tenants-mysql | bc008bde01f5dbbd0e85bcacfa48db2ee8347e50 | [
"MIT"
] | 2 | 2019-11-27T09:34:44.000Z | 2022-03-05T12:59:12.000Z | #!/usr/bin/env python
#coding:utf-8
__author__ = 'gkiwi'
from django.db.utils import DatabaseError
from django.db.migrations.exceptions import MigrationSchemaMissing
__all__ = ['MigrationRecorder']
class MigrationRecorder(object):
    """Monkey-patch replacement for Django's MigrationRecorder.

    Overrides ensure_schema() so the ``django_migrations`` table name is
    resolved through ``connection.get_schemaed_db_table`` — presumably a
    tenant/schema-aware lookup added by this project (TODO confirm).
    """
    def ensure_schema(self):
        """
        Ensures the table exists and has the correct schema.
        """
        # If the table's there, that's fine - we've never changed its schema
        # in the codebase.
        # gkiwi #TOPATCH — resolve the table name via the (schema-aware)
        # connection instead of using Migration._meta.db_table directly.
        from django.db import connection
        db_table = connection.get_schemaed_db_table(self.Migration._meta.db_table)
        # end
        # NOTE(review): table_names() is given a raw cursor here; the lookup
        # uses the schema-qualified name computed above.
        if db_table in self.connection.introspection.table_names(self.connection.cursor()):
            return
        # Make the table
        try:
            with self.connection.schema_editor() as editor:
                editor.create_model(self.Migration)
        except DatabaseError as exc:
            raise MigrationSchemaMissing("Unable to create the django_migrations table (%s)" % exc)
| 31.78125 | 99 | 0.665683 |
__author__ = 'gkiwi'
from django.db.utils import DatabaseError
from django.db.migrations.exceptions import MigrationSchemaMissing
__all__ = ['MigrationRecorder']
class MigrationRecorder(object):
def ensure_schema(self):
# in the codebase.
# gkiwi #TOPATCH
from django.db import connection
db_table = connection.get_schemaed_db_table(self.Migration._meta.db_table)
# end
if db_table in self.connection.introspection.table_names(self.connection.cursor()):
return
# Make the table
try:
with self.connection.schema_editor() as editor:
editor.create_model(self.Migration)
except DatabaseError as exc:
raise MigrationSchemaMissing("Unable to create the django_migrations table (%s)" % exc)
| true | true |
f71b2e92c0252192242ac618201c720c01142e52 | 46,863 | py | Python | pyAudioAnalysis/audioSegmentation.py | polewczakp/pyAudioAnalysis | 7dc2d8e18da1ca2f2485a402bb7399b43bbb2b24 | [
"Apache-2.0"
] | null | null | null | pyAudioAnalysis/audioSegmentation.py | polewczakp/pyAudioAnalysis | 7dc2d8e18da1ca2f2485a402bb7399b43bbb2b24 | [
"Apache-2.0"
] | null | null | null | pyAudioAnalysis/audioSegmentation.py | polewczakp/pyAudioAnalysis | 7dc2d8e18da1ca2f2485a402bb7399b43bbb2b24 | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
import os
import csv
import glob
import scipy
import sklearn
import numpy as np
import hmmlearn.hmm
import sklearn.cluster
import pickle as cpickle
import matplotlib.pyplot as plt
from scipy.spatial import distance
import sklearn.discriminant_analysis
from pyAudioAnalysis import audioBasicIO
from pyAudioAnalysis import audioTrainTest as at
from pyAudioAnalysis import MidTermFeatures as mtf
from pyAudioAnalysis import ShortTermFeatures as stf
""" General utility functions """
def smooth_moving_avg(signal, window=11):
    """Smooth a 1-D signal with a moving-average (boxcar) filter.

    The signal is mirror-extended at both ends before filtering so the
    output keeps the input length without strong boundary artifacts.

    ARGUMENTS:
        - signal:  1-D np array to be smoothed
        - window:  averaging window length in samples (values < 3 are a
                   no-op and the input is returned unchanged)
    RETURNS:
        - the smoothed signal (same length as the input)
    RAISES:
        - ValueError for non-1-D input or input shorter than the window
    """
    window = int(window)
    # Fix: the original raised ValueError("") with an empty message here.
    if signal.ndim != 1:
        raise ValueError("smooth_moving_avg only accepts 1-D arrays.")
    if signal.size < window:
        raise ValueError("Input vector needs to be bigger than window size.")
    if window < 3:
        return signal
    # mirror-extend the signal, then convolve with a normalized boxcar
    s = np.r_[2 * signal[0] - signal[window - 1::-1],
              signal, 2 * signal[-1] - signal[-1:-window:-1]]
    w = np.ones(window, 'd')
    y = np.convolve(w / w.sum(), s, mode='same')
    # trim the padded edges so the output matches the input length
    return y[window:-window + 1]
def self_similarity_matrix(feature_vectors):
    """Compute the cosine self-similarity matrix of a feature sequence.

    ARGUMENTS:
        - feature_vectors: np matrix (nDims x nVectors); the i-th column
          corresponds to the i-th feature vector
    RETURNS:
        - the (nVectors x nVectors) self-similarity matrix
    """
    # normalize_features expects a list of (nVectors x nDims) matrices,
    # hence the transpositions around the call
    normalized, _, _ = at.normalize_features([feature_vectors.T])
    normalized = normalized[0].T
    # pairwise cosine distances -> square form -> similarity = 1 - distance
    cosine_distances = distance.pdist(normalized.T, 'cosine')
    return 1.0 - distance.squareform(cosine_distances)
def labels_to_segments(labels, window):
    """Convert a sequence of fix-sized window labels to segments.

    ARGUMENTS:
     - labels:   a sequence of class labels (one per time window)
     - window:   window duration (in seconds)
    RETURNS:
     - segments: np array (n_segments x 2); segments[i, 0] and
                 segments[i, 1] are start and end time of segment i
     - classes:  list of class labels, one per segment
    """
    if len(labels) == 1:
        # Single-window input: one segment covering the whole window.
        # Fix: return a (1 x 2) np array like the general case — the
        # original returned a flat python list, which broke callers that
        # index segments[i, 0] / segments[i, 1].
        return np.array([[0, window]]), list(labels)
    index = 0
    classes = []
    segment_list = []
    cur_label = labels[index]
    while index < len(labels) - 1:
        previous_value = cur_label
        # scan forward until the label changes (or the sequence ends)
        while True:
            index += 1
            compare_flag = labels[index]
            if (compare_flag != cur_label) or (index == len(labels) - 1):
                cur_label = labels[index]
                segment_list.append(index * window)
                classes.append(previous_value)
                break
    # convert segment end points to (start, end) pairs
    segments = np.zeros((len(segment_list), 2))
    for i in range(len(segment_list)):
        if i > 0:
            segments[i, 0] = segment_list[i - 1]
        segments[i, 1] = segment_list[i]
    return segments, classes
def segments_to_labels(start_times, end_times, labels, window):
    """Convert variable-length annotated segments to fix-sized class labels.

    ARGUMENTS:
     - start_times: segment start points (in seconds)
     - end_times:   segment endpoints (in seconds)
     - labels:      segment labels (strings)
     - window:      fix-sized window (in seconds)
    RETURNS:
     - flags:       np array of class indices (one per window)
     - class_names: list of class names (strings)
    """
    flags = []
    # Order-preserving de-duplication. Fix: list(set(labels)) produced a
    # nondeterministic class order across runs (string-hash randomization),
    # which made the numeric flags non-reproducible.
    class_names = list(dict.fromkeys(labels))
    index = window / 2.0
    # label every window center with the segment it falls into
    while index < end_times[-1]:
        for i in range(len(start_times)):
            if start_times[i] < index <= end_times[i]:
                break
        flags.append(class_names.index(labels[i]))
        index += window
    return np.array(flags), class_names
def compute_metrics(confusion_matrix, class_names):
    """Compute per-class recall, precision and f1 from a confusion matrix.

    Rows of the matrix are ground-truth classes, columns are predictions.
    Returns three parallel lists (recall, precision, f1); all three are
    empty when class_names does not match the matrix size.
    """
    f1 = []
    recall = []
    precision = []
    n_classes = confusion_matrix.shape[0]
    if len(class_names) != n_classes:
        print("Error in computePreRec! Confusion matrix and class_names "
              "list must be of the same size!")
    else:
        for i in range(n_classes):
            # precision over the predicted column, recall over the GT row
            pre = confusion_matrix[i, i] / np.sum(confusion_matrix[:, i])
            rec = confusion_matrix[i, i] / np.sum(confusion_matrix[i, :])
            precision.append(pre)
            recall.append(rec)
            f1.append(2 * pre * rec / (pre + rec))
    return recall, precision, f1
def read_segmentation_gt(gt_file):
    """Read a segmentation ground-truth file.

    The file is a simple tab-separated CSV with one segment per row:
    <segment start>\t<segment end>\t<class label>

    ARGUMENTS:
     - gt_file: path of the CSV segment file
    RETURNS:
     - np array of segment start positions
     - np array of segment ending positions
     - list of respective class labels (strings)
    """
    start_times = []
    end_times = []
    labels = []
    with open(gt_file, 'rt') as f_handle:
        for row in csv.reader(f_handle, delimiter='\t'):
            # rows without exactly three fields are silently skipped
            if len(row) != 3:
                continue
            start, end, label = row
            start_times.append(float(start))
            end_times.append(float(end))
            labels.append(label)
    return np.array(start_times), np.array(end_times), labels
def plot_segmentation_results(flags_ind, flags_ind_gt, class_names, mt_step,
                              evaluate_only=False):
    """
    This function plots statistics on the classification-segmentation results
    produced either by the fix-sized supervised method or the HMM method.
    It also computes the overall accuracy achieved by the respective method
    if ground-truth is available.

    ARGUMENTS:
     - flags_ind:     np array of predicted class indices (one per window)
     - flags_ind_gt:  np array of ground-truth class indices (may be empty)
     - class_names:   list of class names
     - mt_step:       mid-term window step (in seconds)
     - evaluate_only: if True, skip all plotting and only compute accuracy

    RETURNS:
     - accuracy in [0, 1], or -1 when no ground truth was provided
    """
    flags = [class_names[int(f)] for f in flags_ind]
    segments, classes = labels_to_segments(flags, mt_step)
    # window-level accuracy over the overlapping part of the two sequences
    min_len = min(flags_ind.shape[0], flags_ind_gt.shape[0])
    if min_len > 0:
        accuracy = np.sum(flags_ind[0:min_len] ==
                          flags_ind_gt[0:min_len]) / float(min_len)
    else:
        accuracy = -1
    if not evaluate_only:
        duration = segments[-1, 1]
        s_percentages = np.zeros((len(class_names), ))
        percentages = np.zeros((len(class_names), ))
        av_durations = np.zeros((len(class_names), ))
        # total duration covered by each class
        for i_seg in range(segments.shape[0]):
            s_percentages[class_names.index(classes[i_seg])] += \
                (segments[i_seg, 1]-segments[i_seg, 0])
        # per-class percentage of total duration and mean segment duration
        for i in range(s_percentages.shape[0]):
            percentages[i] = 100.0 * s_percentages[i] / duration
            class_sum = sum(1 for c in classes if c == class_names[i])
            if class_sum > 0:
                av_durations[i] = s_percentages[i] / class_sum
            else:
                av_durations[i] = 0.0
        for i in range(percentages.shape[0]):
            print(class_names[i], percentages[i], av_durations[i])
        font = {'size': 10}
        plt.rc('font', **font)
        # top subplot: predicted labels (solid) vs ground truth (dashed red)
        fig = plt.figure()
        ax1 = fig.add_subplot(211)
        ax1.set_yticks(np.array(range(len(class_names))))
        ax1.axis((0, duration, -1, len(class_names)))
        ax1.set_yticklabels(class_names)
        ax1.plot(np.array(range(len(flags_ind))) * mt_step +
                 mt_step / 2.0, flags_ind)
        if flags_ind_gt.shape[0] > 0:
            ax1.plot(np.array(range(len(flags_ind_gt))) * mt_step +
                     mt_step / 2.0, flags_ind_gt + 0.05, '--r')
        plt.xlabel("time (seconds)")
        if accuracy >= 0:
            plt.title('Accuracy = {0:.1f}%'.format(100.0 * accuracy))
        # bottom-left subplot: percentage of total duration per class
        ax2 = fig.add_subplot(223)
        plt.title("Classes percentage durations")
        ax2.axis((0, len(class_names) + 1, 0, 100))
        ax2.set_xticks(np.array(range(len(class_names) + 1)))
        ax2.set_xticklabels([" "] + class_names)
        print(np.array(range(len(class_names))), percentages)
        ax2.bar(np.array(range(len(class_names))) + 0.5, percentages)
        # bottom-right subplot: average segment duration per class
        ax3 = fig.add_subplot(224)
        plt.title("Segment average duration per class")
        ax3.axis((0, len(class_names)+1, 0, av_durations.max()))
        ax3.set_xticks(np.array(range(len(class_names) + 1)))
        ax3.set_xticklabels([" "] + class_names)
        ax3.bar(np.array(range(len(class_names))) + 0.5, av_durations)
        fig.tight_layout()
        plt.show()
    return accuracy
def evaluate_speaker_diarization(labels, labels_gt):
    """Evaluate a diarization result against ground-truth labels.

    Returns the (cluster purity, speaker purity) pair, both in [0, 1].
    """
    # keep only the overlapping portion of the two label sequences
    min_len = min(labels.shape[0], labels_gt.shape[0])
    labels = labels[0:min_len]
    labels_gt = labels_gt[0:min_len]

    unique_flags = np.unique(labels)
    unique_flags_gt = np.unique(labels_gt)

    # contingency table: rows = predicted clusters, cols = GT speakers
    contingency = np.zeros((unique_flags.shape[0], unique_flags_gt.shape[0]))
    for idx in range(min_len):
        row = int(np.nonzero(unique_flags == labels[idx])[0])
        col = int(np.nonzero(unique_flags_gt == labels_gt[idx])[0])
        contingency[row, col] += 1.0

    speaker_totals = np.sum(contingency, axis=0)  # per GT speaker
    cluster_totals = np.sum(contingency, axis=1)  # per predicted cluster
    grand_total = np.sum(contingency)

    # purity of each cluster / each speaker (vectorized max over rows/cols)
    purity_clust = np.max(contingency, axis=1) / cluster_totals
    purity_speak = np.max(contingency, axis=0) / speaker_totals

    # weighted averages over the whole sequence
    purity_cluster_m = np.sum(purity_clust * cluster_totals) / grand_total
    purity_speaker_m = np.sum(purity_speak * speaker_totals) / grand_total
    return purity_cluster_m, purity_speaker_m
def train_hmm_compute_statistics(features, labels):
    """
    Compute the statistics used to train an HMM joint
    segmentation-classification model from sequential features and labels.

    ARGUMENTS:
     - features: np matrix of feature vectors (n_dims x n_wins)
     - labels:   np array of class indices (n_wins x 1)
    RETURNS:
     - class_priors:         prior class probabilities (n_classes x 1)
     - transmutation_matrix: transition matrix (n_classes x n_classes)
     - means:                per-class means (n_classes x n_dims)
     - cov:                  per-class standard deviations (n_classes x n_dims)
    """
    unique_labels = np.unique(labels)
    n_comps = len(unique_labels)
    n_feats = features.shape[0]

    # truncate labels if there are fewer feature vectors than labels
    if features.shape[1] < labels.shape[0]:
        print("trainHMM warning: number of short-term feature vectors "
              "must be greater or equal to the labels length!")
        labels = labels[0:features.shape[1]]

    # prior probabilities: normalized per-class label counts
    class_priors = np.zeros((n_comps,))
    for i, u_label in enumerate(unique_labels):
        class_priors[i] = np.count_nonzero(labels == u_label)
    class_priors = class_priors / class_priors.sum()

    # transition matrix: count consecutive label pairs, then row-normalize
    transmutation_matrix = np.zeros((n_comps, n_comps))
    for i in range(labels.shape[0] - 1):
        transmutation_matrix[int(labels[i]), int(labels[i + 1])] += 1
    for i in range(n_comps):
        transmutation_matrix[i, :] /= transmutation_matrix[i, :].sum()

    # per-class mean and standard deviation of the features
    # (diagonal-covariance HMM: only per-dimension deviations are kept;
    # use np.cov over the class columns instead for full covariances)
    means = np.zeros((n_comps, n_feats))
    cov = np.zeros((n_comps, n_feats))
    for i, u_label in enumerate(unique_labels):
        class_cols = np.nonzero(labels == u_label)[0]
        means[i, :] = features[:, class_cols].mean(axis=1)
        cov[i, :] = np.std(features[:, class_cols], axis=1)
    return class_priors, transmutation_matrix, means, cov
def train_hmm_from_file(wav_file, gt_file, hmm_model_name, mid_window, mid_step):
    """
    This function trains a HMM model for segmentation-classification
    using a single annotated audio file
    ARGUMENTS:
     - wav_file:        the path of the audio filename
     - gt_file:         the path of the ground truth filename
                       (a csv file of the form <segment start in seconds>,
                       <segment end in seconds>,<segment label> in each row
     - hmm_model_name:  the name of the HMM model file to be stored
     - mid_window:      mid-term window size (in seconds)
     - mid_step:        mid-term window step (in seconds)
    RETURNS:
     - hmm:            an object to the resulting HMM
     - class_names:    a list of class_names

    After training, hmm and class_names, along with the mid_window and
    mid_step values, are stored in the hmm_model_name file.
    """
    # read the annotation and convert segments to fix-sized window labels
    seg_start, seg_end, seg_labs = read_segmentation_gt(gt_file)
    flags, class_names = segments_to_labels(seg_start, seg_end, seg_labs, mid_step)
    # extract mid-term features (50 msec short-term window and step)
    sampling_rate, signal = audioBasicIO.read_audio_file(wav_file)
    features, _, _ = \
        mtf.mid_feature_extraction(signal, sampling_rate,
                                   mid_window * sampling_rate,
                                   mid_step * sampling_rate,
                                   round(sampling_rate * 0.050),
                                   round(sampling_rate * 0.050))
    # estimate priors, transitions and per-class gaussian parameters
    class_priors, transumation_matrix, means, cov = \
        train_hmm_compute_statistics(features, flags)
    # build a diagonal-covariance Gaussian HMM directly from the statistics
    # (no EM training; the supervised estimates above are assigned as-is)
    hmm = hmmlearn.hmm.GaussianHMM(class_priors.shape[0], "diag")
    hmm.covars_ = cov
    hmm.means_ = means
    hmm.startprob_ = class_priors
    hmm.transmat_ = transumation_matrix
    save_hmm(hmm_model_name, hmm, class_names, mid_window, mid_step)
    return hmm, class_names
def train_hmm_from_directory(folder_path, hmm_model_name, mid_window, mid_step):
    """
    This function trains a HMM model for segmentation-classification using
    a directory where WAV files and .segments (ground-truth files) are stored
    ARGUMENTS:
     - folder_path:     the path of the data directory
     - hmm_model_name:  the name of the HMM model file to be stored
     - mid_window:      mid-term window size (in seconds)
     - mid_step:        mid-term window step (in seconds)
    RETURNS:
     - hmm:             an object to the resulting HMM
     - class_names_all: a list of class_names

    After training, hmm and class_names_all, along with the mid_window
    and mid_step values, are stored in the hmm_model_name file.
    """
    flags_all = np.array([])
    class_names_all = []
    for i, f in enumerate(glob.glob(folder_path + os.sep + '*.wav')):
        # for each WAV file with a sibling .segments annotation
        wav_file = f
        gt_file = f.replace('.wav', '.segments')
        if os.path.isfile(gt_file):
            seg_start, seg_end, seg_labs = read_segmentation_gt(gt_file)
            flags, class_names = \
                segments_to_labels(seg_start, seg_end, seg_labs, mid_step)
            for c in class_names:
                # update class names:
                if c not in class_names_all:
                    class_names_all.append(c)
            # extract mid-term features (50 msec short-term window/step)
            sampling_rate, signal = audioBasicIO.read_audio_file(wav_file)
            feature_vector, _, _ = \
                mtf.mid_feature_extraction(signal, sampling_rate,
                                           mid_window * sampling_rate,
                                           mid_step * sampling_rate,
                                           round(sampling_rate * 0.050),
                                           round(sampling_rate * 0.050))
            # align features and labels to the shorter of the two
            flag_len = len(flags)
            feat_cols = feature_vector.shape[1]
            min_sm = min(feat_cols, flag_len)
            feature_vector = feature_vector[:, 0:min_sm]
            flags = flags[0:min_sm]
            flags_new = []
            # append features and labels
            # NOTE(review): `class_names_all.index(class_names_all[flags[j]])`
            # indexes the GLOBAL class list with a per-file flag; it looks
            # like `class_names[flags[j]]` was intended — remapping is likely
            # broken when per-file class order differs. Confirm upstream.
            for j, fl in enumerate(flags):
                flags_new.append(class_names_all.index(class_names_all[flags[j]]))
            flags_all = np.append(flags_all, np.array(flags_new))
            if i == 0:
                f_all = feature_vector
            else:
                f_all = np.concatenate((f_all, feature_vector), axis=1)

    # compute HMM statistics
    class_priors, transmutation_matrix, means, cov = \
        train_hmm_compute_statistics(f_all, flags_all)
    # train the HMM (supervised estimates are assigned directly; no EM)
    hmm = hmmlearn.hmm.GaussianHMM(class_priors.shape[0], "diag")

    hmm.covars_ = cov
    hmm.means_ = means
    hmm.startprob_ = class_priors
    hmm.transmat_ = transmutation_matrix

    save_hmm(hmm_model_name, hmm, class_names_all, mid_window, mid_step)

    return hmm, class_names_all
def save_hmm(hmm_model_name, model, classes, mid_window, mid_step):
    """Pickle an HMM model together with its class names and the
    mid-term window/step sizes used during feature extraction.

    The four objects are dumped sequentially to the same file, in the
    order expected by hmm_segmentation when loading them back.
    """
    payload = (model, classes, mid_window, mid_step)
    with open(hmm_model_name, "wb") as f_handle:
        for item in payload:
            cpickle.dump(item, f_handle, protocol=cpickle.HIGHEST_PROTOCOL)
def hmm_segmentation(audio_file, hmm_model_name, plot_results=False,
                     gt_file=""):
    """Segment a single audio file using a pre-trained (pickled) HMM.

    ARGUMENTS:
     - audio_file:      path of the WAV file to segment
     - hmm_model_name:  path of the pickled model produced by save_hmm
     - plot_results:    forwarded to load_ground_truth (controls plotting)
     - gt_file:         optional .segments ground-truth file for evaluation
    RETURNS:
     - labels:       predicted class index per mid-term window
     - class_names:  class names stored with the model
     - accuracy:     accuracy vs ground truth (0 when gt_file is missing)
     - cm:           confusion matrix vs ground truth (empty when missing)
    """
    sampling_rate, signal = audioBasicIO.read_audio_file(audio_file)
    # load the model plus the mid-term window/step it was trained with
    # (order must match the dumps in save_hmm)
    with open(hmm_model_name, "rb") as f_handle:
        hmm = cpickle.load(f_handle)
        class_names = cpickle.load(f_handle)
        mid_window = cpickle.load(f_handle)
        mid_step = cpickle.load(f_handle)
    # mid-term features with a fixed 50 msec short-term window/step
    features, _, _ = \
        mtf.mid_feature_extraction(signal, sampling_rate,
                                   mid_window * sampling_rate,
                                   mid_step * sampling_rate,
                                   round(sampling_rate * 0.050),
                                   round(sampling_rate * 0.050))
    # apply model
    labels = hmm.predict(features.T)
    labels_gt, class_names_gt, accuracy, cm = \
        load_ground_truth(gt_file, labels, class_names, mid_step, plot_results)
    return labels, class_names, accuracy, cm
def load_ground_truth_segments(gt_file, mt_step):
    """Read a .segments ground-truth file and discretize it to one class
    index per mid-term window.

    ARGUMENTS:
     - gt_file:  path of the tab-separated ground-truth file
     - mt_step:  mid-term window step in seconds
    RETURNS:
     - labels:       np array of per-window class indices
     - class_names:  list of class names (labels index into it)
    """
    seg_start, seg_end, seg_labs = read_segmentation_gt(gt_file)
    labels, class_names = segments_to_labels(seg_start, seg_end, seg_labs,
                                             mt_step)
    labels_temp = []
    for index, label in enumerate(labels):
        # "align" labels with GT
        # NOTE(review): this membership test is tautological -- it checks an
        # element of class_names against class_names itself -- so the -1
        # branch is unreachable and labels pass through unchanged; confirm
        # the intended alignment logic (probably against a second class list)
        if class_names[labels[index]] in class_names:
            labels_temp.append(class_names.index(class_names[
                                                     labels[index]]))
        else:
            labels_temp.append(-1)
    labels = np.array(labels_temp)
    return labels, class_names
def calculate_confusion_matrix(predictions, ground_truth, classes):
    """Accumulate a confusion matrix: rows are ground-truth class indices,
    columns are predicted class indices. If the two label arrays differ in
    length, the extra elements of the longer one are ignored."""
    size = len(classes)
    cm = np.zeros((size, size))
    for gt, pred in zip(ground_truth, predictions):
        cm[int(gt), int(pred)] += 1
    return cm
def mid_term_file_classification(input_file, model_name, model_type,
                                 plot_results=False, gt_file=""):
    """
    This function performs mid-term classification of an audio stream.
    Towards this end, supervised knowledge is used,
    i.e. a pre-trained classifier.
    ARGUMENTS:
        - input_file:        path of the input WAV file
        - model_name:        name of the classification model
        - model_type:        svm or knn depending on the classifier type
        - plot_results:      True if results are to be plotted using
                             matplotlib along with a set of statistics
        - gt_file:           optional .segments ground-truth file used for
                             evaluation
    RETURNS:
        - labels:            np array with one predicted class index per
                             mid-term window
        - class_names:       list of class names (labels index into it)
        - accuracy:          accuracy against the ground truth
                             (0.0 when no ground truth is available)
        - cm:                confusion matrix against the ground truth
                             (empty array when no ground truth is available)
    """
    labels = []
    accuracy = 0.0
    class_names = []
    cm = np.array([])
    if not os.path.isfile(model_name):
        print("mtFileClassificationError: input model_type not found!")
        return labels, class_names, accuracy, cm
    # Load classifier:
    if model_type == "knn":
        classifier, mean, std, class_names, mt_win, mid_step, st_win, \
            st_step, compute_beat = at.load_model_knn(model_name)
    else:
        classifier, mean, std, class_names, mt_win, mid_step, st_win, \
            st_step, compute_beat = at.load_model(model_name)
    # beat features are long-term and incompatible with segmentation
    if compute_beat:
        print("Model " + model_name + " contains long-term music features "
                                      "(beat etc) and cannot be used in "
                                      "segmentation")
        return labels, class_names, accuracy, cm
    # load input file
    sampling_rate, signal = audioBasicIO.read_audio_file(input_file)
    # could not read file
    if sampling_rate == 0:
        return labels, class_names, accuracy, cm
    # convert stereo (if) to mono
    signal = audioBasicIO.stereo_to_mono(signal)
    # mid-term feature extraction:
    mt_feats, _, _ = \
        mtf.mid_feature_extraction(signal, sampling_rate,
                                   mt_win * sampling_rate,
                                   mid_step * sampling_rate,
                                   round(sampling_rate * st_win),
                                   round(sampling_rate * st_step))
    posterior_matrix = []
    # for each feature vector (i.e. for each fix-sized segment):
    for col_index in range(mt_feats.shape[1]):
        # normalize current feature v
        feature_vector = (mt_feats[:, col_index] - mean) / std
        # classify vector:
        label_predicted, posterior = \
            at.classifier_wrapper(classifier, model_type, feature_vector)
        labels.append(label_predicted)
        # update probability matrix
        posterior_matrix.append(np.max(posterior))
    labels = np.array(labels)
    # convert fix-sized flags to segments and classes
    segs, classes = labels_to_segments(labels, mid_step)
    # force the last segment to end exactly at the signal duration
    segs[-1] = len(signal) / float(sampling_rate)
    # Load ground-truth (if available) and evaluate:
    labels_gt, class_names_gt, accuracy, cm = \
        load_ground_truth(gt_file, labels, class_names, mid_step, plot_results)
    return labels, class_names, accuracy, cm
def load_ground_truth(gt_file, labels, class_names, mid_step, plot_results):
    """Compare predicted per-window labels against a ground-truth file.

    ARGUMENTS:
     - gt_file:       path of the .segments file (may not exist)
     - labels:        predicted per-window class indices
     - class_names:   class names of the predictor (labels index into it)
     - mid_step:      mid-term window step in seconds
     - plot_results:  when True, plot_segmentation_results also renders plots
    RETURNS:
     - labels_gt:     per-window ground-truth indices (empty if no gt_file)
     - class_names:   the predictor class names, returned unchanged
     - accuracy:      window-level accuracy (0 if no gt_file)
     - cm:            confusion matrix over the ground-truth classes
                      (empty if no gt_file)
    """
    accuracy = 0
    cm = np.array([])
    labels_gt = np.array([])
    if os.path.isfile(gt_file):
        # load ground truth and class names
        labels_gt, class_names_gt = load_ground_truth_segments(gt_file,
                                                               mid_step)
        # map predicted labels to ground truth class names
        # Note: if a predicted label does not belong to the ground truth
        # classes --> -1
        labels_new = []
        for il, l in enumerate(labels):
            if class_names[int(l)] in class_names_gt:
                labels_new.append(class_names_gt.index(class_names[int(l)]))
            else:
                labels_new.append(-1)
        labels_new = np.array(labels_new)
        cm = calculate_confusion_matrix(labels_new, labels_gt, class_names_gt)
        # NOTE(review): plot_segmentation_results receives the predictor's
        # class_names while labels_new are indices into class_names_gt --
        # confirm the two lists align when plotting
        accuracy = plot_segmentation_results(labels_new, labels_gt,
                                             class_names, mid_step, not plot_results)
        if accuracy >= 0:
            print("Overall Accuracy: {0:.2f}".format(accuracy))
    return labels_gt, class_names, accuracy, cm
def evaluate_segmentation_classification_dir(dir_name, model_name, method_name):
    """Evaluate a segmentation model on every WAV file of a directory.

    Each WAV file must have a same-named .segments ground-truth file.
    Standard classifiers (svm/knn/...) go through
    mid_term_file_classification; any other method_name is treated as an
    HMM model and goes through hmm_segmentation. Aggregated confusion
    matrix and accuracy statistics are printed.

    ARGUMENTS:
     - dir_name:     directory containing the WAV + .segments pairs
     - model_name:   path of the trained model
     - method_name:  classifier type ("svm", "knn", ...) or HMM
    """
    accuracies = []
    class_names = []
    cm_total = np.array([])
    for index, wav_file in enumerate(glob.glob(dir_name + os.sep + '*.wav')):
        print(wav_file)
        gt_file = wav_file.replace('.wav', '.segments')
        if method_name.lower() in ["svm", "svm_rbf", "knn", "randomforest",
                                   "gradientboosting", "extratrees"]:
            flags_ind, class_names, accuracy, cm_temp = \
                mid_term_file_classification(wav_file, model_name, method_name,
                                             False, gt_file)
        else:
            flags_ind, class_names, accuracy, cm_temp = \
                hmm_segmentation(wav_file, model_name, False, gt_file)
        if accuracy > 0:
            # NOTE(review): cm_total is only (re)initialised when index == 0;
            # if the first file yields accuracy 0 the later
            # "cm_total + cm_temp" operates on an empty array -- confirm
            if not index:
                cm_total = np.copy(cm_temp)
            else:
                cm_total = cm_total + cm_temp
            accuracies.append(accuracy)
            print(cm_temp, class_names)
            print(cm_total)
    if len(cm_total.shape) > 1:
        # normalize the aggregated confusion matrix and report statistics
        cm_total = cm_total / np.sum(cm_total)
        rec, pre, f1 = compute_metrics(cm_total, class_names)
        print(" - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ")
        print("Average Accuracy: {0:.1f}".
              format(100.0*np.array(accuracies).mean()))
        print("Average recall: {0:.1f}".format(100.0*np.array(rec).mean()))
        print("Average precision: {0:.1f}".format(100.0*np.array(pre).mean()))
        print("Average f1: {0:.1f}".format(100.0*np.array(f1).mean()))
        print("Median Accuracy: {0:.1f}".
              format(100.0*np.median(np.array(accuracies))))
        print("Min Accuracy: {0:.1f}".format(100.0*np.array(accuracies).min()))
        print("Max Accuracy: {0:.1f}".format(100.0*np.array(accuracies).max()))
    else:
        print("Confusion matrix was empty, accuracy for every file was 0")
def silence_removal(signal, sampling_rate, st_win, st_step, smooth_window=0.5,
                    weight=0.5, plot=False):
    """
    Event Detection (silence removal)
    ARGUMENTS:
         - signal:          the input audio signal
         - sampling_rate:   sampling frequency
         - st_win, st_step: window size and step in seconds
         - smooth_window:   (optional) smooth window (in seconds)
         - weight:          (optional) weight factor (0 < weight < 1);
                            the higher, the more strict
         - plot:            (optional) True if results are to be plotted
    RETURNS:
         - seg_limits:    list of segment limits in seconds (e.g [[0.1, 0.9],
                          [1.4, 3.0]] means that
                          the resulting segments are (0.1 - 0.9) seconds
                          and (1.4, 3.0) seconds
    """
    # clamp weight into the open interval (0, 1)
    if weight >= 1:
        weight = 0.99
    if weight <= 0:
        weight = 0.01
    # Step 1: feature extraction
    signal = audioBasicIO.stereo_to_mono(signal)
    st_feats, _ = stf.feature_extraction(signal, sampling_rate,
                                         st_win * sampling_rate,
                                         st_step * sampling_rate)
    # Step 2: train binary svm classifier of low vs high energy frames
    # keep only the energy short-term sequence (2nd feature)
    st_energy = st_feats[1, :]
    en = np.sort(st_energy)
    # number of 10% of the total short-term windows
    st_windows_fraction = int(len(en) / 10)
    # compute "lower" 10% energy threshold
    low_threshold = np.mean(en[0:st_windows_fraction]) + 1e-15
    # compute "higher" 10% energy threshold
    # NOTE(review): the slice ends at -1 and therefore excludes the single
    # highest-energy window -- confirm this is intended
    high_threshold = np.mean(en[-st_windows_fraction:-1]) + 1e-15
    # get all features that correspond to low energy
    low_energy = st_feats[:, np.where(st_energy <= low_threshold)[0]]
    # get all features that correspond to high energy
    high_energy = st_feats[:, np.where(st_energy >= high_threshold)[0]]
    # form the binary classification task and ...
    features = [low_energy.T, high_energy.T]
    # normalize and train the respective svm probabilistic model
    # (ONSET vs SILENCE)
    features_norm, mean, std = at.normalize_features(features)
    svm = at.train_svm(features_norm, 1.0)
    # Step 3: compute onset probability based on the trained svm
    prob_on_set = []
    for index in range(st_feats.shape[1]):
        # for each frame
        cur_fv = (st_feats[:, index] - mean) / std
        # get svm probability (that it belongs to the ONSET class)
        prob_on_set.append(svm.predict_proba(cur_fv.reshape(1, -1))[0][1])
    prob_on_set = np.array(prob_on_set)
    # smooth probability:
    prob_on_set = smooth_moving_avg(prob_on_set, smooth_window / st_step)
    # Step 4A: detect onset frame indices:
    prog_on_set_sort = np.sort(prob_on_set)
    # find probability Threshold as a weighted average
    # of top 10% and lower 10% of the values
    nt = int(prog_on_set_sort.shape[0] / 10)
    threshold = (np.mean((1 - weight) * prog_on_set_sort[0:nt]) +
                 weight * np.mean(prog_on_set_sort[-nt::]))
    max_indices = np.where(prob_on_set > threshold)[0]
    # get the indices of the frames that satisfy the thresholding
    index = 0
    seg_limits = []
    time_clusters = []
    # Step 4B: group frame indices to onset segments
    # (consecutive indices that are at most 2 frames apart form one segment)
    while index < len(max_indices):
        # for each of the detected onset indices
        cur_cluster = [max_indices[index]]
        if index == len(max_indices)-1:
            break
        while max_indices[index+1] - cur_cluster[-1] <= 2:
            cur_cluster.append(max_indices[index+1])
            index += 1
            if index == len(max_indices)-1:
                break
        index += 1
        time_clusters.append(cur_cluster)
        seg_limits.append([cur_cluster[0] * st_step,
                           cur_cluster[-1] * st_step])
    # Step 5: Post process: remove very small segments:
    min_duration = 0.2
    seg_limits_2 = []
    for s_lim in seg_limits:
        if s_lim[1] - s_lim[0] > min_duration:
            seg_limits_2.append(s_lim)
    seg_limits = seg_limits_2
    if plot:
        time_x = np.arange(0, signal.shape[0] / float(sampling_rate), 1.0 /
                           sampling_rate)
        plt.subplot(2, 1, 1)
        plt.plot(time_x, signal)
        for s_lim in seg_limits:
            plt.axvline(x=s_lim[0], color='red')
            plt.axvline(x=s_lim[1], color='red')
        plt.subplot(2, 1, 2)
        plt.plot(np.arange(0, prob_on_set.shape[0] * st_step, st_step),
                 prob_on_set)
        plt.title('Signal')
        for s_lim in seg_limits:
            plt.axvline(x=s_lim[0], color='red')
            plt.axvline(x=s_lim[1], color='red')
        plt.title('svm Probability')
        plt.show()
    return seg_limits
def speaker_diarization(filename, n_speakers, mid_window=2.0, mid_step=0.2,
                        short_window=0.05, lda_dim=35, plot_res=False):
    """
    Speaker diarization: assign a speaker-cluster id to every mid-term
    window of a recording.
    ARGUMENTS:
        - filename:        the name of the WAV file to be analyzed
        - n_speakers:      the number of speakers (clusters) in
                           the recording (<=0 for unknown)
        - mid_window:      (opt) mid-term window size
        - mid_step:        (opt) mid-term window step
        - short_window:    (opt) short-term window size
        - lda_dim:         (opt) LDA dimension (0 for no LDA)
        - plot_res:        (opt) False for not plotting the results,
                           True for plotting
    RETURNS:
        - cls:             np array with one speaker-cluster id per
                           mid-term window
    """
    sampling_rate, signal = audioBasicIO.read_audio_file(filename)
    signal = audioBasicIO.stereo_to_mono(signal)
    duration = len(signal) / sampling_rate
    # load the pre-trained kNN models whose posteriors are appended to the
    # raw mid-term features below
    base_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                            "data/models")
    classifier_all, mean_all, std_all, class_names_all, _, _, _, _, _ = \
        at.load_model_knn(os.path.join(base_dir, "knn_speaker_10"))
    classifier_fm, mean_fm, std_fm, class_names_fm, _, _, _, _, _ = \
        at.load_model_knn(os.path.join(base_dir, "knn_speaker_male_female"))
    mid_feats, st_feats, _ = \
        mtf.mid_feature_extraction(signal, sampling_rate,
                                   mid_window * sampling_rate,
                                   mid_step * sampling_rate,
                                   round(sampling_rate * short_window),
                                   round(sampling_rate * short_window * 0.5))
    # append the two kNN posterior vectors to every mid-term feature vector
    mid_term_features = np.zeros((mid_feats.shape[0] + len(class_names_all) +
                                  len(class_names_fm), mid_feats.shape[1]))
    for index in range(mid_feats.shape[1]):
        feature_norm_all = (mid_feats[:, index] - mean_all) / std_all
        feature_norm_fm = (mid_feats[:, index] - mean_fm) / std_fm
        _, p1 = at.classifier_wrapper(classifier_all, "knn", feature_norm_all)
        _, p2 = at.classifier_wrapper(classifier_fm, "knn", feature_norm_fm)
        start = mid_feats.shape[0]
        end = mid_feats.shape[0] + len(class_names_all)
        mid_term_features[0:mid_feats.shape[0], index] = mid_feats[:, index]
        mid_term_features[start:end, index] = p1 + 1e-4
        mid_term_features[end::, index] = p2 + 1e-4
    mid_feats = mid_term_features  # TODO
    # hand-picked subset of features used for clustering
    feature_selected = [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 41,
                        42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53]
    mid_feats = mid_feats[feature_selected, :]
    mid_feats_norm, mean, std = at.normalize_features([mid_feats.T])
    mid_feats_norm = mid_feats_norm[0].T
    n_wins = mid_feats.shape[1]
    # remove outliers:
    dist_all = np.sum(distance.squareform(distance.pdist(mid_feats_norm.T)),
                      axis=0)
    m_dist_all = np.mean(dist_all)
    i_non_outliers = np.nonzero(dist_all < 1.2 * m_dist_all)[0]
    # TODO: Combine energy threshold for outlier removal:
    # EnergyMin = np.min(mt_feats[1,:])
    # EnergyMean = np.mean(mt_feats[1,:])
    # Thres = (1.5*EnergyMin + 0.5*EnergyMean) / 2.0
    # i_non_outliers = np.nonzero(mt_feats[1,:] > Thres)[0]
    # print i_non_outliers
    mt_feats_norm_or = mid_feats_norm
    mid_feats_norm = mid_feats_norm[:, i_non_outliers]
    # LDA dimensionality reduction:
    if lda_dim > 0:
        # extract mid-term features with minimum step:
        window_ratio = int(round(mid_window / short_window))
        step_ratio = int(round(short_window / short_window))
        mt_feats_to_red = []
        num_of_features = len(st_feats)
        num_of_stats = 2
        for index in range(num_of_stats * num_of_features):
            mt_feats_to_red.append([])
        # for each of the short-term features:
        for index in range(num_of_features):
            cur_pos = 0
            feat_len = len(st_feats[index])
            while cur_pos < feat_len:
                n1 = cur_pos
                n2 = cur_pos + window_ratio
                if n2 > feat_len:
                    n2 = feat_len
                short_features = st_feats[index][n1:n2]
                mt_feats_to_red[index].append(np.mean(short_features))
                mt_feats_to_red[index + num_of_features].\
                    append(np.std(short_features))
                cur_pos += step_ratio
        mt_feats_to_red = np.array(mt_feats_to_red)
        # append the same kNN posteriors as above to the reduced features
        mt_feats_to_red_2 = np.zeros((mt_feats_to_red.shape[0] +
                                      len(class_names_all) +
                                      len(class_names_fm),
                                      mt_feats_to_red.shape[1]))
        limit = mt_feats_to_red.shape[0] + len(class_names_all)
        for index in range(mt_feats_to_red.shape[1]):
            feature_norm_all = (mt_feats_to_red[:, index] - mean_all) / std_all
            feature_norm_fm = (mt_feats_to_red[:, index] - mean_fm) / std_fm
            _, p1 = at.classifier_wrapper(classifier_all, "knn",
                                          feature_norm_all)
            _, p2 = at.classifier_wrapper(classifier_fm, "knn", feature_norm_fm)
            mt_feats_to_red_2[0:mt_feats_to_red.shape[0], index] = \
                mt_feats_to_red[:, index]
            mt_feats_to_red_2[mt_feats_to_red.shape[0]:limit, index] = p1 + 1e-4
            mt_feats_to_red_2[limit::, index] = p2 + 1e-4
        mt_feats_to_red = mt_feats_to_red_2
        mt_feats_to_red = mt_feats_to_red[feature_selected, :]
        mt_feats_to_red, mean, std = at.normalize_features([mt_feats_to_red.T])
        mt_feats_to_red = mt_feats_to_red[0].T
        # pseudo-labels (one per lda_step second) used to fit the LDA
        labels = np.zeros((mt_feats_to_red.shape[1], ))
        lda_step = 1.0
        lda_step_ratio = lda_step / short_window
        for index in range(labels.shape[0]):
            labels[index] = int(index * short_window / lda_step_ratio)
        clf = sklearn.discriminant_analysis.\
            LinearDiscriminantAnalysis(n_components=lda_dim)
        clf.fit(mt_feats_to_red.T, labels)
        mid_feats_norm = (clf.transform(mid_feats_norm.T)).T
    if n_speakers <= 0:
        # unknown speaker count: try 2..9 clusters and keep the best
        s_range = range(2, 10)
    else:
        s_range = [n_speakers]
    cluster_labels = []
    sil_all = []
    cluster_centers = []
    for speakers in s_range:
        k_means = sklearn.cluster.KMeans(n_clusters=speakers)
        k_means.fit(mid_feats_norm.T)
        cls = k_means.labels_
        means = k_means.cluster_centers_
        cluster_labels.append(cls)
        cluster_centers.append(means)
        sil_1, sil_2 = [], []
        for c in range(speakers):
            # for each speaker (i.e. for each extracted cluster)
            clust_per_cent = np.nonzero(cls == c)[0].shape[0] / float(len(cls))
            if clust_per_cent < 0.020:
                sil_1.append(0.0)
                sil_2.append(0.0)
            else:
                # get subset of feature vectors
                mt_feats_norm_temp = mid_feats_norm[:, cls == c]
                # compute average distance between samples
                # that belong to the cluster (a values)
                dist = distance.pdist(mt_feats_norm_temp.T)
                sil_1.append(np.mean(dist)*clust_per_cent)
                sil_temp = []
                for c2 in range(speakers):
                    # compute distances from samples of other clusters
                    if c2 != c:
                        clust_per_cent_2 = np.nonzero(cls == c2)[0].shape[0] /\
                                           float(len(cls))
                        mid_features_temp = mid_feats_norm[:, cls == c2]
                        dist = distance.cdist(mt_feats_norm_temp.T,
                                              mid_features_temp.T)
                        sil_temp.append(np.mean(dist)*(clust_per_cent
                                                       + clust_per_cent_2)/2.0)
                sil_temp = np.array(sil_temp)
                # ... and keep the minimum value (i.e.
                # the distance from the "nearest" cluster)
                sil_2.append(min(sil_temp))
        sil_1 = np.array(sil_1)
        sil_2 = np.array(sil_2)
        sil = []
        for c in range(speakers):
            # for each cluster (speaker) compute silhouette
            sil.append((sil_2[c] - sil_1[c]) / (max(sil_2[c], sil_1[c]) + 1e-5))
        # keep the AVERAGE SILLOUETTE
        sil_all.append(np.mean(sil))
    imax = int(np.argmax(sil_all))
    # optimal number of clusters
    num_speakers = s_range[imax]
    # generate the final set of cluster labels
    # (important: need to retrieve the outlier windows:
    # this is achieved by giving them the value of their
    # nearest non-outlier window)
    cls = np.zeros((n_wins,))
    for index in range(n_wins):
        j = np.argmin(np.abs(index-i_non_outliers))
        cls[index] = cluster_labels[imax][j]
    # Post-process method 1: hmm smoothing
    for index in range(1):
        # hmm training
        start_prob, transmat, means, cov = \
            train_hmm_compute_statistics(mt_feats_norm_or, cls)
        hmm = hmmlearn.hmm.GaussianHMM(start_prob.shape[0], "diag")
        hmm.startprob_ = start_prob
        hmm.transmat_ = transmat
        hmm.means_ = means
        hmm.covars_ = cov
        cls = hmm.predict(mt_feats_norm_or.T)
    # Post-process method 2: median filtering:
    cls = scipy.signal.medfilt(cls, 13)
    cls = scipy.signal.medfilt(cls, 11)
    class_names = ["speaker{0:d}".format(c) for c in range(num_speakers)]
    # load ground-truth if available
    gt_file = filename.replace('.wav', '.segments')
    # if groundtruth exists
    if os.path.isfile(gt_file):
        seg_start, seg_end, seg_labs = read_segmentation_gt(gt_file)
        flags_gt, class_names_gt = segments_to_labels(seg_start, seg_end,
                                                      seg_labs, mid_step)
    if plot_res:
        fig = plt.figure()
        if n_speakers > 0:
            ax1 = fig.add_subplot(111)
        else:
            ax1 = fig.add_subplot(211)
        ax1.set_yticks(np.array(range(len(class_names))))
        ax1.axis((0, duration, -1, len(class_names)))
        ax1.set_yticklabels(class_names)
        ax1.plot(np.array(range(len(cls))) * mid_step + mid_step / 2.0, cls)
    if os.path.isfile(gt_file):
        if plot_res:
            ax1.plot(np.array(range(len(flags_gt))) *
                     mid_step + mid_step / 2.0, flags_gt, 'r')
        # report cluster and speaker purity against the ground truth
        purity_cluster_m, purity_speaker_m = \
            evaluate_speaker_diarization(cls, flags_gt)
        print("{0:.1f}\t{1:.1f}".format(100 * purity_cluster_m,
                                        100 * purity_speaker_m))
        if plot_res:
            plt.title("Cluster purity: {0:.1f}% - "
                      "Speaker purity: {1:.1f}%".format(100 * purity_cluster_m,
                                                        100 * purity_speaker_m))
    if plot_res:
        plt.xlabel("time (seconds)")
        if n_speakers <= 0:
            plt.subplot(212)
            plt.plot(s_range, sil_all)
            plt.xlabel("number of clusters")
            plt.ylabel("average clustering's sillouette")
        plt.show()
    return cls
def speaker_diarization_evaluation(folder_name, lda_dimensions):
    """
    This function prints the cluster purity and speaker purity for
    each WAV file stored in a provided directory (.SEGMENT files
    are needed as ground-truth)
    ARGUMENTS:
        - folder_name:     the full path of the folder where the WAV and
                           segment (ground-truth) files are stored
        - lda_dimensions:  a list of LDA dimensions (0 for no LDA)
    """
    wav_files = sorted(glob.glob(os.path.join(folder_name, '*.wav')))
    # number of unique speakers per file, taken from the ground truth
    # (-1 when no ground-truth file exists)
    num_speakers = []
    for wav_file in wav_files:
        gt_file = wav_file.replace('.wav', '.segments')
        if not os.path.isfile(gt_file):
            num_speakers.append(-1)
            continue
        _, _, seg_labs = read_segmentation_gt(gt_file)
        num_speakers.append(len(set(seg_labs)))
    # run the diarization once per requested LDA dimension
    for dim in lda_dimensions:
        print("LDA = {0:d}".format(dim))
        for n_sp, wav_file in zip(num_speakers, wav_files):
            speaker_diarization(wav_file, n_sp, 2.0, 0.2, 0.05, dim,
                                plot_res=False)
def music_thumbnailing(signal, sampling_rate, short_window=1.0, short_step=0.5,
                       thumb_size=10.0, limit_1=0, limit_2=1):
    """
    This function detects instances of the most representative part of a
    music recording, also called "music thumbnails".
    A technique similar to the one proposed in [1], however a wider set of
    audio features is used instead of chroma features.
    In particular the following steps are followed:
     - Extract short-term audio features. Typical short-term window size:
       1 second
     - Compute the self-similarity matrix, i.e. all pairwise similarities
       between feature vectors
     - Apply a diagonal mask as a moving average filter on the values of the
       self-similarity matrix.
       The size of the mask is equal to the desirable thumbnail length.
     - Find the position of the maximum value of the new (filtered)
       self-similarity matrix. The audio segments that correspond to the
       diagonal around that position are the selected thumbnails
    ARGUMENTS:
     - signal:            input signal
     - sampling_rate:     sampling frequency
     - short_window:      window size (in seconds)
     - short_step:        window step (in seconds)
     - thumb_size:        desired thumbnail size (in seconds)
     - limit_1, limit_2:  relative (0..1) limits of the search area inside
                          the similarity matrix
    RETURNS:
     - A1:   beginning of 1st thumbnail (in seconds)
     - A2:   ending of 1st thumbnail (in seconds)
     - B1:   beginning of 2nd thumbnail (in seconds)
     - B2:   ending of 2nd thumbnail (in seconds)
     - the filtered self-similarity matrix
    USAGE EXAMPLE:
       import audioFeatureExtraction as aF
       [fs, x] = basicIO.readAudioFile(input_file)
       [A1, A2, B1, B2] = musicThumbnailing(x, fs)
    [1] Bartsch, M. A., & Wakefield, G. H. (2005). Audio thumbnailing
    of popular music using chroma-based representations.
    Multimedia, IEEE Transactions on, 7(1), 96-104.
    """
    signal = audioBasicIO.stereo_to_mono(signal)
    # feature extraction:
    st_feats, _ = stf.feature_extraction(signal, sampling_rate,
                                         sampling_rate * short_window,
                                         sampling_rate * short_step)
    # self-similarity matrix
    sim_matrix = self_similarity_matrix(st_feats)
    # moving filter: averaging along diagonals of thumbnail length
    m_filter = int(round(thumb_size / short_step))
    diagonal = np.eye(m_filter, m_filter)
    sim_matrix = scipy.signal.convolve2d(sim_matrix, diagonal, 'valid')
    # post-processing (remove main diagonal elements)
    min_sm = np.min(sim_matrix)
    for i in range(sim_matrix.shape[0]):
        for j in range(sim_matrix.shape[1]):
            if abs(i-j) < 5.0 / short_step or i > j:
                sim_matrix[i, j] = min_sm
    # find max position:
    # mask out everything outside the [limit_1, limit_2] relative range
    sim_matrix[0:int(limit_1 * sim_matrix.shape[0]), :] = min_sm
    sim_matrix[:, 0:int(limit_1 * sim_matrix.shape[0])] = min_sm
    sim_matrix[int(limit_2 * sim_matrix.shape[0])::, :] = min_sm
    sim_matrix[:, int(limit_2 * sim_matrix.shape[0])::] = min_sm
    rows, cols = np.unravel_index(sim_matrix.argmax(), sim_matrix.shape)
    i1 = rows
    i2 = rows
    j1 = cols
    j2 = cols
    # expand the detected maximum diagonally (towards the side with the
    # higher similarity) until the thumbnail length is reached
    while i2-i1 < m_filter:
        if i1 <= 0 or j1 <= 0 or i2 >= sim_matrix.shape[0]-2 or \
                j2 >= sim_matrix.shape[1]-2:
            break
        if sim_matrix[i1-1, j1-1] > sim_matrix[i2 + 1, j2 + 1]:
            i1 -= 1
            j1 -= 1
        else:
            i2 += 1
            j2 += 1
    return short_step * i1, short_step * i2, short_step * j1, short_step * j2, \
        sim_matrix
| 39.714407 | 83 | 0.598959 | from __future__ import print_function
import os
import csv
import glob
import scipy
import sklearn
import numpy as np
import hmmlearn.hmm
import sklearn.cluster
import pickle as cpickle
import matplotlib.pyplot as plt
from scipy.spatial import distance
import sklearn.discriminant_analysis
from pyAudioAnalysis import audioBasicIO
from pyAudioAnalysis import audioTrainTest as at
from pyAudioAnalysis import MidTermFeatures as mtf
from pyAudioAnalysis import ShortTermFeatures as stf
def smooth_moving_avg(signal, window=11):
    """Smooth a 1-D signal with a moving average of length *window*.

    The signal is extended by point-reflection at both ends before the
    convolution, so the returned array has the same length as the input.

    ARGUMENTS:
     - signal:  1-D numpy array
     - window:  averaging window length (cast to int)
    RETURNS:
     - the smoothed signal (same length as the input); the input is
       returned unchanged when window < 3
    RAISES:
     - ValueError if signal is not 1-D or is shorter than window
    """
    window = int(window)
    if signal.ndim != 1:
        # was: raise ValueError("") -- empty message gave no diagnostic
        raise ValueError("smooth_moving_avg only accepts 1-dimensional "
                         "arrays.")
    if signal.size < window:
        raise ValueError("Input vector needs to be bigger than window size.")
    if window < 3:
        return signal
    # mirror the edges (point-reflection) to limit boundary artifacts
    s = np.r_[2 * signal[0] - signal[window - 1::-1],
              signal, 2 * signal[-1] - signal[-1:-window:-1]]
    w = np.ones(window, 'd')
    y = np.convolve(w / w.sum(), s, mode='same')
    return y[window:-window + 1]
def self_similarity_matrix(feature_vectors):
    """Return the cosine self-similarity matrix of a feature sequence.

    ARGUMENTS:
     - feature_vectors:  feature matrix (n_features x n_windows)
    RETURNS:
     - square matrix where entry (i, j) is 1 - cosine distance between
       the normalized feature vectors of windows i and j
    """
    normalized, _, _ = at.normalize_features([feature_vectors.T])
    normalized = normalized[0].T
    cosine_distances = distance.squareform(
        distance.pdist(normalized.T, 'cosine'))
    return 1.0 - cosine_distances
def labels_to_segments(labels, window):
    """Merge consecutive identical fix-sized labels into segments.

    ARGUMENTS:
     - labels:  sequence of per-window labels
     - window:  window duration in seconds
    RETURNS:
     - segments:  (n_segments x 2) array of [start, end] times; for a
                  single-label input a flat [0, window] list is returned
                  instead (pre-existing behavior)
     - classes:   label of each segment
    """
    if len(labels) == 1:
        return [0, window], labels
    classes = []
    end_points = []
    pos = 0
    last = len(labels) - 1
    active = labels[pos]
    while pos < last:
        seg_class = active
        pos += 1
        # advance until the label changes or the sequence ends
        while labels[pos] == active and pos != last:
            pos += 1
        active = labels[pos]
        end_points.append(pos * window)
        classes.append(seg_class)
    # turn the segment end points into [start, end] rows
    segments = np.zeros((len(end_points), 2))
    for i, end in enumerate(end_points):
        if i:
            segments[i, 0] = end_points[i - 1]
        segments[i, 1] = end
    return segments, classes
def segments_to_labels(start_times, end_times, labels, window):
    """Convert segment endpoints + labels into fix-sized per-window flags.

    ARGUMENTS:
     - start_times:  segment start times (seconds)
     - end_times:    segment end times (seconds)
     - labels:       segment labels (one per segment)
     - window:       fix-sized window length (seconds)
    RETURNS:
     - flags:        np array of class indices, one per window
     - class_names:  sorted list of unique class names; flags index into it
    """
    flags = []
    # sorted() makes the class order deterministic across runs;
    # plain list(set(...)) depends on string hash randomization
    class_names = sorted(set(labels))
    index = window / 2.0
    while index < end_times[-1]:
        for i in range(len(start_times)):
            if start_times[i] < index <= end_times[i]:
                break
        # NOTE: if no segment contains this window center, the window
        # inherits the label of the last segment checked (pre-existing
        # behavior, kept for compatibility)
        flags.append(class_names.index(labels[i]))
        index += window
    return np.array(flags), class_names
def compute_metrics(confusion_matrix, class_names):
    """Compute per-class recall, precision and f1 from a confusion matrix.

    Rows of the matrix are ground-truth classes, columns are predictions.
    If the matrix size does not match len(class_names), an error message
    is printed and three empty lists are returned.
    """
    f1 = []
    recall = []
    precision = []
    n_classes = confusion_matrix.shape[0]
    if len(class_names) != n_classes:
        print("Error in computePreRec! Confusion matrix and class_names "
              "list must be of the same size!")
    else:
        for i in range(n_classes):
            col_total = np.sum(confusion_matrix[:, i])
            row_total = np.sum(confusion_matrix[i, :])
            p = confusion_matrix[i, i] / col_total
            r = confusion_matrix[i, i] / row_total
            precision.append(p)
            recall.append(r)
            # harmonic mean of precision and recall
            f1.append(2 * p * r / (p + r))
    return recall, precision, f1
def read_segmentation_gt(gt_file):
    """Read a segmentation ground-truth file.

    The file is tab-separated with one segment per row:
    <start seconds>\t<end seconds>\t<label>. Rows that do not have
    exactly three fields are silently skipped.

    RETURNS:
     - np array of segment start times
     - np array of segment end times
     - list of segment labels
    """
    start_times = []
    end_times = []
    labels = []
    with open(gt_file, 'rt') as f_handle:
        for row in csv.reader(f_handle, delimiter='\t'):
            if len(row) == 3:
                start, end, label = row
                start_times.append(float(start))
                end_times.append(float(end))
                labels.append(label)
    return np.array(start_times), np.array(end_times), labels
def plot_segmentation_results(flags_ind, flags_ind_gt, class_names, mt_step,
                              evaluate_only=False):
    """Compute the window-level accuracy of a segmentation result and,
    unless evaluate_only is True, plot the result along with per-class
    duration statistics.

    ARGUMENTS:
     - flags_ind:      predicted per-window class indices
     - flags_ind_gt:   ground-truth per-window class indices (may be empty)
     - class_names:    class names (indices map into this list)
     - mt_step:        mid-term window step in seconds
     - evaluate_only:  when True, skip all printing/plotting
    RETURNS:
     - accuracy: fraction of windows where prediction matches ground
       truth, or -1 when no overlapping windows exist
    """
    flags = [class_names[int(f)] for f in flags_ind]
    segments, classes = labels_to_segments(flags, mt_step)
    # accuracy over the overlapping portion of the two flag sequences
    min_len = min(flags_ind.shape[0], flags_ind_gt.shape[0])
    if min_len > 0:
        accuracy = np.sum(flags_ind[0:min_len] ==
                          flags_ind_gt[0:min_len]) / float(min_len)
    else:
        accuracy = -1
    if not evaluate_only:
        duration = segments[-1, 1]
        # per-class total duration, percentage and average segment duration
        s_percentages = np.zeros((len(class_names), ))
        percentages = np.zeros((len(class_names), ))
        av_durations = np.zeros((len(class_names), ))
        for i_seg in range(segments.shape[0]):
            s_percentages[class_names.index(classes[i_seg])] += \
                (segments[i_seg, 1]-segments[i_seg, 0])
        for i in range(s_percentages.shape[0]):
            percentages[i] = 100.0 * s_percentages[i] / duration
            class_sum = sum(1 for c in classes if c == class_names[i])
            if class_sum > 0:
                av_durations[i] = s_percentages[i] / class_sum
            else:
                av_durations[i] = 0.0
        for i in range(percentages.shape[0]):
            print(class_names[i], percentages[i], av_durations[i])
        font = {'size': 10}
        plt.rc('font', **font)
        fig = plt.figure()
        # top subplot: predicted flags (and ground truth, dashed red)
        ax1 = fig.add_subplot(211)
        ax1.set_yticks(np.array(range(len(class_names))))
        ax1.axis((0, duration, -1, len(class_names)))
        ax1.set_yticklabels(class_names)
        ax1.plot(np.array(range(len(flags_ind))) * mt_step +
                 mt_step / 2.0, flags_ind)
        if flags_ind_gt.shape[0] > 0:
            ax1.plot(np.array(range(len(flags_ind_gt))) * mt_step +
                     mt_step / 2.0, flags_ind_gt + 0.05, '--r')
        plt.xlabel("time (seconds)")
        if accuracy >= 0:
            plt.title('Accuracy = {0:.1f}%'.format(100.0 * accuracy))
        # bottom-left subplot: per-class duration percentages
        ax2 = fig.add_subplot(223)
        plt.title("Classes percentage durations")
        ax2.axis((0, len(class_names) + 1, 0, 100))
        ax2.set_xticks(np.array(range(len(class_names) + 1)))
        ax2.set_xticklabels([" "] + class_names)
        print(np.array(range(len(class_names))), percentages)
        ax2.bar(np.array(range(len(class_names))) + 0.5, percentages)
        # bottom-right subplot: average segment duration per class
        ax3 = fig.add_subplot(224)
        plt.title("Segment average duration per class")
        ax3.axis((0, len(class_names)+1, 0, av_durations.max()))
        ax3.set_xticks(np.array(range(len(class_names) + 1)))
        ax3.set_xticklabels([" "] + class_names)
        ax3.bar(np.array(range(len(class_names))) + 0.5, av_durations)
        fig.tight_layout()
        plt.show()
    return accuracy
def evaluate_speaker_diarization(labels, labels_gt):
    """Compute cluster purity and speaker purity of a diarization result.

    ARGUMENTS:
     - labels:     predicted per-window cluster ids
     - labels_gt:  ground-truth per-window speaker ids
    RETURNS:
     - purity_cluster_m:  weighted average purity of the predicted clusters
     - purity_speaker_m:  weighted average purity of the gt speakers
    """
    n = min(labels.shape[0], labels_gt.shape[0])
    pred = labels[0:n]
    gt = labels_gt[0:n]
    pred_ids = np.unique(pred)
    gt_ids = np.unique(gt)
    # contingency[i, j]: windows assigned to cluster i with true speaker j
    contingency = np.zeros((pred_ids.shape[0], gt_ids.shape[0]))
    for p, g in zip(pred, gt):
        contingency[int(np.nonzero(pred_ids == p)[0]),
                    int(np.nonzero(gt_ids == g)[0])] += 1.0
    cluster_sizes = contingency.sum(axis=1)
    speaker_sizes = contingency.sum(axis=0)
    total = contingency.sum()
    # purity of each cluster / speaker is its dominant-count fraction
    cluster_purity = contingency.max(axis=1) / cluster_sizes
    speaker_purity = contingency.max(axis=0) / speaker_sizes
    purity_cluster_m = np.sum(cluster_purity * cluster_sizes) / total
    purity_speaker_m = np.sum(speaker_purity * speaker_sizes) / total
    return purity_cluster_m, purity_speaker_m
def train_hmm_compute_statistics(features, labels):
    """Estimate GaussianHMM parameters from labelled feature sequences.

    ARGUMENTS:
     - features:  feature matrix (n_features x n_windows)
     - labels:    per-window state labels; values are assumed to lie in
                  0 .. n_states-1 (they are used directly as transition
                  matrix indices)
    RETURNS:
     - class_priors:          state prior probabilities
     - transmutation_matrix:  state transition probabilities
     - means:                 per-state feature means (n_states x n_features)
     - cov:                   per-state feature std devs (diagonal model)
    """
    states = np.unique(labels)
    n_states = len(states)
    if features.shape[1] < labels.shape[0]:
        print("trainHMM warning: number of short-term feature vectors "
              "must be greater or equal to the labels length!")
        labels = labels[0:features.shape[1]]
    # priors: relative frequency of each state
    class_priors = np.array(
        [np.count_nonzero(labels == state) for state in states],
        dtype='float64')
    class_priors = class_priors / class_priors.sum()
    # transitions: count consecutive label pairs, then row-normalize
    transmutation_matrix = np.zeros((n_states, n_states))
    for cur, nxt in zip(labels[:-1], labels[1:]):
        transmutation_matrix[int(cur), int(nxt)] += 1
    transmutation_matrix = \
        transmutation_matrix / transmutation_matrix.sum(axis=1, keepdims=True)
    # per-state gaussian statistics (mean and std per feature dimension)
    means = np.vstack(
        [features[:, np.nonzero(labels == state)[0]].mean(axis=1)
         for state in states])
    cov = np.vstack(
        [np.std(features[:, np.nonzero(labels == state)[0]], axis=1)
         for state in states])
    return class_priors, transmutation_matrix, means, cov
def train_hmm_from_file(wav_file, gt_file, hmm_model_name, mid_window, mid_step):
    """Train a GaussianHMM for segmentation from a single annotated file.

    ARGUMENTS:
     - wav_file:        path of the WAV audio file
     - gt_file:         path of the .segments ground-truth file
     - hmm_model_name:  output path for the pickled model (see save_hmm)
     - mid_window:      mid-term window size in seconds
     - mid_step:        mid-term window step in seconds
    RETURNS:
     - hmm:          the trained hmmlearn GaussianHMM
     - class_names:  list of class names (gt labels)
    """
    seg_start, seg_end, seg_labs = read_segmentation_gt(gt_file)
    flags, class_names = segments_to_labels(seg_start, seg_end, seg_labs, mid_step)
    sampling_rate, signal = audioBasicIO.read_audio_file(wav_file)
    # mid-term features with a fixed 50 msec short-term window/step
    features, _, _ = \
        mtf.mid_feature_extraction(signal, sampling_rate,
                                   mid_window * sampling_rate,
                                   mid_step * sampling_rate,
                                   round(sampling_rate * 0.050),
                                   round(sampling_rate * 0.050))
    # estimate priors, transitions and per-state gaussian statistics
    class_priors, transumation_matrix, means, cov = \
        train_hmm_compute_statistics(features, flags)
    hmm = hmmlearn.hmm.GaussianHMM(class_priors.shape[0], "diag")
    hmm.covars_ = cov
    hmm.means_ = means
    hmm.startprob_ = class_priors
    hmm.transmat_ = transumation_matrix
    save_hmm(hmm_model_name, hmm, class_names, mid_window, mid_step)
    return hmm, class_names
def train_hmm_from_directory(folder_path, hmm_model_name, mid_window, mid_step):
    """Train a GaussianHMM segmenter from all annotated WAVs in a folder.

    Every <name>.wav with a sibling <name>.segments annotation contributes
    its mid-term features and per-window labels; the pooled data is used to
    fit the model, which is pickled to hmm_model_name via save_hmm().

    :param folder_path: directory holding the .wav / .segments pairs
    :param hmm_model_name: output path for the pickled model
    :param mid_window: mid-term window size (seconds)
    :param mid_step: mid-term window step (seconds)
    :return: (hmm, class_names_all) trained model and global label names

    NOTE(review): f_all is only bound inside the annotated-file branch when
    i == 0, so a folder with no annotated WAVs (or whose first WAV lacks a
    .segments file) raises NameError at the statistics step — confirm
    callers guarantee the first file is annotated.
    """
    flags_all = np.array([])
    class_names_all = []
    for i, f in enumerate(glob.glob(folder_path + os.sep + '*.wav')):
        # Each WAV is paired with a same-name .segments ground-truth file.
        wav_file = f
        gt_file = f.replace('.wav', '.segments')
        if os.path.isfile(gt_file):
            seg_start, seg_end, seg_labs = read_segmentation_gt(gt_file)
            flags, class_names = \
                segments_to_labels(seg_start, seg_end, seg_labs, mid_step)
            # Grow the global class-name list with unseen labels.
            for c in class_names:
                if c not in class_names_all:
                    class_names_all.append(c)
            sampling_rate, signal = audioBasicIO.read_audio_file(wav_file)
            # Mid-term features with a fixed 50 ms short-term window/step.
            feature_vector, _, _ = \
                mtf.mid_feature_extraction(signal, sampling_rate,
                                           mid_window * sampling_rate,
                                           mid_step * sampling_rate,
                                           round(sampling_rate * 0.050),
                                           round(sampling_rate * 0.050))
            # Align feature matrix and label sequence to a common length.
            flag_len = len(flags)
            feat_cols = feature_vector.shape[1]
            min_sm = min(feat_cols, flag_len)
            feature_vector = feature_vector[:, 0:min_sm]
            flags = flags[0:min_sm]
            flags_new = []
            # NOTE(review): this indexes the *global* name list with the
            # per-file label id, which only maps correctly when each file's
            # class order matches class_names_all — presumably the intent
            # was class_names_all.index(class_names[flags[j]]); confirm.
            for j, fl in enumerate(flags):
                flags_new.append(class_names_all.index(class_names_all[flags[j]]))
            flags_all = np.append(flags_all, np.array(flags_new))
            # Stack features of all files column-wise into one matrix.
            if i == 0:
                f_all = feature_vector
            else:
                f_all = np.concatenate((f_all, feature_vector), axis=1)
    # Fit the HMM on the pooled windows and persist it with its metadata.
    class_priors, transmutation_matrix, means, cov = \
        train_hmm_compute_statistics(f_all, flags_all)
    hmm = hmmlearn.hmm.GaussianHMM(class_priors.shape[0], "diag")
    hmm.covars_ = cov
    hmm.means_ = means
    hmm.startprob_ = class_priors
    hmm.transmat_ = transmutation_matrix
    save_hmm(hmm_model_name, hmm, class_names_all, mid_window, mid_step)
    return hmm, class_names_all
def save_hmm(hmm_model_name, model, classes, mid_window, mid_step):
    """Pickle a trained HMM together with its metadata to one file.

    Objects are dumped in a fixed order (model, class names, mid-term
    window, mid-term step) and must be loaded back in the same order.

    :param hmm_model_name: output file path
    :param model: the trained hmmlearn model
    :param classes: list of class/label names
    :param mid_window: mid-term window size used during training (seconds)
    :param mid_step: mid-term window step used during training (seconds)
    """
    with open(hmm_model_name, "wb") as f_handle:
        for payload in (model, classes, mid_window, mid_step):
            cpickle.dump(payload, f_handle, protocol=cpickle.HIGHEST_PROTOCOL)
def hmm_segmentation(audio_file, hmm_model_name, plot_results=False,
                     gt_file=""):
    """Segment an audio file with a previously trained GaussianHMM.

    Loads the pickled model plus its class names and mid-term analysis
    parameters (in the order written by save_hmm), extracts mid-term
    features with the same parameters, and decodes the most likely label
    sequence.  If gt_file exists, the result is evaluated against it.

    :param audio_file: path of the WAV file to segment
    :param hmm_model_name: path of the pickled model (see save_hmm)
    :param plot_results: visualize segmentation vs ground truth
    :param gt_file: optional .segments ground-truth annotation path
    :return: (labels, class_names, accuracy, cm)
    """
    sampling_rate, signal = audioBasicIO.read_audio_file(audio_file)
    # Unpickle in the exact order used by save_hmm().
    with open(hmm_model_name, "rb") as f_handle:
        hmm = cpickle.load(f_handle)
        class_names = cpickle.load(f_handle)
        mid_window = cpickle.load(f_handle)
        mid_step = cpickle.load(f_handle)
    # Feature extraction must mirror the training configuration
    # (50 ms short-term window and step).
    features, _, _ = \
        mtf.mid_feature_extraction(signal, sampling_rate,
                                   mid_window * sampling_rate,
                                   mid_step * sampling_rate,
                                   round(sampling_rate * 0.050),
                                   round(sampling_rate * 0.050))
    # One decoded label per mid-term window.
    labels = hmm.predict(features.T)
    labels_gt, class_names_gt, accuracy, cm = \
        load_ground_truth(gt_file, labels, class_names, mid_step, plot_results)
    return labels, class_names, accuracy, cm
def load_ground_truth_segments(gt_file, mt_step):
    """Read a .segments annotation file and convert it to per-window labels.

    :param gt_file: path of the ground-truth annotation file
    :param mt_step: mid-term step (seconds) used to discretize segments
    :return: (labels, class_names) — one integer label per mid-term window
        and the list of label names
    """
    seg_start, seg_end, seg_labels = read_segmentation_gt(gt_file)
    labels, class_names = segments_to_labels(seg_start, seg_end, seg_labels,
                                             mt_step)
    labels_temp = []
    for index, label in enumerate(labels):
        # NOTE(review): this membership test is a tautology — an element of
        # class_names is always in class_names — so the -1 branch is dead
        # and the loop is effectively an identity remapping.  Presumably
        # the intent was to check against a *different* class list; confirm.
        if class_names[labels[index]] in class_names:
            labels_temp.append(class_names.index(class_names[
                                                     labels[index]]))
        else:
            labels_temp.append(-1)
    labels = np.array(labels_temp)
    return labels, class_names
def calculate_confusion_matrix(predictions, ground_truth, classes):
    """Build a confusion matrix of counts, rows = true, cols = predicted.

    Only the overlapping prefix of the two label sequences is counted, so
    sequences of different lengths are handled gracefully.

    :param predictions: 1-D array of predicted label ids
    :param ground_truth: 1-D array of true label ids
    :param classes: list of class names (defines the matrix size)
    :return: (len(classes), len(classes)) float count matrix
    """
    n_classes = len(classes)
    cm = np.zeros((n_classes, n_classes))
    n_common = min(predictions.shape[0], ground_truth.shape[0])
    for true_label, predicted in zip(ground_truth[:n_common],
                                     predictions[:n_common]):
        cm[int(true_label), int(predicted)] += 1
    return cm
def mid_term_file_classification(input_file, model_name, model_type,
                                 plot_results=False, gt_file=""):
    """Classify fixed-size mid-term windows of an audio file.

    Loads a pre-trained segment classifier, extracts mid-term features with
    the model's stored analysis parameters, classifies each window, and —
    when a ground-truth file is available — evaluates the result.

    :param input_file: path of the WAV file to segment
    :param model_name: path of the pre-trained classifier model
    :param model_type: classifier type ("knn", "svm", ...)
    :param plot_results: visualize segmentation vs ground truth
    :param gt_file: optional .segments ground-truth annotation path
    :return: (labels, class_names, accuracy, cm); labels/cm are empty and
        accuracy is 0.0 on any early error exit.
    """
    labels = []
    accuracy = 0.0
    class_names = []
    cm = np.array([])
    if not os.path.isfile(model_name):
        print("mtFileClassificationError: input model_type not found!")
        return labels, class_names, accuracy, cm
    # kNN models are stored in a dedicated format; everything else goes
    # through the generic loader.
    if model_type == "knn":
        classifier, mean, std, class_names, mt_win, mid_step, st_win, \
            st_step, compute_beat = at.load_model_knn(model_name)
    else:
        classifier, mean, std, class_names, mt_win, mid_step, st_win, \
            st_step, compute_beat = at.load_model(model_name)
    # Beat features are whole-file statistics and make no sense per-window.
    if compute_beat:
        print("Model " + model_name + " contains long-term music features "
                                      "(beat etc) and cannot be used in "
                                      "segmentation")
        return labels, class_names, accuracy, cm
    sampling_rate, signal = audioBasicIO.read_audio_file(input_file)
    # A sampling rate of 0 signals a failed/unsupported read.
    if sampling_rate == 0:
        return labels, class_names, accuracy, cm
    signal = audioBasicIO.stereo_to_mono(signal)
    # Feature extraction with the analysis parameters stored in the model.
    mt_feats, _, _ = \
        mtf.mid_feature_extraction(signal, sampling_rate,
                                   mt_win * sampling_rate,
                                   mid_step * sampling_rate,
                                   round(sampling_rate * st_win),
                                   round(sampling_rate * st_step))
    # Classify each mid-term window after z-normalizing with the model's
    # training statistics; keep the winning posterior per window.
    posterior_matrix = []
    for col_index in range(mt_feats.shape[1]):
        feature_vector = (mt_feats[:, col_index] - mean) / std
        label_predicted, posterior = \
            at.classifier_wrapper(classifier, model_type, feature_vector)
        labels.append(label_predicted)
        posterior_matrix.append(np.max(posterior))
    labels = np.array(labels)
    # Merge per-window labels into segments; snap the final segment end to
    # the true file duration.
    # NOTE(review): segs[-1] assumes at least one segment was produced —
    # confirm labels_to_segments never returns an empty list here.
    segs, classes = labels_to_segments(labels, mid_step)
    segs[-1] = len(signal) / float(sampling_rate)
    labels_gt, class_names_gt, accuracy, cm = \
        load_ground_truth(gt_file, labels, class_names, mid_step, plot_results)
    return labels, class_names, accuracy, cm
def load_ground_truth(gt_file, labels, class_names, mid_step, plot_results):
    """Evaluate predicted per-window labels against a ground-truth file.

    If gt_file does not exist this is a no-op that returns zero accuracy
    and empty arrays.  Otherwise predictions are remapped onto the
    ground-truth class ids (-1 for classes unknown to the ground truth),
    a confusion matrix is computed, and the result is plotted/scored.

    :param gt_file: path of the .segments annotation file (may be missing)
    :param labels: predicted per-window label ids
    :param class_names: names corresponding to the predicted label ids
    :param mid_step: mid-term step (seconds) used for both sequences
    :param plot_results: if True, show the segmentation plot
    :return: (labels_gt, class_names, accuracy, cm)
    """
    accuracy = 0
    cm = np.array([])
    labels_gt = np.array([])
    if os.path.isfile(gt_file):
        labels_gt, class_names_gt = load_ground_truth_segments(gt_file,
                                                               mid_step)
        # Remap predictions into the ground-truth id space; -1 marks
        # predicted classes that the ground truth does not contain.
        labels_new = []
        for il, l in enumerate(labels):
            if class_names[int(l)] in class_names_gt:
                labels_new.append(class_names_gt.index(class_names[int(l)]))
            else:
                labels_new.append(-1)
        labels_new = np.array(labels_new)
        cm = calculate_confusion_matrix(labels_new, labels_gt, class_names_gt)
        # plot_segmentation_results also computes the accuracy; plotting is
        # suppressed when plot_results is True passed as "not plot_results".
        accuracy = plot_segmentation_results(labels_new, labels_gt,
                                             class_names, mid_step, not plot_results)
        if accuracy >= 0:
            print("Overall Accuracy: {0:.2f}".format(accuracy))
    return labels_gt, class_names, accuracy, cm
def evaluate_segmentation_classification_dir(dir_name, model_name, method_name):
    """Evaluate a segmenter-classifier over every annotated WAV in a folder.

    Each WAV is segmented either by a fixed-window classifier (svm/knn/...)
    or by an HMM, scored against its .segments ground truth, and the
    per-file confusion matrices are accumulated and summarized on stdout.

    :param dir_name: directory holding the .wav / .segments pairs
    :param model_name: path of the pre-trained model
    :param method_name: classifier type or anything else for HMM mode
    """
    accuracies = []
    class_names = []
    cm_total = np.array([])
    for index, wav_file in enumerate(glob.glob(dir_name + os.sep + '*.wav')):
        print(wav_file)
        gt_file = wav_file.replace('.wav', '.segments')
        # Fixed-window classifiers vs HMM decoding.
        if method_name.lower() in ["svm", "svm_rbf", "knn", "randomforest",
                                   "gradientboosting", "extratrees"]:
            flags_ind, class_names, accuracy, cm_temp = \
                mid_term_file_classification(wav_file, model_name, method_name,
                                             False, gt_file)
        else:
            flags_ind, class_names, accuracy, cm_temp = \
                hmm_segmentation(wav_file, model_name, False, gt_file)
        # Files without usable ground truth report accuracy 0 and are
        # excluded from the aggregate statistics.
        if accuracy > 0:
            # NOTE(review): "not index" only seeds cm_total on the *first
            # directory entry*; if that file has accuracy 0 and a later one
            # succeeds, cm_total is still the empty array here — confirm.
            if not index:
                cm_total = np.copy(cm_temp)
            else:
                cm_total = cm_total + cm_temp
            accuracies.append(accuracy)
            print(cm_temp, class_names)
            print(cm_total)
    if len(cm_total.shape) > 1:
        # Normalize the pooled confusion matrix and print summary metrics.
        cm_total = cm_total / np.sum(cm_total)
        rec, pre, f1 = compute_metrics(cm_total, class_names)
        print(" - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ")
        print("Average Accuracy: {0:.1f}".
              format(100.0*np.array(accuracies).mean()))
        print("Average recall: {0:.1f}".format(100.0*np.array(rec).mean()))
        print("Average precision: {0:.1f}".format(100.0*np.array(pre).mean()))
        print("Average f1: {0:.1f}".format(100.0*np.array(f1).mean()))
        print("Median Accuracy: {0:.1f}".
              format(100.0*np.median(np.array(accuracies))))
        print("Min Accuracy: {0:.1f}".format(100.0*np.array(accuracies).min()))
        print("Max Accuracy: {0:.1f}".format(100.0*np.array(accuracies).max()))
    else:
        print("Confusion matrix was empty, accuracy for every file was 0")
def silence_removal(signal, sampling_rate, st_win, st_step, smooth_window=0.5,
                    weight=0.5, plot=False):
    """Detect non-silent segments via a semi-supervised SVM on energy.

    An SVM is trained on the 10% lowest-energy frames (treated as silence)
    vs the 10% highest-energy frames (treated as voiced/active); its
    smoothed per-frame activity probability is thresholded to produce
    segment limits.

    :param signal: raw audio samples
    :param sampling_rate: sampling frequency (Hz)
    :param st_win: short-term window size (seconds)
    :param st_step: short-term window step (seconds)
    :param smooth_window: probability smoothing window (seconds)
    :param weight: threshold bias in (0, 1); higher keeps less audio
    :param plot: if True, plot the waveform and the SVM probability
    :return: list of [start, end] segment limits in seconds
    """
    # Clamp the threshold weight into the open interval (0, 1).
    if weight >= 1:
        weight = 0.99
    if weight <= 0:
        weight = 0.01
    signal = audioBasicIO.stereo_to_mono(signal)
    st_feats, _ = stf.feature_extraction(signal, sampling_rate,
                                         st_win * sampling_rate,
                                         st_step * sampling_rate)
    # Row 1 of the short-term feature matrix is the frame energy.
    st_energy = st_feats[1, :]
    en = np.sort(st_energy)
    st_windows_fraction = int(len(en) / 10)
    # Mean of lowest / highest 10% energies (the top slice excludes the
    # single largest value due to the -1 end index — presumably intended
    # as an outlier guard).
    low_threshold = np.mean(en[0:st_windows_fraction]) + 1e-15
    high_threshold = np.mean(en[-st_windows_fraction:-1]) + 1e-15
    low_energy = st_feats[:, np.where(st_energy <= low_threshold)[0]]
    high_energy = st_feats[:, np.where(st_energy >= high_threshold)[0]]
    # Class 0 = low energy (silence), class 1 = high energy (active).
    features = [low_energy.T, high_energy.T]
    features_norm, mean, std = at.normalize_features(features)
    svm = at.train_svm(features_norm, 1.0)
    # Per-frame probability of the "active" class, then smoothing.
    prob_on_set = []
    for index in range(st_feats.shape[1]):
        cur_fv = (st_feats[:, index] - mean) / std
        prob_on_set.append(svm.predict_proba(cur_fv.reshape(1, -1))[0][1])
    prob_on_set = np.array(prob_on_set)
    prob_on_set = smooth_moving_avg(prob_on_set, smooth_window / st_step)
    # Dynamic threshold: weighted mix of the mean bottom-10% and top-10%
    # smoothed probabilities.
    prog_on_set_sort = np.sort(prob_on_set)
    nt = int(prog_on_set_sort.shape[0] / 10)
    threshold = (np.mean((1 - weight) * prog_on_set_sort[0:nt]) +
                 weight * np.mean(prog_on_set_sort[-nt::]))
    max_indices = np.where(prob_on_set > threshold)[0]
    # Group above-threshold frame indices into clusters, merging gaps of
    # up to 2 frames, and convert cluster bounds to seconds.
    index = 0
    seg_limits = []
    time_clusters = []
    while index < len(max_indices):
        cur_cluster = [max_indices[index]]
        if index == len(max_indices)-1:
            break
        while max_indices[index+1] - cur_cluster[-1] <= 2:
            cur_cluster.append(max_indices[index+1])
            index += 1
            if index == len(max_indices)-1:
                break
        index += 1
        time_clusters.append(cur_cluster)
        seg_limits.append([cur_cluster[0] * st_step,
                           cur_cluster[-1] * st_step])
    # Discard segments shorter than 200 ms.
    min_duration = 0.2
    seg_limits_2 = []
    for s_lim in seg_limits:
        if s_lim[1] - s_lim[0] > min_duration:
            seg_limits_2.append(s_lim)
    seg_limits = seg_limits_2
    if plot:
        # Top: waveform with segment boundaries; bottom: SVM probability.
        time_x = np.arange(0, signal.shape[0] / float(sampling_rate), 1.0 /
                           sampling_rate)
        plt.subplot(2, 1, 1)
        plt.plot(time_x, signal)
        for s_lim in seg_limits:
            plt.axvline(x=s_lim[0], color='red')
            plt.axvline(x=s_lim[1], color='red')
        plt.subplot(2, 1, 2)
        plt.plot(np.arange(0, prob_on_set.shape[0] * st_step, st_step),
                 prob_on_set)
        plt.title('Signal')
        for s_lim in seg_limits:
            plt.axvline(x=s_lim[0], color='red')
            plt.axvline(x=s_lim[1], color='red')
        plt.title('svm Probability')
        plt.show()
    return seg_limits
def speaker_diarization(filename, n_speakers, mid_window=2.0, mid_step=0.2,
                        short_window=0.05, lda_dim=35, plot_res=False):
    """Assign a speaker label to every mid-term window of an audio file.

    Pipeline: mid-term features are augmented with posteriors of two
    pre-trained kNN models, outlier windows are removed, the features are
    optionally FLsD/LDA-projected, k-means clustering picks the speakers
    (silhouette-selected when n_speakers <= 0), and an HMM plus median
    filtering smooths the final label sequence.

    :param filename: path of the WAV file to diarize
    :param n_speakers: known number of speakers, or <= 0 to auto-select
    :param mid_window: mid-term window size (seconds)
    :param mid_step: mid-term window step (seconds)
    :param short_window: short-term window size (seconds)
    :param lda_dim: LDA projection dimensionality; 0 disables the FLsD step
    :param plot_res: if True, plot the diarization result
    :return: cls — one speaker id per mid-term window
    """
    sampling_rate, signal = audioBasicIO.read_audio_file(filename)
    signal = audioBasicIO.stereo_to_mono(signal)
    duration = len(signal) / sampling_rate
    # Pre-trained kNN models shipped with the package: a 10-speaker model
    # and a male/female model; their posteriors become extra features.
    base_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                            "data/models")
    classifier_all, mean_all, std_all, class_names_all, _, _, _, _, _ = \
        at.load_model_knn(os.path.join(base_dir, "knn_speaker_10"))
    classifier_fm, mean_fm, std_fm, class_names_fm, _, _, _, _, _ = \
        at.load_model_knn(os.path.join(base_dir, "knn_speaker_male_female"))
    mid_feats, st_feats, _ = \
        mtf.mid_feature_extraction(signal, sampling_rate,
                                   mid_window * sampling_rate,
                                   mid_step * sampling_rate,
                                   round(sampling_rate * short_window),
                                   round(sampling_rate * short_window * 0.5))
    # Augment each mid-term feature vector with the two models' posteriors
    # (offset by 1e-4 to avoid exact zeros).
    mid_term_features = np.zeros((mid_feats.shape[0] + len(class_names_all) +
                                  len(class_names_fm), mid_feats.shape[1]))
    for index in range(mid_feats.shape[1]):
        feature_norm_all = (mid_feats[:, index] - mean_all) / std_all
        feature_norm_fm = (mid_feats[:, index] - mean_fm) / std_fm
        _, p1 = at.classifier_wrapper(classifier_all, "knn", feature_norm_all)
        _, p2 = at.classifier_wrapper(classifier_fm, "knn", feature_norm_fm)
        start = mid_feats.shape[0]
        end = mid_feats.shape[0] + len(class_names_all)
        mid_term_features[0:mid_feats.shape[0], index] = mid_feats[:, index]
        mid_term_features[start:end, index] = p1 + 1e-4
        mid_term_features[end::, index] = p2 + 1e-4
    mid_feats = mid_term_features
    # Hand-picked feature rows (MFCC-related ranges) kept for clustering.
    feature_selected = [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 41,
                        42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53]
    mid_feats = mid_feats[feature_selected, :]
    mid_feats_norm, mean, std = at.normalize_features([mid_feats.T])
    mid_feats_norm = mid_feats_norm[0].T
    n_wins = mid_feats.shape[1]
    # Remove outlier windows: those whose summed distance to all other
    # windows exceeds 1.2x the mean.  The unfiltered matrix is kept for
    # the final HMM smoothing pass.
    dist_all = np.sum(distance.squareform(distance.pdist(mid_feats_norm.T)),
                      axis=0)
    m_dist_all = np.mean(dist_all)
    i_non_outliers = np.nonzero(dist_all < 1.2 * m_dist_all)[0]
    mt_feats_norm_or = mid_feats_norm
    mid_feats_norm = mid_feats_norm[:, i_non_outliers]
    if lda_dim > 0:
        # FLsD step: re-extract statistics over 1-second quasi-labels and
        # fit an LDA projection that is then applied to the features.
        window_ratio = int(round(mid_window / short_window))
        # NOTE(review): short_window / short_window is always 1 —
        # presumably mid_step / short_window was intended; confirm.
        step_ratio = int(round(short_window / short_window))
        mt_feats_to_red = []
        num_of_features = len(st_feats)
        num_of_stats = 2
        for index in range(num_of_stats * num_of_features):
            mt_feats_to_red.append([])
        # Mean and std of every short-term feature over sliding windows.
        for index in range(num_of_features):
            cur_pos = 0
            feat_len = len(st_feats[index])
            while cur_pos < feat_len:
                n1 = cur_pos
                n2 = cur_pos + window_ratio
                if n2 > feat_len:
                    n2 = feat_len
                short_features = st_feats[index][n1:n2]
                mt_feats_to_red[index].append(np.mean(short_features))
                mt_feats_to_red[index + num_of_features].\
                    append(np.std(short_features))
                cur_pos += step_ratio
        mt_feats_to_red = np.array(mt_feats_to_red)
        # Same posterior augmentation as above, on the re-extracted matrix.
        mt_feats_to_red_2 = np.zeros((mt_feats_to_red.shape[0] +
                                      len(class_names_all) +
                                      len(class_names_fm),
                                      mt_feats_to_red.shape[1]))
        limit = mt_feats_to_red.shape[0] + len(class_names_all)
        for index in range(mt_feats_to_red.shape[1]):
            feature_norm_all = (mt_feats_to_red[:, index] - mean_all) / std_all
            feature_norm_fm = (mt_feats_to_red[:, index] - mean_fm) / std_fm
            _, p1 = at.classifier_wrapper(classifier_all, "knn",
                                          feature_norm_all)
            _, p2 = at.classifier_wrapper(classifier_fm, "knn", feature_norm_fm)
            mt_feats_to_red_2[0:mt_feats_to_red.shape[0], index] = \
                mt_feats_to_red[:, index]
            mt_feats_to_red_2[mt_feats_to_red.shape[0]:limit, index] = p1 + 1e-4
            mt_feats_to_red_2[limit::, index] = p2 + 1e-4
        mt_feats_to_red = mt_feats_to_red_2
        mt_feats_to_red = mt_feats_to_red[feature_selected, :]
        mt_feats_to_red, mean, std = at.normalize_features([mt_feats_to_red.T])
        mt_feats_to_red = mt_feats_to_red[0].T
        # Quasi-labels: one pseudo-class per lda_step seconds of audio.
        labels = np.zeros((mt_feats_to_red.shape[1], ))
        lda_step = 1.0
        lda_step_ratio = lda_step / short_window
        for index in range(labels.shape[0]):
            labels[index] = int(index * short_window / lda_step_ratio)
        clf = sklearn.discriminant_analysis.\
            LinearDiscriminantAnalysis(n_components=lda_dim)
        clf.fit(mt_feats_to_red.T, labels)
        mid_feats_norm = (clf.transform(mid_feats_norm.T)).T
    # Candidate speaker counts: fixed when known, 2..9 otherwise.
    if n_speakers <= 0:
        s_range = range(2, 10)
    else:
        s_range = [n_speakers]
    cluster_labels = []
    sil_all = []
    cluster_centers = []
    for speakers in s_range:
        k_means = sklearn.cluster.KMeans(n_clusters=speakers)
        k_means.fit(mid_feats_norm.T)
        cls = k_means.labels_
        means = k_means.cluster_centers_
        cluster_labels.append(cls)
        cluster_centers.append(means)
        # Silhouette-like score per cluster: sil_1 = weighted intra-cluster
        # distance, sil_2 = smallest weighted inter-cluster distance.
        # Clusters holding < 2% of the windows score 0.
        sil_1, sil_2 = [], []
        for c in range(speakers):
            clust_per_cent = np.nonzero(cls == c)[0].shape[0] / float(len(cls))
            if clust_per_cent < 0.020:
                sil_1.append(0.0)
                sil_2.append(0.0)
            else:
                mt_feats_norm_temp = mid_feats_norm[:, cls == c]
                dist = distance.pdist(mt_feats_norm_temp.T)
                sil_1.append(np.mean(dist)*clust_per_cent)
                sil_temp = []
                for c2 in range(speakers):
                    if c2 != c:
                        clust_per_cent_2 = np.nonzero(cls == c2)[0].shape[0] /\
                                           float(len(cls))
                        mid_features_temp = mid_feats_norm[:, cls == c2]
                        dist = distance.cdist(mt_feats_norm_temp.T,
                                              mid_features_temp.T)
                        sil_temp.append(np.mean(dist)*(clust_per_cent
                                                       + clust_per_cent_2)/2.0)
                # NOTE(review): min() on an empty sil_temp raises when
                # speakers == 1 — confirm callers never request 1 speaker.
                sil_temp = np.array(sil_temp)
                sil_2.append(min(sil_temp))
        sil_1 = np.array(sil_1)
        sil_2 = np.array(sil_2)
        sil = []
        for c in range(speakers):
            sil.append((sil_2[c] - sil_1[c]) / (max(sil_2[c], sil_1[c]) + 1e-5))
        sil_all.append(np.mean(sil))
    # Pick the candidate count with the best average silhouette score.
    imax = int(np.argmax(sil_all))
    num_speakers = s_range[imax]
    # Map cluster labels back onto *all* windows: each outlier window takes
    # the label of its nearest kept (non-outlier) window.
    cls = np.zeros((n_wins,))
    for index in range(n_wins):
        j = np.argmin(np.abs(index-i_non_outliers))
        cls[index] = cluster_labels[imax][j]
    # Single HMM smoothing pass (range(1) keeps the original structure that
    # allowed repeating it), followed by two median filters.
    for index in range(1):
        start_prob, transmat, means, cov = \
            train_hmm_compute_statistics(mt_feats_norm_or, cls)
        hmm = hmmlearn.hmm.GaussianHMM(start_prob.shape[0], "diag")
        hmm.startprob_ = start_prob
        hmm.transmat_ = transmat
        hmm.means_ = means
        hmm.covars_ = cov
        cls = hmm.predict(mt_feats_norm_or.T)
    cls = scipy.signal.medfilt(cls, 13)
    cls = scipy.signal.medfilt(cls, 11)
    class_names = ["speaker{0:d}".format(c) for c in range(num_speakers)]
    # Optional evaluation against a sibling .segments ground-truth file.
    gt_file = filename.replace('.wav', '.segments')
    if os.path.isfile(gt_file):
        seg_start, seg_end, seg_labs = read_segmentation_gt(gt_file)
        flags_gt, class_names_gt = segments_to_labels(seg_start, seg_end,
                                                      seg_labs, mid_step)
    if plot_res:
        fig = plt.figure()
        if n_speakers > 0:
            ax1 = fig.add_subplot(111)
        else:
            ax1 = fig.add_subplot(211)
        ax1.set_yticks(np.array(range(len(class_names))))
        ax1.axis((0, duration, -1, len(class_names)))
        ax1.set_yticklabels(class_names)
        ax1.plot(np.array(range(len(cls))) * mid_step + mid_step / 2.0, cls)
    if os.path.isfile(gt_file):
        if plot_res:
            ax1.plot(np.array(range(len(flags_gt))) *
                     mid_step + mid_step / 2.0, flags_gt, 'r')
        purity_cluster_m, purity_speaker_m = \
            evaluate_speaker_diarization(cls, flags_gt)
        print("{0:.1f}\t{1:.1f}".format(100 * purity_cluster_m,
                                        100 * purity_speaker_m))
        if plot_res:
            plt.title("Cluster purity: {0:.1f}% - "
                      "Speaker purity: {1:.1f}%".format(100 * purity_cluster_m,
                                                        100 * purity_speaker_m))
    if plot_res:
        plt.xlabel("time (seconds)")
        # In auto-selection mode also show the silhouette curve per count.
        if n_speakers <= 0:
            plt.subplot(212)
            plt.plot(s_range, sil_all)
            plt.xlabel("number of clusters")
            plt.ylabel("average clustering's sillouette")
        plt.show()
    return cls
def speaker_diarization_evaluation(folder_name, lda_dimensions):
    """Run speaker diarization over a folder for several LDA dimensions.

    For each WAV file the true speaker count is read from its sibling
    .segments ground-truth file (-1 when missing), then diarization is run
    once per requested LDA dimensionality with the module's default
    analysis parameters.

    :param folder_name: directory holding the .wav / .segments pairs
    :param lda_dimensions: iterable of LDA dimensionalities to evaluate
    """
    patterns = ('*.wav', )
    wav_files = sorted(
        path
        for pattern in patterns
        for path in glob.glob(os.path.join(folder_name, pattern)))
    # Ground-truth speaker counts; -1 flags files without annotation.
    num_speakers = []
    for wav_file in wav_files:
        gt_file = wav_file.replace('.wav', '.segments')
        if not os.path.isfile(gt_file):
            num_speakers.append(-1)
            continue
        _, _, seg_labs = read_segmentation_gt(gt_file)
        num_speakers.append(len(set(seg_labs)))
    for dim in lda_dimensions:
        print("LDA = {0:d}".format(dim))
        for wav_file, speaker_count in zip(wav_files, num_speakers):
            speaker_diarization(wav_file, speaker_count, 2.0, 0.2, 0.05, dim,
                                plot_res=False)
def music_thumbnailing(signal, sampling_rate, short_window=1.0, short_step=0.5,
                       thumb_size=10.0, limit_1=0, limit_2=1):
    """Find the two most self-similar passages of a music track.

    A self-similarity matrix of short-term features is diagonally smoothed;
    its maximum marks a pair of thumb_size-second passages that repeat.

    :param signal: raw audio samples
    :param sampling_rate: sampling frequency (Hz)
    :param short_window: short-term window size (seconds)
    :param short_step: short-term window step (seconds)
    :param thumb_size: desired thumbnail duration (seconds)
    :param limit_1: fraction of the matrix excluded from the start
    :param limit_2: fraction of the matrix excluded from the end
    :return: (A1, A2, B1, B2, sim_matrix) — start/end times (seconds) of
        the two passages plus the processed similarity matrix
    """
    signal = audioBasicIO.stereo_to_mono(signal)
    # feature extraction:
    st_feats, _ = stf.feature_extraction(signal, sampling_rate,
                                         sampling_rate * short_window,
                                         sampling_rate * short_step)
    # self-similarity matrix
    sim_matrix = self_similarity_matrix(st_feats)
    # moving filter: convolve with a diagonal identity so each cell scores
    # a full thumb_size-long diagonal run of similarity.
    m_filter = int(round(thumb_size / short_step))
    diagonal = np.eye(m_filter, m_filter)
    sim_matrix = scipy.signal.convolve2d(sim_matrix, diagonal, 'valid')
    # post-processing (remove main diagonal elements): suppress the
    # near-diagonal band (trivial self matches) and the lower triangle.
    min_sm = np.min(sim_matrix)
    for i in range(sim_matrix.shape[0]):
        for j in range(sim_matrix.shape[1]):
            if abs(i-j) < 5.0 / short_step or i > j:
                sim_matrix[i, j] = min_sm
    # find max position: first mask out regions outside [limit_1, limit_2].
    sim_matrix[0:int(limit_1 * sim_matrix.shape[0]), :] = min_sm
    sim_matrix[:, 0:int(limit_1 * sim_matrix.shape[0])] = min_sm
    sim_matrix[int(limit_2 * sim_matrix.shape[0])::, :] = min_sm
    sim_matrix[:, int(limit_2 * sim_matrix.shape[0])::] = min_sm
    rows, cols = np.unravel_index(sim_matrix.argmax(), sim_matrix.shape)
    i1 = rows
    i2 = rows
    j1 = cols
    j2 = cols
    # Grow the (i1..i2, j1..j2) window around the maximum, one step at a
    # time toward whichever neighbor is more similar, until it spans the
    # requested thumbnail length or hits the matrix border.
    while i2-i1 < m_filter:
        if i1 <= 0 or j1 <= 0 or i2 >= sim_matrix.shape[0]-2 or \
                j2 >= sim_matrix.shape[1]-2:
            break
        if sim_matrix[i1-1, j1-1] > sim_matrix[i2 + 1, j2 + 1]:
            i1 -= 1
            j1 -= 1
        else:
            i2 += 1
            j2 += 1
    return short_step * i1, short_step * i2, short_step * j1, short_step * j2, \
        sim_matrix
| true | true |
f71b2ed9253b60e916abe7efa50cc6715f2d213c | 2,044 | py | Python | test/crawler/testICrawler.py | AutoDash/AutoDash | 3924795a04159f80ea3b65b2172747babd15f35f | [
"Apache-2.0"
] | 3 | 2020-02-12T01:24:46.000Z | 2020-02-13T00:50:46.000Z | test/crawler/testICrawler.py | AutoDash/AutoDash | 3924795a04159f80ea3b65b2172747babd15f35f | [
"Apache-2.0"
] | 32 | 2020-02-20T10:20:56.000Z | 2022-02-10T01:42:46.000Z | test/crawler/testICrawler.py | AutoDash/AutoDash | 3924795a04159f80ea3b65b2172747babd15f35f | [
"Apache-2.0"
] | 1 | 2020-02-22T02:47:19.000Z | 2020-02-22T02:47:19.000Z | #!/usr/bin/env python3
import unittest
from src.crawler.iCrawler import iCrawler, UndefinedDatabaseException
from src.data.MetaDataItem import MetaDataItem
from test.mock.MockDataAccessor import MockDataAccessor
class MockCrawler(iCrawler):
    """Minimal iCrawler stub that always yields the same metadata item."""

    def __init__(self):
        super().__init__()

    def next_downloadable(self):
        """Return a fixed MetaDataItem so tests receive predictable data."""
        item = MetaDataItem(title="title",
                            url="fake url 1",
                            download_src="youtube")
        return item
class TestICrawler(unittest.TestCase):
    """Unit tests for the iCrawler base-class behaviour via MockCrawler."""

    def setUp(self):
        self.crawler = MockCrawler()
        self.database = MockDataAccessor()

    def test_compiles(self):
        # Sanity check that the harness itself runs.
        self.assertEqual(True, True)

    def test_no_database(self):
        # check_new_url must fail loudly while no database is attached.
        metadata = self.crawler.next_downloadable()
        try:
            self.crawler.check_new_url(metadata.url)
            self.assertTrue(False)
        except UndefinedDatabaseException:
            # Expected: the crawler has no database yet.
            pass

    def test_check_new_url(self):
        # An unknown URL is new; a published URL is no longer new.
        self.crawler.set_database(self.database)
        metadata = self.crawler.next_downloadable()
        self.assertTrue(self.crawler.check_new_url(metadata.url))
        self.database.publish_new_metadata(metadata)
        self.assertFalse(self.crawler.check_new_url(metadata.url))

    def test_run(self):
        # Run the crawler end-to-end and verify the stored item round-trips.
        self.crawler.set_database(self.database)
        metadata = self.crawler.run({})
        self.database.publish_new_metadata(metadata)
        id_list = self.database.fetch_video_id_list()
        self.assertTrue(len(id_list) == 1)
        metadata = self.database.fetch_metadata(id_list[0])
        # Rebuild an identical item and align the generated fields
        # (timestamps/ids differ per creation) before comparing JSON.
        expected = self.crawler.next_downloadable()
        expected.date_created = metadata.date_created
        expected.id = metadata.id
        self.assertEqual(metadata.to_json(), expected.to_json())
if __name__ == '__main__':
    # Allow running this test module directly via the unittest CLI runner.
    unittest.main()
| 29.623188 | 86 | 0.675147 |
import unittest
from src.crawler.iCrawler import iCrawler, UndefinedDatabaseException
from src.data.MetaDataItem import MetaDataItem
from test.mock.MockDataAccessor import MockDataAccessor
class MockCrawler(iCrawler):
    """Minimal iCrawler stub that always yields the same metadata item."""
    def __init__(self):
        super().__init__()
    def next_downloadable(self):
        """Return a fixed MetaDataItem so tests receive predictable data."""
        return MetaDataItem(
            title="title",
            url="fake url 1",
            download_src="youtube")
class TestICrawler(unittest.TestCase):
    """Unit tests for the iCrawler base-class behaviour via MockCrawler."""
    def setUp(self):
        self.crawler = MockCrawler()
        self.database = MockDataAccessor()
    def test_compiles(self):
        """Sanity check that the test harness itself runs."""
        self.assertEqual(True, True)
    def test_no_database(self):
        """check_new_url must raise while no database is attached."""
        metadata = self.crawler.next_downloadable()
        try:
            self.crawler.check_new_url(metadata.url)
            self.assertTrue(False)
        except UndefinedDatabaseException:
            # Expected: the crawler has no database yet.
            pass
    def test_check_new_url(self):
        """An unknown URL is new; a published URL is no longer new."""
        self.crawler.set_database(self.database)
        metadata = self.crawler.next_downloadable()
        self.assertTrue(self.crawler.check_new_url(metadata.url))
        self.database.publish_new_metadata(metadata)
        self.assertFalse(self.crawler.check_new_url(metadata.url))
    def test_run(self):
        """Run end-to-end and verify the stored item round-trips as JSON."""
        self.crawler.set_database(self.database)
        metadata = self.crawler.run({})
        self.database.publish_new_metadata(metadata)
        id_list = self.database.fetch_video_id_list()
        self.assertTrue(len(id_list) == 1)
        metadata = self.database.fetch_metadata(id_list[0])
        # Align generated fields (timestamp, id) before comparing JSON.
        copy_metadata = self.crawler.next_downloadable()
        copy_metadata.date_created = metadata.date_created
        copy_metadata.id = metadata.id
        self.assertEqual(metadata.to_json(), copy_metadata.to_json())
if __name__ == '__main__':
    # Allow running this test module directly via the unittest CLI runner.
    unittest.main()
| true | true |
f71b2f0c6d371df241e52fd406f5828a49387ea4 | 4,395 | py | Python | prepare.py | binmahone/Raven | 40b7e24f14a72af978341c311250f15795be1eb0 | [
"Apache-2.0"
] | 1 | 2021-12-23T02:45:06.000Z | 2021-12-23T02:45:06.000Z | prepare.py | Mukvin/Raven | 40b7e24f14a72af978341c311250f15795be1eb0 | [
"Apache-2.0"
] | null | null | null | prepare.py | Mukvin/Raven | 40b7e24f14a72af978341c311250f15795be1eb0 | [
"Apache-2.0"
] | 2 | 2021-09-16T10:18:01.000Z | 2021-09-17T08:40:47.000Z | import time
import boto3
from lib.Logger import Logger
from lib.popen import subprocess_popen
def prepare():
    """Create and start an AWS EMR benchmark cluster, then record its hosts.

    Launches the cluster via ./cloud/cluster.sh, polls EC2 until exactly
    one MASTER and two SLAVE instances tagged with the new cluster id
    exist, writes their identifiers plus ready-made ssh commands/tunnels
    to ./cloud/instances, and finally polls EMR until the cluster reaches
    the WAITING state.  Relies on the module-level `logger`.

    NOTE(review): the master/slave counts (1/2) are hard-coded and must
    match cluster.sh; the cluster-id parse assumes the script's output
    ends with the id followed by two characters — confirm both.
    """
    # 0. Initialize boto3 clients
    emr = boto3.client('emr')
    ec2 = boto3.client('ec2')
    # 1. Create an EMR cluster on AWS
    logger.info("Creating the EMR cluster...")
    with open("./cloud/cluster.sh", 'r') as f:
        cmd = f.read()
    res = subprocess_popen(cmd)
    # Extract the "j-..." cluster id from the script's stdout.
    cid = res[1][res[1].find("j-"):len(res[1])-2]
    logger.info("Cluster created! Cluster ID is " + cid + ".")
    # 2. Check if all EC2 instances are ready
    logger.info("Creating EC2 instances for the cluster...")
    found_flag = False
    while found_flag is False:
        time.sleep(15)
        masters = []
        slaves = []
        masters_to_find = 1
        slaves_to_find = 2
        reservations = ec2.describe_instances()
        for reservation in reservations['Reservations']:
            for instance in reservation['Instances']:
                # Keep only instances tagged as belonging to our cluster,
                # split by their MASTER/SLAVE group role.  Instances
                # without a 'Tags' key are skipped via the KeyError guard.
                is_instance = False
                try:
                    for tag in instance['Tags']:
                        if tag['Key'] == 'aws:elasticmapreduce:job-flow-id':
                            if tag['Value'] == cid:
                                is_instance = True
                    if is_instance:
                        for tag in instance['Tags']:
                            if tag['Key'] == 'aws:elasticmapreduce:instance-group-role':
                                if tag['Value'] == 'MASTER':
                                    masters.append(instance)
                                else:
                                    slaves.append(instance)
                except KeyError:
                    pass
        if len(masters) == masters_to_find and len(slaves) == slaves_to_find:
            # Record instance identity plus ready-to-use ssh commands and
            # tunnel templates for the operator.
            with open("./cloud/instances", 'w') as f:
                for instance in masters:
                    print(str(instance['ImageId'] + ', ' + instance['InstanceId'] + ', '
                              + instance['InstanceType'] + ', ' + instance['KeyName'] + ', '
                              + instance['PublicDnsName'] + ', ' + instance['PrivateDnsName']), file=f)
                for instance in slaves:
                    print(str(instance['ImageId'] + ', ' + instance['InstanceId'] + ', '
                              + instance['InstanceType'] + ', ' + instance['KeyName'] + ', '
                              + instance['PublicDnsName'] + ', ' + instance['PrivateDnsName']), file=f)
                print("Commands:", file=f)
                for instance in masters:
                    print("ssh -i \"./cloud/" + str(instance['KeyName']) + ".pem\" -o StrictHostKeyCHecking=no hadoop@"
                          + str(instance['PublicDnsName']), file=f)
                for instance in slaves:
                    print("ssh -i \"./cloud/" + str(instance['KeyName']) + ".pem\" -o StrictHostKeyCHecking=no hadoop@"
                          + str(instance['PublicDnsName']), file=f)
                print("Tunnels:", file=f)
                for instance in masters:
                    print("ssh -i \"./cloud/" + str(instance['KeyName']) + ".pem\" -N hadoop@" +
                          str(instance['PublicDnsName']) + " -L PORT:localhost:PORT", file=f)
            found_flag = True
        else:
            logger.info("MASTERs to create: " + str(masters_to_find - len(masters)) + ", "
                        + "SLAVEs to create: " + str(slaves_to_find - len(slaves)) + ".")
    logger.info("All instances are created! Starting cluster...")
    logger.info("It may take up to 10 minutes to start a cluster.")
    # Poll EMR until our cluster reaches the WAITING (ready) state.
    started_flag = False
    while started_flag is False:
        time.sleep(55)
        clusters = emr.list_clusters()
        for cluster in clusters['Clusters']:
            if cluster['Id'] == cid:
                if cluster['Status']['State'] == 'WAITING':
                    started_flag = True
                else:
                    logger.info("Cluster starting, please wait...")
                break
    logger.info("Cluster started!")
    logger.info("Please connect to servers in Shell consoles. IPs to be connected is in ./cloud/instances.csv.")
    logger.info("Remember to edit the configuration of your engine regarding internal network (if needed).")
    return
if __name__ == '__main__':
    # Module-level logger used inside prepare(); logs to ./log/benchmark.log.
    logger = Logger('./log/benchmark.log', 'preparer')
    prepare()
import boto3
from lib.Logger import Logger
from lib.popen import subprocess_popen
def prepare():
    """Create and start an AWS EMR benchmark cluster, then record its hosts.

    Launches the cluster via ./cloud/cluster.sh, polls EC2 until exactly
    one MASTER and two SLAVE instances tagged with the new cluster id
    exist, writes their identifiers plus ready-made ssh commands/tunnels
    to ./cloud/instances, and finally polls EMR until the cluster reaches
    the WAITING state.  Relies on the module-level `logger`.

    NOTE(review): the master/slave counts (1/2) are hard-coded and must
    match cluster.sh; the cluster-id parse assumes the script's output
    ends with the id followed by two characters — confirm both.
    """
    emr = boto3.client('emr')
    ec2 = boto3.client('ec2')
    # Launch the cluster and extract the "j-..." cluster id from stdout.
    logger.info("Creating the EMR cluster...")
    with open("./cloud/cluster.sh", 'r') as f:
        cmd = f.read()
    res = subprocess_popen(cmd)
    cid = res[1][res[1].find("j-"):len(res[1])-2]
    logger.info("Cluster created! Cluster ID is " + cid + ".")
    # Poll EC2 until all expected instances of this cluster exist.
    logger.info("Creating EC2 instances for the cluster...")
    found_flag = False
    while found_flag is False:
        time.sleep(15)
        masters = []
        slaves = []
        masters_to_find = 1
        slaves_to_find = 2
        reservations = ec2.describe_instances()
        for reservation in reservations['Reservations']:
            for instance in reservation['Instances']:
                # Keep only instances tagged with our cluster id, split by
                # MASTER/SLAVE role; untagged instances hit the KeyError.
                is_instance = False
                try:
                    for tag in instance['Tags']:
                        if tag['Key'] == 'aws:elasticmapreduce:job-flow-id':
                            if tag['Value'] == cid:
                                is_instance = True
                    if is_instance:
                        for tag in instance['Tags']:
                            if tag['Key'] == 'aws:elasticmapreduce:instance-group-role':
                                if tag['Value'] == 'MASTER':
                                    masters.append(instance)
                                else:
                                    slaves.append(instance)
                except KeyError:
                    pass
        if len(masters) == masters_to_find and len(slaves) == slaves_to_find:
            # Record instance identity plus ssh commands/tunnel templates.
            with open("./cloud/instances", 'w') as f:
                for instance in masters:
                    print(str(instance['ImageId'] + ', ' + instance['InstanceId'] + ', '
                              + instance['InstanceType'] + ', ' + instance['KeyName'] + ', '
                              + instance['PublicDnsName'] + ', ' + instance['PrivateDnsName']), file=f)
                for instance in slaves:
                    print(str(instance['ImageId'] + ', ' + instance['InstanceId'] + ', '
                              + instance['InstanceType'] + ', ' + instance['KeyName'] + ', '
                              + instance['PublicDnsName'] + ', ' + instance['PrivateDnsName']), file=f)
                print("Commands:", file=f)
                for instance in masters:
                    print("ssh -i \"./cloud/" + str(instance['KeyName']) + ".pem\" -o StrictHostKeyCHecking=no hadoop@"
                          + str(instance['PublicDnsName']), file=f)
                for instance in slaves:
                    print("ssh -i \"./cloud/" + str(instance['KeyName']) + ".pem\" -o StrictHostKeyCHecking=no hadoop@"
                          + str(instance['PublicDnsName']), file=f)
                print("Tunnels:", file=f)
                for instance in masters:
                    print("ssh -i \"./cloud/" + str(instance['KeyName']) + ".pem\" -N hadoop@" +
                          str(instance['PublicDnsName']) + " -L PORT:localhost:PORT", file=f)
            found_flag = True
        else:
            logger.info("MASTERs to create: " + str(masters_to_find - len(masters)) + ", "
                        + "SLAVEs to create: " + str(slaves_to_find - len(slaves)) + ".")
    logger.info("All instances are created! Starting cluster...")
    logger.info("It may take up to 10 minutes to start a cluster.")
    # Poll EMR until our cluster reaches the WAITING (ready) state.
    started_flag = False
    while started_flag is False:
        time.sleep(55)
        clusters = emr.list_clusters()
        for cluster in clusters['Clusters']:
            if cluster['Id'] == cid:
                if cluster['Status']['State'] == 'WAITING':
                    started_flag = True
                else:
                    logger.info("Cluster starting, please wait...")
                break
    logger.info("Cluster started!")
    logger.info("Please connect to servers in Shell consoles. IPs to be connected is in ./cloud/instances.csv.")
    logger.info("Remember to edit the configuration of your engine regarding internal network (if needed).")
    return
if __name__ == '__main__':
    # Module-level logger used inside prepare(); logs to ./log/benchmark.log.
    logger = Logger('./log/benchmark.log', 'preparer')
    prepare()
f71b2fb9a2d9df9315262d217475ffce3958a2f8 | 9,376 | py | Python | src/snowflake/connector/auth_okta.py | groodt/snowflake-connector-python | 26d0a36cb9a65a728e745f077bd11ab536d386f8 | [
"Apache-2.0"
] | 3 | 2021-03-05T22:01:00.000Z | 2021-04-02T17:48:33.000Z | src/snowflake/connector/auth_okta.py | groodt/snowflake-connector-python | 26d0a36cb9a65a728e745f077bd11ab536d386f8 | [
"Apache-2.0"
] | 26 | 2021-06-01T09:43:42.000Z | 2022-03-16T15:11:52.000Z | src/snowflake/connector/auth_okta.py | groodt/snowflake-connector-python | 26d0a36cb9a65a728e745f077bd11ab536d386f8 | [
"Apache-2.0"
] | 1 | 2021-03-05T22:08:46.000Z | 2021-03-05T22:08:46.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2020 Snowflake Computing Inc. All right reserved.
#
import json
import logging
from .auth import Auth
from .auth_by_plugin import AuthByPlugin
from .compat import unescape, urlencode, urlsplit
from .constants import HTTP_HEADER_ACCEPT, HTTP_HEADER_CONTENT_TYPE, HTTP_HEADER_SERVICE_NAME, HTTP_HEADER_USER_AGENT
from .errorcode import ER_IDP_CONNECTION_ERROR, ER_INCORRECT_DESTINATION
from .errors import DatabaseError, Error
from .network import CONTENT_TYPE_APPLICATION_JSON, PYTHON_CONNECTOR_USER_AGENT
from .sqlstate import SQLSTATE_CONNECTION_WAS_NOT_ESTABLISHED
logger = logging.getLogger(__name__)
def _is_prefix_equal(url1, url2):
"""Checks if URL prefixes are identical.
The scheme, hostname and port number are compared. If the port number is not specified and the scheme is https,
the port number is assumed to be 443.
"""
parsed_url1 = urlsplit(url1)
parsed_url2 = urlsplit(url2)
port1 = parsed_url1.port
if not port1 and parsed_url1.scheme == 'https':
port1 = '443'
port2 = parsed_url1.port
if not port2 and parsed_url2.scheme == 'https':
port2 = '443'
return parsed_url1.hostname == parsed_url2.hostname and \
port1 == port2 and \
parsed_url1.scheme == parsed_url2.scheme
def _get_post_back_url_from_html(html):
    """Extract the post-back URL from the first <form> in the HTML.

    The IDP response is not guaranteed to be well-formed, so instead of a
    DOM parser the action attribute of the first discovered form is
    located by plain string search and its value HTML-unescaped.
    """
    logger.debug(html)

    action_marker = 'action="'
    form_pos = html.find('<form')
    value_start = html.find(action_marker, form_pos) + len(action_marker)
    value_end = html.find('"', value_start)
    return unescape(html[value_start:value_end])
class AuthByOkta(AuthByPlugin):
"""Authenticate user by OKTA."""
    def __init__(self, rest, application):
        """Store the REST client and application name for authentication.

        :param rest: REST client used to issue the GS/IDP requests
        :param application: application name sent with the login request
        """
        self._rest = rest
        # Raw SAML assertion captured by authenticate(); None until then.
        self._saml_response = None
        self._application = application
@property
def assertion_content(self):
return self._saml_response
def update_body(self, body):
body['data']['RAW_SAML_RESPONSE'] = self._saml_response
def authenticate(
self, authenticator, service_name, account, user, password):
"""SAML Authentication.
Steps are:
1. query GS to obtain IDP token and SSO url
2. IMPORTANT Client side validation:
validate both token url and sso url contains same prefix
(protocol + host + port) as the given authenticator url.
Explanation:
This provides a way for the user to 'authenticate' the IDP it is
sending his/her credentials to. Without such a check, the user could
be coerced to provide credentials to an IDP impersonator.
3. query IDP token url to authenticate and retrieve access token
4. given access token, query IDP URL snowflake app to get SAML response
5. IMPORTANT Client side validation:
validate the post back url come back with the SAML response
contains the same prefix as the Snowflake's server url, which is the
intended destination url to Snowflake.
Explanation:
This emulates the behavior of IDP initiated login flow in the user
browser where the IDP instructs the browser to POST the SAML
assertion to the specific SP endpoint. This is critical in
preventing a SAML assertion issued to one SP from being sent to
another SP.
"""
logger.debug('authenticating by SAML')
headers, sso_url, token_url = self._step1(
authenticator, service_name, account, user)
self._step2(authenticator, sso_url, token_url)
one_time_token = self._step3(headers, token_url, user, password)
response_html = self._step4(one_time_token, sso_url)
self._step5(response_html)
def _step1(self, authenticator, service_name, account, user):
logger.debug('step 1: query GS to obtain IDP token and SSO url')
headers = {
HTTP_HEADER_CONTENT_TYPE: CONTENT_TYPE_APPLICATION_JSON,
HTTP_HEADER_ACCEPT: CONTENT_TYPE_APPLICATION_JSON,
HTTP_HEADER_USER_AGENT: PYTHON_CONNECTOR_USER_AGENT,
}
if service_name:
headers[HTTP_HEADER_SERVICE_NAME] = service_name
url = "/session/authenticator-request"
body = Auth.base_auth_data(
user, account,
self._rest._connection.application,
self._rest._connection._internal_application_name,
self._rest._connection._internal_application_version,
self._rest._connection._ocsp_mode(),
self._rest._connection._login_timeout,
self._rest._connection._network_timeout,
)
body["data"]["AUTHENTICATOR"] = authenticator
logger.debug(
'account=%s, authenticator=%s',
account, authenticator,
)
ret = self._rest._post_request(
url, headers, json.dumps(body),
timeout=self._rest._connection.login_timeout,
socket_timeout=self._rest._connection.login_timeout)
if not ret['success']:
self.handle_failure(ret)
data = ret['data']
token_url = data['tokenUrl']
sso_url = data['ssoUrl']
return headers, sso_url, token_url
def _step2(self, authenticator, sso_url, token_url):
logger.debug('step 2: validate Token and SSO URL has the same prefix '
'as authenticator')
if not _is_prefix_equal(authenticator, token_url) or \
not _is_prefix_equal(authenticator, sso_url):
Error.errorhandler_wrapper(
self._rest._connection, None, DatabaseError,
{
'msg': ("The specified authenticator is not supported: "
"{authenticator}, token_url: {token_url}, "
"sso_url: {sso_url}".format(
authenticator=authenticator,
token_url=token_url,
sso_url=sso_url,
)),
'errno': ER_IDP_CONNECTION_ERROR,
'sqlstate': SQLSTATE_CONNECTION_WAS_NOT_ESTABLISHED
}
)
def _step3(self, headers, token_url, user, password):
logger.debug('step 3: query IDP token url to authenticate and '
'retrieve access token')
data = {
'username': user,
'password': password,
}
ret = self._rest.fetch(
'post', token_url, headers,
data=json.dumps(data),
timeout=self._rest._connection.login_timeout,
socket_timeout=self._rest._connection.login_timeout,
catch_okta_unauthorized_error=True)
one_time_token = ret.get('cookieToken')
if not one_time_token:
Error.errorhandler_wrapper(
self._rest._connection, None, DatabaseError,
{
'msg': ("The authentication failed for {user} "
"by {token_url}.".format(
token_url=token_url,
user=user,
)),
'errno': ER_IDP_CONNECTION_ERROR,
'sqlstate': SQLSTATE_CONNECTION_WAS_NOT_ESTABLISHED
}
)
return one_time_token
def _step4(self, one_time_token, sso_url):
logger.debug('step 4: query IDP URL snowflake app to get SAML '
'response')
url_parameters = {
'RelayState': "/some/deep/link",
'onetimetoken': one_time_token,
}
sso_url = sso_url + '?' + urlencode(url_parameters)
headers = {
HTTP_HEADER_ACCEPT: '*/*',
}
response_html = self._rest.fetch(
'get', sso_url, headers,
timeout=self._rest._connection.login_timeout,
socket_timeout=self._rest._connection.login_timeout,
is_raw_text=True)
return response_html
def _step5(self, response_html):
logger.debug('step 5: validate post_back_url matches Snowflake URL')
post_back_url = _get_post_back_url_from_html(response_html)
full_url = '{protocol}://{host}:{port}'.format(
protocol=self._rest._protocol,
host=self._rest._host,
port=self._rest._port,
)
if not _is_prefix_equal(post_back_url, full_url):
Error.errorhandler_wrapper(
self._rest._connection, None, DatabaseError,
{
'msg': ("The specified authenticator and destination "
"URL in the SAML assertion do not match: "
"expected: {url}, "
"post back: {post_back_url}".format(
url=full_url,
post_back_url=post_back_url,
)),
'errno': ER_INCORRECT_DESTINATION,
'sqlstate': SQLSTATE_CONNECTION_WAS_NOT_ESTABLISHED
}
)
self._saml_response = response_html
| 39.561181 | 117 | 0.611455 |
import json
import logging
from .auth import Auth
from .auth_by_plugin import AuthByPlugin
from .compat import unescape, urlencode, urlsplit
from .constants import HTTP_HEADER_ACCEPT, HTTP_HEADER_CONTENT_TYPE, HTTP_HEADER_SERVICE_NAME, HTTP_HEADER_USER_AGENT
from .errorcode import ER_IDP_CONNECTION_ERROR, ER_INCORRECT_DESTINATION
from .errors import DatabaseError, Error
from .network import CONTENT_TYPE_APPLICATION_JSON, PYTHON_CONNECTOR_USER_AGENT
from .sqlstate import SQLSTATE_CONNECTION_WAS_NOT_ESTABLISHED
logger = logging.getLogger(__name__)
def _is_prefix_equal(url1, url2):
parsed_url1 = urlsplit(url1)
parsed_url2 = urlsplit(url2)
port1 = parsed_url1.port
if not port1 and parsed_url1.scheme == 'https':
port1 = '443'
port2 = parsed_url1.port
if not port2 and parsed_url2.scheme == 'https':
port2 = '443'
return parsed_url1.hostname == parsed_url2.hostname and \
port1 == port2 and \
parsed_url1.scheme == parsed_url2.scheme
def _get_post_back_url_from_html(html):
logger.debug(html)
idx = html.find('<form')
start_idx = html.find('action="', idx)
end_idx = html.find('"', start_idx + 8)
return unescape(html[start_idx + 8:end_idx])
class AuthByOkta(AuthByPlugin):
def __init__(self, rest, application):
self._rest = rest
self._saml_response = None
self._application = application
@property
def assertion_content(self):
return self._saml_response
def update_body(self, body):
body['data']['RAW_SAML_RESPONSE'] = self._saml_response
def authenticate(
self, authenticator, service_name, account, user, password):
logger.debug('authenticating by SAML')
headers, sso_url, token_url = self._step1(
authenticator, service_name, account, user)
self._step2(authenticator, sso_url, token_url)
one_time_token = self._step3(headers, token_url, user, password)
response_html = self._step4(one_time_token, sso_url)
self._step5(response_html)
def _step1(self, authenticator, service_name, account, user):
logger.debug('step 1: query GS to obtain IDP token and SSO url')
headers = {
HTTP_HEADER_CONTENT_TYPE: CONTENT_TYPE_APPLICATION_JSON,
HTTP_HEADER_ACCEPT: CONTENT_TYPE_APPLICATION_JSON,
HTTP_HEADER_USER_AGENT: PYTHON_CONNECTOR_USER_AGENT,
}
if service_name:
headers[HTTP_HEADER_SERVICE_NAME] = service_name
url = "/session/authenticator-request"
body = Auth.base_auth_data(
user, account,
self._rest._connection.application,
self._rest._connection._internal_application_name,
self._rest._connection._internal_application_version,
self._rest._connection._ocsp_mode(),
self._rest._connection._login_timeout,
self._rest._connection._network_timeout,
)
body["data"]["AUTHENTICATOR"] = authenticator
logger.debug(
'account=%s, authenticator=%s',
account, authenticator,
)
ret = self._rest._post_request(
url, headers, json.dumps(body),
timeout=self._rest._connection.login_timeout,
socket_timeout=self._rest._connection.login_timeout)
if not ret['success']:
self.handle_failure(ret)
data = ret['data']
token_url = data['tokenUrl']
sso_url = data['ssoUrl']
return headers, sso_url, token_url
def _step2(self, authenticator, sso_url, token_url):
logger.debug('step 2: validate Token and SSO URL has the same prefix '
'as authenticator')
if not _is_prefix_equal(authenticator, token_url) or \
not _is_prefix_equal(authenticator, sso_url):
Error.errorhandler_wrapper(
self._rest._connection, None, DatabaseError,
{
'msg': ("The specified authenticator is not supported: "
"{authenticator}, token_url: {token_url}, "
"sso_url: {sso_url}".format(
authenticator=authenticator,
token_url=token_url,
sso_url=sso_url,
)),
'errno': ER_IDP_CONNECTION_ERROR,
'sqlstate': SQLSTATE_CONNECTION_WAS_NOT_ESTABLISHED
}
)
def _step3(self, headers, token_url, user, password):
logger.debug('step 3: query IDP token url to authenticate and '
'retrieve access token')
data = {
'username': user,
'password': password,
}
ret = self._rest.fetch(
'post', token_url, headers,
data=json.dumps(data),
timeout=self._rest._connection.login_timeout,
socket_timeout=self._rest._connection.login_timeout,
catch_okta_unauthorized_error=True)
one_time_token = ret.get('cookieToken')
if not one_time_token:
Error.errorhandler_wrapper(
self._rest._connection, None, DatabaseError,
{
'msg': ("The authentication failed for {user} "
"by {token_url}.".format(
token_url=token_url,
user=user,
)),
'errno': ER_IDP_CONNECTION_ERROR,
'sqlstate': SQLSTATE_CONNECTION_WAS_NOT_ESTABLISHED
}
)
return one_time_token
def _step4(self, one_time_token, sso_url):
logger.debug('step 4: query IDP URL snowflake app to get SAML '
'response')
url_parameters = {
'RelayState': "/some/deep/link",
'onetimetoken': one_time_token,
}
sso_url = sso_url + '?' + urlencode(url_parameters)
headers = {
HTTP_HEADER_ACCEPT: '*/*',
}
response_html = self._rest.fetch(
'get', sso_url, headers,
timeout=self._rest._connection.login_timeout,
socket_timeout=self._rest._connection.login_timeout,
is_raw_text=True)
return response_html
def _step5(self, response_html):
logger.debug('step 5: validate post_back_url matches Snowflake URL')
post_back_url = _get_post_back_url_from_html(response_html)
full_url = '{protocol}://{host}:{port}'.format(
protocol=self._rest._protocol,
host=self._rest._host,
port=self._rest._port,
)
if not _is_prefix_equal(post_back_url, full_url):
Error.errorhandler_wrapper(
self._rest._connection, None, DatabaseError,
{
'msg': ("The specified authenticator and destination "
"URL in the SAML assertion do not match: "
"expected: {url}, "
"post back: {post_back_url}".format(
url=full_url,
post_back_url=post_back_url,
)),
'errno': ER_INCORRECT_DESTINATION,
'sqlstate': SQLSTATE_CONNECTION_WAS_NOT_ESTABLISHED
}
)
self._saml_response = response_html
| true | true |
f71b2fcfe4e1bf00db7f8c9aa8bc9eac22fc3f1c | 2,248 | py | Python | mobile/db/prompts.py | TRIP-Lab/itinerum-mobile-api | a3b31b411d9d46434c54be1d21415024dec86ae7 | [
"MIT"
] | 4 | 2019-02-04T15:34:49.000Z | 2021-04-30T17:33:05.000Z | mobile/db/prompts.py | TRIP-Lab/itinerum-mobile-api | a3b31b411d9d46434c54be1d21415024dec86ae7 | [
"MIT"
] | 1 | 2021-03-19T22:26:11.000Z | 2021-03-19T22:26:11.000Z | mobile/db/prompts.py | TRIP-Lab/itinerum-mobile-api | a3b31b411d9d46434c54be1d21415024dec86ae7 | [
"MIT"
] | 5 | 2018-03-13T20:24:55.000Z | 2021-01-18T14:44:35.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Kyle Fitzsimmons, 2017
from datetime import datetime
import pytz
from models import db, PromptResponse
class MobilePromptsActions:
    """Database actions for creating and updating mobile prompt responses."""

    def get(self, prompts_uuids):
        """Return a query of PromptResponse rows whose prompt_uuid is in `prompts_uuids`."""
        prompts_filters = PromptResponse.prompt_uuid.in_(prompts_uuids)
        return db.session.query(PromptResponse).filter(prompts_filters)

    # formats a prompts query to a lookup dictionary
    # [p0, p1, p2] --> {p0_uuid: {p0_prompt_num: p0}, ...}
    def create_lookup(self, prompts):
        """Index prompt rows first by prompt_uuid, then by prompt_num."""
        prompts_lookup = {}
        for p in prompts:
            prompts_lookup.setdefault(p.prompt_uuid, {})
            prompts_lookup[p.prompt_uuid][p.prompt_num] = p
        return prompts_lookup

    def upsert(self, user, prompts):
        """Insert new prompt responses or update existing ones in bulk.

        `prompts` is a list of dicts with keys: uuid, prompt_num, answer,
        displayed_at (or legacy `timestamp`), recorded_at, latitude and
        longitude. Returns the list of PromptResponse objects saved.
        """
        prompts_uuids = {p['uuid'] for p in prompts}
        existing_prompts = self.get(prompts_uuids)
        existing_lookup = self.create_lookup(existing_prompts)

        responses = []
        for prompt in prompts:
            # gracefully handle change of 'timestamp' -> 'displayed_at'
            if 'timestamp' in prompt:
                prompt['displayed_at'] = prompt.pop('timestamp')

            uuid = prompt['uuid']
            prompt_num = int(prompt['prompt_num'])
            # BUG FIX: also check prompt_num. A known uuid arriving with a
            # not-yet-recorded prompt_num previously raised KeyError here.
            if uuid in existing_lookup and prompt_num in existing_lookup[uuid]:
                response = existing_lookup[uuid][prompt_num]
                response.response = prompt['answer']
                response.recorded_at = prompt['recorded_at']
                response.latitude = prompt['latitude']
                response.longitude = prompt['longitude']
                response.edited_at = datetime.now(pytz.utc)
            else:
                response = PromptResponse(
                    survey_id=user.survey_id,
                    mobile_id=user.id,
                    prompt_uuid=uuid,
                    prompt_num=prompt_num,
                    response=prompt['answer'],
                    displayed_at=prompt['displayed_at'],
                    recorded_at=prompt['recorded_at'],
                    latitude=prompt['latitude'],
                    longitude=prompt['longitude'])
            responses.append(response)
        db.session.bulk_save_objects(responses)
        db.session.commit()
        return responses
| 36.852459 | 71 | 0.589858 |
from datetime import datetime
import pytz
from models import db, PromptResponse
class MobilePromptsActions:
def get(self, prompts_uuids):
prompts_filters = PromptResponse.prompt_uuid.in_(prompts_uuids)
return db.session.query(PromptResponse).filter(prompts_filters)
def create_lookup(self, prompts):
prompts_lookup = {}
for p in prompts:
prompts_lookup.setdefault(p.prompt_uuid, {})
prompts_lookup[p.prompt_uuid][p.prompt_num] = p
return prompts_lookup
def upsert(self, user, prompts):
prompts_uuids = {p['uuid'] for p in prompts}
existing_prompts = self.get(prompts_uuids)
existing_lookup = self.create_lookup(existing_prompts)
responses = []
for prompt in prompts:
if 'timestamp' in prompt:
prompt['displayed_at'] = prompt.pop('timestamp')
uuid = prompt['uuid']
prompt_num = int(prompt['prompt_num'])
if uuid in existing_lookup:
response = existing_lookup[uuid][prompt_num]
response.response = prompt['answer']
response.recorded_at = prompt['recorded_at']
response.latitude = prompt['latitude']
response.longitude = prompt['longitude']
response.edited_at = datetime.now(pytz.utc)
else:
response = PromptResponse(
survey_id=user.survey_id,
mobile_id=user.id,
prompt_uuid=uuid,
prompt_num=prompt_num,
response=prompt['answer'],
displayed_at=prompt['displayed_at'],
recorded_at=prompt['recorded_at'],
latitude=prompt['latitude'],
longitude=prompt['longitude'])
responses.append(response)
db.session.bulk_save_objects(responses)
db.session.commit()
return responses
| true | true |
f71b2fd793a7e41ee094ac47d7408dbb3c13f221 | 804 | py | Python | garf_data.py | doesnotsitproperly/garfbot | 04e2a8409bd171ca29b6cdb5e864fe2fa1c13b6d | [
"Unlicense"
] | null | null | null | garf_data.py | doesnotsitproperly/garfbot | 04e2a8409bd171ca29b6cdb5e864fe2fa1c13b6d | [
"Unlicense"
] | null | null | null | garf_data.py | doesnotsitproperly/garfbot | 04e2a8409bd171ca29b6cdb5e864fe2fa1c13b6d | [
"Unlicense"
] | null | null | null | import json, os
class GarfData:
    """Bot configuration persisted as garf_data.json in the working directory."""
    file = os.path.join(os.getcwd(), "garf_data.json")
    token: str
    path_to_ffmpeg: str
    jokes: list
    trigger_words: list

    def __init__(self):
        """Populate every field from the JSON file on disk."""
        with open(self.file, "r") as fp:
            data = json.load(fp)
            self.token = data["token"]
            self.path_to_ffmpeg = data["path_to_ffmpeg"]
            self.jokes = data["jokes"]
            self.trigger_words = data["trigger_words"]

    def overwrite(self):
        """Serialize the current field values back to the JSON file."""
        payload = {
            "token": self.token,
            "path_to_ffmpeg": self.path_to_ffmpeg,
            "jokes": self.jokes,
            "trigger_words": self.trigger_words,
        }
        with open(self.file, "w") as fp:
            fp.write(json.dumps(payload, indent=4) + os.linesep)
| 27.724138 | 67 | 0.584577 | import json, os
class GarfData:
file = os.path.join(os.getcwd(), "garf_data.json")
token: str
path_to_ffmpeg: str
jokes: list
trigger_words: list
def __init__(self):
with open(self.file, "r") as f:
json_dict = json.loads(f.read())
self.token = json_dict["token"]
self.path_to_ffmpeg = json_dict["path_to_ffmpeg"]
self.jokes = json_dict["jokes"]
self.trigger_words = json_dict["trigger_words"]
def overwrite(self):
json_dict = {
"token": self.token,
"path_to_ffmpeg": self.path_to_ffmpeg,
"jokes": self.jokes,
"trigger_words": self.trigger_words
}
with open(self.file, "w") as f:
f.write(json.dumps(json_dict, indent = 4) + os.linesep)
| true | true |
f71b2fde98fb40c654242c9096ab88064334974c | 33,682 | py | Python | models/adaptation_model_stage1.py | BwCai/DCAA-UDA | 359c2122060aebfbe4384c918768c261fe2dc9c7 | [
"Apache-2.0"
] | 2 | 2022-01-28T10:35:53.000Z | 2022-03-09T14:38:59.000Z | models/adaptation_model_stage1.py | BwCai/DCAA-UDA | 359c2122060aebfbe4384c918768c261fe2dc9c7 | [
"Apache-2.0"
] | 1 | 2022-03-07T10:48:11.000Z | 2022-03-07T10:48:11.000Z | models/adaptation_model_stage1.py | BwCai/DCAA-UDA | 359c2122060aebfbe4384c918768c261fe2dc9c7 | [
"Apache-2.0"
] | null | null | null | from models.base_model import BaseModel
import torch.nn as nn
import torch.nn.functional as F
import os, sys
import torch
import numpy as np
import itertools
from torch.autograd import Variable
from optimizers import get_optimizer
from schedulers import get_scheduler
from models.sync_batchnorm import SynchronizedBatchNorm2d, DataParallelWithCallback
from models.deeplab_multimodal import DeepLab
from models.decoder import Decoder
from models.aspp import ASPP
from models.discriminator import FCDiscriminator, FCDiscriminator_low, FCDiscriminator_out, FCDiscriminator_class
from loss import get_loss_function
from .utils import freeze_bn, GradReverse, normalisation_pooling
from metrics import runningScore
import pdb
def multimodal_merger(multi_modal_data, is_upsample=False, up_size=None):
    """Merge per-modal class features and predictions into combined outputs.

    Args:
        multi_modal_data: dict with keys
            'feat_cls': list of per-modal class-feature tensors,
            'output':   list of per-modal prediction tensors.
        is_upsample: when True, bilinearly resize every prediction to
            `up_size` (in place in the list) before summing.
        up_size: target spatial size used when `is_upsample` is set.

    Returns:
        dict with keys:
            'feat_cls':     the input feature list (unchanged),
            'feat_cls_cat': channel-wise concatenation of the features,
            'output_comb':  element-wise sum of all predictions,
            'output':       the (possibly resized) prediction list.
    """
    feat_cls = multi_modal_data['feat_cls']
    preds = multi_modal_data['output']
    # Channel-wise concatenation of the per-modal class features.
    feat_cls_cat = torch.cat(feat_cls, 1)
    # Accumulate the prediction sum; resized tensors replace the originals
    # in the list, matching the in-place behavior callers rely on.
    pred_sum = 0
    for idx, pred in enumerate(preds):
        if is_upsample:
            pred = F.interpolate(pred, size=up_size, mode='bilinear', align_corners=True)
            preds[idx] = pred
        pred_sum = pred_sum + pred
    return {
        'feat_cls': feat_cls,
        'feat_cls_cat': feat_cls_cat,
        'output_comb': pred_sum,
        'output': preds,
    }
class CustomMetricsMultimodalMerger():
    """Merge per-modal thresholds and prototype vectors for metric computation.

    The wrapped model is expected to expose:
        class_threshold_group:   tensor of shape (modal_num, category_num),
        objective_vectors_group: tensor of shape (modal_num, category_num, feat_dim).
    `modal_ids` arguments select which modal branches participate in a merge.
    """
    def __init__(self, modal_num, category_num, model):
        self.modal_num = modal_num
        self.category_num = category_num
        self._model = model

    def initialize_model(self, model):
        """Attach (or replace) the model the merge methods read from.

        BUG FIX: `self` was missing from the signature, so calling this
        method raised immediately.
        """
        self._model = model

    def merge_class_threshold(self, modal_ids=[]):
        """Average the per-class softmax thresholds of the selected modals -> (category_num,)."""
        assert self._model is not None, "[ERROR] Deeplab Model not initialize before using!"
        _class_threshold_group = self._model.class_threshold_group[modal_ids]
        return torch.mean(_class_threshold_group, dim=0)  # modal_num x 19 --> 19

    def merge_clu_threshold(self, clu_threshold, modal_ids=[]):
        """Average cluster-margin thresholds of the selected modals -> (category_num,)."""
        _clu_threshold_group = clu_threshold[modal_ids]
        return torch.mean(_clu_threshold_group, dim=0)

    def merge_objective_vectors(self, modal_ids=[]):
        """Concatenate the selected modals' prototypes per class.

        Returns a (category_num, len(modal_ids) * feat_dim) tensor.
        """
        assert self._model is not None, "[ERROR] Deeplab Model not initialize before using!"
        _modal_num, _cate_num, _feat_dim = self._model.objective_vectors_group.size()
        _objective_vectors = self._model.objective_vectors_group[modal_ids]
        # modal_num x 19 x 256 --> 19 x modal_num x 256 --> 19 x (modal_num x 256)
        # BUG FIX: selecting rows of a 3-D tensor yields a 3-D tensor; the
        # previous `dim() == 4` assertion could never hold.
        assert _objective_vectors.dim() == 3, "objective_vector dimension != 3"
        _objective_vectors = _objective_vectors.permute(1, 0, 2).contiguous()
        return _objective_vectors.view(_cate_num, -1)
class CustomMetrics():
    """Recall bookkeeping for threshold- and prototype-based pseudo labels.

    Tracks per-class recall for two pseudo-label selection strategies:
    softmax-threshold selection and cluster (prototype-distance) selection.
    """
    def __init__(self, numbers=19, modal_num=3, model=None):
        self.class_numbers = numbers
        # Per-class counters: col 0 = correctly recalled px, col 1 = GT px,
        # col 2 = predicted px (see calc_recall / update).
        self.classes_recall_thr = np.zeros([19, 3])
        self.classes_recall_thr_num = np.zeros([19])
        self.classes_recall_clu = np.zeros([19, 3])
        self.classes_recall_clu_num = np.zeros([19])
        self.running_metrics_val_threshold = runningScore(self.class_numbers)
        self.running_metrics_val_clusters = runningScore(self.class_numbers)
        # Required margin between closest and second-closest prototype.
        self.clu_threshold = torch.full((modal_num + 1, 19), 2.5).cuda()
        self.multimodal_merger = CustomMetricsMultimodalMerger(
            modal_num=modal_num + 1, category_num=numbers, model=model
        )

    def update(self, feat_cls, outputs, labels, modal_ids=[0,]):
        '''calculate accuracy. caring about recall but not IoU'''
        batch, width, height = labels.shape
        labels = labels.reshape([batch, 1, width, height]).float()
        labels = F.interpolate(labels, size=feat_cls.size()[2:], mode='nearest')

        # --- threshold-based pseudo labels ---
        outputs_threshold = outputs.clone()
        outputs_threshold = F.softmax(outputs_threshold, dim=1)
        self.running_metrics_val_threshold.update(labels, outputs_threshold.argmax(1))
        # BUG FIX: was `modal_ids=modal_idx` -- an undefined name (NameError).
        _class_threshold_set = self.multimodal_merger.merge_class_threshold(modal_ids=modal_ids)
        for i in range(19):
            outputs_threshold[:, i, :, :] = torch.where(outputs_threshold[:, i, :, :] > _class_threshold_set[i], torch.Tensor([1]).cuda(), torch.Tensor([0]).cuda())
        _batch, _channel, _w, _h = outputs_threshold.shape
        # Extra low-confidence channel so pixels below every threshold fall
        # into class 19, which is then mapped to the ignore index.
        _tmp = torch.full([_batch, 1, _w, _h], 0.2,).cuda()
        _tmp = torch.cat((outputs_threshold, _tmp), 1)
        threshold_arg = _tmp.argmax(1, keepdim=True)
        threshold_arg[threshold_arg == 19] = 250 #ignore index
        truth, pred_all, truth_all = self.calc_recall(labels.cpu().int().numpy(), threshold_arg.cpu().int().numpy())
        self.classes_recall_thr[:, 0] += truth
        self.classes_recall_thr[:, 2] += pred_all
        self.classes_recall_thr[:, 1] += truth_all

        # --- prototype (cluster-distance) pseudo labels ---
        outputs_cluster = outputs.clone()
        # BUG FIX: was `modal_ids=modal_idx` -- an undefined name (NameError).
        _objective_vectors_set = self.multimodal_merger.merge_objective_vectors(modal_ids=modal_ids)
        for i in range(19):
            outputs_cluster[:, i, :, :] = torch.norm( _objective_vectors_set[i].reshape(-1,1,1).expand(-1,128,256) - feat_cls, 2, dim=1,)
        outputs_cluster_min, outputs_cluster_arg = outputs_cluster.min(dim=1, keepdim=True)
        # Mask out the closest prototype, then take the min again to find the
        # second-closest; sanity-check the scatter actually hit the argmin.
        outputs_cluster_second = outputs_cluster.scatter_(1, outputs_cluster_arg, 100)
        if torch.unique(outputs_cluster_second.argmax(1) - outputs_cluster_arg.squeeze()).squeeze().item() != 0:
            raise NotImplementedError('wrong when computing L2 norm!!')
        outputs_cluster_secondmin, outputs_cluster_secondarg = outputs_cluster_second.min(dim=1, keepdim=True)
        self.running_metrics_val_clusters.update(labels, outputs_cluster_arg)
        tmp_arg = outputs_cluster_arg.clone()
        # NOTE: a stray `pdb.set_trace()` debugging breakpoint was removed here.
        _clu_thresholds = self.multimodal_merger.merge_clu_threshold(self.clu_threshold, modal_ids=modal_ids)
        # Ignore pixels whose best/second-best prototype margin is too small.
        outputs_cluster_arg[(outputs_cluster_secondmin - outputs_cluster_min) < _clu_thresholds] = 250
        truth, pred_all, truth_all = self.calc_recall(labels.cpu().int().numpy(), outputs_cluster_arg.cpu().int().numpy())
        self.classes_recall_clu[:, 0] += truth
        self.classes_recall_clu[:, 2] += pred_all
        self.classes_recall_clu[:, 1] += truth_all
        return threshold_arg, outputs_cluster_arg

    def calc_recall(self, gt, argmax):
        """Per-class pixel counts: (correctly recalled, predicted, ground truth)."""
        truth = np.zeros([self.class_numbers])
        pred_all = np.zeros([self.class_numbers])
        truth_all = np.zeros([self.class_numbers])
        for i in range(self.class_numbers):
            truth[i] = (gt == i)[argmax == i].sum()
            pred_all[i] = (argmax == i).sum()
            truth_all[i] = (gt == i).sum()
        return truth, pred_all, truth_all

    def calc_mean_Clu_recall(self, ):
        """Mean per-class recall of the cluster-based pseudo labels."""
        return np.mean(self.classes_recall_clu[:, 0] / self.classes_recall_clu[:, 1])

    def calc_mean_Thr_recall(self, ):
        """Mean per-class recall of the threshold-based pseudo labels."""
        return np.mean(self.classes_recall_thr[:, 0] / self.classes_recall_thr[:, 1])

    def reset(self, ):
        """Clear running confusion matrices and per-class recall counters."""
        self.running_metrics_val_clusters.reset()
        self.running_metrics_val_threshold.reset()
        self.classes_recall_clu = np.zeros([19, 3])
        self.classes_recall_thr = np.zeros([19, 3])
class CustomModel():
    def __init__(self, cfg, writer, logger, use_pseudo_label=False, modal_num=3):
        """Build segmentation network, per-modal discriminators, optimizers and schedulers.

        Args:
            cfg: full experiment config dict ('model' and 'training' sections used).
            writer: summary writer (stored for later logging).
            logger: logger instance.
            use_pseudo_label: when True, additionally build a frozen PredNet
                for pseudo-label generation and optimize all BaseNet params.
            modal_num: number of modal branches (one extra fused branch is
                appended everywhere, hence the recurring `modal_num + 1`).
        """
        self.cfg = cfg
        self.writer = writer
        self.class_numbers = 19
        self.logger = logger
        cfg_model = cfg['model']
        self.cfg_model = cfg_model
        self.best_iou = -100
        self.iter = 0
        self.nets = []
        self.split_gpu = 0
        self.default_gpu = cfg['model']['default_gpu']
        self.PredNet_Dir = None
        self.valid_classes = cfg['training']['valid_classes']
        self.G_train = True
        self.cls_feature_weight = cfg['training']['cls_feature_weight']
        self.use_pseudo_label = use_pseudo_label
        self.modal_num = modal_num
        # cluster vectors & cuda initialization
        # Per-branch class prototypes (256-d), their sample counts, pairwise
        # distances, and per-class softmax thresholds.
        self.objective_vectors_group = torch.zeros(self.modal_num + 1, 19, 256).cuda()
        self.objective_vectors_num_group = torch.zeros(self.modal_num + 1, 19).cuda()
        self.objective_vectors_dis_group = torch.zeros(self.modal_num + 1, 19, 19).cuda()
        self.class_threshold_group = torch.full([self.modal_num + 1, 19], 0.95).cuda()
        #self.metrics = CustomMetrics(self.class_numbers)
        self.metrics = CustomMetrics(self.class_numbers, modal_num=self.modal_num, model=self)
        # Select the batch-norm implementation by config key.
        bn = cfg_model['bn']
        if bn == 'sync_bn':
            BatchNorm = SynchronizedBatchNorm2d
        elif bn == 'bn':
            BatchNorm = nn.BatchNorm2d
        elif bn == 'gn':
            BatchNorm = nn.GroupNorm
        else:
            raise NotImplementedError('batch norm choice {} is not implemented'.format(bn))
        if use_pseudo_label:
            # Frozen prediction network used only to generate pseudo labels.
            self.PredNet = DeepLab(
                num_classes=19,
                backbone=cfg_model['basenet']['version'],
                output_stride=16,
                bn=cfg_model['bn'],
                freeze_bn=True,
                modal_num=self.modal_num
            ).cuda()
            self.load_PredNet(cfg, writer, logger, dir=None, net=self.PredNet)
            self.PredNet_DP = self.init_device(self.PredNet, gpu_id=self.default_gpu, whether_DP=True)
            self.PredNet.eval()
            self.PredNet_num = 0
        # Trainable segmentation network (and its data-parallel wrapper).
        self.BaseNet = DeepLab(
            num_classes=19,
            backbone=cfg_model['basenet']['version'],
            output_stride=16,
            bn=cfg_model['bn'],
            freeze_bn=True,
            modal_num=self.modal_num
        )
        logger.info('the backbone is {}'.format(cfg_model['basenet']['version']))
        self.BaseNet_DP = self.init_device(self.BaseNet, gpu_id=self.default_gpu, whether_DP=True)
        self.nets.extend([self.BaseNet])
        self.nets_DP = [self.BaseNet_DP]
        # Discriminator
        # One output-space discriminator per modal branch plus the fused one.
        self.SOURCE_LABEL = 0
        self.TARGET_LABEL = 1
        self.DNets = []
        self.DNets_DP = []
        for _ in range(self.modal_num+1):
            _net_d = FCDiscriminator(inplanes=19)
            self.DNets.append(_net_d)
            _net_d_DP = self.init_device(_net_d, gpu_id=self.default_gpu, whether_DP=True)
            self.DNets_DP.append(_net_d_DP)
        self.nets.extend(self.DNets)
        self.nets_DP.extend(self.DNets_DP)
        # SGD for the generator, Adam for the discriminators; the 'name' key
        # is config metadata, not an optimizer kwarg.
        self.optimizers = []
        self.schedulers = []
        optimizer_cls = torch.optim.SGD
        optimizer_params = {k:v for k, v in cfg['training']['optimizer'].items()
                            if k != 'name'}
        optimizer_cls_D = torch.optim.Adam
        optimizer_params_D = {k:v for k, v in cfg['training']['optimizer_D'].items()
                            if k != 'name'}
        if self.use_pseudo_label:
            self.BaseOpti = optimizer_cls(self.BaseNet.parameters(), **optimizer_params)
        else:
            # optim_parameters() supplies per-group LRs (backbone vs. head).
            self.BaseOpti = optimizer_cls(self.BaseNet.optim_parameters(cfg['training']['optimizer']['lr']), **optimizer_params)
        self.optimizers.extend([self.BaseOpti])
        self.DiscOptis = []
        for _d_net in self.DNets:
            self.DiscOptis.append(
                optimizer_cls_D(_d_net.parameters(), **optimizer_params_D)
            )
        self.optimizers.extend(self.DiscOptis)
        self.schedulers = []
        if self.use_pseudo_label:
            self.BaseSchedule = get_scheduler(self.BaseOpti, cfg['training']['lr_schedule'])
            self.schedulers.extend([self.BaseSchedule])
        else:
            """BaseSchedule detail see FUNC: scheduler_step()"""
            # Manual poly-decay (see lr_poly); the scheduler object below is
            # created but intentionally unused in this branch.
            self.learning_rate = cfg['training']['optimizer']['lr']
            self.gamma = cfg['training']['lr_schedule']['gamma']
            self.num_steps = cfg['training']['lr_schedule']['max_iter']
            self._BaseSchedule_nouse = get_scheduler(self.BaseOpti, cfg['training']['lr_schedule'])
            self.schedulers.extend([self._BaseSchedule_nouse])
        self.DiscSchedules = []
        for _disc_opt in self.DiscOptis:
            self.DiscSchedules.append(
                get_scheduler(_disc_opt, cfg['training']['lr_schedule'])
            )
        self.schedulers.extend(self.DiscSchedules)
        # Weight init / checkpoint resume.
        self.setup(cfg, writer, logger)
        # Loss functions used during adaptation training.
        self.adv_source_label = 0
        self.adv_target_label = 1
        self.bceloss = nn.BCEWithLogitsLoss(reduce=False)
        self.loss_fn = get_loss_function(cfg)
        self.mseloss = nn.MSELoss()
        self.l1loss = nn.L1Loss()
        self.smoothloss = nn.SmoothL1Loss()
        self.triplet_loss = nn.TripletMarginLoss()
def create_PredNet(self,):
ss = DeepLab(
num_classes=19,
backbone=self.cfg_model['basenet']['version'],
output_stride=16,
bn=self.cfg_model['bn'],
freeze_bn=True,
modal_num=self.modal_num,
).cuda()
ss.eval()
return ss
def setup(self, cfg, writer, logger):
'''
set optimizer and load pretrained model
'''
for net in self.nets:
# name = net.__class__.__name__
self.init_weights(cfg['model']['init'], logger, net)
print("Initializition completed")
if hasattr(net, '_load_pretrained_model') and cfg['model']['pretrained']:
print("loading pretrained model for {}".format(net.__class__.__name__))
net._load_pretrained_model()
'''load pretrained model
'''
if cfg['training']['resume_flag']:
self.load_nets(cfg, writer, logger)
pass
def lr_poly(self):
return self.learning_rate * ((1 - float(self.iter) / self.num_steps) ** (self.gamma))
def adjust_basenet_learning_rate(self):
lr = self.lr_poly()
self.BaseOpti.param_groups[0]['lr'] = lr
if len(self.BaseOpti.param_groups) > 1:
self.BaseOpti.param_groups[1]['lr'] = lr * 10
def forward(self, input):
feat, feat_low, att_mask, feat_cls, output = self.BaseNet_DP(input)
return feat, feat_low, feat_cls, output
def forward_Up(self, input):
feat, feat_low, feat_cls, outputs = self.forward(input)
output = F.interpolate(outputs[-1], size=input.size()[2:], mode='bilinear', align_corners=True)
return feat, feat_low, feat_cls, output
def PredNet_Forward(self, input):
with torch.no_grad():
_, _, att_mask, feat_cls, output_result = self.PredNet_DP(input)
return _, _, feat_cls, output_result
    def calculate_mean_vector(self, feat_cls, outputs, labels, ):
        """Compute per-image, per-class mean feature vectors (prototypes).

        Only pixels where the prediction's argmax agrees with the label are
        pooled, and classes with fewer than 10 such pixels are skipped.
        Returns a list of pooled feature vectors and the parallel list of
        their class ids.
        """
        outputs_softmax = F.softmax(outputs, dim=1)
        outputs_argmax = outputs_softmax.argmax(dim=1, keepdim=True)
        # process_label presumably expands label indices to one-hot maps --
        # defined outside this view; TODO confirm.
        outputs_argmax = self.process_label(outputs_argmax.float())
        labels_expanded = self.process_label(labels)
        # Keep only pixels where prediction and label agree.
        outputs_pred = labels_expanded * outputs_argmax
        # Fraction of the image covered by each class (used to normalize the
        # average-pooled sum back to a mean over selected pixels).
        scale_factor = F.adaptive_avg_pool2d(outputs_pred, 1)
        vectors = []
        ids = []
        for n in range(feat_cls.size()[0]):
            for t in range(self.class_numbers):
                if scale_factor[n][t].item()==0:
                    continue
                if (outputs_pred[n][t] > 0).sum() < 10:
                    continue
                s = feat_cls[n] * outputs_pred[n][t]
                scale = torch.sum(outputs_pred[n][t]) / labels.shape[2] / labels.shape[3] * 2
                s = normalisation_pooling()(s, scale)
                s = F.adaptive_avg_pool2d(s, 1) / scale_factor[n][t]
                vectors.append(s)
                ids.append(t)
        return vectors, ids
def step(self, source_x, source_label, source_modal_ids, target_x, target_label, target_modal_ids, use_pseudo_loss=False):
    """One adversarial training step: supervised loss on source, GAN losses on both domains.

    Args:
        source_x / target_x: batches from the source / target domain.
        source_label / target_label: segmentation labels (target_label unused here).
        source_modal_ids / target_modal_ids: per-sample modality index selecting
            which modal branch of the multi-head output applies to each sample.
        use_pseudo_loss: accepted for interface compatibility; not used in this body.
    Returns:
        (loss_GTA, loss_adv, loss_D_comb): supervised, generator-adversarial,
        and combined discriminator losses.
    """
    assert len(source_modal_ids) == source_x.size(0), "modal_ids' batchsize != source_x's batchsize"
    _, _, source_feat_cls, source_output = self.forward(input=source_x)
    """source_output: [B x 19 x W x H, ...]
    select modal-branch output in each batchsize
    Specific-modal output
    """
    # Pick, for every sample in the batch, the output of its own modal branch.
    source_output_modal_k = torch.stack(
        [
            source_output[_modal_i][_batch_i]
            for _batch_i, _modal_i in enumerate(source_modal_ids)
        ],
        dim=0,
    )
    # attention output & specific-modal output: supervise both the per-modal
    # selection and the fused (last) head with the same labels.
    source_output_comb = torch.cat([source_output_modal_k, source_output[-1]], dim=0)
    source_label_comb = torch.cat([source_label, source_label.clone()], dim=0)
    source_outputUp = F.interpolate(source_output_comb, size=source_x.size()[-2:], mode='bilinear', align_corners=True)
    loss_GTA = self.loss_fn(input=source_outputUp, target=source_label_comb)
    #self.PredNet.eval()
    # adversarial loss
    # -----------------------------
    """Generator (segmentation)"""
    # -----------------------------
    # On Source Domain
    loss_adv = torch.Tensor([0]).cuda()
    _batch_size = 0
    _, _, _, target_output = self.forward(target_x)
    target_modal_ids_tensor = torch.Tensor(target_modal_ids).cuda()
    # One discriminator per modal branch plus one for the fused head.
    for t_out, _d_net_DP, _d_net, modal_idx in zip(target_output, self.DNets_DP, self.DNets, range(len(target_output))):
        # set grad false — discriminators are frozen while updating the generator.
        self.set_requires_grad(self.logger, _d_net, requires_grad = False)
        # true/false discriminator; generator tries to make target look like source (label 1.0).
        t_D_out = _d_net_DP(F.softmax(t_out))
        #source_modal_ids
        loss_temp = torch.mean(self.bceloss(
            t_D_out,
            torch.FloatTensor(t_D_out.data.size()).fill_(1.0).cuda()
        ), [1,2,3])
        if modal_idx >= self.modal_num:
            # Fused head: every sample contributes.
            loss_adv += torch.mean(loss_temp)
        elif torch.mean(torch.as_tensor((modal_idx==target_modal_ids_tensor), dtype=torch.float32)) == 0:
            # No sample of this modality in the batch — skip.
            loss_adv += 0.0
        else:
            # Only samples whose modality matches this branch contribute.
            loss_adv += torch.mean(torch.masked_select(loss_temp, target_modal_ids_tensor==modal_idx))
        _batch_size += t_out.size(0)
    #loss_adv /= _batch_size
    loss_adv *= self.cfg['training']['loss_adv_lambda']
    loss_G = torch.Tensor([0]).cuda()
    loss_G = loss_G + loss_GTA + loss_adv
    self.BaseOpti.zero_grad()
    if loss_G.item() != 0:
        loss_G.backward()
    self.BaseOpti.step()
    # -----------------------------
    """Discriminator """
    # -----------------------------
    _batch_size = 0
    loss_D_comb = torch.Tensor([0]).cuda()
    source_modal_ids_tensor = torch.Tensor(source_modal_ids).cuda()
    for s_out, t_out, _d_net_DP, _d_net, _disc_opt, modal_idx in zip(source_output, target_output, self.DNets_DP, self.DNets, self.DiscOptis, range(len(source_output))):
        self.set_requires_grad(self.logger, _d_net, requires_grad = True)
        _batch_size = 0
        loss_D = torch.Tensor([0]).cuda()
        # source domain: real samples, target label 1.0. detach() keeps gradients
        # out of the generator while training the discriminator.
        s_D_out = _d_net_DP(F.softmax(s_out.detach()))
        loss_temp_s = torch.mean(self.bceloss(
            s_D_out,
            torch.FloatTensor(s_D_out.data.size()).fill_(1.0).cuda()
        ), [1,2,3])
        if modal_idx >= self.modal_num:
            loss_D += torch.mean(loss_temp_s)
        elif torch.mean(torch.as_tensor((modal_idx==source_modal_ids_tensor), dtype=torch.float32)) == 0:
            loss_D += 0.0
        else:
            loss_D += torch.mean(torch.masked_select(loss_temp_s, source_modal_ids_tensor==modal_idx))
        # target domain: fake samples, target label 0.0.
        _batch_size += (s_out.size(0) + t_out.size(0))
        t_D_out = _d_net_DP(F.softmax(t_out.detach()))
        loss_temp_t = torch.mean(self.bceloss(
            t_D_out,
            torch.FloatTensor(t_D_out.data.size()).fill_(0.0).cuda()
        ), [1,2,3])
        if modal_idx >= self.modal_num:
            loss_D += torch.mean(loss_temp_t)
        elif torch.mean(torch.as_tensor((modal_idx==target_modal_ids_tensor), dtype=torch.float32)) == 0:
            loss_D += 0.0
        else:
            loss_D += torch.mean(torch.masked_select(loss_temp_t, target_modal_ids_tensor==modal_idx))
        loss_D *= self.cfg['training']['loss_adv_lambda']*0.5
        loss_D_comb += loss_D
        # NOTE(review): loss_D_comb accumulates terms from previous iterations,
        # yet backward() is called each iteration — confirm against the original
        # indentation that this does not re-backward freed graphs.
        _disc_opt.zero_grad()
        if loss_D_comb.item() != 0:
            loss_D_comb.backward()
        _disc_opt.step()
    return loss_GTA, loss_adv, loss_D_comb
def process_label(self, label):
    """One-hot encode integer label maps into 20 channels.

    Channel 19 collects every label >= 19 (ignore/overflow classes), so valid
    classes occupy channels 0-18.
    """
    n_batch, _, dim2, dim3 = label.size()
    one_hot = torch.zeros(n_batch, 20, dim2, dim3).cuda()
    # Clip out-of-range class ids into the overflow channel.
    clipped = torch.where(label < 19, label, torch.Tensor([19]).cuda())
    return one_hot.scatter_(1, clipped.long(), 1)
def class_vectors_alignment(self, ids, vectors, modal_ids=[0,]):
    """SmoothL1 alignment loss between batch prototypes and stored objective vectors.

    Args:
        ids: class id for each entry of `vectors` (from calculate_mean_vector).
        vectors: per-class pooled feature vectors.
        modal_ids: modality indices whose objective vectors are merged for comparison.
    Returns:
        Scalar loss tensor (zero tensor when no valid class is present).
    """
    #loss = torch.Tensor([0]).cuda(self.default_gpu)
    loss = torch.Tensor([0]).cuda()
    """construct category objective vectors"""
    # objective_vectors_group 2 x 19 x 256 --> 19 x 512
    # BUGFIX: original referenced undefined names `modal_idx` and
    # `_objective_vectors` (NameError at runtime).
    _objective_vectors_set = self.metrics.multimodal_merger.merge_objective_vectors(modal_ids=modal_ids)
    # BUGFIX: guard against empty `ids` (original divided by len(ids) == 0).
    if len(ids) == 0:
        return loss
    for i in range(len(ids)):
        if ids[i] not in self.valid_classes:
            continue
        new_loss = self.smoothloss(vectors[i].squeeze().cuda(), _objective_vectors_set[ids[i]])
        # Damp outlier losses so a single bad prototype cannot dominate.
        while (new_loss.item() > 5):
            new_loss = new_loss / 10
        loss = loss + new_loss
    loss = loss / len(ids) * 10
    return loss
def freeze_bn_apply(self):
    """Recursively freeze batch-norm layers on every registered network (raw and DP-wrapped)."""
    for module in itertools.chain(self.nets, self.nets_DP):
        module.apply(freeze_bn)
def scheduler_step(self):
    """Advance all LR schedulers by one step.

    In pseudo-label mode every scheduler steps normally. Otherwise the first
    scheduler (the unused BaseNet placeholder) is skipped and the BaseNet LR
    is set manually via poly decay instead.
    """
    if self.use_pseudo_label:
        for sch in self.schedulers:
            sch.step()
        return
    """skipped _BaseScheduler_nouse"""
    for sch in self.schedulers[1:]:
        sch.step()
    # baseNet scheduler: manual poly-decay update.
    self.adjust_basenet_learning_rate()
def optimizer_zerograd(self):
    """Clear gradients on every registered optimizer."""
    for opt in self.optimizers:
        opt.zero_grad()
def optimizer_step(self):
    """Apply one update step on every registered optimizer."""
    for optimizer in self.optimizers:
        optimizer.step()
def init_device(self, net, gpu_id=None, whether_DP=False):
    """Move `net` onto the selected device, optionally wrapping in sync-BN DataParallel.

    Falls back to self.default_gpu when gpu_id is None (or 0 — note the `or`),
    and to CPU when CUDA is unavailable.
    """
    chosen_gpu = gpu_id or self.default_gpu
    target = torch.device("cuda:{}".format(chosen_gpu) if torch.cuda.is_available() else 'cpu')
    net = net.to(target)
    # if torch.cuda.is_available():
    if whether_DP:
        # NOTE(review): wraps across all visible GPUs regardless of gpu_id —
        # confirm this is intended.
        net = DataParallelWithCallback(net, device_ids=range(torch.cuda.device_count()))
    return net
def eval(self, net=None, logger=None):
    """Make specific models eval mode during test time.

    With net=None every registered network (raw and DP-wrapped) is switched;
    otherwise only the given net. `logger`, when provided, receives an info
    message.
    """
    if net is None:
        for net in self.nets:
            net.eval()
        for net in self.nets_DP:
            net.eval()
        if logger is not None:
            logger.info("Successfully set the model eval mode")
    else:
        net.eval()
        if logger is not None:
            # BUGFIX: original called `logger(...)` directly, which crashes for
            # standard logging.Logger objects; use .info like the other branch.
            logger.info("Successfully set {} eval mode".format(net.__class__.__name__))
    return
def train(self, net=None, logger=None):
    """Switch networks back to training mode.

    With net=None all registered networks (raw and DP-wrapped) are switched;
    otherwise only the given net. `logger` is accepted for interface symmetry
    with eval() but is not used.
    """
    if net is None:
        for module in self.nets:
            module.train()
        for module in self.nets_DP:
            module.train()
    else:
        net.train()
    return
def set_requires_grad(self, logger, net, requires_grad = False):
    """Set requires_grad=False for all parameters of `net` to avoid unnecessary
    computations (or re-enable them with requires_grad=True).

    Parameters:
        logger -- accepted for interface compatibility; unused
        net (BaseModel) -- the network which will be operated on
        requires_grad (bool) -- whether the network requires gradients or not
    """
    for p in net.parameters():
        p.requires_grad = requires_grad
def set_requires_grad_layer(self, logger, net, layer_type='batchnorm', requires_grad=False):
    ''' set specific type of layers whether needing grad
    '''
    # NOTE(review): the `net` argument is ignored — the loop below rebinds `net`
    # while iterating self.nets, so the flag is applied to matching layers of
    # EVERY registered network. Confirm whether per-net control was intended.
    # print('Warning: all the BatchNorm params are fixed!')
    # logger.info('Warning: all the BatchNorm params are fixed!')
    for net in self.nets:
        for _i in net.modules():
            # Case-insensitive substring match on the layer class name,
            # e.g. 'BatchNorm2d' matches layer_type 'batchnorm'.
            if _i.__class__.__name__.lower().find(layer_type.lower()) != -1:
                _i.weight.requires_grad = requires_grad
    return
def init_weights(self, cfg, logger, net, init_type='normal', init_gain=0.02):
    """Initialize network weights.
    Parameters:
        cfg (dict)        -- init section of the model config; may override
                             init_type / init_gain
        logger            -- logger receiving a summary message
        net (network)     -- network to be initialized
        init_type (str)   -- the name of an initialization method: normal | xavier | kaiming | orthogonal
        init_gain (float) -- scaling factor for normal, xavier and orthogonal.
    We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might
    work better for some applications. Feel free to try yourself.
    """
    # Config values win over the keyword defaults.
    init_type = cfg.get('init_type', init_type)
    init_gain = cfg.get('init_gain', init_gain)
    def init_func(m):  # define the initialization function
        classname = m.__class__.__name__
        if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
            if init_type == 'normal':
                nn.init.normal_(m.weight.data, 0.0, init_gain)
            elif init_type == 'xavier':
                nn.init.xavier_normal_(m.weight.data, gain=init_gain)
            elif init_type == 'kaiming':
                nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                nn.init.orthogonal_(m.weight.data, gain=init_gain)
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            if hasattr(m, 'bias') and m.bias is not None:
                nn.init.constant_(m.bias.data, 0.0)
        elif isinstance(m, SynchronizedBatchNorm2d) or classname.find('BatchNorm2d') != -1 \
                or isinstance(m, nn.GroupNorm):
            # Norm layers: unit scale, zero shift regardless of init_type.
            m.weight.data.fill_(1)
            m.bias.data.zero_()  # BatchNorm Layer's weight is not a matrix; only normal distribution applies.
    print('initialize {} with {}'.format(init_type, net.__class__.__name__))
    logger.info('initialize {} with {}'.format(init_type, net.__class__.__name__))
    net.apply(init_func)  # apply the initialization function <init_func>
    pass
def adaptive_load_nets(self, net, model_weight):
    """Load `model_weight` into `net`, silently skipping keys absent from the net.

    Keys present in the net but missing from the checkpoint keep their current
    values, so partially-matching checkpoints load without error.
    """
    current = net.state_dict()
    compatible = {key: tensor for key, tensor in model_weight.items() if key in current}
    # print("[INFO] Pretrained dict:", compatible.keys())
    current.update(compatible)
    net.load_state_dict(current)
def load_nets(self, cfg, writer, logger): # load pretrained weights on the net
if os.path.isfile(cfg['training']['resume']):
logger.info(
"Loading model and optimizer from checkpoint '{}'".format(cfg['training']['resume'])
)
checkpoint = torch.load(cfg['training']['resume'])
_k = -1
net_state_no = {}
for net in self.nets:
name = net.__class__.__name__
if name not in net_state_no:
net_state_no[name] = 0
else:
net_state_no[name] += 1
_k += 1
if checkpoint.get(name) == None:
continue
if name.find('FCDiscriminator') != -1 and cfg['training']['gan_resume'] == False:
continue
if isinstance(checkpoint[name], list):
self.adaptive_load_nets(net, checkpoint[name][net_state_no[name]]["model_state"])
else:
print("*****************************************")
print("[WARNING] Using depreciated load version! Model {}".format(name))
print("*****************************************")
self.adaptive_load_nets(net, checkpoint[name]["model_state"])
if cfg['training']['optimizer_resume']:
if isinstance(checkpoint[name], list):
self.adaptive_load_nets(self.optimizers[_k], checkpoint[name][net_state_no[name]]["optimizer_state"])
self.adaptive_load_nets(self.schedulers[_k], checkpoint[name][net_state_no[name]]["scheduler_state"])
else:
self.adaptive_load_nets(self.optimizers[_k], checkpoint[name]["optimizer_state"])
self.adaptive_load_nets(self.schedulers[_k], checkpoint[name]["scheduler_state"])
self.iter = checkpoint["iter"]
#self.best_iou = checkpoint['best_iou']
logger.info(
"Loaded checkpoint '{}' (iter {})".format(
cfg['training']['resume'], checkpoint["iter"]
)
)
else:
raise Exception("No checkpoint found at '{}'".format(cfg['training']['resume']))
def load_PredNet(self, cfg, writer, logger, dir=None, net=None):  # load pretrained weights on the net
    """Load weights for the frozen pseudo-label network from `dir`
    (default: cfg['training']['Pred_resume']).

    Returns:
        best_iou recorded in the checkpoint (0 when nothing was loaded).
    Raises:
        Exception: when the checkpoint path is not a file.
    """
    dir = dir or cfg['training']['Pred_resume']
    best_iou = 0
    if os.path.isfile(dir):
        logger.info(
            "Loading model and optimizer from checkpoint '{}'".format(dir)
        )
        checkpoint = torch.load(dir)
        name = net.__class__.__name__
        # No entry for this net in the checkpoint — nothing to load.
        if checkpoint.get(name) == None:
            return
        if name.find('FCDiscriminator') != -1 and cfg['training']['gan_resume'] == False:
            return
        if isinstance(checkpoint[name], list):
            # List-style checkpoints: take the first snapshot for PredNet.
            self.adaptive_load_nets(net, checkpoint[name][0]["model_state"])
        else:
            self.adaptive_load_nets(net, checkpoint[name]["model_state"])
        iter = checkpoint["iter"]
        best_iou = checkpoint['best_iou']
        logger.info(
            "Loaded checkpoint '{}' (iter {}) (best iou {}) for PredNet".format(
                dir, checkpoint["iter"], best_iou
            )
        )
    else:
        raise Exception("No checkpoint found at '{}'".format(dir))
    if hasattr(net, 'best_iou'):
        #net.best_iou = best_iou
        pass
    return best_iou
def set_optimizer(self, optimizer):  #set optimizer to all nets
    # NOTE(review): intentionally a no-op — optimizers are constructed in
    # __init__; this stub is kept for interface compatibility.
    pass
def reset_objective_SingleVector(self,):
    """Zero the per-modality prototype statistics (one extra slot for the fused head)."""
    slots = self.modal_num + 1
    # 19 classes, 256-dim feature prototypes, plus count and 19x19 distance stats.
    self.objective_vectors_group = torch.zeros(slots, 19, 256).cuda()
    self.objective_vectors_num_group = torch.zeros(slots, 19).cuda()
    self.objective_vectors_dis_group = torch.zeros(slots, 19, 19).cuda()
def update_objective_SingleVector(self, vectors, vectors_num, name='moving_average'):
    """Update stored class prototypes with a new batch of vectors.

    Args:
        vectors: tensor broadcastable with objective_vectors_group ([M+1, 19, 256]).
        vectors_num: per-(modality, class) sample counts, shape [M+1, 19].
        name: 'moving_average' (EMA with rate 1e-4) or 'mean' (running mean).
    Raises:
        NotImplementedError: for an unknown update mode.
    """
    #vector = vector.squeeze().detach()
    if torch.sum(vectors) == 0:
        return
    if name == 'moving_average':
        self.objective_vectors_group = self.objective_vectors_group * 0.9999 + 0.0001 * vectors
        self.objective_vectors_num_group += vectors_num
        # BUGFIX: builtin min() on a multi-element tensor raises
        # "Boolean value of Tensor ... is ambiguous"; clamp element-wise instead.
        self.objective_vectors_num_group = torch.clamp(self.objective_vectors_num_group, max=3000)
    elif name == 'mean':
        # BUGFIX: counts are [M+1, 19] while prototypes are [M+1, 19, 256];
        # unsqueeze a trailing dim so the broadcast is well-formed.
        prev_counts = self.objective_vectors_num_group.unsqueeze(-1)
        self.objective_vectors_group = self.objective_vectors_group * prev_counts + vectors
        self.objective_vectors_num_group += vectors_num
        # NOTE(review): classes never observed still have count 0 here and
        # produce NaN prototypes (0/0) — confirm callers guarantee nonzero counts.
        self.objective_vectors_group = self.objective_vectors_group / self.objective_vectors_num_group.unsqueeze(-1)
        self.objective_vectors_num_group = torch.clamp(self.objective_vectors_num_group, max=3000)
    else:
        raise NotImplementedError('no such updating way of objective vectors {}'.format(name))
def grad_reverse(x):
    # Thin wrapper around the GradReverse autograd op from .utils — presumably
    # identity forward with reversed gradients (confirm against GradReverse).
    return GradReverse()(x)
| 43.293059 | 173 | 0.598064 | from models.base_model import BaseModel
import torch.nn as nn
import torch.nn.functional as F
import os, sys
import torch
import numpy as np
import itertools
from torch.autograd import Variable
from optimizers import get_optimizer
from schedulers import get_scheduler
from models.sync_batchnorm import SynchronizedBatchNorm2d, DataParallelWithCallback
from models.deeplab_multimodal import DeepLab
from models.decoder import Decoder
from models.aspp import ASPP
from models.discriminator import FCDiscriminator, FCDiscriminator_low, FCDiscriminator_out, FCDiscriminator_class
from loss import get_loss_function
from .utils import freeze_bn, GradReverse, normalisation_pooling
from metrics import runningScore
import pdb
def multimodal_merger(multi_modal_data, is_upsample=False, up_size=None):
    """Fuse per-modality predictions: concatenate feat_cls along channels and sum logits.

    NOTE(review): when is_upsample is True, entries of multi_modal_data['output']
    are replaced IN PLACE with their upsampled versions — callers observe the
    mutation.
    """
    feats = multi_modal_data['feat_cls']
    logits = multi_modal_data['output']
    fused = 0
    for idx in range(len(logits)):
        if is_upsample:
            logits[idx] = F.interpolate(logits[idx], size=up_size, mode='bilinear', align_corners=True)
        fused = fused + logits[idx]
    return {
        'feat_cls': feats,
        'feat_cls_cat': torch.cat(feats, 1),
        'output_comb': fused,
        'output': logits,
    }
class CustomMetricsMultimodalMerger():
    """Merges per-modality statistics (thresholds, prototypes) held on the owning model.

    The merger keeps a back-reference to the Deeplab model whose
    class_threshold_group / objective_vectors_group it averages over the
    requested modality indices.
    """
    def __init__(self, modal_num, category_num, model):
        self.modal_num = modal_num
        self.category_num = category_num
        self._model = model

    def initialize_model(self, model):
        # BUGFIX: original signature was `initialize_model(model)` without
        # `self`, so `self._model = model` raised NameError when called.
        self._model = model

    def merge_class_threshold(self, modal_ids=[]):
        """Average the per-class confidence thresholds over the given modalities."""
        assert self._model is not None, "[ERROR] Deeplab Model not initialize before using!"
        _class_threshold_group = self._model.class_threshold_group[modal_ids]
        return torch.mean(_class_threshold_group, dim=0)

    def merge_clu_threshold(self, clu_threshold, modal_ids=[]):
        """Average the per-class clustering thresholds over the given modalities."""
        _clu_threshold_group = clu_threshold[modal_ids]
        return torch.mean(_clu_threshold_group, dim=0)

    def merge_objective_vectors(self, modal_ids=[]):
        """Stack the selected modalities' prototypes into [category, k * feat_dim]."""
        assert self._model is not None, "[ERROR] Deeplab Model not initialize before using!"
        _modal_num, _cate_num, _feat_dim = self._model.objective_vectors_group.size()
        _objective_vectors = self._model.objective_vectors_group[modal_ids]
        # BUGFIX: list indexing of a [M, C, D] tensor yields a 3-D tensor, and
        # the permute(1, 0, 2) below requires dim == 3; the original asserted 4.
        assert _objective_vectors.dim() == 3, "objective_vector dimension != 3"
        _objective_vectors = _objective_vectors.permute(1, 0, 2).contiguous()
        return _objective_vectors.view(_cate_num, -1)
class CustomMetrics():
    """Tracks threshold-based and cluster-based pseudo-label quality metrics.

    Maintains per-class recall counters for (a) softmax-threshold selection and
    (b) nearest-prototype (cluster) selection, plus runningScore confusion
    matrices for both.
    """
    def __init__(self, numbers=19, modal_num=3, model=None):
        self.class_numbers = numbers
        # Columns: [matched pixels, ground-truth pixels, predicted pixels].
        self.classes_recall_thr = np.zeros([19, 3])
        self.classes_recall_thr_num = np.zeros([19])
        self.classes_recall_clu = np.zeros([19, 3])
        self.classes_recall_clu_num = np.zeros([19])
        self.running_metrics_val_threshold = runningScore(self.class_numbers)
        self.running_metrics_val_clusters = runningScore(self.class_numbers)
        self.clu_threshold = torch.full((modal_num + 1, 19), 2.5).cuda()
        self.multimodal_merger = CustomMetricsMultimodalMerger(
            modal_num=modal_num + 1, category_num=numbers, model=model
        )

    def update(self, feat_cls, outputs, labels, modal_ids=[0,]):
        """Score one batch: returns (threshold_arg, outputs_cluster_arg) pseudo-labels
        with rejected pixels set to 250."""
        batch, width, height = labels.shape
        labels = labels.reshape([batch, 1, width, height]).float()
        labels = F.interpolate(labels, size=feat_cls.size()[2:], mode='nearest')
        outputs_threshold = outputs.clone()
        outputs_threshold = F.softmax(outputs_threshold, dim=1)
        self.running_metrics_val_threshold.update(labels, outputs_threshold.argmax(1))
        # BUGFIX: original passed undefined `modal_idx` (NameError); the
        # argument is `modal_ids`.
        _class_threshold_set = self.multimodal_merger.merge_class_threshold(modal_ids=modal_ids)
        # Binarize each class channel against its merged confidence threshold.
        for i in range(19):
            outputs_threshold[:, i, :, :] = torch.where(outputs_threshold[:, i, :, :] > _class_threshold_set[i], torch.Tensor([1]).cuda(), torch.Tensor([0]).cuda())
        _batch, _channel, _w, _h = outputs_threshold.shape
        # Extra 0.2 channel wins wherever no class passed its threshold → id 19 → 250.
        _tmp = torch.full([_batch, 1, _w, _h], 0.2,).cuda()
        _tmp = torch.cat((outputs_threshold, _tmp), 1)
        threshold_arg = _tmp.argmax(1, keepdim=True)
        threshold_arg[threshold_arg == 19] = 250
        truth, pred_all, truth_all = self.calc_recall(labels.cpu().int().numpy(), threshold_arg.cpu().int().numpy())
        self.classes_recall_thr[:, 0] += truth
        self.classes_recall_thr[:, 2] += pred_all
        self.classes_recall_thr[:, 1] += truth_all
        outputs_cluster = outputs.clone()
        # BUGFIX: same undefined `modal_idx` → `modal_ids`.
        _objective_vectors_set = self.multimodal_merger.merge_objective_vectors(modal_ids=modal_ids)
        # L2 distance of every pixel feature to each class prototype.
        # NOTE(review): expand(-1, 128, 256) hard-codes the feature map size —
        # confirm inputs are always 128x256 here.
        for i in range(19):
            outputs_cluster[:, i, :, :] = torch.norm( _objective_vectors_set[i].reshape(-1,1,1).expand(-1,128,256) - feat_cls, 2, dim=1,)
        outputs_cluster_min, outputs_cluster_arg = outputs_cluster.min(dim=1, keepdim=True)
        # Mask out the best class, then find the runner-up distance.
        outputs_cluster_second = outputs_cluster.scatter_(1, outputs_cluster_arg, 100)
        if torch.unique(outputs_cluster_second.argmax(1) - outputs_cluster_arg.squeeze()).squeeze().item() != 0:
            raise NotImplementedError('wrong when computing L2 norm!!')
        outputs_cluster_secondmin, outputs_cluster_secondarg = outputs_cluster_second.min(dim=1, keepdim=True)
        self.running_metrics_val_clusters.update(labels, outputs_cluster_arg)
        tmp_arg = outputs_cluster_arg.clone()
        # BUGFIX: removed stray pdb.set_trace() left in the metrics hot path.
        _clu_thresholds = self.multimodal_merger.merge_clu_threshold(self.clu_threshold, modal_ids=modal_ids)
        # Reject pixels whose best/second-best margin is below the threshold.
        outputs_cluster_arg[(outputs_cluster_secondmin - outputs_cluster_min) < _clu_thresholds] = 250
        truth, pred_all, truth_all = self.calc_recall(labels.cpu().int().numpy(), outputs_cluster_arg.cpu().int().numpy())
        self.classes_recall_clu[:, 0] += truth
        self.classes_recall_clu[:, 2] += pred_all
        self.classes_recall_clu[:, 1] += truth_all
        return threshold_arg, outputs_cluster_arg

    def calc_recall(self, gt, argmax):
        """Per-class counts: (correctly selected, selected, ground-truth) pixels."""
        truth = np.zeros([self.class_numbers])
        pred_all = np.zeros([self.class_numbers])
        truth_all = np.zeros([self.class_numbers])
        for i in range(self.class_numbers):
            truth[i] = (gt == i)[argmax == i].sum()
            pred_all[i] = (argmax == i).sum()
            truth_all[i] = (gt == i).sum()
        return truth, pred_all, truth_all

    def calc_mean_Clu_recall(self, ):
        # Mean over classes of matched / ground-truth pixel counts.
        return np.mean(self.classes_recall_clu[:, 0] / self.classes_recall_clu[:, 1])

    def calc_mean_Thr_recall(self, ):
        return np.mean(self.classes_recall_thr[:, 0] / self.classes_recall_thr[:, 1])

    def reset(self, ):
        """Clear confusion matrices and recall counters."""
        self.running_metrics_val_clusters.reset()
        self.running_metrics_val_threshold.reset()
        self.classes_recall_clu = np.zeros([19, 3])
        self.classes_recall_thr = np.zeros([19, 3])
class CustomModel():
def __init__(self, cfg, writer, logger, use_pseudo_label=False, modal_num=3):
    """Build the multimodal DeepLab generator, per-branch discriminators,
    optimizers, schedulers and losses from the config.

    Args:
        cfg: full experiment config ('model' and 'training' sections used).
        writer: summary writer, forwarded to setup/load helpers.
        logger: logger for progress messages.
        use_pseudo_label: also build a frozen PredNet for pseudo-labels and use
            plain parameter groups / scheduler stepping for the base net.
        modal_num: number of modalities; one extra slot is kept for the fused head.
    """
    self.cfg = cfg
    self.writer = writer
    self.class_numbers = 19
    self.logger = logger
    cfg_model = cfg['model']
    self.cfg_model = cfg_model
    self.best_iou = -100
    self.iter = 0
    self.nets = []
    self.split_gpu = 0
    self.default_gpu = cfg['model']['default_gpu']
    self.PredNet_Dir = None
    self.valid_classes = cfg['training']['valid_classes']
    self.G_train = True
    self.cls_feature_weight = cfg['training']['cls_feature_weight']
    self.use_pseudo_label = use_pseudo_label
    self.modal_num = modal_num

    # Prototype statistics: one slot per modality plus one for the fused head.
    self.objective_vectors_group = torch.zeros(self.modal_num + 1, 19, 256).cuda()
    self.objective_vectors_num_group = torch.zeros(self.modal_num + 1, 19).cuda()
    self.objective_vectors_dis_group = torch.zeros(self.modal_num + 1, 19, 19).cuda()
    self.class_threshold_group = torch.full([self.modal_num + 1, 19], 0.95).cuda()

    self.metrics = CustomMetrics(self.class_numbers, modal_num=self.modal_num, model=self)

    # Select the norm layer implementation from config.
    bn = cfg_model['bn']
    if bn == 'sync_bn':
        BatchNorm = SynchronizedBatchNorm2d
    elif bn == 'bn':
        BatchNorm = nn.BatchNorm2d
    elif bn == 'gn':
        BatchNorm = nn.GroupNorm
    else:
        raise NotImplementedError('batch norm choice {} is not implemented'.format(bn))

    if use_pseudo_label:
        # Frozen network producing pseudo-labels; loaded from Pred_resume.
        self.PredNet = DeepLab(
            num_classes=19,
            backbone=cfg_model['basenet']['version'],
            output_stride=16,
            bn=cfg_model['bn'],
            freeze_bn=True,
            modal_num=self.modal_num
        ).cuda()
        self.load_PredNet(cfg, writer, logger, dir=None, net=self.PredNet)
        self.PredNet_DP = self.init_device(self.PredNet, gpu_id=self.default_gpu, whether_DP=True)
        self.PredNet.eval()
        self.PredNet_num = 0

    # Trainable segmentation network.
    self.BaseNet = DeepLab(
        num_classes=19,
        backbone=cfg_model['basenet']['version'],
        output_stride=16,
        bn=cfg_model['bn'],
        freeze_bn=True,
        modal_num=self.modal_num
    )
    logger.info('the backbone is {}'.format(cfg_model['basenet']['version']))
    self.BaseNet_DP = self.init_device(self.BaseNet, gpu_id=self.default_gpu, whether_DP=True)
    self.nets.extend([self.BaseNet])
    self.nets_DP = [self.BaseNet_DP]

    # Adversarial domain labels.
    self.SOURCE_LABEL = 0
    self.TARGET_LABEL = 1

    # One discriminator per modal branch plus one for the fused head.
    self.DNets = []
    self.DNets_DP = []
    for _ in range(self.modal_num+1):
        _net_d = FCDiscriminator(inplanes=19)
        self.DNets.append(_net_d)
        _net_d_DP = self.init_device(_net_d, gpu_id=self.default_gpu, whether_DP=True)
        self.DNets_DP.append(_net_d_DP)
    self.nets.extend(self.DNets)
    self.nets_DP.extend(self.DNets_DP)

    # Optimizers: SGD for the generator, Adam per discriminator.
    self.optimizers = []
    self.schedulers = []
    optimizer_cls = torch.optim.SGD
    optimizer_params = {k:v for k, v in cfg['training']['optimizer'].items()
                        if k != 'name'}
    optimizer_cls_D = torch.optim.Adam
    optimizer_params_D = {k:v for k, v in cfg['training']['optimizer_D'].items()
                          if k != 'name'}
    if self.use_pseudo_label:
        self.BaseOpti = optimizer_cls(self.BaseNet.parameters(), **optimizer_params)
    else:
        # optim_parameters builds LR-scaled param groups (backbone vs head).
        self.BaseOpti = optimizer_cls(self.BaseNet.optim_parameters(cfg['training']['optimizer']['lr']), **optimizer_params)
    self.optimizers.extend([self.BaseOpti])
    self.DiscOptis = []
    for _d_net in self.DNets:
        self.DiscOptis.append(
            optimizer_cls_D(_d_net.parameters(), **optimizer_params_D)
        )
    self.optimizers.extend(self.DiscOptis)

    self.schedulers = []
    if self.use_pseudo_label:
        self.BaseSchedule = get_scheduler(self.BaseOpti, cfg['training']['lr_schedule'])
        self.schedulers.extend([self.BaseSchedule])
    else:
        """BaseSchedule detail see FUNC: scheduler_step()"""
        # Manual poly-decay bookkeeping; the placeholder scheduler below is
        # skipped by scheduler_step().
        self.learning_rate = cfg['training']['optimizer']['lr']
        self.gamma = cfg['training']['lr_schedule']['gamma']
        self.num_steps = cfg['training']['lr_schedule']['max_iter']
        self._BaseSchedule_nouse = get_scheduler(self.BaseOpti, cfg['training']['lr_schedule'])
        self.schedulers.extend([self._BaseSchedule_nouse])
    self.DiscSchedules = []
    for _disc_opt in self.DiscOptis:
        self.DiscSchedules.append(
            get_scheduler(_disc_opt, cfg['training']['lr_schedule'])
        )
    self.schedulers.extend(self.DiscSchedules)

    # Weight init + optional checkpoint resume.
    self.setup(cfg, writer, logger)

    self.adv_source_label = 0
    self.adv_target_label = 1
    # Per-pixel BCE kept unreduced so step() can mask per-sample losses.
    self.bceloss = nn.BCEWithLogitsLoss(reduce=False)
    self.loss_fn = get_loss_function(cfg)
    self.mseloss = nn.MSELoss()
    self.l1loss = nn.L1Loss()
    self.smoothloss = nn.SmoothL1Loss()
    self.triplet_loss = nn.TripletMarginLoss()
def create_PredNet(self,):
    """Build a fresh DeepLab in eval mode, configured like the base net.

    Returns the new network on CUDA; weights are NOT loaded here.
    """
    ss = DeepLab(
        num_classes=19,
        backbone=self.cfg_model['basenet']['version'],
        output_stride=16,
        bn=self.cfg_model['bn'],
        freeze_bn=True,
        modal_num=self.modal_num,
    ).cuda()
    ss.eval()
    return ss
def setup(self, cfg, writer, logger):
    """Initialize weights for every registered net, optionally load pretrained
    backbones, and resume from a checkpoint when resume_flag is set."""
    for net in self.nets:
        self.init_weights(cfg['model']['init'], logger, net)
        print("Initializition completed")
        # Nets exposing _load_pretrained_model (e.g. DeepLab backbones) can
        # pull ImageNet weights when 'pretrained' is enabled.
        if hasattr(net, '_load_pretrained_model') and cfg['model']['pretrained']:
            print("loading pretrained model for {}".format(net.__class__.__name__))
            net._load_pretrained_model()
    if cfg['training']['resume_flag']:
        self.load_nets(cfg, writer, logger)
    pass
def lr_poly(self):
    """Polynomial LR decay: base_lr * (1 - iter / max_iter) ** gamma."""
    progress = float(self.iter) / self.num_steps
    return self.learning_rate * ((1 - progress) ** self.gamma)
def adjust_basenet_learning_rate(self):
    """Write the poly-decayed LR into BaseOpti; a second param group (head) gets 10x."""
    new_lr = self.lr_poly()
    groups = self.BaseOpti.param_groups
    groups[0]['lr'] = new_lr
    if len(groups) > 1:
        groups[1]['lr'] = new_lr * 10
def forward(self, input):
    """Run the DP-wrapped base network; the attention mask is dropped from the tuple."""
    feat, feat_low, _att_mask, feat_cls, output = self.BaseNet_DP(input)
    return feat, feat_low, feat_cls, output
def forward_Up(self, input):
    """Forward pass plus bilinear upsampling of the fused (last) logits to input resolution."""
    feat, feat_low, feat_cls, outputs = self.forward(input)
    upsampled = F.interpolate(
        outputs[-1], size=input.size()[2:], mode='bilinear', align_corners=True
    )
    return feat, feat_low, feat_cls, upsampled
def PredNet_Forward(self, input):
with torch.no_grad():
_, _, att_mask, feat_cls, output_result = self.PredNet_DP(input)
return _, _, feat_cls, output_result
def calculate_mean_vector(self, feat_cls, outputs, labels, ):
outputs_softmax = F.softmax(outputs, dim=1)
outputs_argmax = outputs_softmax.argmax(dim=1, keepdim=True)
outputs_argmax = self.process_label(outputs_argmax.float())
labels_expanded = self.process_label(labels)
outputs_pred = labels_expanded * outputs_argmax
scale_factor = F.adaptive_avg_pool2d(outputs_pred, 1)
vectors = []
ids = []
for n in range(feat_cls.size()[0]):
for t in range(self.class_numbers):
if scale_factor[n][t].item()==0:
continue
if (outputs_pred[n][t] > 0).sum() < 10:
continue
s = feat_cls[n] * outputs_pred[n][t]
scale = torch.sum(outputs_pred[n][t]) / labels.shape[2] / labels.shape[3] * 2
s = normalisation_pooling()(s, scale)
s = F.adaptive_avg_pool2d(s, 1) / scale_factor[n][t]
vectors.append(s)
ids.append(t)
return vectors, ids
def step(self, source_x, source_label, source_modal_ids, target_x, target_label, target_modal_ids, use_pseudo_loss=False):
assert len(source_modal_ids) == source_x.size(0), "modal_ids' batchsize != source_x's batchsize"
_, _, source_feat_cls, source_output = self.forward(input=source_x)
source_output_modal_k = torch.stack(
[
source_output[_modal_i][_batch_i]
for _batch_i, _modal_i in enumerate(source_modal_ids)
],
dim=0,
)
source_output_comb = torch.cat([source_output_modal_k, source_output[-1]], dim=0)
source_label_comb = torch.cat([source_label, source_label.clone()], dim=0)
source_outputUp = F.interpolate(source_output_comb, size=source_x.size()[-2:], mode='bilinear', align_corners=True)
loss_GTA = self.loss_fn(input=source_outputUp, target=source_label_comb)
loss_adv = torch.Tensor([0]).cuda()
_batch_size = 0
_, _, _, target_output = self.forward(target_x)
target_modal_ids_tensor = torch.Tensor(target_modal_ids).cuda()
for t_out, _d_net_DP, _d_net, modal_idx in zip(target_output, self.DNets_DP, self.DNets, range(len(target_output))):
self.set_requires_grad(self.logger, _d_net, requires_grad = False)
t_D_out = _d_net_DP(F.softmax(t_out))
loss_temp = torch.mean(self.bceloss(
t_D_out,
torch.FloatTensor(t_D_out.data.size()).fill_(1.0).cuda()
), [1,2,3])
if modal_idx >= self.modal_num:
loss_adv += torch.mean(loss_temp)
elif torch.mean(torch.as_tensor((modal_idx==target_modal_ids_tensor), dtype=torch.float32)) == 0:
loss_adv += 0.0
else:
loss_adv += torch.mean(torch.masked_select(loss_temp, target_modal_ids_tensor==modal_idx))
_batch_size += t_out.size(0)
loss_adv *= self.cfg['training']['loss_adv_lambda']
loss_G = torch.Tensor([0]).cuda()
loss_G = loss_G + loss_GTA + loss_adv
self.BaseOpti.zero_grad()
if loss_G.item() != 0:
loss_G.backward()
self.BaseOpti.step()
_batch_size = 0
loss_D_comb = torch.Tensor([0]).cuda()
source_modal_ids_tensor = torch.Tensor(source_modal_ids).cuda()
for s_out, t_out, _d_net_DP, _d_net, _disc_opt, modal_idx in zip(source_output, target_output, self.DNets_DP, self.DNets, self.DiscOptis, range(len(source_output))):
self.set_requires_grad(self.logger, _d_net, requires_grad = True)
_batch_size = 0
loss_D = torch.Tensor([0]).cuda()
s_D_out = _d_net_DP(F.softmax(s_out.detach()))
loss_temp_s = torch.mean(self.bceloss(
s_D_out,
torch.FloatTensor(s_D_out.data.size()).fill_(1.0).cuda()
), [1,2,3])
if modal_idx >= self.modal_num:
loss_D += torch.mean(loss_temp_s)
elif torch.mean(torch.as_tensor((modal_idx==source_modal_ids_tensor), dtype=torch.float32)) == 0:
loss_D += 0.0
else:
loss_D += torch.mean(torch.masked_select(loss_temp_s, source_modal_ids_tensor==modal_idx))
_batch_size += (s_out.size(0) + t_out.size(0))
t_D_out = _d_net_DP(F.softmax(t_out.detach()))
loss_temp_t = torch.mean(self.bceloss(
t_D_out,
torch.FloatTensor(t_D_out.data.size()).fill_(0.0).cuda()
), [1,2,3])
if modal_idx >= self.modal_num:
loss_D += torch.mean(loss_temp_t)
elif torch.mean(torch.as_tensor((modal_idx==target_modal_ids_tensor), dtype=torch.float32)) == 0:
loss_D += 0.0
else:
loss_D += torch.mean(torch.masked_select(loss_temp_t, target_modal_ids_tensor==modal_idx))
loss_D *= self.cfg['training']['loss_adv_lambda']*0.5
loss_D_comb += loss_D
_disc_opt.zero_grad()
if loss_D_comb.item() != 0:
loss_D_comb.backward()
_disc_opt.step()
return loss_GTA, loss_adv, loss_D_comb
def process_label(self, label):
batch, channel, w, h = label.size()
pred1 = torch.zeros(batch, 20, w, h).cuda()
id = torch.where(label < 19, label, torch.Tensor([19]).cuda())
pred1 = pred1.scatter_(1, id.long(), 1)
return pred1
def class_vectors_alignment(self, ids, vectors, modal_ids=[0,]):
    """SmoothL1 alignment loss between batch prototypes and merged objective vectors.

    Args:
        ids: class id for each entry of `vectors`.
        vectors: per-class pooled feature vectors.
        modal_ids: modality indices whose objective vectors are merged.
    Returns:
        Scalar loss tensor (zero tensor when no valid class is present).
    """
    loss = torch.Tensor([0]).cuda()
    # BUGFIX: original referenced undefined names `modal_idx` and
    # `_objective_vectors` (NameError at runtime).
    _objective_vectors_set = self.metrics.multimodal_merger.merge_objective_vectors(modal_ids=modal_ids)
    # BUGFIX: guard against empty `ids` (original divided by len(ids) == 0).
    if len(ids) == 0:
        return loss
    for i in range(len(ids)):
        if ids[i] not in self.valid_classes:
            continue
        new_loss = self.smoothloss(vectors[i].squeeze().cuda(), _objective_vectors_set[ids[i]])
        # Damp outlier losses so a single bad prototype cannot dominate.
        while (new_loss.item() > 5):
            new_loss = new_loss / 10
        loss = loss + new_loss
    loss = loss / len(ids) * 10
    return loss
def freeze_bn_apply(self):
for net in self.nets:
net.apply(freeze_bn)
for net in self.nets_DP:
net.apply(freeze_bn)
def scheduler_step(self):
if self.use_pseudo_label:
for scheduler in self.schedulers:
scheduler.step()
else:
"""skipped _BaseScheduler_nouse"""
for scheduler in self.schedulers[1:]:
scheduler.step()
self.adjust_basenet_learning_rate()
def optimizer_zerograd(self):
for optimizer in self.optimizers:
optimizer.zero_grad()
def optimizer_step(self):
for opt in self.optimizers:
opt.step()
def init_device(self, net, gpu_id=None, whether_DP=False):
gpu_id = gpu_id or self.default_gpu
device = torch.device("cuda:{}".format(gpu_id) if torch.cuda.is_available() else 'cpu')
net = net.to(device)
if whether_DP:
net = DataParallelWithCallback(net, device_ids=range(torch.cuda.device_count()))
return net
def eval(self, net=None, logger=None):
    """Switch networks to eval mode for testing.

    With net=None every registered network (raw and DP-wrapped) is switched;
    otherwise only the given net. `logger`, when provided, receives an info
    message.
    """
    if net is None:
        for net in self.nets:
            net.eval()
        for net in self.nets_DP:
            net.eval()
        if logger is not None:
            logger.info("Successfully set the model eval mode")
    else:
        net.eval()
        if logger is not None:
            # BUGFIX: original called `logger(...)` directly, which crashes for
            # standard logging.Logger objects; use .info like the other branch.
            logger.info("Successfully set {} eval mode".format(net.__class__.__name__))
    return
def train(self, net=None, logger=None):
if net==None:
for net in self.nets:
net.train()
for net in self.nets_DP:
net.train()
else:
net.train()
return
def set_requires_grad(self, logger, net, requires_grad = False):
for parameter in net.parameters():
parameter.requires_grad = requires_grad
def set_requires_grad_layer(self, logger, net, layer_type='batchnorm', requires_grad=False):
for net in self.nets:
for _i in net.modules():
if _i.__class__.__name__.lower().find(layer_type.lower()) != -1:
_i.weight.requires_grad = requires_grad
return
def init_weights(self, cfg, logger, net, init_type='normal', init_gain=0.02):
init_type = cfg.get('init_type', init_type)
init_gain = cfg.get('init_gain', init_gain)
def init_func(m):
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if init_type == 'normal':
nn.init.normal_(m.weight.data, 0.0, init_gain)
elif init_type == 'xavier':
nn.init.xavier_normal_(m.weight.data, gain=init_gain)
elif init_type == 'kaiming':
nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
nn.init.orthogonal_(m.weight.data, gain=init_gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
nn.init.constant_(m.bias.data, 0.0)
elif isinstance(m, SynchronizedBatchNorm2d) or classname.find('BatchNorm2d') != -1 \
or isinstance(m, nn.GroupNorm):
m.weight.data.fill_(1)
m.bias.data.zero_()
print('initialize {} with {}'.format(init_type, net.__class__.__name__))
logger.info('initialize {} with {}'.format(init_type, net.__class__.__name__))
net.apply(init_func) # apply the initialization function <init_func>
pass
def adaptive_load_nets(self, net, model_weight):
model_dict = net.state_dict()
pretrained_dict = {k : v for k, v in model_weight.items() if k in model_dict}
# print("[INFO] Pretrained dict:", pretrained_dict.keys())
model_dict.update(pretrained_dict)
net.load_state_dict(model_dict)
    def load_nets(self, cfg, writer, logger): # load pretrained weights on the net
        """Resume all networks (and optionally optimizers/schedulers) from the
        checkpoint file at cfg['training']['resume'].

        The checkpoint maps each network class name to either a list of state
        dicts (one per instance of that class; current format) or a single
        state dict (deprecated format, warned about below).  Also restores
        self.iter.  Raises Exception when the checkpoint file does not exist.
        """
        if os.path.isfile(cfg['training']['resume']):
            logger.info(
                "Loading model and optimizer from checkpoint '{}'".format(cfg['training']['resume'])
            )
            checkpoint = torch.load(cfg['training']['resume'])
            _k = -1
            # net_state_no counts instances per class name, so repeated classes
            # pick successive entries from the checkpoint's per-class list.
            net_state_no = {}
            for net in self.nets:
                name = net.__class__.__name__
                if name not in net_state_no:
                    net_state_no[name] = 0
                else:
                    net_state_no[name] += 1
                # _k tracks the position in self.nets to index the parallel
                # self.optimizers / self.schedulers lists.
                _k += 1
                if checkpoint.get(name) == None:
                    continue
                # Skip GAN discriminators unless gan_resume is enabled.
                if name.find('FCDiscriminator') != -1 and cfg['training']['gan_resume'] == False:
                    continue
                if isinstance(checkpoint[name], list):
                    self.adaptive_load_nets(net, checkpoint[name][net_state_no[name]]["model_state"])
                else:
                    print("*****************************************")
                    print("[WARNING] Using depreciated load version! Model {}".format(name))
                    print("*****************************************")
                    self.adaptive_load_nets(net, checkpoint[name]["model_state"])
                if cfg['training']['optimizer_resume']:
                    if isinstance(checkpoint[name], list):
                        self.adaptive_load_nets(self.optimizers[_k], checkpoint[name][net_state_no[name]]["optimizer_state"])
                        self.adaptive_load_nets(self.schedulers[_k], checkpoint[name][net_state_no[name]]["scheduler_state"])
                    else:
                        self.adaptive_load_nets(self.optimizers[_k], checkpoint[name]["optimizer_state"])
                        self.adaptive_load_nets(self.schedulers[_k], checkpoint[name]["scheduler_state"])
            self.iter = checkpoint["iter"]
            #self.best_iou = checkpoint['best_iou']
            logger.info(
                "Loaded checkpoint '{}' (iter {})".format(
                    cfg['training']['resume'], checkpoint["iter"]
                )
            )
        else:
            raise Exception("No checkpoint found at '{}'".format(cfg['training']['resume']))
    def load_PredNet(self, cfg, writer, logger, dir=None, net=None): # load pretrained weights on the net
        """Load weights for a single prediction network from the checkpoint at
        `dir` (defaults to cfg['training']['Pred_resume']) and return the
        best_iou value recorded in that checkpoint.

        NOTE(review): `dir` shadows the builtin of the same name; kept as-is
        for interface compatibility.  Returns None (implicit) when the
        checkpoint lacks an entry for this net or it is a skipped
        discriminator; raises Exception when the file does not exist.
        """
        dir = dir or cfg['training']['Pred_resume']
        best_iou = 0
        if os.path.isfile(dir):
            logger.info(
                "Loading model and optimizer from checkpoint '{}'".format(dir)
            )
            checkpoint = torch.load(dir)
            name = net.__class__.__name__
            if checkpoint.get(name) == None:
                return
            if name.find('FCDiscriminator') != -1 and cfg['training']['gan_resume'] == False:
                return
            if isinstance(checkpoint[name], list):
                # Current format stores a list of per-instance states; PredNet
                # uses the first entry.
                self.adaptive_load_nets(net, checkpoint[name][0]["model_state"])
            else:
                self.adaptive_load_nets(net, checkpoint[name]["model_state"])
            iter = checkpoint["iter"]
            best_iou = checkpoint['best_iou']
            logger.info(
                "Loaded checkpoint '{}' (iter {}) (best iou {}) for PredNet".format(
                    dir, checkpoint["iter"], best_iou
                )
            )
        else:
            raise Exception("No checkpoint found at '{}'".format(dir))
        if hasattr(net, 'best_iou'):
            #net.best_iou = best_iou
            pass
        return best_iou
    def set_optimizer(self, optimizer): #set optimizer to all nets
        # Intentional no-op: kept to satisfy the trainer interface; optimizer
        # wiring appears to happen elsewhere — confirm before removing.
        pass
    def reset_objective_SingleVector(self,):
        """Zero out the class-prototype buffers on the GPU.

        Shapes: (modal_num+1, 19, 256) prototype vectors,
        (modal_num+1, 19) per-class sample counters, and
        (modal_num+1, 19, 19) pairwise prototype distances.
        NOTE(review): 19 looks like the number of semantic classes
        (Cityscapes?) and 256 the feature dimension — confirm.
        """
        self.objective_vectors_group = torch.zeros(self.modal_num + 1, 19, 256).cuda()
        self.objective_vectors_num_group = torch.zeros(self.modal_num + 1, 19).cuda()
        self.objective_vectors_dis_group = torch.zeros(self.modal_num + 1, 19, 19).cuda()
def update_objective_SingleVector(self, vectors, vectors_num, name='moving_average'):
#vector = vector.squeeze().detach()
if torch.sum(vectors) == 0:
return
if name == 'moving_average':
self.objective_vectors_group = self.objective_vectors_group * 0.9999 + 0.0001 * vectors
self.objective_vectors_num_group += vectors_num
self.objective_vectors_num_group = min(self.objective_vectors_num_group, 3000)
elif name == 'mean':
self.objective_vectors_group = self.objective_vectors_group * self.objective_vectors_num_group + vectors
self.objective_vectors_num_group += vectors_num
self.objective_vectors_group = self.objective_vectors_group / self.objective_vectors_num_group
self.objective_vectors_num_group = min(self.objective_vectors_num_group, 3000)
else:
raise NotImplementedError('no such updating way of objective vectors {}'.format(name))
def grad_reverse(x):
    # Gradient reversal layer entry point: GradReverse is defined elsewhere
    # in this module.  NOTE(review): instantiating and calling an autograd
    # Function object is the legacy (pre-0.4) PyTorch API — modern versions
    # require GradReverse.apply(x); confirm the torch version this targets.
    return GradReverse()(x)
| true | true |
f71b30ea10a42f00072f8fb902d4bdeca0fdac2c | 4,663 | py | Python | pose_sync_pytorch/generate_basic.py | lilly9117/Cross-Cutting | d534e8b5d4bf071883b7cb5f1832bba74b9a52d0 | [
"Apache-2.0"
] | null | null | null | pose_sync_pytorch/generate_basic.py | lilly9117/Cross-Cutting | d534e8b5d4bf071883b7cb5f1832bba74b9a52d0 | [
"Apache-2.0"
] | null | null | null | pose_sync_pytorch/generate_basic.py | lilly9117/Cross-Cutting | d534e8b5d4bf071883b7cb5f1832bba74b9a52d0 | [
"Apache-2.0"
] | null | null | null | import os
from moviepy.editor import VideoFileClip, concatenate_videoclips
import random
import numpy as np
import time
# from video_facial_landmarks_minmax import calculate_distance
from video_pose_landmarks import calculate_pose_distance
TEST = True # when True, only the first TEST_TIME seconds are generated for a quick check
TEST_TIME = 20 # length (seconds) of the quick-check output
INIT_NUM = float("Inf") # initial "best distance" sentinel for the matching loop
WINDOW_TIME = 10 # length (seconds) of each comparison window
PADDED_TIME = 4 # seconds kept after a switch: if a face is in close-up, matching keeps finding only close-ups, so skip ahead a few seconds
# def distance(reference_clip, clip):
# min_diff, min_idx, additional_info = calculate_distance(reference_clip, clip)
# return min_diff, min_idx
def pose_distance(reference_clip, clip):
    """Compare the poses appearing in two clips.

    Returns a (distance, frame_offset) pair: the smallest pose distance found
    and the offset within the window at which it occurs.  The extra info from
    calculate_pose_distance is discarded.
    """
    diff, frame_offset, _extra = calculate_pose_distance(reference_clip, clip)
    return diff, frame_offset
def crosscut(videos_path="./video", option="random"):
    """Build a stage-mix ("crosscut") video from every clip in `videos_path`.

    Switches between source videos either at random (option="random") or at
    the frame where the pose distance between the current clip and the best
    candidate clip is smallest.  The shortest source's audio becomes the
    soundtrack.  Writes crosscut_fiesta.mp4 and returns the final clip.
    """
    min_time = 1000.0
    min_idx = 0
    audioclip = None
    extracted_clips_array = []
    video_num = len(os.listdir(videos_path))
    start_times = [0] * video_num # VIDEO ALIGNMENT -> SLICE START TIME
    # VIDEO ALIGNMENT -> SLICE START TIME
    for i in range(len(os.listdir(videos_path))):
        video_path = os.path.join(videos_path, sorted(os.listdir(videos_path))[i])
        clip = VideoFileClip(video_path)
        clip = clip.subclip(start_times[i], clip.duration) # trim the full video so all sources start aligned
        print(video_path, clip.fps, clip.duration)
        if min_time > clip.duration: # track the shortest clip; its audio becomes the soundtrack
            audioclip = clip.audio
            min_time = clip.duration
            min_idx = i
        print(video_path, clip.fps, clip.duration)
        extracted_clips_array.append(clip)
    print(len(extracted_clips_array))
    if TEST: # in test mode generate only the first TEST_TIME seconds for a quick check
        min_time = TEST_TIME
        audioclip = audioclip.set_duration(TEST_TIME)
    # GENERATE STAGEMIX
    # CONCAT SUBCLIP 0~ MIN DURATION CLIP TIME
    con_clips = []
    t = 3 # start with the first 3 seconds of the first video (INIT)
    current_idx = 0 # INIT
    con_clips.append(extracted_clips_array[current_idx].subclip(0, min(t, int(min_time))))
    while t < min_time:
        # examine one WINDOW_TIME (10 sec.) window per iteration
        cur_t = t
        next_t = min(t+WINDOW_TIME, min_time) # the last window may be shorter than WINDOW_TIME
        # RANDOM BASED METHOD
        if option=="random" or min(min_time,t + PADDED_TIME)==min_time:
            random_video_idx = random.randint(0, len(extracted_clips_array)-1)
            clip = extracted_clips_array[random_video_idx].subclip(cur_t, next_t)
            t = next_t
            con_clips.append(clip)
        else:
            # the currently selected video's window
            reference_clip = extracted_clips_array[current_idx].subclip(cur_t, next_t)
            d = INIT_NUM
            cur_clip = None
            # fallback index used when every distance stays at infinity
            min_idx = (current_idx+1)%len(extracted_clips_array)
            for video_idx in range(len(extracted_clips_array)):
                if video_idx == current_idx:
                    continue
                # inspect the candidate video over the same window
                clip = extracted_clips_array[video_idx].subclip(cur_t, next_t)
                # NOTE(review): the already-consumed leading part should be
                # skipped (the first 3 seconds of video 0 can never overlap),
                # and the best-matching pose frame may be missed this way.
                # CALCULATE DISTANCE
                cur_d, plus_frame = pose_distance(reference_clip, clip)
                print(current_idx, video_idx, cur_d, cur_t + plus_frame)
                if d > cur_d:
                    d = cur_d
                    min_idx = video_idx
                    next_t = cur_t + plus_frame # frame offset at which to switch videos
                    cur_clip = reference_clip.subclip(0, plus_frame)
                    # next_clip = clip.subclip(0, plus_frame) # clip of just the switch-over part
            # current clip: cut at the best switch point, or append the full window
            if cur_clip: # a switch point was found
                clip = cur_clip # current clip trimmed at the best switch point
            else:
                clip = reference_clip # the whole window of the current clip
            t = next_t
            con_clips.append(clip)
            # next clip: always append PADDED_TIME seconds of the newly chosen
            # video so the mix does not instantly switch away again
            current_idx = min_idx # the video whose pose matched most closely
            print("idx : {}".format(current_idx))
            pad_clip = extracted_clips_array[current_idx].subclip(t, min(min_time,t+PADDED_TIME)) # must not run past min_time
            t = min(min_time,t + PADDED_TIME) # account for the padded seconds
            con_clips.append(pad_clip)
    final_clip = concatenate_videoclips(con_clips)
    if audioclip !=None:
        print("Not None")
        final_clip.audio = audioclip
    final_clip.write_videofile("crosscut_fiesta.mp4")
    return final_clip
if __name__ == '__main__':
    # Guarded entry point: lets the module be imported (e.g. to reuse
    # crosscut()) without triggering a full video generation run.
    start_time = time.time()
    crosscut(videos_path="./video", option="norandom")
    end_time = time.time()
    print(end_time - start_time)  # total generation time in seconds
| 36.716535 | 118 | 0.619558 | import os
from moviepy.editor import VideoFileClip, concatenate_videoclips
import random
import numpy as np
import time
from video_pose_landmarks import calculate_pose_distance
TEST = True
TEST_TIME = 20
INIT_NUM = float("Inf")
WINDOW_TIME = 10
PADDED_TIME = 4
def pose_distance(reference_clip, clip):
min_diff, min_idx, additional_info = calculate_pose_distance(reference_clip, clip)
return min_diff, min_idx
def crosscut(videos_path="./video", option="random"):
min_time = 1000.0
min_idx = 0
audioclip = None
extracted_clips_array = []
video_num = len(os.listdir(videos_path))
start_times = [0] * video_num
for i in range(len(os.listdir(videos_path))):
video_path = os.path.join(videos_path, sorted(os.listdir(videos_path))[i])
clip = VideoFileClip(video_path)
clip = clip.subclip(start_times[i], clip.duration)
print(video_path, clip.fps, clip.duration)
if min_time > clip.duration:
audioclip = clip.audio
min_time = clip.duration
min_idx = i
print(video_path, clip.fps, clip.duration)
extracted_clips_array.append(clip)
print(len(extracted_clips_array))
if TEST:
min_time = TEST_TIME
audioclip = audioclip.set_duration(TEST_TIME)
con_clips = []
t = 3
current_idx = 0
con_clips.append(extracted_clips_array[current_idx].subclip(0, min(t, int(min_time))))
while t < min_time:
cur_t = t
next_t = min(t+WINDOW_TIME, min_time)
if option=="random" or min(min_time,t + PADDED_TIME)==min_time:
random_video_idx = random.randint(0, len(extracted_clips_array)-1)
clip = extracted_clips_array[random_video_idx].subclip(cur_t, next_t)
t = next_t
con_clips.append(clip)
else:
reference_clip = extracted_clips_array[current_idx].subclip(cur_t, next_t)
d = INIT_NUM
cur_clip = None
min_idx = (current_idx+1)%len(extracted_clips_array)
for video_idx in range(len(extracted_clips_array)):
if video_idx == current_idx:
continue
clip = extracted_clips_array[video_idx].subclip(cur_t, next_t)
cur_d, plus_frame = pose_distance(reference_clip, clip)
print(current_idx, video_idx, cur_d, cur_t + plus_frame)
if d > cur_d:
d = cur_d
min_idx = video_idx
next_t = cur_t + plus_frame
cur_clip = reference_clip.subclip(0, plus_frame)
if cur_clip:
clip = cur_clip
else:
clip = reference_clip
t = next_t
con_clips.append(clip)
current_idx = min_idx
print("idx : {}".format(current_idx))
pad_clip = extracted_clips_array[current_idx].subclip(t, min(min_time,t+PADDED_TIME))
t = min(min_time,t + PADDED_TIME)
con_clips.append(pad_clip)
final_clip = concatenate_videoclips(con_clips)
if audioclip !=None:
print("Not None")
final_clip.audio = audioclip
final_clip.write_videofile("crosscut_fiesta.mp4")
return final_clip
start_time = time.time()
crosscut(videos_path="./video", option="norandom")
end_time = time.time()
print(end_time - start_time)
| true | true |
f71b30f5180889ca3dd9df9574f878409863dfe6 | 1,003 | py | Python | Tests/test_TimeAverager.py | chipgarner/yourair | 22415389256cfa283e817970d6c79c187cbded4c | [
"MIT"
] | null | null | null | Tests/test_TimeAverager.py | chipgarner/yourair | 22415389256cfa283e817970d6c79c187cbded4c | [
"MIT"
] | null | null | null | Tests/test_TimeAverager.py | chipgarner/yourair | 22415389256cfa283e817970d6c79c187cbded4c | [
"MIT"
] | null | null | null | import Averager as Avg
class Average:
    """Test double that records the latest (average, timespan) pair it receives."""

    def __init__(self):
        self.answer = 0
        self.delta_time = 0

    def average(self, avg, delta_t):
        """Callback for TimeAverager: store the reported values, then log them."""
        self.answer = avg
        self.delta_time = delta_t
        print("Average: " + str(avg) + " Timespan: " + str(delta_t))
def test_averager():
    # With a window of one sample, the first update must fire the callback
    # immediately with the sample itself and a positive timespan.
    recorder = Average()
    timed_avg = Avg.TimeAverager(1, recorder.average)
    timed_avg.update_average(12.345)
    assert recorder.answer == 12.345
    assert recorder.delta_time > 0
def test_averager_averages():
    # With a window of ten samples the callback must stay silent for the
    # first nine updates and then report the mean of 1..10 (5.5).
    recorder = Average()
    timed_avg = Avg.TimeAverager(10, recorder.average)
    for sample in range(1, 10):
        timed_avg.update_average(sample)
        assert recorder.answer == 0
        assert recorder.delta_time == 0
    timed_avg.update_average(10)
    assert recorder.answer == 5.5
    assert recorder.delta_time > 0
def test_averager_function_none():
    # Passing None as the callback must be tolerated by update_average.
    timed_avg = Avg.TimeAverager(1, None)
    timed_avg.update_average(12.345)
| 20.469388 | 68 | 0.639083 | import Averager as Avg
class Average:
def __init__(self):
self.answer = 0
self.delta_time = 0
def average(self, avg, delta_t):
print("Average: " + str(avg) + " Timespan: " + str(delta_t))
self.answer = avg
self.delta_time = delta_t
def test_averager():
avg = Average()
averager = Avg.TimeAverager(1, avg.average)
averager.update_average(12.345)
assert avg.answer == 12.345
assert avg.delta_time > 0
def test_averager_averages():
avg = Average()
averager = Avg.TimeAverager(10, avg.average)
averager.update_average(1)
assert avg.answer == 0
assert avg.delta_time == 0
for i in range(2, 10):
averager.update_average(i)
assert avg.answer == 0
assert avg.delta_time == 0
averager.update_average(10)
assert avg.answer == 5.5
assert avg.delta_time > 0
def test_averager_function_none():
averager = Avg.TimeAverager(1, None)
averager.update_average(12.345)
| true | true |
f71b315d6312d73a4f7581bd22785f23c8cb7785 | 5,935 | py | Python | sprokit/tests/bindings/python/sprokit/pipeline/test-scheduler_registry.py | dstoup/kwiver | a3a36317b446baf0feb6274235ab1ac6b4329ead | [
"BSD-3-Clause"
] | 1 | 2017-07-31T07:07:32.000Z | 2017-07-31T07:07:32.000Z | sprokit/tests/bindings/python/sprokit/pipeline/test-scheduler_registry.py | dstoup/kwiver | a3a36317b446baf0feb6274235ab1ac6b4329ead | [
"BSD-3-Clause"
] | 3 | 2021-03-19T15:39:43.000Z | 2021-09-08T02:47:15.000Z | sprokit/tests/bindings/python/sprokit/pipeline/test-scheduler_registry.py | acidburn0zzz/kwiver | 6e4205f1c46df04759c57c040f01cc804b27e00d | [
"BSD-3-Clause"
] | null | null | null | #!@PYTHON_EXECUTABLE@
#ckwg +28
# Copyright 2011-2013 by Kitware, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither name of Kitware, Inc. nor the names of any contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def test_import():
    # Smoke test: the sprokit scheduler_factory python bindings must import.
    # (test_error is injected by the sprokit test harness at runtime.)
    try:
        from sprokit.pipeline import config
        import sprokit.pipeline.scheduler_factory
    except:
        test_error("Failed to import the scheduler_factory module")
def test_create():
    # The factory's helper types must be default-constructible.
    from sprokit.pipeline import config
    from sprokit.pipeline import scheduler_factory
    scheduler_factory.SchedulerType()
    ## scheduler_factory.SchedulerTypes()
    scheduler_factory.SchedulerDescription()
    scheduler_factory.SchedulerModule()
def test_api_calls():
    # Exercise the main scheduler_factory entry points against a built-in
    # scheduler type; any exception fails the test.
    from sprokit.pipeline import config
    from sprokit.pipeline import modules
    from sprokit.pipeline import pipeline
    from sprokit.pipeline import scheduler_factory
    modules.load_known_modules()
    sched_type = 'thread_per_process'
    c = config.empty_config()
    p = pipeline.Pipeline()
    # Creation with and without an explicit config.
    scheduler_factory.create_scheduler(sched_type, p)
    scheduler_factory.create_scheduler(sched_type, p, c)
    scheduler_factory.types()
    scheduler_factory.description(sched_type)
    # Attribute access only — verifies default_type is exposed.
    scheduler_factory.default_type
def example_scheduler(check_init):
    """Build a PythonScheduler subclass whose lifecycle overrides record that
    they were invoked.

    With check_init=False the flags start unset and __del__ reports (via
    test_error) any override that never ran; with check_init=True the flags
    start satisfied, which disables those checks.
    """
    from sprokit.pipeline import scheduler
    class PythonExample(scheduler.PythonScheduler):
        def __init__(self, pipe, conf):
            scheduler.PythonScheduler.__init__(self, pipe, conf)
            self.ran_start = check_init
            self.ran_wait = check_init
            self.ran_stop = check_init
            self.ran_pause = check_init
            self.ran_resume = check_init
        def _start(self):
            self.ran_start = True
        def _wait(self):
            self.ran_wait = True
        def _stop(self):
            self.ran_stop = True
        def _pause(self):
            self.ran_pause = True
        def _resume(self):
            self.ran_resume = True
        def __del__(self):
            # Verify every lifecycle override was driven by the wrapper.
            if not self.ran_start:
                test_error("start override was not called")
            if not self.ran_wait:
                test_error("wait override was not called")
            if not self.ran_stop:
                test_error("stop override was not called")
            if not self.ran_pause:
                test_error("pause override was not called")
            if not self.ran_resume:
                test_error("resume override was not called")
    return PythonExample
def test_register():
    # Registering a new python scheduler type must preserve its description
    # and make it creatable through the factory.
    from sprokit.pipeline import config
    from sprokit.pipeline import modules
    from sprokit.pipeline import pipeline
    from sprokit.pipeline import scheduler_factory
    modules.load_known_modules()
    sched_type = 'python_example'
    sched_desc = 'simple description'
    # check_init=True: the example class's __del__ checks are disabled here.
    scheduler_factory.add_scheduler(sched_type, sched_desc, example_scheduler(True))
    if not sched_desc == scheduler_factory.description(sched_type):
        test_error("Description was not preserved when registering")
    p = pipeline.Pipeline()
    try:
        s = scheduler_factory.create_scheduler(sched_type, p)
        if s is None:
            raise Exception()
    except:
        test_error("Could not create newly registered scheduler type")
def test_wrapper_api():
    # Exercise the scheduler wrapper lifecycle (start/pause/resume/stop/wait)
    # on a freshly registered python scheduler driving a one-process pipeline.
    from sprokit.pipeline import config
    from sprokit.pipeline import modules
    from sprokit.pipeline import pipeline
    from sprokit.pipeline import process_factory
    from sprokit.pipeline import scheduler_factory
    sched_type = 'python_example'
    sched_desc = 'simple description'
    modules.load_known_modules()
    # check_init=False: the example scheduler's __del__ verifies that every
    # lifecycle override exercised below actually ran.
    scheduler_factory.add_scheduler(sched_type, sched_desc, example_scheduler(False))
    p = pipeline.Pipeline()
    proc_type = 'orphan'
    proc_name = 'orphan'
    proc = process_factory.create_process(proc_type, proc_name)
    p.add_process(proc)
    def check_scheduler(s):
        # Drive the full wrapper API; each call should reach the overrides.
        if s is None:
            test_error("Got a 'None' scheduler")
            return
        s.start()
        s.pause()
        s.resume()
        s.stop()
        s.start()
        s.wait()
        del s
    p.reset()
    p.setup_pipeline()
    s = scheduler_factory.create_scheduler(sched_type, p)
    check_scheduler(s)
if __name__ == '__main__':
    import os
    import sys
    # argv: <test name> <working directory> <path to sprokit python test utils>
    if not len(sys.argv) == 4:
        test_error("Expected three arguments")
        sys.exit(1)
    testname = sys.argv[1]
    os.chdir(sys.argv[2])
    # Make the sprokit test helper package importable.
    sys.path.append(sys.argv[3])
    from sprokit.test.test import *
    run_test(testname, find_tests(locals()))
| 29.824121 | 85 | 0.694356 |
def test_import():
try:
from sprokit.pipeline import config
import sprokit.pipeline.scheduler_factory
except:
test_error("Failed to import the scheduler_factory module")
def test_create():
from sprokit.pipeline import config
from sprokit.pipeline import scheduler_factory
scheduler_factory.SchedulerType()
ription()
scheduler_factory.SchedulerModule()
def test_api_calls():
from sprokit.pipeline import config
from sprokit.pipeline import modules
from sprokit.pipeline import pipeline
from sprokit.pipeline import scheduler_factory
modules.load_known_modules()
sched_type = 'thread_per_process'
c = config.empty_config()
p = pipeline.Pipeline()
scheduler_factory.create_scheduler(sched_type, p)
scheduler_factory.create_scheduler(sched_type, p, c)
scheduler_factory.types()
scheduler_factory.description(sched_type)
scheduler_factory.default_type
def example_scheduler(check_init):
from sprokit.pipeline import scheduler
class PythonExample(scheduler.PythonScheduler):
def __init__(self, pipe, conf):
scheduler.PythonScheduler.__init__(self, pipe, conf)
self.ran_start = check_init
self.ran_wait = check_init
self.ran_stop = check_init
self.ran_pause = check_init
self.ran_resume = check_init
def _start(self):
self.ran_start = True
def _wait(self):
self.ran_wait = True
def _stop(self):
self.ran_stop = True
def _pause(self):
self.ran_pause = True
def _resume(self):
self.ran_resume = True
def __del__(self):
if not self.ran_start:
test_error("start override was not called")
if not self.ran_wait:
test_error("wait override was not called")
if not self.ran_stop:
test_error("stop override was not called")
if not self.ran_pause:
test_error("pause override was not called")
if not self.ran_resume:
test_error("resume override was not called")
return PythonExample
def test_register():
from sprokit.pipeline import config
from sprokit.pipeline import modules
from sprokit.pipeline import pipeline
from sprokit.pipeline import scheduler_factory
modules.load_known_modules()
sched_type = 'python_example'
sched_desc = 'simple description'
scheduler_factory.add_scheduler(sched_type, sched_desc, example_scheduler(True))
if not sched_desc == scheduler_factory.description(sched_type):
test_error("Description was not preserved when registering")
p = pipeline.Pipeline()
try:
s = scheduler_factory.create_scheduler(sched_type, p)
if s is None:
raise Exception()
except:
test_error("Could not create newly registered scheduler type")
def test_wrapper_api():
from sprokit.pipeline import config
from sprokit.pipeline import modules
from sprokit.pipeline import pipeline
from sprokit.pipeline import process_factory
from sprokit.pipeline import scheduler_factory
sched_type = 'python_example'
sched_desc = 'simple description'
modules.load_known_modules()
scheduler_factory.add_scheduler(sched_type, sched_desc, example_scheduler(False))
p = pipeline.Pipeline()
proc_type = 'orphan'
proc_name = 'orphan'
proc = process_factory.create_process(proc_type, proc_name)
p.add_process(proc)
def check_scheduler(s):
if s is None:
test_error("Got a 'None' scheduler")
return
s.start()
s.pause()
s.resume()
s.stop()
s.start()
s.wait()
del s
p.reset()
p.setup_pipeline()
s = scheduler_factory.create_scheduler(sched_type, p)
check_scheduler(s)
if __name__ == '__main__':
import os
import sys
if not len(sys.argv) == 4:
test_error("Expected three arguments")
sys.exit(1)
testname = sys.argv[1]
os.chdir(sys.argv[2])
sys.path.append(sys.argv[3])
from sprokit.test.test import *
run_test(testname, find_tests(locals()))
| true | true |
f71b32a53e1eb2f384ead41803a3f5892542c5b5 | 6,308 | py | Python | dbms/tests/queries/0_stateless/helpers/uexpect.py | sunadm/ClickHouse | 55903fbe23ef6dff8fc7ec25ae68e04919bc9b7f | [
"Apache-2.0"
] | 8 | 2019-06-04T02:50:13.000Z | 2022-02-10T06:46:51.000Z | dbms/tests/queries/0_stateless/helpers/uexpect.py | sunadm/ClickHouse | 55903fbe23ef6dff8fc7ec25ae68e04919bc9b7f | [
"Apache-2.0"
] | 16 | 2021-06-07T21:32:30.000Z | 2022-03-31T21:08:29.000Z | dbms/tests/queries/0_stateless/helpers/uexpect.py | sunadm/ClickHouse | 55903fbe23ef6dff8fc7ec25ae68e04919bc9b7f | [
"Apache-2.0"
] | 3 | 2020-02-24T12:57:54.000Z | 2021-10-04T13:29:00.000Z | # Copyright (c) 2019 Vitaliy Zakaznikov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pty
import time
import sys
import re
from threading import Thread, Event
from subprocess import Popen
from Queue import Queue, Empty
class TimeoutError(Exception):
    """Raised when a read operation exceeds its time budget.

    NOTE: deliberately shares its name with (and shadows) the builtin
    TimeoutError within this module.
    """

    def __init__(self, timeout):
        self.timeout = timeout

    def __str__(self):
        seconds = float(self.timeout)
        return 'Timeout %.3fs' % seconds
class ExpectTimeoutError(Exception):
    """Raised when expect() fails to match its pattern within the timeout.

    Carries the compiled pattern, the timeout and the buffered output so the
    message can show what was (not) matched.
    """

    def __init__(self, pattern, timeout, buffer):
        self.pattern = pattern
        self.timeout = timeout
        self.buffer = buffer

    def __str__(self):
        message = 'Timeout %.3fs ' % float(self.timeout)
        if self.pattern:
            message += 'for %s ' % repr(self.pattern.pattern)
        if self.buffer:
            message += 'buffer %s ' % repr(self.buffer[:])
        # NOTE: this line runs even when buffer is falsy; building the
        # exception with buffer=None makes str() raise TypeError here
        # (pre-existing behavior, preserved).
        message += 'or \'%s\'' % ','.join(['%x' % ord(c) for c in self.buffer[:]])
        return message
class IO(object):
    """Expect-style wrapper around a child process attached to a pty.

    Provides send()/expect() interaction with regex matching, timeouts and
    optional prefixed logging of the conversation.  (Python 2 era code — see
    the `from Queue import ...` at the top of the file.)
    """
    class EOF(object):
        # Marker type: end of the child's output stream.
        pass
    class Timeout(object):
        # Marker type: an operation exceeded its time budget.
        pass
    EOF = EOF
    TIMEOUT = Timeout
    class Logger(object):
        """Wraps a writable stream, prefixing every logged line."""
        def __init__(self, logger, prefix=''):
            self._logger = logger
            self._prefix = prefix
        def write(self, data):
            # Insert the prefix after every newline in the logged data.
            self._logger.write(('\n' + data).replace('\n','\n' + self._prefix))
        def flush(self):
            self._logger.flush()
    def __init__(self, process, master, queue, reader):
        # process: the Popen child; master: master side of its pty.
        # queue: raw output chunks produced by the background reader thread.
        # reader: dict holding the reader 'thread' and its 'kill_event'.
        self.process = process
        self.master = master
        self.queue = queue
        self.buffer = None      # unconsumed child output accumulated by expect()
        self.before = None      # text preceding the last match
        self.after = None       # the matched text itself
        self.match = None       # last re match object
        self.pattern = None
        self.reader = reader
        self._timeout = None    # default timeout (seconds) for expect()
        self._logger = None
        self._eol = ''          # line terminator appended by send()
    def __enter__(self):
        return self
    def __exit__(self, type, value, traceback):
        self.close()
    def logger(self, logger=None, prefix=''):
        # Getter/setter: installs a prefixed Logger when a stream is passed.
        if logger:
            self._logger = self.Logger(logger, prefix=prefix)
        return self._logger
    def timeout(self, timeout=None):
        # Getter/setter for the default expect() timeout (seconds).
        if timeout:
            self._timeout = timeout
        return self._timeout
    def eol(self, eol=None):
        # Getter/setter for the terminator appended by send().
        if eol:
            self._eol = eol
        return self._eol
    def close(self, force=True):
        """Stop the reader thread and terminate the child process tree."""
        self.reader['kill_event'].set()
        # Kill the direct children of the spawned process first.
        os.system('pkill -TERM -P %d' % self.process.pid)
        if force:
            self.process.kill()
        else:
            self.process.terminate()
        os.close(self.master)
        if self._logger:
            self._logger.write('\n')
            self._logger.flush()
    def send(self, data, eol=None):
        """Write `data` followed by the line terminator (default self._eol)."""
        if eol is None:
            eol = self._eol
        return self.write(data + eol)
    def write(self, data):
        return os.write(self.master, data)
    def expect(self, pattern, timeout=None, escape=False):
        """Wait until `pattern` matches the child's output or `timeout` expires.

        On success returns the match object and sets self.before/self.after;
        leftover output stays in self.buffer for the next call.  On timeout
        raises ExpectTimeoutError and discards the buffer.
        """
        self.match = None
        self.before = None
        self.after = None
        if escape:
            pattern = re.escape(pattern)
        pattern = re.compile(pattern)
        if timeout is None:
            timeout = self._timeout
        timeleft = timeout
        while True:
            start_time = time.time()
            if self.buffer is not None:
                self.match = pattern.search(self.buffer, 0)
                if self.match is not None:
                    self.after = self.buffer[self.match.start():self.match.end()]
                    self.before = self.buffer[:self.match.start()]
                    self.buffer = self.buffer[self.match.end():]
                    break
            if timeleft < 0:
                break
            try:
                data = self.read(timeout=timeleft, raise_exception=True)
            except TimeoutError:
                if self._logger:
                    self._logger.write((self.buffer or '') + '\n')
                    self._logger.flush()
                exception = ExpectTimeoutError(pattern, timeout, self.buffer)
                self.buffer = None
                raise exception
            # Charge the time spent reading against the remaining budget.
            timeleft -= (time.time() - start_time)
            if data:
                self.buffer = (self.buffer + data) if self.buffer else data
        if self._logger:
            self._logger.write((self.before or '') + (self.after or ''))
            self._logger.flush()
        if self.match is None:
            exception = ExpectTimeoutError(pattern, timeout, self.buffer)
            self.buffer = None
            raise exception
        return self.match
    def read(self, timeout=0, raise_exception=False):
        """Pull the next chunk of child output from the reader queue.

        Waits up to `timeout` seconds.  When raise_exception is set, raises
        TimeoutError instead of returning '' on expiry.
        """
        data = ''
        timeleft = timeout
        try:
            while timeleft >= 0 :
                start_time = time.time()
                data += self.queue.get(timeout=timeleft)
                if data:
                    break
                timeleft -= (time.time() - start_time)
        except Empty:
            if data:
                return data
            if raise_exception:
                raise TimeoutError(timeout)
            pass
        if not data and raise_exception:
            raise TimeoutError(timeout)
        return data
def spawn(command):
    """Launch `command` attached to a fresh pseudo-terminal and return an IO
    wrapper around it.

    A daemon thread continuously drains the pty into a queue so the child
    never blocks on a full terminal buffer.
    """
    master, slave = pty.openpty()
    # setsid: run the child in its own session, detached from our terminal.
    process = Popen(command, preexec_fn=os.setsid, stdout=slave, stdin=slave, stderr=slave, bufsize=1)
    os.close(slave)
    queue = Queue()
    reader_kill_event = Event()
    thread = Thread(target=reader, args=(process, master, queue, reader_kill_event))
    thread.daemon = True
    thread.start()
    return IO(process, master, queue, reader={'thread':thread, 'kill_event':reader_kill_event})
def reader(process, out, queue, kill_event):
    """Background loop: forward everything read from fd `out` into `queue`.

    Exits quietly when a read fails after kill_event is set (the fd was
    closed by IO.close()); any other failure is re-raised.
    """
    while True:
        try:
            data = os.read(out, 65536)
            queue.put(data)
        except:
            if kill_event.is_set():
                break
            raise
| 30.47343 | 102 | 0.566265 |
import os
import pty
import time
import sys
import re
from threading import Thread, Event
from subprocess import Popen
from Queue import Queue, Empty
class TimeoutError(Exception):
def __init__(self, timeout):
self.timeout = timeout
def __str__(self):
return 'Timeout %.3fs' % float(self.timeout)
class ExpectTimeoutError(Exception):
def __init__(self, pattern, timeout, buffer):
self.pattern = pattern
self.timeout = timeout
self.buffer = buffer
def __str__(self):
s = 'Timeout %.3fs ' % float(self.timeout)
if self.pattern:
s += 'for %s ' % repr(self.pattern.pattern)
if self.buffer:
s += 'buffer %s ' % repr(self.buffer[:])
s += 'or \'%s\'' % ','.join(['%x' % ord(c) for c in self.buffer[:]])
return s
class IO(object):
class EOF(object):
pass
class Timeout(object):
pass
EOF = EOF
TIMEOUT = Timeout
class Logger(object):
def __init__(self, logger, prefix=''):
self._logger = logger
self._prefix = prefix
def write(self, data):
self._logger.write(('\n' + data).replace('\n','\n' + self._prefix))
def flush(self):
self._logger.flush()
def __init__(self, process, master, queue, reader):
self.process = process
self.master = master
self.queue = queue
self.buffer = None
self.before = None
self.after = None
self.match = None
self.pattern = None
self.reader = reader
self._timeout = None
self._logger = None
self._eol = ''
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def logger(self, logger=None, prefix=''):
if logger:
self._logger = self.Logger(logger, prefix=prefix)
return self._logger
def timeout(self, timeout=None):
if timeout:
self._timeout = timeout
return self._timeout
def eol(self, eol=None):
if eol:
self._eol = eol
return self._eol
def close(self, force=True):
self.reader['kill_event'].set()
os.system('pkill -TERM -P %d' % self.process.pid)
if force:
self.process.kill()
else:
self.process.terminate()
os.close(self.master)
if self._logger:
self._logger.write('\n')
self._logger.flush()
def send(self, data, eol=None):
if eol is None:
eol = self._eol
return self.write(data + eol)
def write(self, data):
return os.write(self.master, data)
def expect(self, pattern, timeout=None, escape=False):
self.match = None
self.before = None
self.after = None
if escape:
pattern = re.escape(pattern)
pattern = re.compile(pattern)
if timeout is None:
timeout = self._timeout
timeleft = timeout
while True:
start_time = time.time()
if self.buffer is not None:
self.match = pattern.search(self.buffer, 0)
if self.match is not None:
self.after = self.buffer[self.match.start():self.match.end()]
self.before = self.buffer[:self.match.start()]
self.buffer = self.buffer[self.match.end():]
break
if timeleft < 0:
break
try:
data = self.read(timeout=timeleft, raise_exception=True)
except TimeoutError:
if self._logger:
self._logger.write((self.buffer or '') + '\n')
self._logger.flush()
exception = ExpectTimeoutError(pattern, timeout, self.buffer)
self.buffer = None
raise exception
timeleft -= (time.time() - start_time)
if data:
self.buffer = (self.buffer + data) if self.buffer else data
if self._logger:
self._logger.write((self.before or '') + (self.after or ''))
self._logger.flush()
if self.match is None:
exception = ExpectTimeoutError(pattern, timeout, self.buffer)
self.buffer = None
raise exception
return self.match
def read(self, timeout=0, raise_exception=False):
data = ''
timeleft = timeout
try:
while timeleft >= 0 :
start_time = time.time()
data += self.queue.get(timeout=timeleft)
if data:
break
timeleft -= (time.time() - start_time)
except Empty:
if data:
return data
if raise_exception:
raise TimeoutError(timeout)
pass
if not data and raise_exception:
raise TimeoutError(timeout)
return data
def spawn(command):
master, slave = pty.openpty()
process = Popen(command, preexec_fn=os.setsid, stdout=slave, stdin=slave, stderr=slave, bufsize=1)
os.close(slave)
queue = Queue()
reader_kill_event = Event()
thread = Thread(target=reader, args=(process, master, queue, reader_kill_event))
thread.daemon = True
thread.start()
return IO(process, master, queue, reader={'thread':thread, 'kill_event':reader_kill_event})
def reader(process, out, queue, kill_event):
while True:
try:
data = os.read(out, 65536)
queue.put(data)
except:
if kill_event.is_set():
break
raise
| true | true |
f71b32db7390644ae31d4b98d84b70883c847091 | 1,777 | py | Python | generate/uk.py | mczub/because-moe | d57164399832e32f505a081d7196e0f3828a6e35 | [
"Unlicense",
"MIT"
] | 78 | 2015-09-09T00:48:19.000Z | 2022-02-25T14:18:46.000Z | generate/uk.py | mczub/because-moe | d57164399832e32f505a081d7196e0f3828a6e35 | [
"Unlicense",
"MIT"
] | 15 | 2015-09-09T03:56:29.000Z | 2020-01-03T07:18:50.000Z | generate/uk.py | mczub/because-moe | d57164399832e32f505a081d7196e0f3828a6e35 | [
"Unlicense",
"MIT"
] | 22 | 2015-09-09T03:05:37.000Z | 2021-07-24T07:35:59.000Z | import sys
sys.path.append("site-packages")
import json
import string
from unidecode import unidecode
from urllib import parse
from azure.storage.blob import BlockBlobService
from datetime import datetime
import animesources
indexedShows = {}
shows = []
with open('title-map.json') as titlemap_file:
titlemap = json.load(titlemap_file)
with open('multi-season.json') as multiseason_file:
multiseason = json.load(multiseason_file)
with open('azure.json') as azure_file:
azure_storage = json.load(azure_file)
azure_blob = BlockBlobService(account_name=azure_storage['account'], account_key=azure_storage['key'])
with open('proxies.json') as proxies_file:
proxy_data = json.load(proxies_file)
proxy = proxy_data['uk']
sources = [
animesources.Crunchyroll(titlemap, multiseason, 'uk', proxy),
animesources.Funimation(titlemap, multiseason, 'gb', proxy),
animesources.Netflix(titlemap, multiseason, 'uk', proxy),
animesources.HiDive(titlemap, multiseason, 'uk', proxy),
animesources.AmazonPrime(titlemap, multiseason, 'uk', proxy)
]
for source in sources:
source.UpdateShowList(indexedShows)
print(source.GetName() + ': ' + str(len(indexedShows)))
shows = indexedShows.values()
with open('alternates.json') as alternates_file:
alternates = json.load(alternates_file)
for alternate in alternates:
match_index = next((i for i, x in enumerate(shows) if animesources.compare(x['name'], alternate)), False)
if (match_index):
shows[match_index]['alt'] = alternates[alternate]
shows = sorted(shows, key = lambda show: show['name'].lower())
blob = {"lastUpdated": datetime.utcnow().isoformat(), "shows": shows}
out_file = open('uk.json', 'w')
json.dump(blob, out_file)
out_file.close()
azure_blob.create_blob_from_path(
'assets',
'uk.json',
'uk.json'
)
print('done') | 34.843137 | 106 | 0.758582 | import sys
sys.path.append("site-packages")
import json
import string
from unidecode import unidecode
from urllib import parse
from azure.storage.blob import BlockBlobService
from datetime import datetime
import animesources
indexedShows = {}
shows = []
with open('title-map.json') as titlemap_file:
titlemap = json.load(titlemap_file)
with open('multi-season.json') as multiseason_file:
multiseason = json.load(multiseason_file)
with open('azure.json') as azure_file:
azure_storage = json.load(azure_file)
azure_blob = BlockBlobService(account_name=azure_storage['account'], account_key=azure_storage['key'])
with open('proxies.json') as proxies_file:
proxy_data = json.load(proxies_file)
proxy = proxy_data['uk']
sources = [
animesources.Crunchyroll(titlemap, multiseason, 'uk', proxy),
animesources.Funimation(titlemap, multiseason, 'gb', proxy),
animesources.Netflix(titlemap, multiseason, 'uk', proxy),
animesources.HiDive(titlemap, multiseason, 'uk', proxy),
animesources.AmazonPrime(titlemap, multiseason, 'uk', proxy)
]
for source in sources:
source.UpdateShowList(indexedShows)
print(source.GetName() + ': ' + str(len(indexedShows)))
shows = indexedShows.values()
with open('alternates.json') as alternates_file:
alternates = json.load(alternates_file)
for alternate in alternates:
match_index = next((i for i, x in enumerate(shows) if animesources.compare(x['name'], alternate)), False)
if (match_index):
shows[match_index]['alt'] = alternates[alternate]
shows = sorted(shows, key = lambda show: show['name'].lower())
blob = {"lastUpdated": datetime.utcnow().isoformat(), "shows": shows}
out_file = open('uk.json', 'w')
json.dump(blob, out_file)
out_file.close()
azure_blob.create_blob_from_path(
'assets',
'uk.json',
'uk.json'
)
print('done') | true | true |
f71b33566c8e0a884e4d1704ac06c8583ef46398 | 7,939 | py | Python | tests/test_storage.py | gregdan3/limits | f2c693b9009afe27c9ecbb94492455ad470127f1 | [
"MIT"
] | null | null | null | tests/test_storage.py | gregdan3/limits | f2c693b9009afe27c9ecbb94492455ad470127f1 | [
"MIT"
] | null | null | null | tests/test_storage.py | gregdan3/limits | f2c693b9009afe27c9ecbb94492455ad470127f1 | [
"MIT"
] | null | null | null | import time
import pytest
from limits.errors import ConfigurationError
from limits.storage import (
MemcachedStorage,
MemoryStorage,
MongoDBStorage,
RedisClusterStorage,
RedisSentinelStorage,
RedisStorage,
Storage,
storage_from_string,
)
from limits.strategies import MovingWindowRateLimiter
class TestBaseStorage:
    """Tests for the storage factory (``storage_from_string``) and for the
    pluggable :class:`Storage` extension contract."""

    @pytest.mark.parametrize(
        "uri, args, expected_instance, fixture",
        [
            ("memory://", {}, MemoryStorage, None),
            pytest.param(
                "redis://localhost:7379",
                {},
                RedisStorage,
                pytest.lazy_fixture("redis_basic"),
                marks=pytest.mark.redis,
            ),
            pytest.param(
                "redis+unix:///tmp/limits.redis.sock",
                {},
                RedisStorage,
                pytest.lazy_fixture("redis_uds"),
                marks=pytest.mark.redis,
            ),
            pytest.param(
                "redis+unix://:password/tmp/limits.redis.sock",
                {},
                RedisStorage,
                pytest.lazy_fixture("redis_uds"),
                marks=pytest.mark.redis,
            ),
            pytest.param(
                "memcached://localhost:22122",
                {},
                MemcachedStorage,
                pytest.lazy_fixture("memcached"),
                marks=pytest.mark.memcached,
            ),
            pytest.param(
                "memcached://localhost:22122,localhost:22123",
                {},
                MemcachedStorage,
                pytest.lazy_fixture("memcached_cluster"),
                marks=pytest.mark.memcached,
            ),
            pytest.param(
                "memcached:///tmp/limits.memcached.sock",
                {},
                MemcachedStorage,
                pytest.lazy_fixture("memcached_uds"),
                marks=pytest.mark.memcached,
            ),
            pytest.param(
                "redis+sentinel://localhost:26379",
                {"service_name": "localhost-redis-sentinel"},
                RedisSentinelStorage,
                pytest.lazy_fixture("redis_sentinel"),
                marks=pytest.mark.redis_sentinel,
            ),
            pytest.param(
                "redis+sentinel://localhost:26379/localhost-redis-sentinel",
                {},
                RedisSentinelStorage,
                pytest.lazy_fixture("redis_sentinel"),
                marks=pytest.mark.redis_sentinel,
            ),
            pytest.param(
                "redis+sentinel://:sekret@localhost:26379/localhost-redis-sentinel",
                {},
                RedisSentinelStorage,
                pytest.lazy_fixture("redis_sentinel_auth"),
                marks=pytest.mark.redis_sentinel,
            ),
            pytest.param(
                "redis+cluster://localhost:7001/",
                {},
                RedisClusterStorage,
                pytest.lazy_fixture("redis_cluster"),
                marks=pytest.mark.redis_cluster,
            ),
            pytest.param(
                "mongodb://localhost:37017/",
                {},
                MongoDBStorage,
                pytest.lazy_fixture("mongodb"),
                marks=pytest.mark.mongodb,
            ),
        ],
    )
    def test_storage_string(self, uri, args, expected_instance, fixture):
        """Each supported scheme URI must resolve to its Storage subclass."""
        assert isinstance(storage_from_string(uri, **args), expected_instance)

    @pytest.mark.parametrize(
        "uri, args", [("blah://", {}), ("redis+sentinel://localhost:26379", {})]
    )
    def test_invalid_storage_string(self, uri, args):
        """Unknown schemes (or a sentinel URI missing its service name) raise
        ConfigurationError."""
        with pytest.raises(ConfigurationError):
            storage_from_string(uri, **args)

    @pytest.mark.parametrize(
        "uri, args, fixture",
        [
            ("memory://", {}, None),
            pytest.param(
                "redis://localhost:7379",
                {},
                pytest.lazy_fixture("redis_basic"),
                marks=pytest.mark.redis,
            ),
            pytest.param(
                "redis+unix:///tmp/limits.redis.sock",
                {},
                pytest.lazy_fixture("redis_uds"),
                marks=pytest.mark.redis,
            ),
            pytest.param(
                "redis+unix://:password/tmp/limits.redis.sock",
                {},
                pytest.lazy_fixture("redis_uds"),
                marks=pytest.mark.redis,
            ),
            pytest.param(
                "memcached://localhost:22122",
                {},
                pytest.lazy_fixture("memcached"),
                marks=pytest.mark.memcached,
            ),
            pytest.param(
                "memcached://localhost:22122,localhost:22123",
                {},
                pytest.lazy_fixture("memcached_cluster"),
                marks=pytest.mark.memcached,
            ),
            pytest.param(
                "memcached:///tmp/limits.memcached.sock",
                {},
                pytest.lazy_fixture("memcached_uds"),
                marks=pytest.mark.memcached,
            ),
            pytest.param(
                "redis+sentinel://localhost:26379",
                {"service_name": "localhost-redis-sentinel"},
                pytest.lazy_fixture("redis_sentinel"),
                marks=pytest.mark.redis_sentinel,
            ),
            pytest.param(
                "redis+sentinel://localhost:26379/localhost-redis-sentinel",
                {},
                pytest.lazy_fixture("redis_sentinel"),
                marks=pytest.mark.redis_sentinel,
            ),
            pytest.param(
                "redis+sentinel://:sekret@localhost:36379/localhost-redis-sentinel",
                {},
                pytest.lazy_fixture("redis_sentinel_auth"),
                marks=pytest.mark.redis_sentinel,
            ),
            pytest.param(
                "redis+cluster://localhost:7001/",
                {},
                pytest.lazy_fixture("redis_cluster"),
                marks=pytest.mark.redis_cluster,
            ),
            pytest.param(
                "mongodb://localhost:37017/",
                {},
                pytest.lazy_fixture("mongodb"),
                marks=pytest.mark.mongodb,
            ),
        ],
    )
    def test_storage_check(self, uri, args, fixture):
        """check() must report a healthy backend for every supported scheme."""
        assert storage_from_string(uri, **args).check()

    def test_pluggable_storage_no_moving_window(self):
        """A custom Storage subclass registers its scheme with the factory,
        but without the moving-window API it cannot back a
        MovingWindowRateLimiter."""
        # Minimal Storage implementation: fixed-window hooks only.
        class MyStorage(Storage):
            STORAGE_SCHEME = ["mystorage"]

            def incr(self, key, expiry, elastic_expiry=False):
                return

            def get(self, key):
                return 0

            def get_expiry(self, key):
                return time.time()

            def reset(self):
                return

            def check(self):
                return

            def clear(self):
                return

        storage = storage_from_string("mystorage://")
        assert isinstance(storage, MyStorage)
        with pytest.raises(NotImplementedError):
            MovingWindowRateLimiter(storage)

    def test_pluggable_storage_moving_window(self):
        """A custom Storage that also implements acquire_entry /
        get_moving_window is accepted by MovingWindowRateLimiter."""
        # Same minimal implementation plus the moving-window hooks.
        class MyStorage(Storage):
            STORAGE_SCHEME = ["mystorage"]

            def incr(self, key, expiry, elastic_expiry=False):
                return

            def get(self, key):
                return 0

            def get_expiry(self, key):
                return time.time()

            def reset(self):
                return

            def check(self):
                return

            def clear(self):
                return

            def acquire_entry(self, *a, **k):
                return True

            def get_moving_window(self, *a, **k):
                return (time.time(), 1)

        storage = storage_from_string("mystorage://")
        assert isinstance(storage, MyStorage)
        MovingWindowRateLimiter(storage)
| 32.272358 | 84 | 0.493513 | import time
import pytest
from limits.errors import ConfigurationError
from limits.storage import (
MemcachedStorage,
MemoryStorage,
MongoDBStorage,
RedisClusterStorage,
RedisSentinelStorage,
RedisStorage,
Storage,
storage_from_string,
)
from limits.strategies import MovingWindowRateLimiter
class TestBaseStorage:
@pytest.mark.parametrize(
"uri, args, expected_instance, fixture",
[
("memory://", {}, MemoryStorage, None),
pytest.param(
"redis://localhost:7379",
{},
RedisStorage,
pytest.lazy_fixture("redis_basic"),
marks=pytest.mark.redis,
),
pytest.param(
"redis+unix:///tmp/limits.redis.sock",
{},
RedisStorage,
pytest.lazy_fixture("redis_uds"),
marks=pytest.mark.redis,
),
pytest.param(
"redis+unix://:password/tmp/limits.redis.sock",
{},
RedisStorage,
pytest.lazy_fixture("redis_uds"),
marks=pytest.mark.redis,
),
pytest.param(
"memcached://localhost:22122",
{},
MemcachedStorage,
pytest.lazy_fixture("memcached"),
marks=pytest.mark.memcached,
),
pytest.param(
"memcached://localhost:22122,localhost:22123",
{},
MemcachedStorage,
pytest.lazy_fixture("memcached_cluster"),
marks=pytest.mark.memcached,
),
pytest.param(
"memcached:///tmp/limits.memcached.sock",
{},
MemcachedStorage,
pytest.lazy_fixture("memcached_uds"),
marks=pytest.mark.memcached,
),
pytest.param(
"redis+sentinel://localhost:26379",
{"service_name": "localhost-redis-sentinel"},
RedisSentinelStorage,
pytest.lazy_fixture("redis_sentinel"),
marks=pytest.mark.redis_sentinel,
),
pytest.param(
"redis+sentinel://localhost:26379/localhost-redis-sentinel",
{},
RedisSentinelStorage,
pytest.lazy_fixture("redis_sentinel"),
marks=pytest.mark.redis_sentinel,
),
pytest.param(
"redis+sentinel://:sekret@localhost:26379/localhost-redis-sentinel",
{},
RedisSentinelStorage,
pytest.lazy_fixture("redis_sentinel_auth"),
marks=pytest.mark.redis_sentinel,
),
pytest.param(
"redis+cluster://localhost:7001/",
{},
RedisClusterStorage,
pytest.lazy_fixture("redis_cluster"),
marks=pytest.mark.redis_cluster,
),
pytest.param(
"mongodb://localhost:37017/",
{},
MongoDBStorage,
pytest.lazy_fixture("mongodb"),
marks=pytest.mark.mongodb,
),
],
)
def test_storage_string(self, uri, args, expected_instance, fixture):
assert isinstance(storage_from_string(uri, **args), expected_instance)
@pytest.mark.parametrize(
"uri, args", [("blah://", {}), ("redis+sentinel://localhost:26379", {})]
)
def test_invalid_storage_string(self, uri, args):
with pytest.raises(ConfigurationError):
storage_from_string(uri, **args)
@pytest.mark.parametrize(
"uri, args, fixture",
[
("memory://", {}, None),
pytest.param(
"redis://localhost:7379",
{},
pytest.lazy_fixture("redis_basic"),
marks=pytest.mark.redis,
),
pytest.param(
"redis+unix:///tmp/limits.redis.sock",
{},
pytest.lazy_fixture("redis_uds"),
marks=pytest.mark.redis,
),
pytest.param(
"redis+unix://:password/tmp/limits.redis.sock",
{},
pytest.lazy_fixture("redis_uds"),
marks=pytest.mark.redis,
),
pytest.param(
"memcached://localhost:22122",
{},
pytest.lazy_fixture("memcached"),
marks=pytest.mark.memcached,
),
pytest.param(
"memcached://localhost:22122,localhost:22123",
{},
pytest.lazy_fixture("memcached_cluster"),
marks=pytest.mark.memcached,
),
pytest.param(
"memcached:///tmp/limits.memcached.sock",
{},
pytest.lazy_fixture("memcached_uds"),
marks=pytest.mark.memcached,
),
pytest.param(
"redis+sentinel://localhost:26379",
{"service_name": "localhost-redis-sentinel"},
pytest.lazy_fixture("redis_sentinel"),
marks=pytest.mark.redis_sentinel,
),
pytest.param(
"redis+sentinel://localhost:26379/localhost-redis-sentinel",
{},
pytest.lazy_fixture("redis_sentinel"),
marks=pytest.mark.redis_sentinel,
),
pytest.param(
"redis+sentinel://:sekret@localhost:36379/localhost-redis-sentinel",
{},
pytest.lazy_fixture("redis_sentinel_auth"),
marks=pytest.mark.redis_sentinel,
),
pytest.param(
"redis+cluster://localhost:7001/",
{},
pytest.lazy_fixture("redis_cluster"),
marks=pytest.mark.redis_cluster,
),
pytest.param(
"mongodb://localhost:37017/",
{},
pytest.lazy_fixture("mongodb"),
marks=pytest.mark.mongodb,
),
],
)
def test_storage_check(self, uri, args, fixture):
assert storage_from_string(uri, **args).check()
def test_pluggable_storage_no_moving_window(self):
class MyStorage(Storage):
STORAGE_SCHEME = ["mystorage"]
def incr(self, key, expiry, elastic_expiry=False):
return
def get(self, key):
return 0
def get_expiry(self, key):
return time.time()
def reset(self):
return
def check(self):
return
def clear(self):
return
storage = storage_from_string("mystorage://")
assert isinstance(storage, MyStorage)
with pytest.raises(NotImplementedError):
MovingWindowRateLimiter(storage)
def test_pluggable_storage_moving_window(self):
class MyStorage(Storage):
STORAGE_SCHEME = ["mystorage"]
def incr(self, key, expiry, elastic_expiry=False):
return
def get(self, key):
return 0
def get_expiry(self, key):
return time.time()
def reset(self):
return
def check(self):
return
def clear(self):
return
def acquire_entry(self, *a, **k):
return True
def get_moving_window(self, *a, **k):
return (time.time(), 1)
storage = storage_from_string("mystorage://")
assert isinstance(storage, MyStorage)
MovingWindowRateLimiter(storage)
| true | true |
f71b336c74bf785c71596fc3f4e1c0603495a240 | 37,268 | py | Python | desktop/libs/indexer/src/indexer/indexers/sql_tests.py | taklwu/hue | db661408f8fd206557b3d98670cf5edc4d52f869 | [
"Apache-2.0"
] | 1 | 2020-06-22T10:20:52.000Z | 2020-06-22T10:20:52.000Z | desktop/libs/indexer/src/indexer/indexers/sql_tests.py | taklwu/hue | db661408f8fd206557b3d98670cf5edc4d52f869 | [
"Apache-2.0"
] | null | null | null | desktop/libs/indexer/src/indexer/indexers/sql_tests.py | taklwu/hue | db661408f8fd206557b3d98670cf5edc4d52f869 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import object
import json
from mock import patch, Mock, MagicMock
from nose.tools import assert_equal, assert_true
from desktop.lib.django_test_util import make_logged_in_client
from useradmin.models import User
from indexer.indexers.sql import SQLIndexer
class MockRequest(object):
  """Minimal stand-in for a Django request: carries a user and a filesystem."""

  def __init__(self, fs=None, user=None):
    # Default to the in-memory filesystem double when none is supplied.
    if fs is None:
      fs = MockFs()
    self.fs = fs
    if user is not None:
      self.user = user
    else:
      # Create (and log in) the importer test user; keep the client around
      # so callers can issue follow-up requests with it.
      self.c = make_logged_in_client(username='test_importer', is_superuser=False)
      self.user = User.objects.get(username='test_importer')
class MockFs(object):
  """Filesystem test double: answers every query from one descriptor dict.

  The optional *path* dict supplies the canned answers for isdir/split/
  listdir/parent_path; the default mimics a plain file at '/A/a'.
  """

  def __init__(self, path=None):
    if path is None:
      path = {'isDir': False, 'split': ('/A', 'a'), 'listdir': ['/A'], 'parent_path': '/A'}
    self.path = path

  def _answer(self, key):
    # All path queries are served from the shared descriptor.
    return self.path[key]

  def isdir(self, path):
    return self._answer('isDir')

  def split(self, path):
    return self._answer('split')

  def listdir(self, path):
    return self._answer('listdir')

  def parent_path(self, path):
    return self._answer('parent_path')

  def stats(self, path):
    # World read/write/execute bits: permissive enough for any check.
    return {"mode": 0o0777}
def test_generate_create_text_table_with_data_partition():
  """Importing a CSV into a new partitioned Hive text table must emit
  USE / CREATE TABLE ... PARTITIONED BY / LOAD DATA ... PARTITION statements.
  """
  # Importer-wizard state for the CSV source file (columns sampled from it).
  source = {u'sourceType': 'hive', u'sampleCols': [{u'operations': [], u'comment': u'', u'name': u'customers.id', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False, u'type': u'bigint', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'customers.name', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'customers.email_preferences', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'customers.addresses', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'customers.orders', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}], u'name': u'', u'inputFormat': u'file', u'format': {u'status': 0, u'fieldSeparator': u',', u'hasHeader': True, u'quoteChar': u'"', u'recordSeparator': u'\\n', u'type': u'csv'}, u'defaultName': u'default.customer_stats', u'show': True, u'tableName': u'', u'sample': [], u'apiHelperType': u'hive', u'inputFormatsAll': [{u'name': u'File', u'value': u'file'}, {u'name': u'Manually', u'value': u'manual'}, {u'name': u'SQL 
Query', u'value': u'query'}, {u'name': u'Table', u'value': u'table'}], u'query': u'', u'databaseName': u'default', u'table': u'', u'inputFormats': [{u'name': u'File', u'value': u'file'}, {u'name': u'Manually', u'value': u'manual'}, {u'name': u'SQL Query', u'value': u'query'}, {u'name': u'Table', u'value': u'table'}], u'path': u'/user/romain/customer_stats.csv', u'draggedQuery': u'', u'inputFormatsManual': [{u'name': u'Manually', u'value': u'manual'}], u'isObjectStore': False}
  # Target table definition: text format with one static partition column 'new_field_1'.
  destination = {u'isTransactional': False, u'isInsertOnly': False, u'sourceType': 'hive', u'KUDU_DEFAULT_PARTITION_COLUMN': {u'int_val': 16, u'name': u'HASH', u'columns': [], u'range_partitions': [{u'include_upper_val': u'<=', u'upper_val': 1, u'name': u'VALUES', u'include_lower_val': u'<=', u'lower_val': 0, u'values': [{u'value': u''}]}]}, u'isTargetChecking': False, u'tableName': u'customer_stats', u'outputFormatsList': [{u'name': u'Table', u'value': u'table'}, {u'name': u'Solr index', u'value': u'index'}, {u'name': u'File', u'value': u'file'}, {u'name': u'Database', u'value': u'database'}], u'customRegexp': u'', u'isTargetExisting': False, u'partitionColumns': [{u'operations': [], u'comment': u'', u'name': u'new_field_1', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': True, u'length': 100, u'partitionValue': u'AAA', u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}], u'useCustomDelimiters': False, u'apiHelperType': u'hive', u'kuduPartitionColumns': [], u'outputFormats': [{u'name': u'Table', u'value': u'table'}, {u'name': u'Solr index', u'value': u'index'}], u'customMapDelimiter': u'\\003', u'showProperties': False, u'useDefaultLocation': True, u'description': u'', u'primaryKeyObjects': [], u'customFieldDelimiter': u',', u'existingTargetUrl': u'', u'importData': True, u'databaseName': u'default', u'KUDU_DEFAULT_RANGE_PARTITION_COLUMN': {u'include_upper_val': u'<=', u'upper_val': 1, u'name': u'VALUES', u'include_lower_val': u'<=', u'lower_val': 0, u'values': [{u'value': u''}]}, u'primaryKeys': [], u'outputFormat': u'table', u'nonDefaultLocation': u'/user/romain/customer_stats.csv', u'name': u'default.customer_stats', u'tableFormat': u'text', 'ouputFormat': u'table', u'bulkColumnNames': u'customers.id,customers.name,customers.email_preferences,customers.addresses,customers.orders', u'columns': [{u'operations': [], u'comment': u'', u'name': u'customers.id', u'level': 0, 
u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False, u'type': u'bigint', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'customers.name', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'customers.email_preferences', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'customers.addresses', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'customers.orders', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}], u'hasHeader': True, u'tableFormats': [{u'name': u'Text', u'value': u'text'}, {u'name': u'Parquet', u'value': u'parquet'}, {u'name': u'Kudu', u'value': u'kudu'}, {u'name': u'Csv', u'value': u'csv'}, {u'name': u'Avro', u'value': u'avro'}, {u'name': u'Json', u'value': u'json'}, {u'name': u'Regexp', u'value': u'regexp'}, {u'name': u'ORC', u'value': u'orc'}], u'customCollectionDelimiter': u'\\002'}
  request = MockRequest(fs=MockFs())
  # Generate the import SQL and check its three statements.
  sql = SQLIndexer(user=request.user, fs=request.fs).create_table_from_a_file(source, destination).get_str()
  assert_true('''USE default;''' in sql, sql)
  assert_true('''CREATE TABLE `default`.`customer_stats`
(
  `customers.id` bigint ,
  `customers.name` string ,
  `customers.email_preferences` string ,
  `customers.addresses` string ,
  `customers.orders` string ) PARTITIONED BY (
  `new_field_1` string )
ROW FORMAT DELIMITED
  FIELDS TERMINATED BY ','
  COLLECTION ITEMS TERMINATED BY '\\002'
  MAP KEYS TERMINATED BY '\\003'
  STORED AS TextFile TBLPROPERTIES("transactional" = "false", "skip.header.line.count" = "1")
;''' in sql, sql)
  assert_true('''LOAD DATA INPATH '/user/romain/customer_stats.csv' INTO TABLE `default`.`customer_stats` PARTITION (new_field_1='AAA');''' in sql, sql)
def test_generate_create_kudu_table_with_data():
  """Import a CSV file into a new Kudu table (Impala dialect).

  Feeds the importer-wizard `source`/`destination` payloads for
  /user/admin/index_data.csv into SQLIndexer and checks the generated DDL:
  a temporary external text staging table is dropped/recreated, then the
  final Kudu table is built with a CTAS that declares the primary key,
  HASH partitioning (16 partitions) and 'kudu.num_tablet_replicas',
  renders decimal columns as decimal(precision, scale), and omits columns
  flagged keep=False (here `cool`) from the SELECT list.
  """
  # Wizard state as posted by the UI: source file description (CSV with
  # header) and the Kudu destination table definition.
  source = {u'sourceType': 'impala', u'apiHelperType': 'hive', u'sampleCols': [], u'name': u'', u'inputFormat': u'file', u'format': {u'quoteChar': u'"', u'recordSeparator': u'\\n', u'type': u'csv', u'hasHeader': True, u'fieldSeparator': u','}, u'show': True, u'tableName': u'', u'sample': [], u'defaultName': u'index_data', u'query': u'', u'databaseName': u'default', u'table': u'', u'inputFormats': [{u'name': u'File', u'value': u'file'}, {u'name': u'Manually', u'value': u'manual'}], u'path': u'/user/admin/index_data.csv', u'draggedQuery': u'', u'isObjectStore': False}
  destination = {u'isTransactional': False, u'isInsertOnly': False, u'sourceType': 'impala', u'KUDU_DEFAULT_PARTITION_COLUMN': {u'int_val': 16, u'name': u'HASH', u'columns': [], u'range_partitions': [{u'include_upper_val': u'<=', u'upper_val': 1, u'name': u'VALUES', u'include_lower_val': u'<=', u'lower_val': 0, u'values': [{u'value': u''}]}]}, u'tableName': u'index_data', u'outputFormatsList': [{u'name': u'Table', u'value': u'table'}, {u'name': u'Solr+index', u'value': u'index'}, {u'name': u'File', u'value': u'file'}, {u'name': u'Database', u'value': u'database'}], u'customRegexp': u'', u'isTargetExisting': False, u'partitionColumns': [], u'useCustomDelimiters': True, u'kuduPartitionColumns': [{u'int_val': 16, u'name': u'HASH', u'columns': [u'id'], u'range_partitions': [{u'include_upper_val': u'<=', u'upper_val': 1, u'name': u'VALUES', u'include_lower_val': u'<=', u'lower_val': 0, u'values': [{u'value': u''}]}]}], u'outputFormats': [{u'name': u'Table', u'value': u'table'}, {u'name': u'Solr+index', u'value': u'index'}], u'customMapDelimiter': None, u'showProperties': False, u'useDefaultLocation': True, u'description': u'Big Data', u'primaryKeyObjects': [{u'operations': [], u'comment': u'', u'name': u'id', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}], u'customFieldDelimiter': u',', u'existingTargetUrl': u'', u'importData': True, u'databaseName': u'default', u'KUDU_DEFAULT_RANGE_PARTITION_COLUMN': {u'include_upper_val': u'<=', u'upper_val': 1, u'name': u'VALUES', u'include_lower_val': u'<=', u'lower_val': 0, u'values': [{u'value': u''}]}, u'primaryKeys': [u'id'], u'outputFormat': u'table', u'nonDefaultLocation': u'/user/admin/index_data.csv', u'name': u'index_data', u'tableFormat': u'kudu', u'bulkColumnNames': 
u'business_id,cool,date,funny,id,stars,text,type,useful,user_id,name,full_address,latitude,longitude,neighborhoods,open,review_count,state', u'columns': [{u'operations': [], u'comment': u'', u'name': u'business_id', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'cool', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'bigint', u'showProperties': False, u'keep': False}, {u'operations': [], u'comment': u'', u'name': u'date', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'funny', u'level': 0, u'scale':4, u'precision':10, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'decimal', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'id', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'stars', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'bigint', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'text', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': 
False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'type', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'useful', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'bigint', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'user_id', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'name', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'full_address', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'latitude', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'double', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'longitude', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'double', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': 
u'', u'name': u'neighborhoods', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'open', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'review_count', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'bigint', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'state', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}], u'hasHeader': True, u'tableFormats': [{u'name': u'Text', u'value': u'text'}, {u'name': u'Parquet', u'value': u'parquet'}, {u'name': u'Json', u'value': u'json'}, {u'name': u'Kudu', u'value': u'kudu'}, {u'name': u'Avro', u'value': u'avro'}, {u'name': u'Regexp', u'value': u'regexp'}, {u'name': u'RCFile', u'value': u'rcfile'}, {u'name': u'ORC', u'value': u'orc'}, {u'name': u'SequenceFile', u'value': u'sequencefile'}], u'customCollectionDelimiter': None}
  # Mocked request: logged-in test user plus a fake filesystem (MockFs).
  request = MockRequest(fs=MockFs())
  # Generate the full multi-statement SQL script for the import.
  sql = SQLIndexer(user=request.user, fs=request.fs).create_table_from_a_file(source, destination).get_str()
  # Staging: the temporary external text table is dropped then recreated,
  # with "skip.header.line.count" = "1" because the CSV has a header row.
  assert_true('''DROP TABLE IF EXISTS `default`.`hue__tmp_index_data`;''' in sql, sql)
  assert_true('''CREATE EXTERNAL TABLE `default`.`hue__tmp_index_data`
(
  `business_id` string ,
  `cool` bigint ,
  `date` string ,
  `funny` decimal(10, 4) ,
  `id` string ,
  `stars` bigint ,
  `text` string ,
  `type` string ,
  `useful` bigint ,
  `user_id` string ,
  `name` string ,
  `full_address` string ,
  `latitude` double ,
  `longitude` double ,
  `neighborhoods` string ,
  `open` string ,
  `review_count` bigint ,
  `state` string ) COMMENT "Big Data"
ROW FORMAT DELIMITED
  FIELDS TERMINATED BY ','
  STORED AS TextFile LOCATION '/A'
TBLPROPERTIES("transactional" = "false", "skip.header.line.count" = "1")''' in sql, sql)
  # Final Kudu table: CTAS from the staging table; primary key `id` comes
  # first in the SELECT list and `cool` (keep=False) is excluded.
  assert_true('''CREATE TABLE `default`.`index_data` COMMENT "Big Data"
        PRIMARY KEY (id)
        PARTITION BY HASH PARTITIONS 16
        STORED AS kudu
        TBLPROPERTIES(
        'kudu.num_tablet_replicas' = '1'
        )
        AS SELECT `id`, `business_id`, `date`, `funny`, `stars`, `text`, `type`, `useful`, `user_id`, `name`, `full_address`, `latitude`, `longitude`, `neighborhoods`, `open`, `review_count`, `state`
        FROM `default`.`hue__tmp_index_data`;''' in sql, sql)
def test_generate_create_parquet_table():
  """Import a CSV file into a new Parquet table (Hive dialect).

  Checks the generated script: USE of the target database, creation of a
  temporary external text staging table located at the CSV's parent
  directory ('/user/hue/data'), a CTAS into the Parquet table with
  SELECT *, and finally the DROP of the staging table.
  """
  # Importer wizard payloads (kept as the raw JSON the UI posts).
  source = json.loads('''{"sourceType": "hive", "name":"","sample":[["Bank Of America","3000000.0","US","Miami","37.6801986694","-121.92150116"],["Citi Bank","2800000.0","US","Richmond","37.5242004395","-77.4932022095"],["Deutsche Bank","2600000.0","US","Corpus Christi","40.7807998657","-73.9772033691"],["Thomson Reuters","2400000.0","US","Albany","35.7976989746","-78.6252975464"],["OpenX","2200000.0","US","Des Moines","40.5411987305","-119.586898804"]],"sampleCols":[{"operations":[],"comment":"","nested":[],"name":"acct_client","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_amount","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_country_cd","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lat","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lon","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":f
alse,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0}],"inputFormat":"file","inputFormatsAll":[{"value":"file","name":"File"},{"value":"manual","name":"Manually"},{"value":"query","name":"SQL Query"},{"value":"table","name":"Table"}],"inputFormatsManual":[{"value":"manual","name":"Manually"}],"inputFormats":[{"value":"file","name":"File"},{"value":"manual","name":"Manually"},{"value":"query","name":"SQL Query"},{"value":"table","name":"Table"}],"path":"/user/hue/data/query-hive-360.csv","isObjectStore":false,"table":"","tableName":"","databaseName":"default","apiHelperType":"hive","query":"","draggedQuery":"","format":{"type":"csv","fieldSeparator":",","recordSeparator":"\\n","quoteChar":"\\"","hasHeader":true,"status":0},"show":true,"defaultName":"default.query-hive-360"}''')
  destination = json.loads('''{"isTransactional": false, "isInsertOnly": false, "sourceType": "hive", "name":"default.parquet_table","apiHelperType":"hive","description":"","outputFormat":"table","outputFormatsList":[{"name":"Table","value":"table"},{"name":"Solr index","value":"index"},{"name":"File","value":"file"},{"name":"Database","value":"database"}],"outputFormats":[{"name":"Table","value":"table"},{"name":"Solr index","value":"index"}],"columns":[{"operations":[],"comment":"","nested":[],"name":"acct_client","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_amount","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_country_cd","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lat","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lon","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length"
:100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0}],"bulkColumnNames":"acct_client,tran_amount,tran_country_cd,vrfcn_city,vrfcn_city_lat,vrfcn_city_lon","showProperties":false,"isTargetExisting":false,"isTargetChecking":false,"existingTargetUrl":"","tableName":"parquet_table","databaseName":"default","tableFormat":"parquet","KUDU_DEFAULT_RANGE_PARTITION_COLUMN":{"values":[{"value":""}],"name":"VALUES","lower_val":0,"include_lower_val":"<=","upper_val":1,"include_upper_val":"<="},"KUDU_DEFAULT_PARTITION_COLUMN":{"columns":[],"range_partitions":[{"values":[{"value":""}],"name":"VALUES","lower_val":0,"include_lower_val":"<=","upper_val":1,"include_upper_val":"<="}],"name":"HASH","int_val":16},"tableFormats":[{"value":"text","name":"Text"},{"value":"parquet","name":"Parquet"},{"value":"kudu","name":"Kudu"},{"value":"csv","name":"Csv"},{"value":"avro","name":"Avro"},{"value":"json","name":"Json"},{"value":"regexp","name":"Regexp"},{"value":"orc","name":"ORC"}],"partitionColumns":[],"kuduPartitionColumns":[],"primaryKeys":[],"primaryKeyObjects":[],"importData":true,"useDefaultLocation":true,"nonDefaultLocation":"/user/hue/data/query-hive-360.csv","hasHeader":true,"useCustomDelimiters":false,"customFieldDelimiter":",","customCollectionDelimiter":"\\\\002","customMapDelimiter":"\\\\003","customRegexp":""}''')
  # Fake-fs description of the source CSV's path (file, not a directory).
  path = {'isDir': False, 'split': ('/user/hue/data', 'query-hive-360.csv'), 'listdir': ['/user/hue/data']}
  request = MockRequest(fs=MockFs(path=path))
  # Generate the full multi-statement SQL script for the import.
  sql = SQLIndexer(user=request.user, fs=request.fs).create_table_from_a_file(source, destination).get_str()
  assert_true('''USE default;''' in sql, sql)
  # Staging table is external, text-format, located at the CSV's directory,
  # with "skip.header.line.count" = "1" because the CSV has a header row.
  assert_true('''CREATE EXTERNAL TABLE `default`.`hue__tmp_parquet_table`
(
  `acct_client` string ,
  `tran_amount` double ,
  `tran_country_cd` string ,
  `vrfcn_city` string ,
  `vrfcn_city_lat` double ,
  `vrfcn_city_lon` double ) ROW FORMAT DELIMITED
  FIELDS TERMINATED BY ','
  COLLECTION ITEMS TERMINATED BY '\\002'
  MAP KEYS TERMINATED BY '\\003'
  STORED AS TextFile LOCATION '/user/hue/data'
TBLPROPERTIES("transactional" = "false", "skip.header.line.count" = "1")
;''' in sql, sql)
  # Final table: plain CTAS into Parquet, then the staging table is dropped.
  assert_true('''CREATE TABLE `default`.`parquet_table`
        STORED AS parquet
AS SELECT *
FROM `default`.`hue__tmp_parquet_table`;
''' in sql, sql)
  assert_true('''DROP TABLE IF EXISTS `default`.`hue__tmp_parquet_table`;
''' in sql, sql)
def test_generate_create_orc_table_transactional():
  """Import a CSV file into a new transactional (insert-only) ORC table.

  Same flow as the Parquet test, but the destination has
  isTransactional/isInsertOnly set and tableFormat "orc": the staging text
  table stays non-transactional, while the final CTAS carries
  TBLPROPERTIES("transactional"="true",
  "transactional_properties"="insert_only") and STORED AS orc.
  """
  # Importer wizard payloads (kept as the raw JSON the UI posts).
  source = json.loads('''{"sourceType": "hive", "name":"","sample":[["Bank Of America","3000000.0","US","Miami","37.6801986694","-121.92150116"],["Citi Bank","2800000.0","US","Richmond","37.5242004395","-77.4932022095"],["Deutsche Bank","2600000.0","US","Corpus Christi","40.7807998657","-73.9772033691"],["Thomson Reuters","2400000.0","US","Albany","35.7976989746","-78.6252975464"],["OpenX","2200000.0","US","Des Moines","40.5411987305","-119.586898804"]],"sampleCols":[{"operations":[],"comment":"","nested":[],"name":"acct_client","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_amount","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_country_cd","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lat","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lon","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":f
alse,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0}],"inputFormat":"file","inputFormatsAll":[{"value":"file","name":"File"},{"value":"manual","name":"Manually"},{"value":"query","name":"SQL Query"},{"value":"table","name":"Table"}],"inputFormatsManual":[{"value":"manual","name":"Manually"}],"inputFormats":[{"value":"file","name":"File"},{"value":"manual","name":"Manually"},{"value":"query","name":"SQL Query"},{"value":"table","name":"Table"}],"path":"/user/hue/data/query-hive-360.csv","isObjectStore":false,"table":"","tableName":"","databaseName":"default","apiHelperType":"hive","query":"","draggedQuery":"","format":{"type":"csv","fieldSeparator":",","recordSeparator":"\\n","quoteChar":"\\"","hasHeader":true,"status":0},"show":true,"defaultName":"default.query-hive-360"}''')
  destination = json.loads('''{"isTransactional": true, "isInsertOnly": true, "sourceType": "hive", "name":"default.parquet_table","apiHelperType":"hive","description":"","outputFormat":"table","outputFormatsList":[{"name":"Table","value":"table"},{"name":"Solr index","value":"index"},{"name":"File","value":"file"},{"name":"Database","value":"database"}],"outputFormats":[{"name":"Table","value":"table"},{"name":"Solr index","value":"index"}],"columns":[{"operations":[],"comment":"","nested":[],"name":"acct_client","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_amount","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_country_cd","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lat","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lon","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":1
00,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0}],"bulkColumnNames":"acct_client,tran_amount,tran_country_cd,vrfcn_city,vrfcn_city_lat,vrfcn_city_lon","showProperties":false,"isTargetExisting":false,"isTargetChecking":false,"existingTargetUrl":"","tableName":"parquet_table","databaseName":"default","tableFormat":"orc","KUDU_DEFAULT_RANGE_PARTITION_COLUMN":{"values":[{"value":""}],"name":"VALUES","lower_val":0,"include_lower_val":"<=","upper_val":1,"include_upper_val":"<="},"KUDU_DEFAULT_PARTITION_COLUMN":{"columns":[],"range_partitions":[{"values":[{"value":""}],"name":"VALUES","lower_val":0,"include_lower_val":"<=","upper_val":1,"include_upper_val":"<="}],"name":"HASH","int_val":16},"tableFormats":[{"value":"text","name":"Text"},{"value":"parquet","name":"Parquet"},{"value":"kudu","name":"Kudu"},{"value":"csv","name":"Csv"},{"value":"avro","name":"Avro"},{"value":"json","name":"Json"},{"value":"regexp","name":"Regexp"},{"value":"orc","name":"ORC"}],"partitionColumns":[],"kuduPartitionColumns":[],"primaryKeys":[],"primaryKeyObjects":[],"importData":true,"useDefaultLocation":true,"nonDefaultLocation":"/user/hue/data/query-hive-360.csv","hasHeader":true,"useCustomDelimiters":false,"customFieldDelimiter":",","customCollectionDelimiter":"\\\\002","customMapDelimiter":"\\\\003","customRegexp":""}''')
  # Fake-fs description of the source CSV's path (file, not a directory).
  path = {'isDir': False, 'split': ('/user/hue/data', 'query-hive-360.csv'), 'listdir': ['/user/hue/data']}
  request = MockRequest(fs=MockFs(path=path))
  # Generate the full multi-statement SQL script for the import.
  sql = SQLIndexer(user=request.user, fs=request.fs).create_table_from_a_file(source, destination).get_str()
  assert_true('''USE default;''' in sql, sql)
  # Staging table remains non-transactional even for a transactional target.
  assert_true('''CREATE EXTERNAL TABLE `default`.`hue__tmp_parquet_table`
(
  `acct_client` string ,
  `tran_amount` double ,
  `tran_country_cd` string ,
  `vrfcn_city` string ,
  `vrfcn_city_lat` double ,
  `vrfcn_city_lon` double ) ROW FORMAT DELIMITED
  FIELDS TERMINATED BY ','
  COLLECTION ITEMS TERMINATED BY '\\002'
  MAP KEYS TERMINATED BY '\\003'
  STORED AS TextFile LOCATION '/user/hue/data'
TBLPROPERTIES("transactional" = "false", "skip.header.line.count" = "1")
;''' in sql, sql)
  # Final table: ORC CTAS flagged transactional + insert_only; staging dropped.
  assert_true('''CREATE TABLE `default`.`parquet_table`
        STORED AS orc
TBLPROPERTIES("transactional"="true", "transactional_properties"="insert_only")
AS SELECT *
FROM `default`.`hue__tmp_parquet_table`;
''' in sql, sql)
  assert_true('''DROP TABLE IF EXISTS `default`.`hue__tmp_parquet_table`;
''' in sql, sql)
def test_generate_create_empty_kudu_table():
  """Create an empty Kudu table from manually-entered columns (no file).

  With inputFormat "manual" and importData false there is no staging table
  and no data load: the script is a single CREATE TABLE with the column
  list, an inline PRIMARY KEY (acct_client) and STORED AS kudu. The table
  name comes from destination "name" (default.manual_empty_kudu); note the
  payload's "tableName" field differs — presumably ignored here, per the
  asserted SQL.
  """
  # Minimal "manual" source: no path, columns are defined in `destination`.
  source = json.loads('''{"sourceType": "impala", "apiHelperType": "impala", "path": "", "inputFormat": "manual"}''')
  destination = json.loads('''{"isTransactional": false, "isInsertOnly": false, "sourceType": "impala", "name":"default.manual_empty_kudu","apiHelperType":"impala","description":"","outputFormat":"table","columns":[{"operations":[],"comment":"","nested":[],"name":"acct_client","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_amount","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_country_cd","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lat","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lon","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0}],"bulkColumnNames":"acct_client,tran_amount,tran_country_cd,vrfcn_city,vrfcn_city_lat,vrfcn_city_lon","showProperties":false,"isTarget
Existing":false,"isTargetChecking":false,"existingTargetUrl":"","tableName":"manual_kudu_table","databaseName":"default","tableFormat":"kudu","KUDU_DEFAULT_RANGE_PARTITION_COLUMN":{"values":[{"value":""}],"name":"VALUES","lower_val":0,"include_lower_val":"<=","upper_val":1,"include_upper_val":"<="},"KUDU_DEFAULT_PARTITION_COLUMN":{"columns":[],"range_partitions":[{"values":[{"value":""}],"name":"VALUES","lower_val":0,"include_lower_val":"<=","upper_val":1,"include_upper_val":"<="}],"name":"HASH","int_val":16},"tableFormats":[{"value":"text","name":"Text"},{"value":"parquet","name":"Parquet"},{"value":"kudu","name":"Kudu"},{"value":"csv","name":"Csv"},{"value":"avro","name":"Avro"},{"value":"json","name":"Json"},{"value":"regexp","name":"Regexp"},{"value":"orc","name":"ORC"}],"partitionColumns":[],"kuduPartitionColumns":[],"primaryKeys": ["acct_client"],"primaryKeyObjects":[],"importData":false,"useDefaultLocation":true,"nonDefaultLocation":"/user/hue/data/query-hive-360.csv","hasHeader":false,"useCustomDelimiters":false,"customFieldDelimiter":",","customCollectionDelimiter":"\\\\002","customMapDelimiter":"\\\\003","customRegexp":""}''')
  # Fake-fs path is still supplied, but no LOAD/staging SQL is expected.
  path = {'isDir': False, 'split': ('/user/hue/data', 'query-hive-360.csv'), 'listdir': ['/user/hue/data']}
  request = MockRequest(fs=MockFs(path=path))
  # Generate the SQL script; only the direct Kudu CREATE TABLE is checked.
  sql = SQLIndexer(user=request.user, fs=request.fs).create_table_from_a_file(source, destination).get_str()
  assert_true('''CREATE TABLE `default`.`manual_empty_kudu`
(
  `acct_client` string ,
  `tran_amount` double ,
  `tran_country_cd` string ,
  `vrfcn_city` string ,
  `vrfcn_city_lat` double ,
  `vrfcn_city_lon` double , PRIMARY KEY (acct_client)
) STORED AS kudu TBLPROPERTIES("transactional" = "false")
;''' in sql, sql)
| 165.635556 | 7,415 | 0.674305 |
from builtins import object
import json
from mock import patch, Mock, MagicMock
from nose.tools import assert_equal, assert_true
from desktop.lib.django_test_util import make_logged_in_client
from useradmin.models import User
from indexer.indexers.sql import SQLIndexer
class MockRequest(object):
def __init__(self, fs=None, user=None):
self.fs = fs if fs is not None else MockFs()
if user is None:
self.c = make_logged_in_client(username='test_importer', is_superuser=False)
self.user = User.objects.get(username='test_importer')
else:
self.user = user
class MockFs(object):
def __init__(self, path=None):
self.path = {'isDir': False, 'split': ('/A', 'a'), 'listdir': ['/A'], 'parent_path': '/A'} if path is None else path
def isdir(self, path):
return self.path['isDir']
def split(self, path):
return self.path['split']
def listdir(self, path):
return self.path['listdir']
def parent_path(self, path):
return self.path['parent_path']
def stats(self, path):
return {"mode": 0o0777}
def test_generate_create_text_table_with_data_partition():
  """Text-format import with a partition column.

  Feeds SQLIndexer.create_table_from_a_file a CSV source and a `text`
  destination carrying one partition column (`new_field_1` = 'AAA'), and
  asserts the generated script creates a partitioned TextFile table and
  LOADs the file into that fixed partition.

  NOTE(review): `source`/`destination` mirror the importer wizard payloads;
  presumably only the fields SQLIndexer reads matter — confirm against sql.py.
  """
  # Importer "source" payload: CSV file with a header row and five sample columns.
  source = {u'sourceType': 'hive', u'sampleCols': [{u'operations': [], u'comment': u'', u'name': u'customers.id', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False, u'type': u'bigint', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'customers.name', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'customers.email_preferences', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'customers.addresses', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'customers.orders', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}], u'name': u'', u'inputFormat': u'file', u'format': {u'status': 0, u'fieldSeparator': u',', u'hasHeader': True, u'quoteChar': u'"', u'recordSeparator': u'\\n', u'type': u'csv'}, u'defaultName': u'default.customer_stats', u'show': True, u'tableName': u'', u'sample': [], u'apiHelperType': u'hive', u'inputFormatsAll': [{u'name': u'File', u'value': u'file'}, {u'name': u'Manually', u'value': u'manual'}, {u'name': u'SQL 
Query', u'value': u'query'}, {u'name': u'Table', u'value': u'table'}], u'query': u'', u'databaseName': u'default', u'table': u'', u'inputFormats': [{u'name': u'File', u'value': u'file'}, {u'name': u'Manually', u'value': u'manual'}, {u'name': u'SQL Query', u'value': u'query'}, {u'name': u'Table', u'value': u'table'}], u'path': u'/user/romain/customer_stats.csv', u'draggedQuery': u'', u'inputFormatsManual': [{u'name': u'Manually', u'value': u'manual'}], u'isObjectStore': False}
  # Importer "destination" payload: text table with a single partition column
  # carrying the fixed partition value 'AAA'.
  destination = {u'isTransactional': False, u'isInsertOnly': False, u'sourceType': 'hive', u'KUDU_DEFAULT_PARTITION_COLUMN': {u'int_val': 16, u'name': u'HASH', u'columns': [], u'range_partitions': [{u'include_upper_val': u'<=', u'upper_val': 1, u'name': u'VALUES', u'include_lower_val': u'<=', u'lower_val': 0, u'values': [{u'value': u''}]}]}, u'isTargetChecking': False, u'tableName': u'customer_stats', u'outputFormatsList': [{u'name': u'Table', u'value': u'table'}, {u'name': u'Solr index', u'value': u'index'}, {u'name': u'File', u'value': u'file'}, {u'name': u'Database', u'value': u'database'}], u'customRegexp': u'', u'isTargetExisting': False, u'partitionColumns': [{u'operations': [], u'comment': u'', u'name': u'new_field_1', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': True, u'length': 100, u'partitionValue': u'AAA', u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}], u'useCustomDelimiters': False, u'apiHelperType': u'hive', u'kuduPartitionColumns': [], u'outputFormats': [{u'name': u'Table', u'value': u'table'}, {u'name': u'Solr index', u'value': u'index'}], u'customMapDelimiter': u'\\003', u'showProperties': False, u'useDefaultLocation': True, u'description': u'', u'primaryKeyObjects': [], u'customFieldDelimiter': u',', u'existingTargetUrl': u'', u'importData': True, u'databaseName': u'default', u'KUDU_DEFAULT_RANGE_PARTITION_COLUMN': {u'include_upper_val': u'<=', u'upper_val': 1, u'name': u'VALUES', u'include_lower_val': u'<=', u'lower_val': 0, u'values': [{u'value': u''}]}, u'primaryKeys': [], u'outputFormat': u'table', u'nonDefaultLocation': u'/user/romain/customer_stats.csv', u'name': u'default.customer_stats', u'tableFormat': u'text', 'ouputFormat': u'table', u'bulkColumnNames': u'customers.id,customers.name,customers.email_preferences,customers.addresses,customers.orders', u'columns': [{u'operations': [], u'comment': u'', u'name': u'customers.id', u'level': 0, 
u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False, u'type': u'bigint', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'customers.name', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'customers.email_preferences', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'customers.addresses', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'customers.orders', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}], u'hasHeader': True, u'tableFormats': [{u'name': u'Text', u'value': u'text'}, {u'name': u'Parquet', u'value': u'parquet'}, {u'name': u'Kudu', u'value': u'kudu'}, {u'name': u'Csv', u'value': u'csv'}, {u'name': u'Avro', u'value': u'avro'}, {u'name': u'Json', u'value': u'json'}, {u'name': u'Regexp', u'value': u'regexp'}, {u'name': u'ORC', u'value': u'orc'}], u'customCollectionDelimiter': u'\\002'}
  request = MockRequest(fs=MockFs())
  # Render the full DDL/DML script generated for this import.
  sql = SQLIndexer(user=request.user, fs=request.fs).create_table_from_a_file(source, destination).get_str()
  assert_true('''USE default;''' in sql, sql)
  # The table DDL: partitioned, comma-delimited TextFile, skipping the CSV header.
  assert_true('''CREATE TABLE `default`.`customer_stats`
(
  `customers.id` bigint ,
  `customers.name` string ,
  `customers.email_preferences` string ,
  `customers.addresses` string ,
  `customers.orders` string ) PARTITIONED BY (
  `new_field_1` string )
ROW FORMAT DELIMITED
    FIELDS TERMINATED BY ','
    COLLECTION ITEMS TERMINATED BY '\\002'
    MAP KEYS TERMINATED BY '\\003'
  STORED AS TextFile TBLPROPERTIES("transactional" = "false", "skip.header.line.count" = "1")
;''' in sql, sql)
  # The CSV is loaded straight into the fixed partition value.
  assert_true('''LOAD DATA INPATH '/user/romain/customer_stats.csv' INTO TABLE `default`.`customer_stats` PARTITION (new_field_1='AAA');''' in sql, sql)
def test_generate_create_kudu_table_with_data():
  """Kudu import via Impala.

  Asserts the generated script: drops/creates a temporary EXTERNAL TextFile
  table over the CSV, then CTAS-es into a Kudu table hash-partitioned on the
  `id` primary key.  The `cool` column (flagged keep=False in `destination`)
  is present in the temp table but omitted from the final SELECT list.
  """
  # Importer "source" payload: CSV with a header row, read through Impala.
  source = {u'sourceType': 'impala', u'apiHelperType': 'hive', u'sampleCols': [], u'name': u'', u'inputFormat': u'file', u'format': {u'quoteChar': u'"', u'recordSeparator': u'\\n', u'type': u'csv', u'hasHeader': True, u'fieldSeparator': u','}, u'show': True, u'tableName': u'', u'sample': [], u'defaultName': u'index_data', u'query': u'', u'databaseName': u'default', u'table': u'', u'inputFormats': [{u'name': u'File', u'value': u'file'}, {u'name': u'Manually', u'value': u'manual'}], u'path': u'/user/admin/index_data.csv', u'draggedQuery': u'', u'isObjectStore': False}
  # Importer "destination" payload: kudu table format, primary key `id`,
  # one HASH partition spec with 16 partitions; `funny` is decimal(10, 4).
  destination = {u'isTransactional': False, u'isInsertOnly': False, u'sourceType': 'impala', u'KUDU_DEFAULT_PARTITION_COLUMN': {u'int_val': 16, u'name': u'HASH', u'columns': [], u'range_partitions': [{u'include_upper_val': u'<=', u'upper_val': 1, u'name': u'VALUES', u'include_lower_val': u'<=', u'lower_val': 0, u'values': [{u'value': u''}]}]}, u'tableName': u'index_data', u'outputFormatsList': [{u'name': u'Table', u'value': u'table'}, {u'name': u'Solr+index', u'value': u'index'}, {u'name': u'File', u'value': u'file'}, {u'name': u'Database', u'value': u'database'}], u'customRegexp': u'', u'isTargetExisting': False, u'partitionColumns': [], u'useCustomDelimiters': True, u'kuduPartitionColumns': [{u'int_val': 16, u'name': u'HASH', u'columns': [u'id'], u'range_partitions': [{u'include_upper_val': u'<=', u'upper_val': 1, u'name': u'VALUES', u'include_lower_val': u'<=', u'lower_val': 0, u'values': [{u'value': u''}]}]}], u'outputFormats': [{u'name': u'Table', u'value': u'table'}, {u'name': u'Solr+index', u'value': u'index'}], u'customMapDelimiter': None, u'showProperties': False, u'useDefaultLocation': True, u'description': u'Big Data', u'primaryKeyObjects': [{u'operations': [], u'comment': u'', u'name': u'id', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}], u'customFieldDelimiter': u',', u'existingTargetUrl': u'', u'importData': True, u'databaseName': u'default', u'KUDU_DEFAULT_RANGE_PARTITION_COLUMN': {u'include_upper_val': u'<=', u'upper_val': 1, u'name': u'VALUES', u'include_lower_val': u'<=', u'lower_val': 0, u'values': [{u'value': u''}]}, u'primaryKeys': [u'id'], u'outputFormat': u'table', u'nonDefaultLocation': u'/user/admin/index_data.csv', u'name': u'index_data', u'tableFormat': u'kudu', u'bulkColumnNames': 
u'business_id,cool,date,funny,id,stars,text,type,useful,user_id,name,full_address,latitude,longitude,neighborhoods,open,review_count,state', u'columns': [{u'operations': [], u'comment': u'', u'name': u'business_id', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'cool', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'bigint', u'showProperties': False, u'keep': False}, {u'operations': [], u'comment': u'', u'name': u'date', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'funny', u'level': 0, u'scale':4, u'precision':10, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'decimal', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'id', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'stars', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'bigint', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'text', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': 
False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'type', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'useful', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'bigint', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'user_id', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'name', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'full_address', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'latitude', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'double', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'longitude', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'double', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': 
u'', u'name': u'neighborhoods', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'open', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'review_count', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'bigint', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'state', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}], u'hasHeader': True, u'tableFormats': [{u'name': u'Text', u'value': u'text'}, {u'name': u'Parquet', u'value': u'parquet'}, {u'name': u'Json', u'value': u'json'}, {u'name': u'Kudu', u'value': u'kudu'}, {u'name': u'Avro', u'value': u'avro'}, {u'name': u'Regexp', u'value': u'regexp'}, {u'name': u'RCFile', u'value': u'rcfile'}, {u'name': u'ORC', u'value': u'orc'}, {u'name': u'SequenceFile', u'value': u'sequencefile'}], u'customCollectionDelimiter': None}
  request = MockRequest(fs=MockFs())
  # Render the full DDL/DML script generated for this import.
  sql = SQLIndexer(user=request.user, fs=request.fs).create_table_from_a_file(source, destination).get_str()
  # Any stale temp table from a previous run is dropped first.
  assert_true('''DROP TABLE IF EXISTS `default`.`hue__tmp_index_data`;''' in sql, sql)
  # Temp staging table: external TextFile over the CSV's parent dir ('/A'
  # comes from MockFs's default split), header line skipped.
  assert_true('''CREATE EXTERNAL TABLE `default`.`hue__tmp_index_data`
(
  `business_id` string ,
  `cool` bigint ,
  `date` string ,
  `funny` decimal(10, 4) ,
  `id` string ,
  `stars` bigint ,
  `text` string ,
  `type` string ,
  `useful` bigint ,
  `user_id` string ,
  `name` string ,
  `full_address` string ,
  `latitude` double ,
  `longitude` double ,
  `neighborhoods` string ,
  `open` string ,
  `review_count` bigint ,
  `state` string ) COMMENT "Big Data"
ROW FORMAT DELIMITED
    FIELDS TERMINATED BY ','
  STORED AS TextFile LOCATION '/A'
TBLPROPERTIES("transactional" = "false", "skip.header.line.count" = "1")''' in sql, sql)
  # Final Kudu table: CTAS from the temp table; note `cool` is excluded
  # from the SELECT list (keep=False above).
  assert_true('''CREATE TABLE `default`.`index_data` COMMENT "Big Data"
        PRIMARY KEY (id)
        PARTITION BY HASH PARTITIONS 16
        STORED AS kudu
        TBLPROPERTIES(
        'kudu.num_tablet_replicas' = '1'
        )
        AS SELECT `id`, `business_id`, `date`, `funny`, `stars`, `text`, `type`, `useful`, `user_id`, `name`, `full_address`, `latitude`, `longitude`, `neighborhoods`, `open`, `review_count`, `state`
        FROM `default`.`hue__tmp_index_data`;''' in sql, sql)
def test_generate_create_parquet_table():
  """Parquet import.

  Asserts the generated script stages the CSV through a temporary EXTERNAL
  TextFile table, CTAS-es it into a Parquet table, then drops the temp table.
  """
  # Importer "source" payload (JSON, as posted by the UI): CSV with header,
  # six columns sampled from a bank dataset.
  source = json.loads('''{"sourceType": "hive", "name":"","sample":[["Bank Of America","3000000.0","US","Miami","37.6801986694","-121.92150116"],["Citi Bank","2800000.0","US","Richmond","37.5242004395","-77.4932022095"],["Deutsche Bank","2600000.0","US","Corpus Christi","40.7807998657","-73.9772033691"],["Thomson Reuters","2400000.0","US","Albany","35.7976989746","-78.6252975464"],["OpenX","2200000.0","US","Des Moines","40.5411987305","-119.586898804"]],"sampleCols":[{"operations":[],"comment":"","nested":[],"name":"acct_client","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_amount","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_country_cd","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lat","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lon","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":f
alse,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0}],"inputFormat":"file","inputFormatsAll":[{"value":"file","name":"File"},{"value":"manual","name":"Manually"},{"value":"query","name":"SQL Query"},{"value":"table","name":"Table"}],"inputFormatsManual":[{"value":"manual","name":"Manually"}],"inputFormats":[{"value":"file","name":"File"},{"value":"manual","name":"Manually"},{"value":"query","name":"SQL Query"},{"value":"table","name":"Table"}],"path":"/user/hue/data/query-hive-360.csv","isObjectStore":false,"table":"","tableName":"","databaseName":"default","apiHelperType":"hive","query":"","draggedQuery":"","format":{"type":"csv","fieldSeparator":",","recordSeparator":"\\n","quoteChar":"\\"","hasHeader":true,"status":0},"show":true,"defaultName":"default.query-hive-360"}''')
  # Importer "destination" payload: parquet table format, importData=true.
  destination = json.loads('''{"isTransactional": false, "isInsertOnly": false, "sourceType": "hive", "name":"default.parquet_table","apiHelperType":"hive","description":"","outputFormat":"table","outputFormatsList":[{"name":"Table","value":"table"},{"name":"Solr index","value":"index"},{"name":"File","value":"file"},{"name":"Database","value":"database"}],"outputFormats":[{"name":"Table","value":"table"},{"name":"Solr index","value":"index"}],"columns":[{"operations":[],"comment":"","nested":[],"name":"acct_client","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_amount","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_country_cd","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lat","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lon","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length"
:100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0}],"bulkColumnNames":"acct_client,tran_amount,tran_country_cd,vrfcn_city,vrfcn_city_lat,vrfcn_city_lon","showProperties":false,"isTargetExisting":false,"isTargetChecking":false,"existingTargetUrl":"","tableName":"parquet_table","databaseName":"default","tableFormat":"parquet","KUDU_DEFAULT_RANGE_PARTITION_COLUMN":{"values":[{"value":""}],"name":"VALUES","lower_val":0,"include_lower_val":"<=","upper_val":1,"include_upper_val":"<="},"KUDU_DEFAULT_PARTITION_COLUMN":{"columns":[],"range_partitions":[{"values":[{"value":""}],"name":"VALUES","lower_val":0,"include_lower_val":"<=","upper_val":1,"include_upper_val":"<="}],"name":"HASH","int_val":16},"tableFormats":[{"value":"text","name":"Text"},{"value":"parquet","name":"Parquet"},{"value":"kudu","name":"Kudu"},{"value":"csv","name":"Csv"},{"value":"avro","name":"Avro"},{"value":"json","name":"Json"},{"value":"regexp","name":"Regexp"},{"value":"orc","name":"ORC"}],"partitionColumns":[],"kuduPartitionColumns":[],"primaryKeys":[],"primaryKeyObjects":[],"importData":true,"useDefaultLocation":true,"nonDefaultLocation":"/user/hue/data/query-hive-360.csv","hasHeader":true,"useCustomDelimiters":false,"customFieldDelimiter":",","customCollectionDelimiter":"\\\\002","customMapDelimiter":"\\\\003","customRegexp":""}''')
  # MockFs reports the CSV's parent dir so the temp table LOCATION resolves
  # to /user/hue/data.
  path = {'isDir': False, 'split': ('/user/hue/data', 'query-hive-360.csv'), 'listdir': ['/user/hue/data']}
  request = MockRequest(fs=MockFs(path=path))
  # Render the full DDL/DML script generated for this import.
  sql = SQLIndexer(user=request.user, fs=request.fs).create_table_from_a_file(source, destination).get_str()
  assert_true('''USE default;''' in sql, sql)
  # Temp staging table over the source file's directory, header line skipped.
  assert_true('''CREATE EXTERNAL TABLE `default`.`hue__tmp_parquet_table`
(
  `acct_client` string ,
  `tran_amount` double ,
  `tran_country_cd` string ,
  `vrfcn_city` string ,
  `vrfcn_city_lat` double ,
  `vrfcn_city_lon` double ) ROW FORMAT DELIMITED
    FIELDS TERMINATED BY ','
    COLLECTION ITEMS TERMINATED BY '\\002'
    MAP KEYS TERMINATED BY '\\003'
  STORED AS TextFile LOCATION '/user/hue/data'
TBLPROPERTIES("transactional" = "false", "skip.header.line.count" = "1")
;''' in sql, sql)
  # Final table: CTAS into parquet from the temp table.
  assert_true('''CREATE TABLE `default`.`parquet_table`
        STORED AS parquet
AS SELECT *
FROM `default`.`hue__tmp_parquet_table`;
''' in sql, sql)
  # The staging table is cleaned up at the end of the script.
  assert_true('''DROP TABLE IF EXISTS `default`.`hue__tmp_parquet_table`;
''' in sql, sql)
def test_generate_create_orc_table_transactional():
  """Insert-only transactional ORC import.

  Same staging pattern as the parquet test, but the final table is STORED AS
  orc with transactional/insert_only TBLPROPERTIES; the temp staging table
  itself stays non-transactional.
  """
  # Importer "source" payload (JSON, as posted by the UI): CSV with header.
  source = json.loads('''{"sourceType": "hive", "name":"","sample":[["Bank Of America","3000000.0","US","Miami","37.6801986694","-121.92150116"],["Citi Bank","2800000.0","US","Richmond","37.5242004395","-77.4932022095"],["Deutsche Bank","2600000.0","US","Corpus Christi","40.7807998657","-73.9772033691"],["Thomson Reuters","2400000.0","US","Albany","35.7976989746","-78.6252975464"],["OpenX","2200000.0","US","Des Moines","40.5411987305","-119.586898804"]],"sampleCols":[{"operations":[],"comment":"","nested":[],"name":"acct_client","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_amount","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_country_cd","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lat","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lon","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":f
alse,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0}],"inputFormat":"file","inputFormatsAll":[{"value":"file","name":"File"},{"value":"manual","name":"Manually"},{"value":"query","name":"SQL Query"},{"value":"table","name":"Table"}],"inputFormatsManual":[{"value":"manual","name":"Manually"}],"inputFormats":[{"value":"file","name":"File"},{"value":"manual","name":"Manually"},{"value":"query","name":"SQL Query"},{"value":"table","name":"Table"}],"path":"/user/hue/data/query-hive-360.csv","isObjectStore":false,"table":"","tableName":"","databaseName":"default","apiHelperType":"hive","query":"","draggedQuery":"","format":{"type":"csv","fieldSeparator":",","recordSeparator":"\\n","quoteChar":"\\"","hasHeader":true,"status":0},"show":true,"defaultName":"default.query-hive-360"}''')
  # Importer "destination" payload: tableFormat=orc with
  # isTransactional/isInsertOnly both true.
  destination = json.loads('''{"isTransactional": true, "isInsertOnly": true, "sourceType": "hive", "name":"default.parquet_table","apiHelperType":"hive","description":"","outputFormat":"table","outputFormatsList":[{"name":"Table","value":"table"},{"name":"Solr index","value":"index"},{"name":"File","value":"file"},{"name":"Database","value":"database"}],"outputFormats":[{"name":"Table","value":"table"},{"name":"Solr index","value":"index"}],"columns":[{"operations":[],"comment":"","nested":[],"name":"acct_client","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_amount","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_country_cd","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lat","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lon","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":1
00,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0}],"bulkColumnNames":"acct_client,tran_amount,tran_country_cd,vrfcn_city,vrfcn_city_lat,vrfcn_city_lon","showProperties":false,"isTargetExisting":false,"isTargetChecking":false,"existingTargetUrl":"","tableName":"parquet_table","databaseName":"default","tableFormat":"orc","KUDU_DEFAULT_RANGE_PARTITION_COLUMN":{"values":[{"value":""}],"name":"VALUES","lower_val":0,"include_lower_val":"<=","upper_val":1,"include_upper_val":"<="},"KUDU_DEFAULT_PARTITION_COLUMN":{"columns":[],"range_partitions":[{"values":[{"value":""}],"name":"VALUES","lower_val":0,"include_lower_val":"<=","upper_val":1,"include_upper_val":"<="}],"name":"HASH","int_val":16},"tableFormats":[{"value":"text","name":"Text"},{"value":"parquet","name":"Parquet"},{"value":"kudu","name":"Kudu"},{"value":"csv","name":"Csv"},{"value":"avro","name":"Avro"},{"value":"json","name":"Json"},{"value":"regexp","name":"Regexp"},{"value":"orc","name":"ORC"}],"partitionColumns":[],"kuduPartitionColumns":[],"primaryKeys":[],"primaryKeyObjects":[],"importData":true,"useDefaultLocation":true,"nonDefaultLocation":"/user/hue/data/query-hive-360.csv","hasHeader":true,"useCustomDelimiters":false,"customFieldDelimiter":",","customCollectionDelimiter":"\\\\002","customMapDelimiter":"\\\\003","customRegexp":""}''')
  # MockFs reports the CSV's parent dir so the temp table LOCATION resolves
  # to /user/hue/data.
  path = {'isDir': False, 'split': ('/user/hue/data', 'query-hive-360.csv'), 'listdir': ['/user/hue/data']}
  request = MockRequest(fs=MockFs(path=path))
  # Render the full DDL/DML script generated for this import.
  sql = SQLIndexer(user=request.user, fs=request.fs).create_table_from_a_file(source, destination).get_str()
  assert_true('''USE default;''' in sql, sql)
  # Temp staging table stays non-transactional even for a transactional target.
  assert_true('''CREATE EXTERNAL TABLE `default`.`hue__tmp_parquet_table`
(
  `acct_client` string ,
  `tran_amount` double ,
  `tran_country_cd` string ,
  `vrfcn_city` string ,
  `vrfcn_city_lat` double ,
  `vrfcn_city_lon` double ) ROW FORMAT DELIMITED
    FIELDS TERMINATED BY ','
    COLLECTION ITEMS TERMINATED BY '\\002'
    MAP KEYS TERMINATED BY '\\003'
  STORED AS TextFile LOCATION '/user/hue/data'
TBLPROPERTIES("transactional" = "false", "skip.header.line.count" = "1")
;''' in sql, sql)
  # Final table: ORC with insert-only transactional properties.
  assert_true('''CREATE TABLE `default`.`parquet_table`
        STORED AS orc
TBLPROPERTIES("transactional"="true", "transactional_properties"="insert_only")
AS SELECT *
FROM `default`.`hue__tmp_parquet_table`;
''' in sql, sql)
  # The staging table is cleaned up at the end of the script.
  assert_true('''DROP TABLE IF EXISTS `default`.`hue__tmp_parquet_table`;
''' in sql, sql)
def test_generate_create_empty_kudu_table():
  """Manual (no source file) Kudu destination should yield a single
  CREATE TABLE ... STORED AS kudu statement carrying the chosen primary key.
  """
  # Manual input format: there is no file to import, so only DDL is expected.
  source = json.loads('''{"sourceType": "impala", "apiHelperType": "impala", "path": "", "inputFormat": "manual"}''')
  # NOTE(review): the destination literal below contains a raw line break in the
  # middle of the "isTargetExisting" key (artifact of the file having been
  # re-wrapped); json.loads rejects raw control characters inside strings, so as
  # written this call would raise -- confirm against the upstream test source.
  destination = json.loads('''{"isTransactional": false, "isInsertOnly": false, "sourceType": "impala", "name":"default.manual_empty_kudu","apiHelperType":"impala","description":"","outputFormat":"table","columns":[{"operations":[],"comment":"","nested":[],"name":"acct_client","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_amount","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_country_cd","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lat","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lon","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0}],"bulkColumnNames":"acct_client,tran_amount,tran_country_cd,vrfcn_city,vrfcn_city_lat,vrfcn_city_lon","showProperties":false,"isTarget
Existing":false,"isTargetChecking":false,"existingTargetUrl":"","tableName":"manual_kudu_table","databaseName":"default","tableFormat":"kudu","KUDU_DEFAULT_RANGE_PARTITION_COLUMN":{"values":[{"value":""}],"name":"VALUES","lower_val":0,"include_lower_val":"<=","upper_val":1,"include_upper_val":"<="},"KUDU_DEFAULT_PARTITION_COLUMN":{"columns":[],"range_partitions":[{"values":[{"value":""}],"name":"VALUES","lower_val":0,"include_lower_val":"<=","upper_val":1,"include_upper_val":"<="}],"name":"HASH","int_val":16},"tableFormats":[{"value":"text","name":"Text"},{"value":"parquet","name":"Parquet"},{"value":"kudu","name":"Kudu"},{"value":"csv","name":"Csv"},{"value":"avro","name":"Avro"},{"value":"json","name":"Json"},{"value":"regexp","name":"Regexp"},{"value":"orc","name":"ORC"}],"partitionColumns":[],"kuduPartitionColumns":[],"primaryKeys": ["acct_client"],"primaryKeyObjects":[],"importData":false,"useDefaultLocation":true,"nonDefaultLocation":"/user/hue/data/query-hive-360.csv","hasHeader":false,"useCustomDelimiters":false,"customFieldDelimiter":",","customCollectionDelimiter":"\\\\002","customMapDelimiter":"\\\\003","customRegexp":""}''')
  # Fake filesystem layout the indexer would otherwise look up on HDFS.
  path = {'isDir': False, 'split': ('/user/hue/data', 'query-hive-360.csv'), 'listdir': ['/user/hue/data']}
  request = MockRequest(fs=MockFs(path=path))
  sql = SQLIndexer(user=request.user, fs=request.fs).create_table_from_a_file(source, destination).get_str()
  # importData is false, so no temp table / INSERT...SELECT should be emitted --
  # just the CREATE TABLE with PRIMARY KEY (acct_client), stored as Kudu.
  assert_true('''CREATE TABLE `default`.`manual_empty_kudu`
(
  `acct_client` string ,
  `tran_amount` double ,
  `tran_country_cd` string ,
  `vrfcn_city` string ,
  `vrfcn_city_lat` double ,
  `vrfcn_city_lon` double , PRIMARY KEY (acct_client)
) STORED AS kudu TBLPROPERTIES("transactional" = "false")
;''' in sql, sql)
| true | true |
f71b3428812e3f4af3ba5ec76b4fec00628e68ec | 1,009 | py | Python | web/tex_cnn_rest.py | wbj0110/cnn-text-classification-tf-chinese | 42e47d34c300e9d571231e43c189ee292b595559 | [
"Apache-2.0"
] | null | null | null | web/tex_cnn_rest.py | wbj0110/cnn-text-classification-tf-chinese | 42e47d34c300e9d571231e43c189ee292b595559 | [
"Apache-2.0"
] | null | null | null | web/tex_cnn_rest.py | wbj0110/cnn-text-classification-tf-chinese | 42e47d34c300e9d571231e43c189ee292b595559 | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/env python
from tornado import httpserver
from tornado import gen
from tornado.ioloop import IOLoop
import tornado.web
import json
import single_eval as sev
class IndexHandler(tornado.web.RequestHandler):
    """Root endpoint: responds with a static greeting (health-check style)."""
    def get(self):
        # GET / -> plain-text greeting confirming the service is up.
        self.write("Hello,This is TextCNN")
class ClassifyHandler(tornado.web.RequestHandler):
    """Text-classification endpoint backed by single_eval (TextCNN model)."""
    def get(self):
        # The text to classify comes from the "q" query parameter ("Hello" default).
        data = self.get_argument('q', 'Hello')
        # Delegates to single_eval.classify; the return value is embedded verbatim
        # in the response (opaque here -- presumably a class label; confirm in single_eval).
        predict_result = sev.classify(data)
        self.write("this is Classfication for text,get method and result:{}".format(predict_result))
    def post(self):
        # NOTE(review): POST never invokes the classifier -- it only returns a fixed
        # string. Looks like a stub; confirm whether POST was meant to classify a body.
        self.write("this is classfication for text ,post method")
class Application(tornado.web.Application):
    """URL routing: "/" -> IndexHandler, "/classify" -> ClassifyHandler."""
    def __init__(self):
        # The trailing "/?" makes the trailing slash optional for both routes.
        handlers = [
            (r"/?",IndexHandler),
            (r"/classify/?",ClassifyHandler)
        ]
        tornado.web.Application.__init__(self,handlers=handlers)
def main(port=80):
    """Build the Tornado application and serve it until interrupted.

    Args:
        port: TCP port to listen on. Defaults to 80 (the previously
            hard-coded value) for backward compatibility; note that ports
            below 1024 normally require elevated privileges.
    """
    app = Application()
    app.listen(port)
    # Blocks forever, dispatching requests on the singleton IO loop.
    IOLoop.instance().start()


if __name__ == '__main__':
    main()
| 25.871795 | 100 | 0.667988 |
from tornado import httpserver
from tornado import gen
from tornado.ioloop import IOLoop
import tornado.web
import json
import single_eval as sev
class IndexHandler(tornado.web.RequestHandler):
    """Root endpoint: responds with a static greeting (health-check style)."""
    def get(self):
        # GET / -> plain-text greeting confirming the service is up.
        self.write("Hello,This is TextCNN")
class ClassifyHandler(tornado.web.RequestHandler):
    """Text-classification endpoint backed by single_eval (TextCNN model)."""
    def get(self):
        # The text to classify comes from the "q" query parameter ("Hello" default).
        data = self.get_argument('q', 'Hello')
        # Delegates to single_eval.classify; the return value is embedded verbatim
        # in the response (opaque here -- presumably a class label; confirm in single_eval).
        predict_result = sev.classify(data)
        self.write("this is Classfication for text,get method and result:{}".format(predict_result))
    def post(self):
        # NOTE(review): POST never invokes the classifier -- it only returns a fixed
        # string. Looks like a stub; confirm whether POST was meant to classify a body.
        self.write("this is classfication for text ,post method")
class Application(tornado.web.Application):
    """URL routing: "/" -> IndexHandler, "/classify" -> ClassifyHandler."""
    def __init__(self):
        # The trailing "/?" makes the trailing slash optional for both routes.
        handlers = [
            (r"/?",IndexHandler),
            (r"/classify/?",ClassifyHandler)
        ]
        tornado.web.Application.__init__(self,handlers=handlers)
def main(port=80):
    """Build the Tornado application and serve it until interrupted.

    Args:
        port: TCP port to listen on. Defaults to 80 (the previously
            hard-coded value) for backward compatibility; note that ports
            below 1024 normally require elevated privileges.
    """
    app = Application()
    app.listen(port)
    # Blocks forever, dispatching requests on the singleton IO loop.
    IOLoop.instance().start()


if __name__ == '__main__':
    main()
| true | true |
f71b345986d63817e8ebf1e91022534a55821bb0 | 1,010 | py | Python | setup.py | jiad-dev/flask-req-parser | f46a6c002381d7b74e9c80c6b3ae536dcda908f9 | [
"BSD-2-Clause"
] | 2 | 2020-10-17T04:46:08.000Z | 2020-10-17T04:46:10.000Z | setup.py | jiad-dev/flask-req-parser | f46a6c002381d7b74e9c80c6b3ae536dcda908f9 | [
"BSD-2-Clause"
] | null | null | null | setup.py | jiad-dev/flask-req-parser | f46a6c002381d7b74e9c80c6b3ae536dcda908f9 | [
"BSD-2-Clause"
] | 2 | 2020-10-07T03:33:19.000Z | 2020-10-07T04:07:16.000Z | from setuptools import setup
setup(
name='flask_req_parser',
version='0.1.4',
url='https://github.com/Rhyanz46/flask-req-parser',
license='BSD',
author='Arian Saputra',
author_email='rianariansaputra@gmail.com',
description='Simple Request parser for flask',
long_description=__doc__,
# packages=find_packages(),
# py_modules=['flask_req_parser'],
# if you would be using a package instead use packages instead
# of py_modules:
packages=['flask_req_parser'],
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=[
'Flask'
],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
) | 30.606061 | 70 | 0.643564 | from setuptools import setup
setup(
name='flask_req_parser',
version='0.1.4',
url='https://github.com/Rhyanz46/flask-req-parser',
license='BSD',
author='Arian Saputra',
author_email='rianariansaputra@gmail.com',
description='Simple Request parser for flask',
long_description=__doc__,
packages=['flask_req_parser'],
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=[
'Flask'
],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
) | true | true |
f71b36d9600e32803a5a8daae7b744b0672e4fd7 | 24,960 | py | Python | src/data/database.py | aFoxPod/torrent-bot | 29ffaea5edab81bfa3aa0d944a96a766d06770e5 | [
"MIT"
] | null | null | null | src/data/database.py | aFoxPod/torrent-bot | 29ffaea5edab81bfa3aa0d944a96a766d06770e5 | [
"MIT"
] | null | null | null | src/data/database.py | aFoxPod/torrent-bot | 29ffaea5edab81bfa3aa0d944a96a766d06770e5 | [
"MIT"
] | null | null | null | import sqlite3
class TorrentState:
    """Lifecycle states a tracked torrent can be in."""

    SEARCHING = "SEARCHING"      # still being searched
    DOWNLOADING = "DOWNLOADING"  # currently being downloaded
    SEEDING = "SEEDING"          # currently uploading
    COMPLETED = "COMPLETED"      # removed from seeding
    DELETING = "DELETING"        # torrent marked for deletion
    PAUSED = "PAUSED"            # download stopped

    @staticmethod
    def get_states() -> list:
        """Return every known state, in declaration order."""
        known = (TorrentState.SEARCHING, TorrentState.DOWNLOADING,
                 TorrentState.SEEDING, TorrentState.COMPLETED,
                 TorrentState.DELETING, TorrentState.PAUSED)
        return list(known)
class TBDatabase:
    """
    Database Handler

    A thin persistence layer over a single SQLite file that tracks movies,
    tv shows, seasons and episodes together with their torrent state.

    Attributes
    ----------
    db_file_path : str
        the database file path (sqlite)
    """

    def __init__(self, db_file_path: str) -> None:
        self.db_file_path = db_file_path
        self.connection = sqlite3.connect(self.db_file_path)
        # Required so the ON DELETE CASCADE constraints in the schema fire.
        self.connection.execute("PRAGMA foreign_keys = ON")
        # Every fetch returns a dict keyed by column name (see dict_factory).
        self.connection.row_factory = dict_factory
        self.states = TorrentState()

    # ------------------------------------------------------------------ #
    # internal helpers
    # ------------------------------------------------------------------ #

    def _assert_valid_state(self, state: str) -> None:
        """Raise if ``state`` is not a known TorrentState value."""
        if state not in self.states.get_states():
            raise Exception(f"Non allowed state={state}!")

    def _fetch_all(self, sql: str, params: tuple = ()) -> list:
        """Run a SELECT and return all rows (as dicts)."""
        cur = self.connection.cursor()
        cur.execute(sql, params)
        return cur.fetchall()

    def _fetch_one(self, sql: str, params: tuple = ()) -> dict:
        """Run a SELECT and return the first row, or None."""
        cur = self.connection.cursor()
        cur.execute(sql, params)
        return cur.fetchone()

    def _execute_and_commit(self, sql: str, params: tuple = ()):
        """Run a write statement, commit, and return the cursor used."""
        cur = self.connection.cursor()
        cur.execute(sql, params)
        self.connection.commit()
        return cur

    def _update_row(self, table: str, allowed_columns: list, id: int, kwargs: dict) -> None:
        """Build and run ``UPDATE <table> SET col=?, ... WHERE id=?``.

        Args:
            table: table name (internal constant, never user input)
            allowed_columns: whitelist of updatable column names
            id: row id to update
            kwargs: column -> new value mapping

        Raises:
            Exception: if kwargs is empty or contains a non-whitelisted column
        """
        if not kwargs:
            raise Exception("At least one argument must be specified")
        assignments = []
        values = ()
        for key, value in kwargs.items():
            if key not in allowed_columns:
                raise Exception(
                    f"The key argument must be one of the following: {allowed_columns}"
                )
            assignments.append(f"{key}=?")
            values += (value,)
        values += (id,)
        # Column names are validated against the whitelist above and values
        # are bound parameters, so this f-string cannot inject SQL.
        self._execute_and_commit(
            f"UPDATE {table} SET {', '.join(assignments)} WHERE id=?", values
        )

    # ------------------------------------------------------------------ #
    # schema
    # ------------------------------------------------------------------ #

    def create_schema(self) -> None:
        """Create all tables and views used by the bot (idempotent)."""
        cur = self.connection.cursor()
        # Movies
        cur.execute(
            f"""CREATE TABLE IF NOT EXISTS movies (
            "id" INTEGER PRIMARY KEY AUTOINCREMENT,
            "name" TEXT UNIQUE NOT NULL,
            "max_size_mb" INTEGER NOT NULL,
            "resolution_profile" TEXT NOT NULL,
            "state" TEXT NOT NULL DEFAULT '{self.states.SEARCHING}',
            "imdbid" INTEGER UNIQUE NOT NULL,
            "cover_url" TEXT,
            "hash" TEXT)
            """
        )
        # TV shows
        cur.execute(
            f"""CREATE TABLE IF NOT EXISTS tv_shows (
            "id" INTEGER PRIMARY KEY AUTOINCREMENT,
            "name" TEXT UNIQUE NOT NULL,
            "max_episode_size_mb" INTEGER NOT NULL,
            "resolution_profile" TEXT NOT NULL,
            "imdbid" INTEGER UNIQUE NOT NULL,
            "state" TEXT NOT NULL DEFAULT '{self.states.SEARCHING}',
            "cover_url" TEXT
            )
            """
        )
        # TV show seasons (cascade-deleted with their show)
        cur.execute(
            f"""CREATE TABLE IF NOT EXISTS tv_show_seasons (
            "id" INTEGER PRIMARY KEY AUTOINCREMENT,
            "show_id" INTEGER,
            "season_number" INTEGER NOT NULL,
            "season_number_episodes" INTEGER NOT NULL,
            "state" TEXT NOT NULL DEFAULT '{self.states.SEARCHING}',
            "hash" TEXT,
            FOREIGN KEY(show_id) REFERENCES tv_shows(id) ON DELETE CASCADE,
            UNIQUE(show_id, season_number))
            """
        )
        # Season episodes (cascade-deleted with their season)
        cur.execute(
            f"""CREATE TABLE IF NOT EXISTS tv_show_season_episodes (
            "id" INTEGER PRIMARY KEY AUTOINCREMENT,
            "season_id" INTEGER,
            "name" TEXT NOT NULL,
            "episode_number" INTEGER NOT NULL,
            "air_date" TEXT NOT NULL,
            "state" TEXT NOT NULL DEFAULT '{self.states.SEARCHING}',
            "hash" TEXT,
            FOREIGN KEY(season_id) REFERENCES tv_show_seasons(id) ON DELETE CASCADE,
            UNIQUE(season_id, episode_number))
            """
        )
        # Denormalized show+season join
        cur.execute(
            """CREATE VIEW IF NOT EXISTS tv_shows_with_seasons_view
            AS
            SELECT
                tv_shows.id as show_id,
                tv_shows.name as show_name,
                tv_shows.state as show_state,
                tv_shows.resolution_profile as resolution_profile,
                tv_shows.name as show_name,
                tv_shows.max_episode_size_mb as max_episode_size_mb,
                tv_shows.imdbid as show_imdbid,
                tv_show_seasons.id as season_id,
                tv_show_seasons.season_number as season_number,
                tv_show_seasons.season_number_episodes as season_number_episodes,
                tv_show_seasons.state as season_state,
                tv_show_seasons.hash as season_hash
            FROM tv_shows
            INNER JOIN tv_show_seasons on tv_shows.id = tv_show_seasons.show_id;
            """
        )
        # Denormalized season+episode join
        cur.execute(
            """CREATE VIEW IF NOT EXISTS tv_show_seasons_with_episodes_view
            AS
            SELECT
                tv_show_seasons.id as season_id,
                tv_show_seasons.hash as season_hash,
                tv_show_seasons.state as season_state,
                tv_show_seasons.season_number as season_number,
                tv_show_season_episodes.id as episode_id,
                tv_show_season_episodes.name as episode_name,
                tv_show_season_episodes.air_date as episode_air_date,
                tv_show_season_episodes.episode_number as episode_number,
                tv_show_season_episodes.state as episode_state,
                tv_show_season_episodes.hash as episode_hash
            FROM tv_show_seasons
            INNER JOIN tv_show_season_episodes on tv_show_seasons.id = tv_show_season_episodes.season_id;
            """
        )
        self.connection.commit()

    # ------------------------------------------------------------------ #
    # bulk getters
    # ------------------------------------------------------------------ #

    def get_all_movies(self) -> list:
        """Return every movie row."""
        return self._fetch_all("SELECT * FROM movies")

    def get_all_tv_shows(self) -> list:
        """Return every tv show row."""
        return self._fetch_all("SELECT * FROM tv_shows;")

    def get_all_seasons(self) -> list:
        """Return every tv show season row."""
        return self._fetch_all("SELECT * FROM tv_show_seasons;")

    def get_all_episodes(self) -> list:
        """Return every episode row."""
        return self._fetch_all("SELECT * FROM tv_show_season_episodes;")

    def get_all_tv_shows_with_seasons(self) -> list:
        """Return every (tv show, season) joined row."""
        return self._fetch_all("SELECT * FROM tv_shows_with_seasons_view;")

    def get_all_tv_shows_season_episodes(self) -> list:
        """Return every (season, episode) joined row.

        Bug fix: this previously queried the non-existent
        "tv_shows_season_with_episodes_view"; the view created by
        create_schema is named "tv_show_seasons_with_episodes_view",
        so the old query always raised sqlite3.OperationalError.
        """
        return self._fetch_all("SELECT * FROM tv_show_seasons_with_episodes_view;")

    # ------------------------------------------------------------------ #
    # state-filtered getters
    # ------------------------------------------------------------------ #

    def get_movies_by_state(self, state: str) -> list:
        """Return all movies in ``state``; raises Exception on unknown state."""
        self._assert_valid_state(state)
        return self._fetch_all("SELECT * FROM movies WHERE state=?", (state,))

    def get_tv_shows_by_state(self, state: str) -> list:
        """Return all tv shows in ``state``; raises Exception on unknown state."""
        self._assert_valid_state(state)
        return self._fetch_all("SELECT * FROM tv_shows WHERE state=?", (state,))

    def get_tv_show_with_seasons_by_state(self, state: str) -> list:
        """Return all (show, season) rows whose season is in ``state``."""
        self._assert_valid_state(state)
        return self._fetch_all(
            "SELECT * FROM tv_shows_with_seasons_view WHERE season_state=?", (state,)
        )

    def get_tv_show_seasons_with_episodes_by_state(self, state: str) -> list:
        """Return all (season, episode) rows whose season is in ``state``."""
        self._assert_valid_state(state)
        return self._fetch_all(
            "SELECT * FROM tv_show_seasons_with_episodes_view WHERE season_state=?", (state,)
        )

    # ------------------------------------------------------------------ #
    # single-row / per-parent getters
    # ------------------------------------------------------------------ #

    def get_movie(self, id: int) -> dict:
        """Return the movie with ``id``, or None."""
        return self._fetch_one("SELECT * FROM movies WHERE id=?", (id,))

    def get_tv_show(self, id: int) -> dict:
        """Return the tv show with ``id``, or None."""
        return self._fetch_one("SELECT * FROM tv_shows WHERE id=?", (id,))

    def get_tv_show_season(self, id: str) -> dict:
        """Return the season with ``id``, or None."""
        return self._fetch_one("SELECT * FROM tv_show_seasons WHERE id=?", (id,))

    def get_tv_show_with_seasons(self, id: str) -> list:
        """Return all (show, season) rows for show ``id``."""
        return self._fetch_all(
            "SELECT * from tv_shows_with_seasons_view WHERE show_id=?", (id,)
        )

    def get_tv_show_season_with_episodes(self, id: str) -> list:
        """Return all (season, episode) rows for season ``id``."""
        return self._fetch_all(
            "SELECT * from tv_show_seasons_with_episodes_view WHERE season_id=?", (id,)
        )

    def get_season_episodes(self, season_id: str) -> list:
        """Return all episodes of season ``season_id``."""
        return self._fetch_all(
            "SELECT * FROM tv_show_season_episodes WHERE season_id=?", (season_id,)
        )

    # ------------------------------------------------------------------ #
    # deletes
    # ------------------------------------------------------------------ #

    def delete_movie(self, id: int) -> None:
        """Delete the movie with ``id``."""
        self._execute_and_commit("DELETE FROM movies WHERE id=?", (id,))

    def delete_tv_show(self, id: int) -> None:
        """Delete the tv show with ``id`` (seasons/episodes cascade)."""
        self._execute_and_commit("DELETE FROM tv_shows WHERE id=?", (id,))

    def delete_season(self, id: int):
        """Delete the season with ``id`` (episodes cascade)."""
        self._execute_and_commit("DELETE FROM tv_show_seasons WHERE id=?", (id,))

    def delete_episode(self, id: int):
        """Delete the episode with ``id``."""
        self._execute_and_commit("DELETE FROM tv_show_season_episodes WHERE id=?", (id,))

    # ------------------------------------------------------------------ #
    # inserts
    # ------------------------------------------------------------------ #

    def add_movie(self, name: str, max_size_mb: int, resolution_profile: str, imdbid: str, cover_url: str) -> int:
        """Insert a movie and return its new row id."""
        cur = self._execute_and_commit(
            """
            INSERT INTO movies(name,max_size_mb,resolution_profile, imdbid, cover_url)
            VALUES(?,?,?,?,?)
            """,
            (name, max_size_mb, resolution_profile, imdbid, cover_url),
        )
        # Cursor.lastrowid is the rowid generated by the INSERT above -- the
        # same value as SELECT last_insert_rowid(), without a second query.
        return cur.lastrowid

    def add_tv_show(self, name: str, max_episode_size_mb: int, resolution_profile: str, imdbid: str, cover_url: str) -> int:
        """Insert a tv show and return its new row id."""
        cur = self._execute_and_commit(
            """
            INSERT INTO tv_shows(name,max_episode_size_mb,resolution_profile, imdbid, cover_url)
            VALUES(?,?,?,?,?)
            """,
            (name, max_episode_size_mb, resolution_profile, imdbid, cover_url),
        )
        return cur.lastrowid

    def add_tv_show_season(self, show_id: int, season_number: str, season_number_episodes: int) -> int:
        """Insert a season for ``show_id`` and return its new row id."""
        cur = self._execute_and_commit(
            """
            INSERT INTO tv_show_seasons(show_id,season_number, season_number_episodes)
            VALUES(?,?,?)
            """,
            (show_id, season_number, season_number_episodes),
        )
        return cur.lastrowid

    def add_season_episode(self, season_id: int, episode_name: str, episode_number: int, air_date: str) -> int:
        """Insert an episode for ``season_id`` and return its new row id."""
        cur = self._execute_and_commit(
            """
            INSERT INTO tv_show_season_episodes(season_id, name, episode_number, air_date)
            VALUES(?,?,?,?)
            """,
            (season_id, episode_name, episode_number, air_date),
        )
        return cur.lastrowid

    # ------------------------------------------------------------------ #
    # lookups / updates
    # ------------------------------------------------------------------ #

    def get_season_id(self, show_id: int, season_number: int) -> int:
        """Return the id of season ``season_number`` of show ``show_id``."""
        row = self._fetch_one(
            "SELECT id FROM tv_show_seasons WHERE show_id=? AND season_number=?",
            (show_id, season_number),
        )
        return row["id"]

    def update_movie(self, id: int, **kwargs: dict) -> None:
        """Update whitelisted columns of movie ``id`` (see _update_row)."""
        self._update_row(
            "movies",
            ["name", "max_size_mb", "resolution_profile", "state", "hash", "imdbid", "cover_url"],
            id,
            kwargs,
        )

    def update_tv_show(self, id: int, **kwargs: dict) -> None:
        """Update whitelisted columns of tv show ``id`` (see _update_row)."""
        self._update_row(
            "tv_shows",
            ["name", "max_episode_size_mb", "resolution_profile", "state", "imdbid", "cover_url"],
            id,
            kwargs,
        )

    def update_show_season(self, id: int, **kwargs: dict) -> None:
        """Update whitelisted columns of season ``id`` (see _update_row)."""
        self._update_row(
            "tv_show_seasons",
            ["season_number", "season_number_episodes", "state", "hash"],
            id,
            kwargs,
        )

    def update_tv_show_season_episode(self, id: int, **kwargs: dict) -> None:
        """Update whitelisted columns of episode ``id`` (see _update_row)."""
        self._update_row(
            "tv_show_season_episodes",
            ["season_id", "episode_number", "air_date", "state", "hash"],
            id,
            kwargs,
        )

    # ------------------------------------------------------------------ #
    # aggregate/set helpers
    # ------------------------------------------------------------------ #

    def get_season_states(self, show_id: int) -> set:
        """Return the distinct season states of show ``show_id``."""
        rows = self._fetch_all(
            "SELECT season_state FROM tv_shows_with_seasons_view WHERE show_id=?",
            (show_id,),
        )
        return {row["season_state"] for row in rows}

    def get_season_episodes_states(self, season_id) -> set:
        """Return the distinct episode states of season ``season_id``."""
        rows = self._fetch_all(
            "SELECT state FROM tv_show_season_episodes WHERE season_id=?",
            (season_id,),
        )
        return {row["state"] for row in rows}

    def get_tv_show_season_numbers(self, show_id: int) -> set:
        """Return the season numbers already stored for show ``show_id``."""
        rows = self._fetch_all(
            "SELECT season_number FROM tv_show_seasons WHERE show_id=?", (show_id,)
        )
        return {row["season_number"] for row in rows}

    def get_tv_show_season_episode_numbers(self, season_id: int) -> set:
        """Return the episode numbers already stored for season ``season_id``."""
        rows = self._fetch_all(
            "SELECT episode_number FROM tv_show_season_episodes WHERE season_id=?",
            (season_id,),
        )
        return {row["episode_number"] for row in rows}

    def close(self) -> None:
        """Close the underlying database connection."""
        self.connection.close()
def dict_factory(cursor, row) -> dict:
    """Map a result row tuple to a {column_name: value} dict.

    Intended for use as sqlite3 ``Connection.row_factory``: the column
    names come from ``cursor.description`` (first element of each entry)
    and are paired positionally with the row values.

    Args:
        cursor: the sqlite3 cursor that produced the row
        row: the raw row tuple

    Returns:
        dict: column name -> value for this row
    """
    column_names = (column[0] for column in cursor.description)
    return dict(zip(column_names, row))
if __name__ == "__main__":
    import sys

    # CLI smoke test: initialize the schema in the given DB file and dump movies.
    if len(sys.argv) < 2:
        print("Usage: python database.py <db_path>", file=sys.stderr)
        # A missing argument is a usage error: exit non-zero (was exit(0)),
        # and use sys.exit, which works even when site builtins are disabled.
        sys.exit(1)
    db = TBDatabase(sys.argv[1])
    try:
        db.create_schema()
        print(db.get_all_movies())
    finally:
        # Always release the sqlite connection, even if schema creation fails.
        db.close()
| 33.28 | 124 | 0.575561 | import sqlite3
class TorrentState:
    """Lifecycle states a tracked torrent can be in."""
    SEARCHING = "SEARCHING"  # still being searched
    DOWNLOADING = "DOWNLOADING"  # currently being downloaded
    SEEDING = "SEEDING"  # currently uploading
    COMPLETED = "COMPLETED"  # removed from seeding
    DELETING = "DELETING"  # torrent marked for deletion
    PAUSED = "PAUSED"  # download stopped
    @staticmethod
    def get_states() -> list:
        """Return every known state, in declaration order."""
        return [
            TorrentState.SEARCHING,
            TorrentState.DOWNLOADING,
            TorrentState.SEEDING,
            TorrentState.COMPLETED,
            TorrentState.DELETING,
            TorrentState.PAUSED
        ]
class TBDatabase:
def __init__(self, db_file_path: str) -> None:
self.db_file_path = db_file_path
self.connection = sqlite3.connect(self.db_file_path)
self.connection.execute("PRAGMA foreign_keys = ON")
self.connection.row_factory = dict_factory
self.states = TorrentState()
def create_schema(self) -> None:
cur = self.connection.cursor()
sql = f"""CREATE TABLE IF NOT EXISTS movies (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"name" TEXT UNIQUE NOT NULL,
"max_size_mb" INTEGER NOT NULL,
"resolution_profile" TEXT NOT NULL,
"state" TEXT NOT NULL DEFAULT '{self.states.SEARCHING}',
"imdbid" INTEGER UNIQUE NOT NULL,
"cover_url" TEXT,
"hash" TEXT)
"""
cur.execute(sql)
sql = f"""CREATE TABLE IF NOT EXISTS tv_shows (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"name" TEXT UNIQUE NOT NULL,
"max_episode_size_mb" INTEGER NOT NULL,
"resolution_profile" TEXT NOT NULL,
"imdbid" INTEGER UNIQUE NOT NULL,
"state" TEXT NOT NULL DEFAULT '{self.states.SEARCHING}',
"cover_url" TEXT
)
"""
cur.execute(sql)
sql = f"""CREATE TABLE IF NOT EXISTS tv_show_seasons (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"show_id" INTEGER,
"season_number" INTEGER NOT NULL,
"season_number_episodes" INTEGER NOT NULL,
"state" TEXT NOT NULL DEFAULT '{self.states.SEARCHING}',
"hash" TEXT,
FOREIGN KEY(show_id) REFERENCES tv_shows(id) ON DELETE CASCADE,
UNIQUE(show_id, season_number))
"""
cur.execute(sql)
sql = f"""CREATE TABLE IF NOT EXISTS tv_show_season_episodes (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"season_id" INTEGER,
"name" TEXT NOT NULL,
"episode_number" INTEGER NOT NULL,
"air_date" TEXT NOT NULL,
"state" TEXT NOT NULL DEFAULT '{self.states.SEARCHING}',
"hash" TEXT,
FOREIGN KEY(season_id) REFERENCES tv_show_seasons(id) ON DELETE CASCADE,
UNIQUE(season_id, episode_number))
"""
cur.execute(sql)
sql = f"""CREATE VIEW IF NOT EXISTS tv_shows_with_seasons_view
AS
SELECT
tv_shows.id as show_id,
tv_shows.name as show_name,
tv_shows.state as show_state,
tv_shows.resolution_profile as resolution_profile,
tv_shows.name as show_name,
tv_shows.max_episode_size_mb as max_episode_size_mb,
tv_shows.imdbid as show_imdbid,
tv_show_seasons.id as season_id,
tv_show_seasons.season_number as season_number,
tv_show_seasons.season_number_episodes as season_number_episodes,
tv_show_seasons.state as season_state,
tv_show_seasons.hash as season_hash
FROM tv_shows
INNER JOIN tv_show_seasons on tv_shows.id = tv_show_seasons.show_id;
"""
cur.execute(sql)
sql = f"""CREATE VIEW IF NOT EXISTS tv_show_seasons_with_episodes_view
AS
SELECT
tv_show_seasons.id as season_id,
tv_show_seasons.hash as season_hash,
tv_show_seasons.state as season_state,
tv_show_seasons.season_number as season_number,
tv_show_season_episodes.id as episode_id,
tv_show_season_episodes.name as episode_name,
tv_show_season_episodes.air_date as episode_air_date,
tv_show_season_episodes.episode_number as episode_number,
tv_show_season_episodes.state as episode_state,
tv_show_season_episodes.hash as episode_hash
FROM tv_show_seasons
INNER JOIN tv_show_season_episodes on tv_show_seasons.id = tv_show_season_episodes.season_id;
"""
cur.execute(sql)
self.connection.commit()
def get_all_movies(self) -> list:
cur = self.connection.cursor()
cur.execute("SELECT * FROM movies")
return cur.fetchall()
def get_all_tv_shows(self) -> list:
cur = self.connection.cursor()
cur.execute("SELECT * FROM tv_shows;")
return cur.fetchall()
def get_all_seasons(self) -> list:
cur = self.connection.cursor()
cur.execute("SELECT * FROM tv_show_seasons;")
return cur.fetchall()
def get_all_episodes(self) -> list:
cur = self.connection.cursor()
cur.execute("SELECT * FROM tv_show_season_episodes;")
return cur.fetchall()
def get_all_tv_shows_with_seasons(self) -> list:
cur = self.connection.cursor()
cur.execute("SELECT * FROM tv_shows_with_seasons_view;")
return cur.fetchall()
def get_all_tv_shows_season_episodes(self) -> list:
cur = self.connection.cursor()
cur.execute("SELECT * FROM tv_shows_season_with_episodes_view;")
return cur.fetchall()
def get_movies_by_state(self, state: str) -> list:
    """Return all movies whose ``state`` column equals *state*.

    Raises if *state* is not one of the allowed workflow states.
    """
    if state not in self.states.get_states():
        raise Exception(f"Non allowed state={state}!")
    cursor = self.connection.cursor()
    return cursor.execute("SELECT * FROM movies WHERE state=?", (state,)).fetchall()
def get_tv_shows_by_state(self, state: str) -> list:
    """Return all TV shows whose ``state`` column equals *state*.

    Raises if *state* is not one of the allowed workflow states.
    """
    if state not in self.states.get_states():
        raise Exception(f"Non allowed state={state}!")
    cursor = self.connection.cursor()
    return cursor.execute("SELECT * FROM tv_shows WHERE state=?", (state,)).fetchall()
def get_tv_show_with_seasons_by_state(self, state: str) -> list:
    """Return show+season view rows whose season state equals *state*.

    Raises if *state* is not one of the allowed workflow states.
    """
    if state not in self.states.get_states():
        raise Exception(f"Non allowed state={state}!")
    cursor = self.connection.cursor()
    cursor.execute(
        "SELECT * FROM tv_shows_with_seasons_view WHERE season_state=?", (state,))
    return cursor.fetchall()
def get_tv_show_seasons_with_episodes_by_state(self, state: str) -> list:
    """Return season+episode view rows whose season state equals *state*.

    Raises if *state* is not one of the allowed workflow states.
    """
    if state not in self.states.get_states():
        raise Exception(f"Non allowed state={state}!")
    cursor = self.connection.cursor()
    cursor.execute(
        "SELECT * FROM tv_show_seasons_with_episodes_view WHERE season_state=?", (state,))
    return cursor.fetchall()
def get_movie(self, id: int) -> dict:
    """Fetch a single movie row by primary key (``None`` if absent)."""
    cursor = self.connection.cursor()
    return cursor.execute("SELECT * FROM movies WHERE id=?", (id,)).fetchone()
def get_tv_show(self, id: int) -> dict:
    """Fetch a single TV-show row by primary key (``None`` if absent)."""
    cursor = self.connection.cursor()
    return cursor.execute("SELECT * FROM tv_shows WHERE id=?", (id,)).fetchone()
def get_tv_show_season(self, id: str) -> dict:
    """Fetch a single season row by primary key (``None`` if absent)."""
    cursor = self.connection.cursor()
    return cursor.execute("SELECT * FROM tv_show_seasons WHERE id=?", (id,)).fetchone()
def get_tv_show_with_seasons(self, id: str) -> list:
    """Return all show+season view rows belonging to one show."""
    cursor = self.connection.cursor()
    return cursor.execute(
        "SELECT * from tv_shows_with_seasons_view WHERE show_id=?", (id,)).fetchall()
def get_tv_show_season_with_episodes(self, id: str) -> list:
    """Return all season+episode view rows belonging to one season."""
    cursor = self.connection.cursor()
    return cursor.execute(
        "SELECT * from tv_show_seasons_with_episodes_view WHERE season_id=?", (id,)).fetchall()
def get_season_episodes(self, season_id: str) -> list:
    """Return every episode row belonging to one season."""
    cursor = self.connection.cursor()
    cursor.execute("SELECT * FROM tv_show_season_episodes WHERE season_id=?", (season_id,))
    return cursor.fetchall()
def delete_movie(self, id: int) -> None:
    """Delete the movie row with primary key *id* and commit."""
    self.connection.cursor().execute("DELETE FROM movies WHERE id=?", (id,))
    self.connection.commit()
def delete_tv_show(self, id: int) -> None:
    """Delete the TV-show row with primary key *id* and commit."""
    self.connection.cursor().execute("DELETE FROM tv_shows WHERE id=?", (id,))
    self.connection.commit()
def delete_season(self, id: int) -> None:
    """Delete the season row with primary key *id* and commit."""
    cur = self.connection.cursor()
    cur.execute("DELETE FROM tv_show_seasons WHERE id=?", (id,))
    self.connection.commit()
def delete_episode(self, id: int) -> None:
    """Delete the episode row with primary key *id* and commit."""
    cur = self.connection.cursor()
    cur.execute("DELETE FROM tv_show_season_episodes WHERE id=?", (id,))
    self.connection.commit()
def add_movie(self, name: str, max_size_mb: int, resolution_profile: str, imdbid: str, cover_url: str) -> int:
    """Insert a new movie row and return its autogenerated primary key.

    Fix: use ``cursor.lastrowid`` instead of a follow-up
    ``SELECT last_insert_rowid()`` — the extra query cost a round-trip and
    only worked when the connection's ``row_factory`` produced dict-like
    rows (it subscripted the row with ``'id'``).
    """
    cur = self.connection.cursor()
    cur.execute(
        """
        INSERT INTO movies(name,max_size_mb,resolution_profile, imdbid, cover_url)
        VALUES(?,?,?,?,?)
        """,
        (name, max_size_mb, resolution_profile, imdbid, cover_url),
    )
    self.connection.commit()
    return cur.lastrowid
def add_tv_show(self, name: str, max_episode_size_mb: int, resolution_profile: str, imdbid: str, cover_url: str) -> int:
    """Insert a new TV-show row and return its autogenerated primary key.

    Fix: use ``cursor.lastrowid`` instead of a follow-up
    ``SELECT last_insert_rowid()`` that depended on a dict-like row factory.
    """
    cur = self.connection.cursor()
    cur.execute(
        """
        INSERT INTO tv_shows(name,max_episode_size_mb,resolution_profile, imdbid, cover_url)
        VALUES(?,?,?,?, ?)
        """,
        (name, max_episode_size_mb, resolution_profile, imdbid, cover_url),
    )
    self.connection.commit()
    return cur.lastrowid
def add_tv_show_season(self, show_id: int, season_number: str, season_number_episodes: int) -> int:
    """Insert a new season row and return its autogenerated primary key.

    Fix: use ``cursor.lastrowid`` instead of a follow-up
    ``SELECT last_insert_rowid()`` that depended on a dict-like row factory.
    """
    cur = self.connection.cursor()
    cur.execute(
        """
        INSERT INTO tv_show_seasons(show_id,season_number, season_number_episodes)
        VALUES(?,?,?)
        """,
        (show_id, season_number, season_number_episodes),
    )
    self.connection.commit()
    return cur.lastrowid
def add_season_episode(self, season_id: int, episode_name: str, episode_number: int, air_date: str) -> int:
    """Insert a new episode row and return its autogenerated primary key.

    Fix: use ``cursor.lastrowid`` instead of a follow-up
    ``SELECT last_insert_rowid()`` that depended on a dict-like row factory.
    """
    cur = self.connection.cursor()
    cur.execute(
        """
        INSERT INTO tv_show_season_episodes(season_id, name, episode_number, air_date)
        VALUES(?,?,?,?)
        """,
        (season_id, episode_name, episode_number, air_date),
    )
    self.connection.commit()
    return cur.lastrowid
def get_season_id(self, show_id: int, season_number: int) -> int:
    """Look up a season's primary key by show id and season number."""
    record = self.connection.cursor().execute(
        """
        SELECT id FROM tv_show_seasons WHERE show_id=? AND season_number=?
        """,
        (show_id, season_number),
    ).fetchone()
    # Rows are dict-like (connection-level row_factory), hence the key lookup.
    return record['id']
def update_movie(self, id: int, **kwargs: dict) -> None:
    """Update the given columns of one ``movies`` row and commit.

    Keyword arguments name the columns to change (e.g. ``state='done'``).

    Raises:
        Exception: if no keyword argument is given, or a key is not a
            known movie column.
    """
    movie_table_columns = ["name", "max_size_mb",
                           "resolution_profile", "state", "hash", "imdbid", "cover_url"]
    if not kwargs:
        raise Exception("At least one argument must be specified")
    for key in kwargs:
        if key not in movie_table_columns:
            raise Exception(
                f"The key argument must be one of the following: {movie_table_columns}"
            )
    # Only validated column names ever reach the SQL string; all values are
    # bound as parameters. (The previous reassignment of
    # connection.row_factory was dropped: an UPDATE fetches nothing, and
    # mutating connection-wide state here was a needless side effect.)
    assignments = ", ".join(f"{key}=?" for key in kwargs)
    params = tuple(kwargs.values()) + (id,)
    cur = self.connection.cursor()
    cur.execute(
        f"UPDATE movies SET {assignments} WHERE id=?",
        params,
    )
    self.connection.commit()
def update_tv_show(self, id: int, **kwargs: dict) -> None:
    """Update the given columns of one ``tv_shows`` row and commit.

    Raises:
        Exception: if no keyword argument is given, or a key is not a
            known TV-show column.
    """
    tv_shows_table_columns = ["name", "max_episode_size_mb",
                              "resolution_profile", "state", "imdbid", "cover_url"]
    if not kwargs:
        raise Exception("At least one argument must be specified")
    for key in kwargs:
        if key not in tv_shows_table_columns:
            raise Exception(
                f"The key argument must be one of the following: {tv_shows_table_columns}"
            )
    # Validated column names only; values bound as parameters. The former
    # connection.row_factory reassignment was removed (no rows are fetched).
    assignments = ", ".join(f"{key}=?" for key in kwargs)
    params = tuple(kwargs.values()) + (id,)
    cur = self.connection.cursor()
    cur.execute(
        f"UPDATE tv_shows SET {assignments} WHERE id=?",
        params,
    )
    self.connection.commit()
def update_show_season(self, id: int, **kwargs: dict) -> None:
    """Update the given columns of one ``tv_show_seasons`` row and commit.

    Raises:
        Exception: if no keyword argument is given, or a key is not a
            known season column.
    """
    tv_show_season_table_columns = ["season_number",
                                    "season_number_episodes", "state", "hash"]
    if not kwargs:
        raise Exception("At least one argument must be specified")
    for key in kwargs:
        if key not in tv_show_season_table_columns:
            raise Exception(
                f"The key argument must be one of the following: {tv_show_season_table_columns}"
            )
    # Validated column names only; values bound as parameters. The former
    # connection.row_factory reassignment was removed (no rows are fetched).
    assignments = ", ".join(f"{key}=?" for key in kwargs)
    params = tuple(kwargs.values()) + (id,)
    cur = self.connection.cursor()
    cur.execute(
        f"UPDATE tv_show_seasons SET {assignments} WHERE id=?",
        params,
    )
    self.connection.commit()
def update_tv_show_season_episode(self, id: int, **kwargs: dict) -> None:
    """Update the given columns of one ``tv_show_season_episodes`` row and commit.

    Raises:
        Exception: if no keyword argument is given, or a key is not a
            known episode column.
    """
    tv_show_season_episode_table_columns = [
        "season_id", "episode_number", "air_date", "state", "hash"]
    if not kwargs:
        raise Exception("At least one argument must be specified")
    for key in kwargs:
        if key not in tv_show_season_episode_table_columns:
            raise Exception(
                f"The key argument must be one of the following: {tv_show_season_episode_table_columns}"
            )
    # Validated column names only; values bound as parameters. The former
    # connection.row_factory reassignment was removed (no rows are fetched).
    assignments = ", ".join(f"{key}=?" for key in kwargs)
    params = tuple(kwargs.values()) + (id,)
    cur = self.connection.cursor()
    cur.execute(
        f"UPDATE tv_show_season_episodes SET {assignments} WHERE id=?",
        params,
    )
    self.connection.commit()
def get_season_states(self, show_id: int) -> set:
    """Return the distinct season states of one show."""
    cur = self.connection.cursor()
    cur.execute(
        "SELECT season_state FROM tv_shows_with_seasons_view WHERE show_id=?", (show_id,))
    return {row['season_state'] for row in cur.fetchall()}
def get_season_episodes_states(self, season_id) -> set:
    """Return the distinct episode states of one season."""
    cur = self.connection.cursor()
    cur.execute(
        "SELECT state FROM tv_show_season_episodes WHERE season_id=?", (season_id,))
    return {row['state'] for row in cur.fetchall()}
def get_tv_show_season_numbers(self, show_id: int) -> set:
    """Return the distinct season numbers already stored for one show."""
    cur = self.connection.cursor()
    cur.execute("SELECT season_number FROM tv_show_seasons WHERE show_id=?", (show_id,))
    return {row['season_number'] for row in cur.fetchall()}
def get_tv_show_season_episode_numbers(self, season_id: int) -> set:
    """Return the distinct episode numbers already stored for one season."""
    cur = self.connection.cursor()
    cur.execute(
        "SELECT episode_number FROM tv_show_season_episodes WHERE season_id=?", (season_id,))
    return {row['episode_number'] for row in cur.fetchall()}
def close(self) -> None:
    """Close the underlying SQLite connection."""
    self.connection.close()
def dict_factory(cursor, row) -> dict:
    """sqlite3 row factory that maps each column name to its value."""
    return {column[0]: row[index] for index, column in enumerate(cursor.description)}
if __name__ == "__main__":
    import sys

    if len(sys.argv) < 2:
        print("Usage: python database.py <db_path>", file=sys.stderr)
        # Fix: a missing argument is an error, so exit with a non-zero
        # status (``exit(0)`` falsely signalled success, and bare ``exit``
        # is the site-module helper, not guaranteed to exist).
        sys.exit(1)
    db = TBDatabase(sys.argv[1])
    db.create_schema()
    print(db.get_all_movies())
    db.close()
| true | true |
f71b389bb26a910057e54795c0cf91314386c30f | 11,436 | py | Python | sparseConv/multitask/semseg/models/res16unet.py | ShengyuH/Scene-Recognition-in-3D | 8fb869e1f8e8ff48c6f1082bb75f60a562875fc5 | [
"MIT"
] | 48 | 2020-03-02T23:05:59.000Z | 2022-02-22T11:23:17.000Z | sparseConv/multitask/semseg/models/res16unet.py | HenrryBryant/Scene-Recognition-in-3D | 8fb869e1f8e8ff48c6f1082bb75f60a562875fc5 | [
"MIT"
] | 5 | 2020-10-29T14:19:04.000Z | 2022-01-25T05:33:59.000Z | sparseConv/multitask/semseg/models/res16unet.py | HenrryBryant/Scene-Recognition-in-3D | 8fb869e1f8e8ff48c6f1082bb75f60a562875fc5 | [
"MIT"
] | 7 | 2020-06-18T05:23:01.000Z | 2021-05-13T01:26:32.000Z | from models.resnet import ResNetBase, get_norm
from models.modules.common import ConvType, NormType, conv, conv_tr
from models.modules.resnet_block import BasicBlock, Bottleneck, BasicBlockIN, BottleneckIN, BasicBlockLN
from MinkowskiEngine import MinkowskiReLU
import MinkowskiEngine.MinkowskiOps as me
class Res16UNetBase(ResNetBase):
    """Base class for Res16UNet: a sparse-convolutional UNet built on
    MinkowskiEngine, with a 4-level strided encoder (residual blocks) and a
    4-level decoder (transposed convs + skip concatenations).

    Subclasses select the residual block type (``BLOCK``), per-stage channel
    widths (``PLANES``, 4 encoder + 4 decoder stages) and depths (``LAYERS``).
    """
    BLOCK = None  # residual block class; must be set by subclasses
    PLANES = (32, 64, 128, 256, 256, 256, 256, 256)  # channels per stage
    DILATIONS = (1, 1, 1, 1, 1, 1, 1, 1)
    LAYERS = (2, 2, 2, 2, 2, 2, 2, 2)  # residual blocks per stage
    INIT_DIM = 32
    OUT_PIXEL_DIST = 1  # predictions are produced at input resolution
    NORM_TYPE = NormType.BATCH_NORM
    NON_BLOCK_CONV_TYPE = ConvType.SPATIAL_HYPERCUBE
    CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS

    # To use the model, must call initialize_coords before forward pass.
    # Once data is processed, call clear to reset the model before calling initialize_coords
    def __init__(self, in_channels, out_channels, config, D=3, **kwargs):
        super(Res16UNetBase, self).__init__(in_channels, out_channels, config, D)

    def network_initialization(self, in_channels, out_channels, config, D):
        """Build all encoder/decoder layers (invoked by ResNetBase.__init__)."""
        # Setup net_metadata
        dilations = self.DILATIONS
        bn_momentum = config['bn_momentum']

        def space_n_time_m(n, m):
            # For D==4 (space+time) use size n on the three spatial axes and
            # m on the temporal axis; for D==3 just the scalar n.
            return n if D == 3 else [n, n, n, m]

        if D == 4:
            self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1)

        # Output of the first conv concated to conv6
        self.inplanes = self.INIT_DIM
        self.conv0p1s1 = conv(
            in_channels,
            self.inplanes,
            kernel_size=space_n_time_m(config['conv1_kernel_size'], 1),
            stride=1,
            dilation=1,
            conv_type=self.NON_BLOCK_CONV_TYPE,
            D=D)

        self.bn0 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)

        # Encoder stage 1: stride-2 downsample + residual blocks
        self.conv1p1s2 = conv(
            self.inplanes,
            self.inplanes,
            kernel_size=space_n_time_m(2, 1),
            stride=space_n_time_m(2, 1),
            dilation=1,
            conv_type=self.NON_BLOCK_CONV_TYPE,
            D=D)
        self.bn1 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)

        self.block1 = self._make_layer(
            self.BLOCK,
            self.PLANES[0],
            self.LAYERS[0],
            dilation=dilations[0],
            norm_type=self.NORM_TYPE,
            bn_momentum=bn_momentum)

        # Encoder stage 2
        self.conv2p2s2 = conv(
            self.inplanes,
            self.inplanes,
            kernel_size=space_n_time_m(2, 1),
            stride=space_n_time_m(2, 1),
            dilation=1,
            conv_type=self.NON_BLOCK_CONV_TYPE,
            D=D)
        self.bn2 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)

        self.block2 = self._make_layer(
            self.BLOCK,
            self.PLANES[1],
            self.LAYERS[1],
            dilation=dilations[1],
            norm_type=self.NORM_TYPE,
            bn_momentum=bn_momentum)

        # Encoder stage 3
        self.conv3p4s2 = conv(
            self.inplanes,
            self.inplanes,
            kernel_size=space_n_time_m(2, 1),
            stride=space_n_time_m(2, 1),
            dilation=1,
            conv_type=self.NON_BLOCK_CONV_TYPE,
            D=D)

        self.bn3 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
        self.block3 = self._make_layer(
            self.BLOCK,
            self.PLANES[2],
            self.LAYERS[2],
            dilation=dilations[2],
            norm_type=self.NORM_TYPE,
            bn_momentum=bn_momentum)

        # Encoder stage 4 (bottleneck, pixel_dist=16)
        self.conv4p8s2 = conv(
            self.inplanes,
            self.inplanes,
            kernel_size=space_n_time_m(2, 1),
            stride=space_n_time_m(2, 1),
            dilation=1,
            conv_type=self.NON_BLOCK_CONV_TYPE,
            D=D)
        self.bn4 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
        self.block4 = self._make_layer(
            self.BLOCK,
            self.PLANES[3],
            self.LAYERS[3],
            dilation=dilations[3],
            norm_type=self.NORM_TYPE,
            bn_momentum=bn_momentum)

        # Decoder stage 1: upsample then concatenate the stage-3 skip,
        # so the next block's input width is PLANES[4] + skip channels.
        self.convtr4p16s2 = conv_tr(
            self.inplanes,
            self.PLANES[4],
            kernel_size=space_n_time_m(2, 1),
            upsample_stride=space_n_time_m(2, 1),
            dilation=1,
            bias=False,
            conv_type=self.NON_BLOCK_CONV_TYPE,
            D=D)
        self.bntr4 = get_norm(self.NORM_TYPE, self.PLANES[4], D, bn_momentum=bn_momentum)

        self.inplanes = self.PLANES[4] + self.PLANES[2] * self.BLOCK.expansion
        self.block5 = self._make_layer(
            self.BLOCK,
            self.PLANES[4],
            self.LAYERS[4],
            dilation=dilations[4],
            norm_type=self.NORM_TYPE,
            bn_momentum=bn_momentum)

        # Decoder stage 2 (skip from stage 2)
        self.convtr5p8s2 = conv_tr(
            self.inplanes,
            self.PLANES[5],
            kernel_size=space_n_time_m(2, 1),
            upsample_stride=space_n_time_m(2, 1),
            dilation=1,
            bias=False,
            conv_type=self.NON_BLOCK_CONV_TYPE,
            D=D)
        self.bntr5 = get_norm(self.NORM_TYPE, self.PLANES[5], D, bn_momentum=bn_momentum)

        self.inplanes = self.PLANES[5] + self.PLANES[1] * self.BLOCK.expansion
        self.block6 = self._make_layer(
            self.BLOCK,
            self.PLANES[5],
            self.LAYERS[5],
            dilation=dilations[5],
            norm_type=self.NORM_TYPE,
            bn_momentum=bn_momentum)

        # Decoder stage 3 (skip from stage 1)
        self.convtr6p4s2 = conv_tr(
            self.inplanes,
            self.PLANES[6],
            kernel_size=space_n_time_m(2, 1),
            upsample_stride=space_n_time_m(2, 1),
            dilation=1,
            bias=False,
            conv_type=self.NON_BLOCK_CONV_TYPE,
            D=D)
        self.bntr6 = get_norm(self.NORM_TYPE, self.PLANES[6], D, bn_momentum=bn_momentum)

        self.inplanes = self.PLANES[6] + self.PLANES[0] * self.BLOCK.expansion
        self.block7 = self._make_layer(
            self.BLOCK,
            self.PLANES[6],
            self.LAYERS[6],
            dilation=dilations[6],
            norm_type=self.NORM_TYPE,
            bn_momentum=bn_momentum)

        # Decoder stage 4 (skip from the stem conv output)
        self.convtr7p2s2 = conv_tr(
            self.inplanes,
            self.PLANES[7],
            kernel_size=space_n_time_m(2, 1),
            upsample_stride=space_n_time_m(2, 1),
            dilation=1,
            bias=False,
            conv_type=self.NON_BLOCK_CONV_TYPE,
            D=D)
        self.bntr7 = get_norm(self.NORM_TYPE, self.PLANES[7], D, bn_momentum=bn_momentum)

        self.inplanes = self.PLANES[7] + self.INIT_DIM
        self.block8 = self._make_layer(
            self.BLOCK,
            self.PLANES[7],
            self.LAYERS[7],
            dilation=dilations[7],
            norm_type=self.NORM_TYPE,
            bn_momentum=bn_momentum)

        # 1x1 projection to the output classes
        self.final = conv(self.PLANES[7], out_channels, kernel_size=1, stride=1, bias=True, D=D)
        self.relu = MinkowskiReLU(inplace=True)

    def forward(self, x):
        """Run the UNet: encode with strided convs, decode with transposed
        convs while concatenating the stored skip tensors."""
        out = self.conv0p1s1(x)
        out = self.bn0(out)
        out_p1 = self.relu(out)

        out = self.conv1p1s2(out_p1)
        out = self.bn1(out)
        out = self.relu(out)
        out_b1p2 = self.block1(out)

        out = self.conv2p2s2(out_b1p2)
        out = self.bn2(out)
        out = self.relu(out)
        out_b2p4 = self.block2(out)

        out = self.conv3p4s2(out_b2p4)
        out = self.bn3(out)
        out = self.relu(out)
        out_b3p8 = self.block3(out)

        # pixel_dist=16
        out = self.conv4p8s2(out_b3p8)
        out = self.bn4(out)
        out = self.relu(out)
        out = self.block4(out)

        # pixel_dist=8
        out = self.convtr4p16s2(out)
        out = self.bntr4(out)
        out = self.relu(out)

        out = me.cat(out, out_b3p8)
        out = self.block5(out)

        # pixel_dist=4
        out = self.convtr5p8s2(out)
        out = self.bntr5(out)
        out = self.relu(out)

        out = me.cat(out, out_b2p4)
        out = self.block6(out)

        # pixel_dist=2
        out = self.convtr6p4s2(out)
        out = self.bntr6(out)
        out = self.relu(out)

        out = me.cat(out, out_b1p2)
        out = self.block7(out)

        # pixel_dist=1
        out = self.convtr7p2s2(out)
        out = self.bntr7(out)
        out = self.relu(out)

        out = me.cat(out, out_p1)
        out = self.block8(out)

        return self.final(out)
class Res16UNet14(Res16UNetBase):
    """Shallowest variant: one BasicBlock per stage."""
    BLOCK = BasicBlock
    LAYERS = (1, 1, 1, 1, 1, 1, 1, 1)
class Res16UNet18(Res16UNetBase):
    """Two BasicBlocks per stage (ResNet18-like depth)."""
    BLOCK = BasicBlock
    LAYERS = (2, 2, 2, 2, 2, 2, 2, 2)
class Res16UNet34(Res16UNetBase):
    """ResNet34-style encoder depths with BasicBlocks."""
    BLOCK = BasicBlock
    LAYERS = (2, 3, 4, 6, 2, 2, 2, 2)
class Res16UNet50(Res16UNetBase):
    """ResNet50-style encoder depths with Bottleneck blocks."""
    BLOCK = Bottleneck
    LAYERS = (2, 3, 4, 6, 2, 2, 2, 2)
class Res16UNet101(Res16UNetBase):
    """ResNet101-style encoder depths with Bottleneck blocks."""
    BLOCK = Bottleneck
    LAYERS = (2, 3, 4, 23, 2, 2, 2, 2)
class Res16UNet14A(Res16UNet14):
    """Res16UNet14 with decoder widths (128, 128, 96, 96)."""
    PLANES = (32, 64, 128, 256, 128, 128, 96, 96)
class Res16UNet14A2(Res16UNet14A):
    """14A with two blocks per decoder stage."""
    LAYERS = (1, 1, 1, 1, 2, 2, 2, 2)
class Res16UNet14B(Res16UNet14):
    """Res16UNet14 with a uniform 128-channel decoder."""
    PLANES = (32, 64, 128, 256, 128, 128, 128, 128)
class Res16UNet14B2(Res16UNet14B):
    """14B with two blocks per decoder stage."""
    LAYERS = (1, 1, 1, 1, 2, 2, 2, 2)
class Res16UNet14B3(Res16UNet14B):
    """14B with two blocks per encoder stage, one per decoder stage."""
    LAYERS = (2, 2, 2, 2, 1, 1, 1, 1)
class Res16UNet14C(Res16UNet14):
    """Res16UNet14 with decoder widths (192, 192, 128, 128)."""
    PLANES = (32, 64, 128, 256, 192, 192, 128, 128)
class Res16UNet14D(Res16UNet14):
    """Res16UNet14 with a wide uniform 384-channel decoder."""
    PLANES = (32, 64, 128, 256, 384, 384, 384, 384)
class Res16UNet18A(Res16UNet18):
    """Res16UNet18 with decoder widths (128, 128, 96, 96)."""
    PLANES = (32, 64, 128, 256, 128, 128, 96, 96)
class Res16UNet18B(Res16UNet18):
    """Res16UNet18 with a uniform 128-channel decoder."""
    PLANES = (32, 64, 128, 256, 128, 128, 128, 128)
class Res16UNet18D(Res16UNet18):
    """Res16UNet18 with a wide uniform 384-channel decoder."""
    PLANES = (32, 64, 128, 256, 384, 384, 384, 384)
class Res16UNet34A(Res16UNet34):
    """Res16UNet34 with decoder widths (256, 128, 64, 64)."""
    PLANES = (32, 64, 128, 256, 256, 128, 64, 64)
class Res16UNet34B(Res16UNet34):
    """Res16UNet34 with decoder widths (256, 128, 64, 32)."""
    PLANES = (32, 64, 128, 256, 256, 128, 64, 32)
class Res16UNet34C(Res16UNet34):
    """Res16UNet34 with decoder widths (256, 128, 96, 96)."""
    PLANES = (32, 64, 128, 256, 256, 128, 96, 96)
# Experimentally, worse than others
class Res16UNetLN14(Res16UNet14):
    """Res16UNet14 variant using sparse layer norm instead of batch norm."""
    NORM_TYPE = NormType.SPARSE_LAYER_NORM
    BLOCK = BasicBlockLN
class Res16UNetTemporalBase(Res16UNetBase):
    """
    Res16UNet that can take 4D independently. No temporal convolution.
    """
    CONV_TYPE = ConvType.SPATIAL_HYPERCUBE

    def __init__(self, in_channels, out_channels, config, D=4, **kwargs):
        # Same construction as the base class, but defaults to D=4 inputs.
        super(Res16UNetTemporalBase, self).__init__(in_channels, out_channels, config, D, **kwargs)
class Res16UNetTemporal14(Res16UNet14, Res16UNetTemporalBase):
    """4D (temporal) variant of Res16UNet14."""
    pass
class Res16UNetTemporal18(Res16UNet18, Res16UNetTemporalBase):
    """4D (temporal) variant of Res16UNet18."""
    pass
class Res16UNetTemporal34(Res16UNet34, Res16UNetTemporalBase):
    """4D (temporal) variant of Res16UNet34."""
    pass
class Res16UNetTemporal50(Res16UNet50, Res16UNetTemporalBase):
    """4D (temporal) variant of Res16UNet50."""
    pass
class Res16UNetTemporal101(Res16UNet101, Res16UNetTemporalBase):
    """4D (temporal) variant of Res16UNet101."""
    pass
class Res16UNetTemporalIN14(Res16UNetTemporal14):
    """Temporal Res16UNet14 with sparse instance norm."""
    NORM_TYPE = NormType.SPARSE_INSTANCE_NORM
    BLOCK = BasicBlockIN
class Res16UNetTemporalIN18(Res16UNetTemporal18):
    """Temporal Res16UNet18 with sparse instance norm."""
    NORM_TYPE = NormType.SPARSE_INSTANCE_NORM
    BLOCK = BasicBlockIN
class Res16UNetTemporalIN34(Res16UNetTemporal34):
    """Temporal Res16UNet34 with sparse instance norm."""
    NORM_TYPE = NormType.SPARSE_INSTANCE_NORM
    BLOCK = BasicBlockIN
class Res16UNetTemporalIN50(Res16UNetTemporal50):
    """Temporal Res16UNet50 with sparse instance norm (bottleneck blocks)."""
    NORM_TYPE = NormType.SPARSE_INSTANCE_NORM
    BLOCK = BottleneckIN
class Res16UNetTemporalIN101(Res16UNetTemporal101):
    """Temporal Res16UNet101 with sparse instance norm (bottleneck blocks)."""
    NORM_TYPE = NormType.SPARSE_INSTANCE_NORM
    BLOCK = BottleneckIN
class STRes16UNetBase(Res16UNetBase):
    """Spatio-temporal Res16UNet: hypercube convolution in space combined
    with hypercross in time; defaults to D=4 inputs."""
    CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS

    def __init__(self, in_channels, out_channels, config, D=4, **kwargs):
        super(STRes16UNetBase, self).__init__(in_channels, out_channels, config, D, **kwargs)
class STRes16UNet14(STRes16UNetBase, Res16UNet14):
    """Spatio-temporal variant of Res16UNet14."""
    pass
class STRes16UNet18(STRes16UNetBase, Res16UNet18):
    """Spatio-temporal variant of Res16UNet18."""
    pass
class STRes16UNet34(STRes16UNetBase, Res16UNet34):
    """Spatio-temporal variant of Res16UNet34."""
    pass
class STRes16UNet50(STRes16UNetBase, Res16UNet50):
    """Spatio-temporal variant of Res16UNet50."""
    pass
class STRes16UNet101(STRes16UNetBase, Res16UNet101):
    """Spatio-temporal variant of Res16UNet101."""
    pass
class STRes16UNet18A(STRes16UNet18):
    """STRes16UNet18 with decoder widths (128, 128, 96, 96)."""
    PLANES = (32, 64, 128, 256, 128, 128, 96, 96)
class STResTesseract16UNetBase(STRes16UNetBase):
    """Spatio-temporal Res16UNet using full hypercube (tesseract) kernels."""
    CONV_TYPE = ConvType.HYPERCUBE
class STResTesseract16UNet18A(STRes16UNet18A, STResTesseract16UNetBase):
    """Tesseract-kernel variant of STRes16UNet18A."""
    pass
| 26.411085 | 104 | 0.669902 | from models.resnet import ResNetBase, get_norm
from models.modules.common import ConvType, NormType, conv, conv_tr
from models.modules.resnet_block import BasicBlock, Bottleneck, BasicBlockIN, BottleneckIN, BasicBlockLN
from MinkowskiEngine import MinkowskiReLU
import MinkowskiEngine.MinkowskiOps as me
class Res16UNetBase(ResNetBase):
BLOCK = None
PLANES = (32, 64, 128, 256, 256, 256, 256, 256)
DILATIONS = (1, 1, 1, 1, 1, 1, 1, 1)
LAYERS = (2, 2, 2, 2, 2, 2, 2, 2)
INIT_DIM = 32
OUT_PIXEL_DIST = 1
NORM_TYPE = NormType.BATCH_NORM
NON_BLOCK_CONV_TYPE = ConvType.SPATIAL_HYPERCUBE
CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS
def __init__(self, in_channels, out_channels, config, D=3, **kwargs):
super(Res16UNetBase, self).__init__(in_channels, out_channels, config, D)
def network_initialization(self, in_channels, out_channels, config, D):
dilations = self.DILATIONS
bn_momentum = config['bn_momentum']
def space_n_time_m(n, m):
return n if D == 3 else [n, n, n, m]
if D == 4:
self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1)
self.inplanes = self.INIT_DIM
self.conv0p1s1 = conv(
in_channels,
self.inplanes,
kernel_size=space_n_time_m(config['conv1_kernel_size'], 1),
stride=1,
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn0 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.conv1p1s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn1 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block1 = self._make_layer(
self.BLOCK,
self.PLANES[0],
self.LAYERS[0],
dilation=dilations[0],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.conv2p2s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn2 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block2 = self._make_layer(
self.BLOCK,
self.PLANES[1],
self.LAYERS[1],
dilation=dilations[1],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.conv3p4s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn3 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block3 = self._make_layer(
self.BLOCK,
self.PLANES[2],
self.LAYERS[2],
dilation=dilations[2],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.conv4p8s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn4 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block4 = self._make_layer(
self.BLOCK,
self.PLANES[3],
self.LAYERS[3],
dilation=dilations[3],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.convtr4p16s2 = conv_tr(
self.inplanes,
self.PLANES[4],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr4 = get_norm(self.NORM_TYPE, self.PLANES[4], D, bn_momentum=bn_momentum)
self.inplanes = self.PLANES[4] + self.PLANES[2] * self.BLOCK.expansion
self.block5 = self._make_layer(
self.BLOCK,
self.PLANES[4],
self.LAYERS[4],
dilation=dilations[4],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.convtr5p8s2 = conv_tr(
self.inplanes,
self.PLANES[5],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr5 = get_norm(self.NORM_TYPE, self.PLANES[5], D, bn_momentum=bn_momentum)
self.inplanes = self.PLANES[5] + self.PLANES[1] * self.BLOCK.expansion
self.block6 = self._make_layer(
self.BLOCK,
self.PLANES[5],
self.LAYERS[5],
dilation=dilations[5],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.convtr6p4s2 = conv_tr(
self.inplanes,
self.PLANES[6],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr6 = get_norm(self.NORM_TYPE, self.PLANES[6], D, bn_momentum=bn_momentum)
self.inplanes = self.PLANES[6] + self.PLANES[0] * self.BLOCK.expansion
self.block7 = self._make_layer(
self.BLOCK,
self.PLANES[6],
self.LAYERS[6],
dilation=dilations[6],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.convtr7p2s2 = conv_tr(
self.inplanes,
self.PLANES[7],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr7 = get_norm(self.NORM_TYPE, self.PLANES[7], D, bn_momentum=bn_momentum)
self.inplanes = self.PLANES[7] + self.INIT_DIM
self.block8 = self._make_layer(
self.BLOCK,
self.PLANES[7],
self.LAYERS[7],
dilation=dilations[7],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.final = conv(self.PLANES[7], out_channels, kernel_size=1, stride=1, bias=True, D=D)
self.relu = MinkowskiReLU(inplace=True)
def forward(self, x):
out = self.conv0p1s1(x)
out = self.bn0(out)
out_p1 = self.relu(out)
out = self.conv1p1s2(out_p1)
out = self.bn1(out)
out = self.relu(out)
out_b1p2 = self.block1(out)
out = self.conv2p2s2(out_b1p2)
out = self.bn2(out)
out = self.relu(out)
out_b2p4 = self.block2(out)
out = self.conv3p4s2(out_b2p4)
out = self.bn3(out)
out = self.relu(out)
out_b3p8 = self.block3(out)
out = self.conv4p8s2(out_b3p8)
out = self.bn4(out)
out = self.relu(out)
out = self.block4(out)
out = self.convtr4p16s2(out)
out = self.bntr4(out)
out = self.relu(out)
out = me.cat(out, out_b3p8)
out = self.block5(out)
out = self.convtr5p8s2(out)
out = self.bntr5(out)
out = self.relu(out)
out = me.cat(out, out_b2p4)
out = self.block6(out)
out = self.convtr6p4s2(out)
out = self.bntr6(out)
out = self.relu(out)
out = me.cat(out, out_b1p2)
out = self.block7(out)
out = self.convtr7p2s2(out)
out = self.bntr7(out)
out = self.relu(out)
out = me.cat(out, out_p1)
out = self.block8(out)
return self.final(out)
class Res16UNet14(Res16UNetBase):
BLOCK = BasicBlock
LAYERS = (1, 1, 1, 1, 1, 1, 1, 1)
class Res16UNet18(Res16UNetBase):
BLOCK = BasicBlock
LAYERS = (2, 2, 2, 2, 2, 2, 2, 2)
class Res16UNet34(Res16UNetBase):
BLOCK = BasicBlock
LAYERS = (2, 3, 4, 6, 2, 2, 2, 2)
class Res16UNet50(Res16UNetBase):
BLOCK = Bottleneck
LAYERS = (2, 3, 4, 6, 2, 2, 2, 2)
class Res16UNet101(Res16UNetBase):
BLOCK = Bottleneck
LAYERS = (2, 3, 4, 23, 2, 2, 2, 2)
class Res16UNet14A(Res16UNet14):
PLANES = (32, 64, 128, 256, 128, 128, 96, 96)
class Res16UNet14A2(Res16UNet14A):
LAYERS = (1, 1, 1, 1, 2, 2, 2, 2)
class Res16UNet14B(Res16UNet14):
PLANES = (32, 64, 128, 256, 128, 128, 128, 128)
class Res16UNet14B2(Res16UNet14B):
LAYERS = (1, 1, 1, 1, 2, 2, 2, 2)
class Res16UNet14B3(Res16UNet14B):
LAYERS = (2, 2, 2, 2, 1, 1, 1, 1)
class Res16UNet14C(Res16UNet14):
PLANES = (32, 64, 128, 256, 192, 192, 128, 128)
class Res16UNet14D(Res16UNet14):
PLANES = (32, 64, 128, 256, 384, 384, 384, 384)
class Res16UNet18A(Res16UNet18):
PLANES = (32, 64, 128, 256, 128, 128, 96, 96)
class Res16UNet18B(Res16UNet18):
PLANES = (32, 64, 128, 256, 128, 128, 128, 128)
class Res16UNet18D(Res16UNet18):
PLANES = (32, 64, 128, 256, 384, 384, 384, 384)
class Res16UNet34A(Res16UNet34):
PLANES = (32, 64, 128, 256, 256, 128, 64, 64)
class Res16UNet34B(Res16UNet34):
PLANES = (32, 64, 128, 256, 256, 128, 64, 32)
class Res16UNet34C(Res16UNet34):
PLANES = (32, 64, 128, 256, 256, 128, 96, 96)
class Res16UNetLN14(Res16UNet14):
NORM_TYPE = NormType.SPARSE_LAYER_NORM
BLOCK = BasicBlockLN
class Res16UNetTemporalBase(Res16UNetBase):
CONV_TYPE = ConvType.SPATIAL_HYPERCUBE
def __init__(self, in_channels, out_channels, config, D=4, **kwargs):
super(Res16UNetTemporalBase, self).__init__(in_channels, out_channels, config, D, **kwargs)
class Res16UNetTemporal14(Res16UNet14, Res16UNetTemporalBase):
pass
class Res16UNetTemporal18(Res16UNet18, Res16UNetTemporalBase):
pass
class Res16UNetTemporal34(Res16UNet34, Res16UNetTemporalBase):
pass
class Res16UNetTemporal50(Res16UNet50, Res16UNetTemporalBase):
pass
class Res16UNetTemporal101(Res16UNet101, Res16UNetTemporalBase):
pass
class Res16UNetTemporalIN14(Res16UNetTemporal14):
NORM_TYPE = NormType.SPARSE_INSTANCE_NORM
BLOCK = BasicBlockIN
class Res16UNetTemporalIN18(Res16UNetTemporal18):
NORM_TYPE = NormType.SPARSE_INSTANCE_NORM
BLOCK = BasicBlockIN
class Res16UNetTemporalIN34(Res16UNetTemporal34):
NORM_TYPE = NormType.SPARSE_INSTANCE_NORM
BLOCK = BasicBlockIN
class Res16UNetTemporalIN50(Res16UNetTemporal50):
NORM_TYPE = NormType.SPARSE_INSTANCE_NORM
BLOCK = BottleneckIN
class Res16UNetTemporalIN101(Res16UNetTemporal101):
NORM_TYPE = NormType.SPARSE_INSTANCE_NORM
BLOCK = BottleneckIN
class STRes16UNetBase(Res16UNetBase):
CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS
def __init__(self, in_channels, out_channels, config, D=4, **kwargs):
super(STRes16UNetBase, self).__init__(in_channels, out_channels, config, D, **kwargs)
class STRes16UNet14(STRes16UNetBase, Res16UNet14):
pass
class STRes16UNet18(STRes16UNetBase, Res16UNet18):
pass
class STRes16UNet34(STRes16UNetBase, Res16UNet34):
pass
class STRes16UNet50(STRes16UNetBase, Res16UNet50):
pass
class STRes16UNet101(STRes16UNetBase, Res16UNet101):
pass
class STRes16UNet18A(STRes16UNet18):
PLANES = (32, 64, 128, 256, 128, 128, 96, 96)
class STResTesseract16UNetBase(STRes16UNetBase):
CONV_TYPE = ConvType.HYPERCUBE
class STResTesseract16UNet18A(STRes16UNet18A, STResTesseract16UNetBase):
pass
| true | true |
f71b38a82f278340c42a84945a9e9d87c7755673 | 5,609 | py | Python | pinyinsplit.py | throput/pinyinsplit | e500da5b4b37e4d7762790825e1efd1e6e0f4765 | [
"MIT"
] | 5 | 2018-11-12T19:33:37.000Z | 2021-05-26T05:03:48.000Z | pinyinsplit.py | throput/pinyinsplit | e500da5b4b37e4d7762790825e1efd1e6e0f4765 | [
"MIT"
] | null | null | null | pinyinsplit.py | throput/pinyinsplit | e500da5b4b37e4d7762790825e1efd1e6e0f4765 | [
"MIT"
] | 1 | 2021-08-10T07:15:18.000Z | 2021-08-10T07:15:18.000Z | from pygtrie import CharTrie
import copy
"""
Split a Chinese Pinyin phrase into a list of possible permutations of Pinyin words.
For example,
>>> from pinyinsplit import PinyinSplit
>>> pys = PinyinSplit()
>>> pys.split('XiangGangDaXue')
[['Xiang', 'Gang', 'Da', 'Xue'], ['Xiang', 'Gang', 'Da', 'Xu', 'e'], ['Xi', 'ang', 'Gang', 'Da', 'Xue'], ['Xi', 'ang', 'Gang', 'Da', 'Xu', 'e']]
"""
class PinyinSplit:
    """Split a Chinese Pinyin phrase into a list of possible permutations of Pinyin words.

    It returns a list of all possible permutations of valid Pinyin words.
    If the Pinyin phrase cannot be exhaustively split into valid Pinyin words,
    an empty list will be returned.

    >>> pys = PinyinSplit()
    >>> pys.split('shediaoyingxiongchuan')
    [['she', 'diao', 'ying', 'xiong', 'chuan'], ['she', 'diao', 'ying', 'xiong', 'chu', 'an'], ['she', 'di', 'ao', 'ying', 'xiong', 'chuan'], ['she', 'di', 'ao', 'ying', 'xiong', 'chu', 'an']]
    >>> pys.split('shediaoyingxiongchuanxyz')
    []
    """

    # All valid Pinyin syllables (lower case).
    pylist = [
        'a', 'ai', 'an', 'ang', 'ao',
        'ba', 'bai', 'ban', 'bang', 'bao', 'bei', 'ben', 'beng',
        'bi', 'bian', 'biang', 'biao', 'bie', 'bin', 'bing', 'bo', 'bu',
        'ca', 'cai', 'can', 'cang', 'cao', 'ce', 'cen', 'ceng',
        'cha', 'chai', 'chan', 'chang', 'chao', 'che', 'chen', 'cheng',
        'chi', 'chong', 'chou', 'chu', 'chua', 'chuai', 'chuan', 'chuang', 'chui', 'chun', 'chuo',
        'ci', 'cong', 'cou', 'cu', 'cuan', 'cui', 'cun', 'cuo',
        'da', 'dai', 'dan', 'dang', 'dao', 'de', 'dei', 'den', 'deng',
        'di', 'dia', 'dian', 'diang', 'diao', 'die', 'ding', 'diu',
        'dong', 'dou', 'du', 'duan', 'dui', 'dun', 'duo',
        'e', 'ei', 'en', 'eng', 'er',
        'fa', 'fan', 'fang', 'fei', 'fen', 'feng', 'fiao',
        'fo', 'fou', 'fu', 'ga', 'gai', 'gan', 'gang', 'gao',
        'ge', 'gei', 'gen', 'geng', 'gong', 'gou',
        'gu', 'gua', 'guai', 'guan', 'guang', 'gui', 'gun', 'guo',
        'ha', 'hai', 'han', 'hang', 'hao', 'he', 'hei', 'hen', 'heng',
        'hong', 'hou', 'hu', 'hua', 'huai', 'huan', 'huang', 'hui', 'hun', 'huo',
        'ji', 'jia', 'jian', 'jiang', 'jiao', 'jie', 'jin', 'jing', 'jiong', 'jiu', 'ju', 'juan', 'jue', 'jun',
        'ka', 'kai', 'kan', 'kang', 'kao', 'ke', 'kei', 'ken', 'keng',
        'kong', 'kou', 'ku', 'kua', 'kuai', 'kuan', 'kuang', 'kui', 'kun', 'kuo',
        'la', 'lai', 'lan', 'lang', 'lao', 'le', 'lei', 'leng',
        'li', 'lia', 'lian', 'liang', 'liao', 'lie', 'lin', 'ling', 'liu', 'long', 'lou',
        'lu', 'luan', 'lue', 'lun', 'luo', 'lv', 'lve', 'lvn', 'lü', 'lüe', 'lün',
        'ma', 'mai', 'man', 'mang', 'mao', 'me', 'mei', 'men', 'meng',
        'mi', 'mian', 'miao', 'mie', 'min', 'ming', 'miu', 'mo', 'mou', 'mu',
        'na', 'nai', 'nan', 'nang', 'nao', 'ne', 'nei', 'nen', 'neng',
        'ni', 'nia', 'nian', 'niang', 'niao', 'nie', 'nin', 'ning', 'niu',
        'nong', 'nou', 'nu', 'nuan', 'nue', 'nun', 'nuo', 'nv', 'nve', 'nü', 'nüe', 'ou',
        'pa', 'pai', 'pan', 'pang', 'pao', 'pei', 'pen', 'peng',
        'pi', 'pian', 'piao', 'pie', 'pin', 'ping', 'po', 'pou', 'pu',
        'qi', 'qia', 'qian', 'qiang', 'qiao', 'qie',
        'qin', 'qing', 'qiong', 'qiu', 'qu', 'quan', 'que', 'qun',
        'ran', 'rang', 'rao', 're', 'ren', 'reng', 'ri', 'rong', 'rou',
        'ru', 'rua', 'ruan', 'rui', 'run', 'ruo',
        'sa', 'sai', 'san', 'sang', 'sao', 'se', 'sei', 'sen', 'seng',
        'sha', 'shai', 'shan', 'shang', 'shao', 'she', 'shei', 'shen', 'sheng', 'shi',
        'shong', 'shou', 'shu', 'shua', 'shuai', 'shuan', 'shuang', 'shui', 'shun', 'shuo',
        'si', 'song', 'sou', 'su', 'suan', 'sui', 'sun', 'suo',
        'ta', 'tai', 'tan', 'tang', 'tao', 'te', 'tei', 'teng',
        'ti', 'tian', 'tiao', 'tie', 'ting', 'tong', 'tou',
        'tu', 'tuan', 'tui', 'tun', 'tuo',
        'wa', 'wai', 'wan', 'wang', 'wei', 'wen', 'weng', 'wo', 'wu',
        'xi', 'xia', 'xian', 'xiang', 'xiao', 'xie', 'xin', 'xing', 'xiong', 'xiu', 'xu', 'xuan', 'xue', 'xun',
        'ya', 'yai', 'yan', 'yang', 'yao', 'ye', 'yi', 'yin', 'ying',
        'yo', 'yong', 'you', 'yu', 'yuan', 'yue', 'yun',
        'za', 'zai', 'zan', 'zang', 'zao', 'ze', 'zei', 'zen', 'zeng',
        'zha', 'zhai', 'zhan', 'zhang', 'zhao', 'zhe', 'zhei', 'zhen', 'zheng',
        'zhi', 'zhong', 'zhou', 'zhu', 'zhua', 'zhuai', 'zhuan', 'zhuang', 'zhui', 'zhun', 'zhuo',
        'zi', 'zong', 'zou', 'zu', 'zuan', 'zui', 'zun', 'zuo', 'ê'
    ]

    def __init__(self):
        """Index the syllable list for O(1) prefix membership tests.

        Pinyin syllables are at most a few characters long, so a frozenset
        plus a bounded prefix scan replaces the previous third-party trie
        with no loss of speed and no external dependency.
        """
        self.syllables = frozenset(self.pylist)
        self.max_syllable_len = max(len(s) for s in self.pylist)

    def split(self, phrase):
        """Return every way to split *phrase* into valid Pinyin syllables.

        Matching is case-insensitive but the returned words keep the
        original casing.  Returns an empty list when the phrase cannot be
        split exhaustively into valid syllables.
        """
        phrase_lc = phrase.lower()
        results = []
        # Depth-first search; each stack entry is (remaining text,
        # lower-cased remaining text, syllables matched so far).
        stack = []
        if phrase:
            stack.append((phrase, phrase_lc, []))
        while stack:
            text, text_lc, words = stack.pop()
            # Try candidate prefix lengths shortest-first so the result
            # ordering matches the trie-based original (see doctests).
            for n in range(1, min(self.max_syllable_len, len(text_lc)) + 1):
                if text_lc[:n] not in self.syllables:
                    continue
                # Shallow copy is sufficient: accumulated words are strings,
                # so the previous copy.deepcopy was pure overhead.
                words_copy = words + [text[:n]]
                if text_lc[n:]:
                    stack.append((text[n:], text_lc[n:], words_copy))
                else:
                    results.append(words_copy)
        return results
if __name__ == "__main__":
    # Running this module directly executes the embedded doctest examples.
    import doctest
doctest.testmod() | 50.080357 | 192 | 0.456409 | from pygtrie import CharTrie
import copy
class PinyinSplit:
    """Exhaustively split a Pinyin phrase into every possible sequence of valid Pinyin syllables."""

    # All valid Pinyin syllables (lower case).
    pylist = [
        'a', 'ai', 'an', 'ang', 'ao',
        'ba', 'bai', 'ban', 'bang', 'bao', 'bei', 'ben', 'beng',
        'bi', 'bian', 'biang', 'biao', 'bie', 'bin', 'bing', 'bo', 'bu',
        'ca', 'cai', 'can', 'cang', 'cao', 'ce', 'cen', 'ceng',
        'cha', 'chai', 'chan', 'chang', 'chao', 'che', 'chen', 'cheng',
        'chi', 'chong', 'chou', 'chu', 'chua', 'chuai', 'chuan', 'chuang', 'chui', 'chun', 'chuo',
        'ci', 'cong', 'cou', 'cu', 'cuan', 'cui', 'cun', 'cuo',
        'da', 'dai', 'dan', 'dang', 'dao', 'de', 'dei', 'den', 'deng',
        'di', 'dia', 'dian', 'diang', 'diao', 'die', 'ding', 'diu',
        'dong', 'dou', 'du', 'duan', 'dui', 'dun', 'duo',
        'e', 'ei', 'en', 'eng', 'er',
        'fa', 'fan', 'fang', 'fei', 'fen', 'feng', 'fiao',
        'fo', 'fou', 'fu', 'ga', 'gai', 'gan', 'gang', 'gao',
        'ge', 'gei', 'gen', 'geng', 'gong', 'gou',
        'gu', 'gua', 'guai', 'guan', 'guang', 'gui', 'gun', 'guo',
        'ha', 'hai', 'han', 'hang', 'hao', 'he', 'hei', 'hen', 'heng',
        'hong', 'hou', 'hu', 'hua', 'huai', 'huan', 'huang', 'hui', 'hun', 'huo',
        'ji', 'jia', 'jian', 'jiang', 'jiao', 'jie', 'jin', 'jing', 'jiong', 'jiu', 'ju', 'juan', 'jue', 'jun',
        'ka', 'kai', 'kan', 'kang', 'kao', 'ke', 'kei', 'ken', 'keng',
        'kong', 'kou', 'ku', 'kua', 'kuai', 'kuan', 'kuang', 'kui', 'kun', 'kuo',
        'la', 'lai', 'lan', 'lang', 'lao', 'le', 'lei', 'leng',
        'li', 'lia', 'lian', 'liang', 'liao', 'lie', 'lin', 'ling', 'liu', 'long', 'lou',
        'lu', 'luan', 'lue', 'lun', 'luo', 'lv', 'lve', 'lvn', 'lü', 'lüe', 'lün',
        'ma', 'mai', 'man', 'mang', 'mao', 'me', 'mei', 'men', 'meng',
        'mi', 'mian', 'miao', 'mie', 'min', 'ming', 'miu', 'mo', 'mou', 'mu',
        'na', 'nai', 'nan', 'nang', 'nao', 'ne', 'nei', 'nen', 'neng',
        'ni', 'nia', 'nian', 'niang', 'niao', 'nie', 'nin', 'ning', 'niu',
        'nong', 'nou', 'nu', 'nuan', 'nue', 'nun', 'nuo', 'nv', 'nve', 'nü', 'nüe', 'ou',
        'pa', 'pai', 'pan', 'pang', 'pao', 'pei', 'pen', 'peng',
        'pi', 'pian', 'piao', 'pie', 'pin', 'ping', 'po', 'pou', 'pu',
        'qi', 'qia', 'qian', 'qiang', 'qiao', 'qie',
        'qin', 'qing', 'qiong', 'qiu', 'qu', 'quan', 'que', 'qun',
        'ran', 'rang', 'rao', 're', 'ren', 'reng', 'ri', 'rong', 'rou',
        'ru', 'rua', 'ruan', 'rui', 'run', 'ruo',
        'sa', 'sai', 'san', 'sang', 'sao', 'se', 'sei', 'sen', 'seng',
        'sha', 'shai', 'shan', 'shang', 'shao', 'she', 'shei', 'shen', 'sheng', 'shi',
        'shong', 'shou', 'shu', 'shua', 'shuai', 'shuan', 'shuang', 'shui', 'shun', 'shuo',
        'si', 'song', 'sou', 'su', 'suan', 'sui', 'sun', 'suo',
        'ta', 'tai', 'tan', 'tang', 'tao', 'te', 'tei', 'teng',
        'ti', 'tian', 'tiao', 'tie', 'ting', 'tong', 'tou',
        'tu', 'tuan', 'tui', 'tun', 'tuo',
        'wa', 'wai', 'wan', 'wang', 'wei', 'wen', 'weng', 'wo', 'wu',
        'xi', 'xia', 'xian', 'xiang', 'xiao', 'xie', 'xin', 'xing', 'xiong', 'xiu', 'xu', 'xuan', 'xue', 'xun',
        'ya', 'yai', 'yan', 'yang', 'yao', 'ye', 'yi', 'yin', 'ying',
        'yo', 'yong', 'you', 'yu', 'yuan', 'yue', 'yun',
        'za', 'zai', 'zan', 'zang', 'zao', 'ze', 'zei', 'zen', 'zeng',
        'zha', 'zhai', 'zhan', 'zhang', 'zhao', 'zhe', 'zhei', 'zhen', 'zheng',
        'zhi', 'zhong', 'zhou', 'zhu', 'zhua', 'zhuai', 'zhuan', 'zhuang', 'zhui', 'zhun', 'zhuo',
        'zi', 'zong', 'zou', 'zu', 'zuan', 'zui', 'zun', 'zuo', 'ê'
    ]

    def __init__(self):
        # Map every known syllable to its length inside a character trie so
        # that all syllable-prefixes of a string can be enumerated at once.
        self.trie = CharTrie()
        for syllable in self.pylist:
            self.trie[syllable] = len(syllable)

    def split(self, phrase):
        """Return all exhaustive segmentations of *phrase* into Pinyin syllables.

        Matching is case-insensitive; returned words keep the original
        casing.  An unsplittable (or empty) phrase yields an empty list.
        """
        results = []
        # Each pending entry: (remaining text, lower-cased remainder,
        # syllables matched so far).
        pending = [(phrase, phrase.lower(), [])] if phrase else []
        while pending:
            text, text_lc, words_so_far = pending.pop()
            # prefixes() yields (syllable, stored length) pairs,
            # shortest prefix first.
            for _, length in self.trie.prefixes(text_lc):
                segmented = words_so_far + [text[:length]]
                remainder = text[length:]
                if remainder:
                    pending.append((remainder, text_lc[length:], segmented))
                else:
                    results.append(segmented)
        return results
if __name__ == "__main__":
    # Running this module directly executes the doctest examples.
    import doctest
doctest.testmod() | true | true |
f71b38de84f34b526f5b92cf304f242735c04cdf | 2,862 | py | Python | scripts/siege.py | clouserw/olympia | 1d5755b08a526372ec66e6bc64ab636018181969 | [
"BSD-3-Clause"
] | 1 | 2015-12-01T03:53:51.000Z | 2015-12-01T03:53:51.000Z | scripts/siege.py | clouserw/olympia | 1d5755b08a526372ec66e6bc64ab636018181969 | [
"BSD-3-Clause"
] | 6 | 2021-02-02T23:08:48.000Z | 2021-09-08T02:47:17.000Z | scripts/siege.py | clouserw/olympia | 1d5755b08a526372ec66e6bc64ab636018181969 | [
"BSD-3-Clause"
] | 1 | 2021-03-13T00:33:12.000Z | 2021-03-13T00:33:12.000Z | """
A script for generating siege files with a bunch of URL variations.
"""
import re
import sys
# Regex that pulls '{placeholder}' names out of a URL template.
part_re = re.compile(r'\{([-\w]+)\}')
# Locales served by addons.mozilla.org; substituted for the {locale} slot.
AMO_LANGUAGES = (
    'af', 'ar', 'ca', 'cs', 'da', 'de', 'el', 'en-US', 'es', 'eu', 'fa', 'fi',
    'fr', 'ga-IE', 'he', 'hu', 'id', 'it', 'ja', 'ko', 'mn', 'nl', 'pl',
    'pt-BR', 'pt-PT', 'ro', 'ru', 'sk', 'sq', 'sr', 'sv-SE', 'uk', 'vi',
    'zh-CN', 'zh-TW',
)
# Candidate values for every placeholder a template may reference; combos()
# enumerates the cartesian product of the entries a template actually uses.
config = {
    'base': [],
    'locale': AMO_LANGUAGES,
    'app': ['firefox'],
    'extension-slug': [''] + """
        alerts-and-updates appearance bookmarks download-management
        feeds-news-blogging language-support photos-music-videos
        privacy-security social-communication tabs toolbars web-development
        other""".split(),
    'theme-slug': [''] + """
        animals compact large miscellaneous modern nature os-integration retro
        sports""".split(),
    'theme-sort': 'name updated created downloads rating'.split(),
    'page': '1 2'.split(),
    'exp': 'on off'.split(),
    'personas-slug': [''] + """
        abstract causes fashion firefox foxkeh holiday music nature other
        scenery seasonal solid sports websites""".split(),
    'personas-sort': """up-and-coming created popular rating""".split()
}
# Common prefix shared by every expanded URL.
root = '{base}/{locale}/{app}'
# Path templates keyed by a short name usable on the command line;
# `t` is just a shorthand alias for `templates`.
templates = t = {
    'root': '/',
    'extensions': '/extensions/{extension-slug}/',
    'language-tools': '/language-tools',
    'themes': '/themes/{theme-slug}?sort={theme-sort}&page={page}',
    'personas': '/personas/{personas-slug}',
}
# Derived templates built from the base entries above.
t['themes-unreviewed'] = t['themes'] + '&unreviewed={exp}'
t['personas-sort'] = t['personas'] + '?sort={personas-sort}'
t['extensions-sort'] = t['extensions'] + '?sort={theme-sort}'
t['extensions-featured'] = t['extensions'] + 'featured'
# Prepend the common root to every template path.
for key, value in templates.items():
    templates[key] = root + value
def combos(s, parts):
    """Expand template *s* over every combination of `config` values for *parts*.

    *parts* is the list of placeholder names appearing in the template; the
    returned list contains one formatted URL per combination, in config order.
    """
    def expand(template, remaining, bound):
        # Bind one placeholder per recursion level; `bound` accumulates the
        # keyword arguments fed to str.format at the leaves.
        name, rest = remaining[0], remaining[1:]
        out = []
        for choice in config[name]:
            bound[name] = choice
            if rest:
                out.extend(expand(template, rest, bound))
            else:
                out.append(template.format(**bound))
        return out
    return expand(s, parts, {})
def gen(choices=templates):
    """Expand every template in *choices* (default: all templates) into URLs."""
    # Placeholder names like '{locale}' drive the expansion in combos().
    return [url
            for template in choices
            for url in combos(template, part_re.findall(template))]
def main():
    # CLI entry point: the first argument is the base URL; any further
    # arguments name specific templates to expand (default: all of them).
    args = sys.argv
    try:
        base, choices = sys.argv[1], args[2:] or templates.keys()
    except IndexError:
        # No base URL supplied: print usage and exit non-zero.
        # (Python 2 print statements: this script predates Python 3.)
        print 'Usage: python siege.py <BASE> [%s]' % (', '.join(templates))
        print '\nBASE should be something like "http://localhost:8000/z".'
        print 'The remaining arguments are names of url templates.'
        sys.exit(1)
    # Strip trailing slashes so template paths join cleanly onto the base.
    config['base'] = [base.rstrip('/')]
    # One URL per line, ready to be fed to siege.
    print '\n'.join(gen(templates[k] for k in choices))
if __name__ == '__main__':
    # Allow the module to be executed directly as a script.
    main()
| 29.204082 | 78 | 0.571279 | """
A script for generating siege files with a bunch of URL variations.
"""
import re
import sys
# Regex that pulls '{placeholder}' names out of a URL template.
part_re = re.compile(r'\{([-\w]+)\}')
# Locales served by addons.mozilla.org; substituted for the {locale} slot.
AMO_LANGUAGES = (
    'af', 'ar', 'ca', 'cs', 'da', 'de', 'el', 'en-US', 'es', 'eu', 'fa', 'fi',
    'fr', 'ga-IE', 'he', 'hu', 'id', 'it', 'ja', 'ko', 'mn', 'nl', 'pl',
    'pt-BR', 'pt-PT', 'ro', 'ru', 'sk', 'sq', 'sr', 'sv-SE', 'uk', 'vi',
    'zh-CN', 'zh-TW',
)
# Candidate values for every placeholder a template may reference; combos()
# enumerates the cartesian product of the entries a template actually uses.
config = {
    'base': [],
    'locale': AMO_LANGUAGES,
    'app': ['firefox'],
    'extension-slug': [''] + """
        alerts-and-updates appearance bookmarks download-management
        feeds-news-blogging language-support photos-music-videos
        privacy-security social-communication tabs toolbars web-development
        other""".split(),
    'theme-slug': [''] + """
        animals compact large miscellaneous modern nature os-integration retro
        sports""".split(),
    'theme-sort': 'name updated created downloads rating'.split(),
    'page': '1 2'.split(),
    'exp': 'on off'.split(),
    'personas-slug': [''] + """
        abstract causes fashion firefox foxkeh holiday music nature other
        scenery seasonal solid sports websites""".split(),
    'personas-sort': """up-and-coming created popular rating""".split()
}
# Common prefix shared by every expanded URL.
root = '{base}/{locale}/{app}'
# Path templates keyed by a short name usable on the command line;
# `t` is just a shorthand alias for `templates`.
templates = t = {
    'root': '/',
    'extensions': '/extensions/{extension-slug}/',
    'language-tools': '/language-tools',
    'themes': '/themes/{theme-slug}?sort={theme-sort}&page={page}',
    'personas': '/personas/{personas-slug}',
}
# Derived templates built from the base entries above.
t['themes-unreviewed'] = t['themes'] + '&unreviewed={exp}'
t['personas-sort'] = t['personas'] + '?sort={personas-sort}'
t['extensions-sort'] = t['extensions'] + '?sort={theme-sort}'
t['extensions-featured'] = t['extensions'] + 'featured'
# Prepend the common root to every template path.
for key, value in templates.items():
    templates[key] = root + value
def combos(s, parts):
    # Expand template string `s` over every combination of values that
    # `config` defines for the placeholder names listed in `parts`.
    def _rec(s, parts, kw):
        # Bind one placeholder per recursion level; `kw` accumulates the
        # keyword arguments fed to str.format at the leaves.
        key, rest = parts[0], parts[1:]
        rv = []
        for opt in config[key]:
            kw[key] = opt
            if not rest:
                rv.append(s.format(**kw))
            else:
                rv.extend(_rec(s, rest, kw))
        return rv
    return _rec(s, parts, {})
def gen(choices=templates):
    # Expand every template in `choices` (default: all templates) into its
    # concrete URL variations.
    rv = []
    for template in choices:
        parts = part_re.findall(template)  # placeholder names, e.g. 'locale'
        rv.extend(combos(template, parts))
    return rv
def main():
    # CLI entry point: the first argument is the base URL; any further
    # arguments name specific templates to expand (default: all of them).
    args = sys.argv
    try:
        base, choices = sys.argv[1], args[2:] or templates.keys()
    except IndexError:
        # No base URL supplied: print usage and exit non-zero.
        # (Python 2 print statements: this script predates Python 3.)
        print 'Usage: python siege.py <BASE> [%s]' % (', '.join(templates))
        print '\nBASE should be something like "http://localhost:8000/z".'
        print 'The remaining arguments are names of url templates.'
        sys.exit(1)
    # Strip trailing slashes so template paths join cleanly onto the base.
    config['base'] = [base.rstrip('/')]
    # One URL per line, ready to be fed to siege.
    print '\n'.join(gen(templates[k] for k in choices))
if __name__ == '__main__':
    # Allow the module to be executed directly as a script.
    main()
| false | true |
f71b39087446ecc9cc6e057576d78b80e52404ee | 340 | py | Python | src/chenv/__init__.py | jonathan-shemer/chenv | e2b86b7a53031a35def1be21ece87a05d74d2919 | [
"MIT"
] | 3 | 2020-10-15T07:46:48.000Z | 2021-09-06T20:49:05.000Z | src/chenv/__init__.py | jonathan-shemer/chenv | e2b86b7a53031a35def1be21ece87a05d74d2919 | [
"MIT"
] | 5 | 2021-01-27T11:47:12.000Z | 2021-08-30T08:49:37.000Z | src/chenv/__init__.py | jonathan-shemer/chenv | e2b86b7a53031a35def1be21ece87a05d74d2919 | [
"MIT"
] | 1 | 2022-03-15T09:29:19.000Z | 2022-03-15T09:29:19.000Z | """chenv."""
try:
    # Python 3.8+: importlib.metadata ships with the standard library.
    from importlib.metadata import version, PackageNotFoundError  # type: ignore
except ImportError:  # pragma: no cover
    # Older interpreters fall back to the importlib-metadata backport.
    from importlib_metadata import version, PackageNotFoundError  # type: ignore
try:
    # Resolve the installed distribution's version at import time.
    __version__ = version(__name__)
except PackageNotFoundError:  # pragma: no cover
    # Distribution not installed (e.g. running from a source checkout).
    __version__ = "unknown"
| 28.333333 | 80 | 0.744118 | try:
    # Python 3.8+: importlib.metadata ships with the standard library.
    from importlib.metadata import version, PackageNotFoundError
except ImportError:
    # Older interpreters fall back to the importlib-metadata backport.
    from importlib_metadata import version, PackageNotFoundError
try:
    # Resolve the installed distribution's version at import time.
    __version__ = version(__name__)
except PackageNotFoundError:
    # Distribution not installed (e.g. running from a source checkout).
    __version__ = "unknown"
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.