hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f71d1b1bc09a0b50245f87e83c6dbaa93b21299d | 55,644 | py | Python | Lib/idlelib/idle_test/test_configdialog.py | emsoftware/cpython | 457ce60fc70f1c9290023f46fb82b6a490dff32e | [
"0BSD"
] | null | null | null | Lib/idlelib/idle_test/test_configdialog.py | emsoftware/cpython | 457ce60fc70f1c9290023f46fb82b6a490dff32e | [
"0BSD"
] | 5 | 2021-07-01T03:02:20.000Z | 2022-03-01T03:03:48.000Z | Lib/idlelib/idle_test/test_configdialog.py | emsoftware/cpython | 457ce60fc70f1c9290023f46fb82b6a490dff32e | [
"0BSD"
] | null | null | null | """Test configdialog, coverage 94%.
Half the class creates dialog, half works with user customizations.
"""
from idlelib import configdialog
from test.support import requires
requires('gui')
import unittest
from unittest import mock
from idlelib.idle_test.mock_idle import Func
from tkinter import (Tk, StringVar, IntVar, BooleanVar, DISABLED, NORMAL)
from idlelib import config
from idlelib.configdialog import idleConf, changes, tracers
# Tests should not depend on fortuitous user configurations.
# They must not affect actual user .cfg files.
# Use solution from test_config: empty parsers with no filename.
# Saved so tearDownModule can restore the real user configuration.
usercfg = idleConf.userCfg
# Empty in-memory parsers (no filename) mask the user's real .cfg files
# for the duration of the test module.
testcfg = {
    'main': config.IdleUserConfParser(''),
    'highlight': config.IdleUserConfParser(''),
    'keys': config.IdleUserConfParser(''),
    'extensions': config.IdleUserConfParser(''),
}

# Created in setUpModule, destroyed in tearDownModule.
root = None
dialog = None

# Short aliases for the per-config-type sections of the pending-changes
# store; tests assert against these after widget actions.
mainpage = changes['main']
highpage = changes['highlight']
keyspage = changes['keys']
extpage = changes['extensions']
def setUpModule():
    """Create the shared Tk root and ConfigDialog for all test classes.

    idleConf.userCfg must be masked with testcfg *before* the dialog is
    created, because ConfigDialog reads idleConf during construction.
    """
    global root, dialog
    idleConf.userCfg = testcfg
    root = Tk()
    # root.withdraw()    # Comment out, see issue 30870
    dialog = configdialog.ConfigDialog(root, 'Test', _utest=True)
def tearDownModule():
    """Restore the real user configuration and destroy the Tk root.

    Tracers are detached and cleared before root.destroy() so that no
    variable-trace callback fires while widgets are being torn down.
    """
    global root, dialog
    idleConf.userCfg = usercfg
    tracers.detach()
    tracers.clear()
    changes.clear()
    root.update_idletasks()
    root.destroy()
    root = dialog = None
class ConfigDialogTest(unittest.TestCase):
    # Placeholders for tests of the dialog-level config methods.

    def test_deactivate_current_config(self):
        pass

    # NOTE(review): this name lacks the 'test_' prefix, so unittest never
    # runs it — confirm whether it is an intentional placeholder or should
    # be renamed 'test_activate_config_changes'.
    def activate_config_changes(self):
        pass
class ButtonTest(unittest.TestCase):
    """Check that each dialog button invokes the expected actions."""

    def test_click_ok(self):
        # Ok should apply pending changes, then close the dialog.
        dlg = dialog
        dlg.apply = mock.Mock()
        dlg.destroy = mock.Mock()
        dlg.buttons['Ok'].invoke()
        dlg.apply.assert_called_once()
        dlg.destroy.assert_called_once()
        del dlg.destroy, dlg.apply

    def test_click_apply(self):
        # Apply should run the three save/activate steps without closing.
        dlg = dialog
        dlg.deactivate_current_config = mock.Mock()
        dlg.save_all_changed_extensions = mock.Mock()
        dlg.activate_config_changes = mock.Mock()
        dlg.buttons['Apply'].invoke()
        dlg.deactivate_current_config.assert_called_once()
        dlg.save_all_changed_extensions.assert_called_once()
        dlg.activate_config_changes.assert_called_once()
        del dlg.save_all_changed_extensions
        del dlg.activate_config_changes, dlg.deactivate_current_config

    def test_click_cancel(self):
        # Cancel should discard pending changes and close the dialog.
        dlg = dialog
        dlg.destroy = Func()
        changes['main']['something'] = 1
        dlg.buttons['Cancel'].invoke()
        self.assertEqual(changes['main'], {})
        self.assertEqual(dlg.destroy.called, 1)
        del dlg.destroy

    def test_click_help(self):
        # Help should open the preferences help text for the current tab.
        dialog.note.select(dialog.keyspage)
        with mock.patch.object(configdialog, 'view_text',
                               new_callable=Func) as view:
            dialog.buttons['Help'].invoke()
            title = view.kwds['title']
            contents = view.kwds['contents']
        self.assertEqual(title, 'Help for IDLE preferences')
        self.assertTrue(contents.startswith('When you click'))
        self.assertTrue(contents.endswith('a different name.\n'))
class FontPageTest(unittest.TestCase):
    """Test that font widgets enable users to make font changes.

    Test that widget actions set vars, that var changes add three
    options to changes and call set_samples, and that set_samples
    changes the font of both sample boxes.
    """
    @classmethod
    def setUpClass(cls):
        page = cls.page = dialog.fontpage
        dialog.note.select(page)
        page.set_samples = Func()  # Mask instance method.
        page.update()

    @classmethod
    def tearDownClass(cls):
        del cls.page.set_samples  # Unmask instance method.

    def setUp(self):
        # Each test starts with an empty pending-changes store.
        changes.clear()

    def test_load_font_cfg(self):
        # Leave widget load test to human visual check.
        # TODO Improve checks when add IdleConf.get_font_values.
        # Detach tracers so the set() calls below do not record changes.
        tracers.detach()
        d = self.page
        d.font_name.set('Fake')
        d.font_size.set('1')
        d.font_bold.set(True)
        d.set_samples.called = 0
        d.load_font_cfg()
        # Loading replaces the fake values with the configured ones.
        self.assertNotEqual(d.font_name.get(), 'Fake')
        self.assertNotEqual(d.font_size.get(), '1')
        self.assertFalse(d.font_bold.get())
        self.assertEqual(d.set_samples.called, 1)
        tracers.attach()

    def test_fontlist_key(self):
        # Up and Down keys should select a new font.
        d = self.page
        if d.fontlist.size() < 2:
            self.skipTest('need at least 2 fonts')
        fontlist = d.fontlist
        fontlist.activate(0)
        font = d.fontlist.get('active')

        # Test Down key.
        fontlist.focus_force()
        fontlist.update()
        fontlist.event_generate('<Key-Down>')
        fontlist.event_generate('<KeyRelease-Down>')

        down_font = fontlist.get('active')
        self.assertNotEqual(down_font, font)
        self.assertIn(d.font_name.get(), down_font.lower())

        # Test Up key.
        fontlist.focus_force()
        fontlist.update()
        fontlist.event_generate('<Key-Up>')
        fontlist.event_generate('<KeyRelease-Up>')

        up_font = fontlist.get('active')
        self.assertEqual(up_font, font)
        self.assertIn(d.font_name.get(), up_font.lower())

    def test_fontlist_mouse(self):
        # Click on item should select that item.
        d = self.page
        if d.fontlist.size() < 2:
            self.skipTest('need at least 2 fonts')
        fontlist = d.fontlist
        fontlist.activate(0)

        # Select next item in listbox
        fontlist.focus_force()
        fontlist.see(1)
        fontlist.update()
        # Click the center of the item's bounding box.
        x, y, dx, dy = fontlist.bbox(1)
        x += dx // 2
        y += dy // 2
        fontlist.event_generate('<Button-1>', x=x, y=y)
        fontlist.event_generate('<ButtonRelease-1>', x=x, y=y)

        font1 = fontlist.get(1)
        select_font = fontlist.get('anchor')
        self.assertEqual(select_font, font1)
        self.assertIn(d.font_name.get(), font1.lower())

    def test_sizelist(self):
        # Click on number should select that number
        d = self.page
        d.sizelist.variable.set(40)
        self.assertEqual(d.font_size.get(), '40')

    def test_bold_toggle(self):
        # Click on checkbutton should invert it.
        d = self.page
        d.font_bold.set(False)
        d.bold_toggle.invoke()
        self.assertTrue(d.font_bold.get())
        d.bold_toggle.invoke()
        self.assertFalse(d.font_bold.get())

    def test_font_set(self):
        # Test that setting a font Variable results in 3 provisional
        # change entries and a call to set_samples. Use values sure to
        # not be defaults.
        default_font = idleConf.GetFont(root, 'main', 'EditorWindow')
        default_size = str(default_font[1])
        default_bold = default_font[2] == 'bold'
        d = self.page
        d.font_size.set(default_size)
        d.font_bold.set(default_bold)
        d.set_samples.called = 0

        d.font_name.set('Test Font')
        expected = {'EditorWindow': {'font': 'Test Font',
                                     'font-size': default_size,
                                     'font-bold': str(default_bold)}}
        self.assertEqual(mainpage, expected)
        self.assertEqual(d.set_samples.called, 1)
        changes.clear()

        d.font_size.set('20')
        expected = {'EditorWindow': {'font': 'Test Font',
                                     'font-size': '20',
                                     'font-bold': str(default_bold)}}
        self.assertEqual(mainpage, expected)
        self.assertEqual(d.set_samples.called, 2)
        changes.clear()

        d.font_bold.set(not default_bold)
        expected = {'EditorWindow': {'font': 'Test Font',
                                     'font-size': '20',
                                     'font-bold': str(not default_bold)}}
        self.assertEqual(mainpage, expected)
        self.assertEqual(d.set_samples.called, 3)

    def test_set_samples(self):
        d = self.page
        del d.set_samples  # Unmask method for test
        # Plain dicts stand in for the sample widgets; set_samples only
        # assigns their 'font' key.
        orig_samples = d.font_sample, d.highlight_sample
        d.font_sample, d.highlight_sample = {}, {}
        d.font_name.set('test')
        d.font_size.set('5')
        d.font_bold.set(1)
        expected = {'font': ('test', '5', 'bold')}

        # Test set_samples.
        d.set_samples()
        self.assertTrue(d.font_sample == d.highlight_sample == expected)

        d.font_sample, d.highlight_sample = orig_samples
        d.set_samples = Func()  # Re-mask for other tests.
class IndentTest(unittest.TestCase):
    """Check the indent widgets that live on the font tab."""

    @classmethod
    def setUpClass(cls):
        cls.page = dialog.fontpage
        cls.page.update()

    def test_load_tab_cfg(self):
        # Loading the tab config resets space_num to the default of 4.
        page = self.page
        page.space_num.set(16)
        page.load_tab_cfg()
        self.assertEqual(page.space_num.get(), 4)

    def test_indent_scale(self):
        # Moving the scale updates space_num and records a change.
        page = self.page
        changes.clear()
        page.indent_scale.set(20)
        self.assertEqual(page.space_num.get(), 16)
        self.assertEqual(mainpage, {'Indent': {'num-spaces': '16'}})
class HighPageTest(unittest.TestCase):
    """Test that highlight tab widgets enable users to make changes.

    Test that widget actions set vars, that var changes add
    options to changes and that themes work correctly.
    """
    @classmethod
    def setUpClass(cls):
        page = cls.page = dialog.highpage
        dialog.note.select(page)
        # Mask instance methods so tests can count their calls.
        page.set_theme_type = Func()
        page.paint_theme_sample = Func()
        page.set_highlight_target = Func()
        page.set_color_sample = Func()
        page.update()

    @classmethod
    def tearDownClass(cls):
        # Unmask the instance methods masked in setUpClass.
        d = cls.page
        del d.set_theme_type, d.paint_theme_sample
        del d.set_highlight_target, d.set_color_sample

    def setUp(self):
        d = self.page
        # The following is needed for test_load_key_cfg, _delete_custom_keys.
        # This may indicate a defect in some test or function.
        for section in idleConf.GetSectionList('user', 'highlight'):
            idleConf.userCfg['highlight'].remove_section(section)
        changes.clear()
        d.set_theme_type.called = 0
        d.paint_theme_sample.called = 0
        d.set_highlight_target.called = 0
        d.set_color_sample.called = 0

    def test_load_theme_cfg(self):
        # Detach tracers so set() calls below do not record changes.
        tracers.detach()
        d = self.page
        eq = self.assertEqual

        # Use builtin theme with no user themes created.
        idleConf.CurrentTheme = mock.Mock(return_value='IDLE Classic')
        d.load_theme_cfg()
        self.assertTrue(d.theme_source.get())
        # builtinlist sets variable builtin_name to the CurrentTheme default.
        eq(d.builtin_name.get(), 'IDLE Classic')
        eq(d.custom_name.get(), '- no custom themes -')
        eq(d.custom_theme_on.state(), ('disabled',))
        eq(d.set_theme_type.called, 1)
        eq(d.paint_theme_sample.called, 1)
        eq(d.set_highlight_target.called, 1)

        # Builtin theme with non-empty user theme list.
        idleConf.SetOption('highlight', 'test1', 'option', 'value')
        idleConf.SetOption('highlight', 'test2', 'option2', 'value2')
        d.load_theme_cfg()
        eq(d.builtin_name.get(), 'IDLE Classic')
        eq(d.custom_name.get(), 'test1')
        eq(d.set_theme_type.called, 2)
        eq(d.paint_theme_sample.called, 2)
        eq(d.set_highlight_target.called, 2)

        # Use custom theme.
        idleConf.CurrentTheme = mock.Mock(return_value='test2')
        idleConf.SetOption('main', 'Theme', 'default', '0')
        d.load_theme_cfg()
        self.assertFalse(d.theme_source.get())
        eq(d.builtin_name.get(), 'IDLE Classic')
        eq(d.custom_name.get(), 'test2')
        eq(d.set_theme_type.called, 3)
        eq(d.paint_theme_sample.called, 3)
        eq(d.set_highlight_target.called, 3)

        del idleConf.CurrentTheme
        tracers.attach()

    def test_theme_source(self):
        eq = self.assertEqual
        d = self.page
        # Test these separately.
        d.var_changed_builtin_name = Func()
        d.var_changed_custom_name = Func()
        # Builtin selected.
        d.builtin_theme_on.invoke()
        eq(mainpage, {'Theme': {'default': 'True'}})
        eq(d.var_changed_builtin_name.called, 1)
        eq(d.var_changed_custom_name.called, 0)
        changes.clear()

        # Custom selected.
        d.custom_theme_on.state(('!disabled',))
        d.custom_theme_on.invoke()
        self.assertEqual(mainpage, {'Theme': {'default': 'False'}})
        eq(d.var_changed_builtin_name.called, 1)
        eq(d.var_changed_custom_name.called, 1)
        del d.var_changed_builtin_name, d.var_changed_custom_name

    def test_builtin_name(self):
        eq = self.assertEqual
        d = self.page
        item_list = ['IDLE Classic', 'IDLE Dark', 'IDLE New']

        # Not in old_themes, defaults name to first item.
        idleConf.SetOption('main', 'Theme', 'name', 'spam')
        d.builtinlist.SetMenu(item_list, 'IDLE Dark')
        eq(mainpage, {'Theme': {'name': 'IDLE Classic',
                                'name2': 'IDLE Dark'}})
        eq(d.theme_message['text'], 'New theme, see Help')
        eq(d.paint_theme_sample.called, 1)

        # Not in old themes - uses name2.
        changes.clear()
        idleConf.SetOption('main', 'Theme', 'name', 'IDLE New')
        d.builtinlist.SetMenu(item_list, 'IDLE Dark')
        eq(mainpage, {'Theme': {'name2': 'IDLE Dark'}})
        eq(d.theme_message['text'], 'New theme, see Help')
        eq(d.paint_theme_sample.called, 2)

        # Builtin name in old_themes.
        changes.clear()
        d.builtinlist.SetMenu(item_list, 'IDLE Classic')
        eq(mainpage, {'Theme': {'name': 'IDLE Classic', 'name2': ''}})
        eq(d.theme_message['text'], '')
        eq(d.paint_theme_sample.called, 3)

    def test_custom_name(self):
        d = self.page

        # If no selections, doesn't get added.
        d.customlist.SetMenu([], '- no custom themes -')
        self.assertNotIn('Theme', mainpage)
        self.assertEqual(d.paint_theme_sample.called, 0)

        # Custom name selected.
        changes.clear()
        d.customlist.SetMenu(['a', 'b', 'c'], 'c')
        self.assertEqual(mainpage, {'Theme': {'name': 'c'}})
        self.assertEqual(d.paint_theme_sample.called, 1)

    def test_color(self):
        d = self.page
        d.on_new_color_set = Func()
        # self.color is only set in get_color through colorchooser.
        d.color.set('green')
        self.assertEqual(d.on_new_color_set.called, 1)
        del d.on_new_color_set

    def test_highlight_target_list_mouse(self):
        # Set highlight_target through targetlist.
        eq = self.assertEqual
        d = self.page

        d.targetlist.SetMenu(['a', 'b', 'c'], 'c')
        eq(d.highlight_target.get(), 'c')
        eq(d.set_highlight_target.called, 1)

    def test_highlight_target_text_mouse(self):
        # Set highlight_target through clicking highlight_sample.
        eq = self.assertEqual
        d = self.page

        elem = {}
        count = 0
        hs = d.highlight_sample
        hs.focus_force()
        hs.see(1.0)
        hs.update_idletasks()

        def tag_to_element(elem):
            # Invert theme_elements: first tag of each element -> element.
            for element, tag in d.theme_elements.items():
                elem[tag[0]] = element

        def click_it(start):
            # Click the center of the character box at text index `start`.
            x, y, dx, dy = hs.bbox(start)
            x += dx // 2
            y += dy // 2
            hs.event_generate('<Enter>', x=0, y=0)
            hs.event_generate('<Motion>', x=x, y=y)
            hs.event_generate('<ButtonPress-1>', x=x, y=y)
            hs.event_generate('<ButtonRelease-1>', x=x, y=y)

        # Flip theme_elements to make the tag the key.
        tag_to_element(elem)

        # If highlight_sample has a tag that isn't in theme_elements, there
        # will be a KeyError in the test run.
        for tag in hs.tag_names():
            for start_index in hs.tag_ranges(tag)[0::2]:
                count += 1
                click_it(start_index)
                eq(d.highlight_target.get(), elem[tag])
                eq(d.set_highlight_target.called, count)

    def test_highlight_sample_double_click(self):
        # Test double click on highlight_sample.
        eq = self.assertEqual

        d = self.page
        hs = d.highlight_sample
        hs.focus_force()
        hs.see(1.0)
        hs.update_idletasks()

        # Test binding from configdialog.
        hs.event_generate('<Enter>', x=0, y=0)
        hs.event_generate('<Motion>', x=0, y=0)
        # Double click is a sequence of two clicks in a row.
        for _ in range(2):
            hs.event_generate('<ButtonPress-1>', x=0, y=0)
            hs.event_generate('<ButtonRelease-1>', x=0, y=0)

        # The sample is read-only: double-click must not create a selection.
        eq(hs.tag_ranges('sel'), ())

    def test_highlight_sample_b1_motion(self):
        # Test button motion on highlight_sample.
        eq = self.assertEqual

        d = self.page
        hs = d.highlight_sample
        hs.focus_force()
        hs.see(1.0)
        hs.update_idletasks()

        x, y, dx, dy, offset = hs.dlineinfo('1.0')

        # Test binding from configdialog.
        hs.event_generate('<Leave>')
        hs.event_generate('<Enter>')
        hs.event_generate('<Motion>', x=x, y=y)
        hs.event_generate('<ButtonPress-1>', x=x, y=y)
        hs.event_generate('<B1-Motion>', x=dx, y=dy)
        hs.event_generate('<ButtonRelease-1>', x=dx, y=dy)

        # Dragging must not create a selection in the read-only sample.
        eq(hs.tag_ranges('sel'), ())

    def test_set_theme_type(self):
        eq = self.assertEqual
        d = self.page
        del d.set_theme_type  # Unmask the real method for this test.

        # Builtin theme selected.
        d.theme_source.set(True)
        d.set_theme_type()
        eq(d.builtinlist['state'], NORMAL)
        eq(d.customlist['state'], DISABLED)
        eq(d.button_delete_custom.state(), ('disabled',))

        # Custom theme selected.
        d.theme_source.set(False)
        d.set_theme_type()
        eq(d.builtinlist['state'], DISABLED)
        eq(d.custom_theme_on.state(), ('selected',))
        eq(d.customlist['state'], NORMAL)
        eq(d.button_delete_custom.state(), ())
        d.set_theme_type = Func()  # Re-mask for other tests.

    def test_get_color(self):
        eq = self.assertEqual
        d = self.page
        # Replace the modal color chooser with a canned-result Func.
        orig_chooser = configdialog.colorchooser.askcolor
        chooser = configdialog.colorchooser.askcolor = Func()
        gntn = d.get_new_theme_name = Func()

        d.highlight_target.set('Editor Breakpoint')
        d.color.set('#ffffff')

        # Nothing selected.
        chooser.result = (None, None)
        d.button_set_color.invoke()
        eq(d.color.get(), '#ffffff')

        # Selection same as previous color.
        chooser.result = ('', d.style.lookup(d.frame_color_set['style'],
                                             'background'))
        d.button_set_color.invoke()
        eq(d.color.get(), '#ffffff')

        # Select different color.
        chooser.result = ((222.8671875, 0.0, 0.0), '#de0000')

        # Default theme.
        d.color.set('#ffffff')
        d.theme_source.set(True)

        # No theme name selected therefore color not saved.
        gntn.result = ''
        d.button_set_color.invoke()
        eq(gntn.called, 1)
        eq(d.color.get(), '#ffffff')
        # Theme name selected.
        gntn.result = 'My New Theme'
        d.button_set_color.invoke()
        eq(d.custom_name.get(), gntn.result)
        eq(d.color.get(), '#de0000')

        # Custom theme.
        d.color.set('#ffffff')
        d.theme_source.set(False)
        d.button_set_color.invoke()
        eq(d.color.get(), '#de0000')

        del d.get_new_theme_name
        configdialog.colorchooser.askcolor = orig_chooser

    def test_on_new_color_set(self):
        # Setting the color var updates the swatch, the sample tag,
        # and records a provisional change for the custom theme.
        d = self.page
        color = '#3f7cae'
        d.custom_name.set('Python')
        d.highlight_target.set('Selected Text')
        d.fg_bg_toggle.set(True)

        d.color.set(color)
        self.assertEqual(d.style.lookup(d.frame_color_set['style'],
                                        'background'), color)
        self.assertEqual(d.highlight_sample.tag_cget('hilite', 'foreground'),
                         color)
        self.assertEqual(highpage,
                         {'Python': {'hilite-foreground': color}})

    def test_get_new_theme_name(self):
        # Replace the modal SectionName dialog with a canned result.
        orig_sectionname = configdialog.SectionName
        sn = configdialog.SectionName = Func(return_self=True)
        d = self.page

        sn.result = 'New Theme'
        self.assertEqual(d.get_new_theme_name(''), 'New Theme')

        configdialog.SectionName = orig_sectionname

    def test_save_as_new_theme(self):
        d = self.page
        gntn = d.get_new_theme_name = Func()
        d.theme_source.set(True)

        # No name entered.
        gntn.result = ''
        d.button_save_custom.invoke()
        self.assertNotIn(gntn.result, idleConf.userCfg['highlight'])

        # Name entered.
        gntn.result = 'my new theme'
        gntn.called = 0
        self.assertNotIn(gntn.result, idleConf.userCfg['highlight'])
        d.button_save_custom.invoke()
        self.assertIn(gntn.result, idleConf.userCfg['highlight'])

        del d.get_new_theme_name

    def test_create_new_and_save_new(self):
        eq = self.assertEqual
        d = self.page

        # Use default as previously active theme.
        d.theme_source.set(True)
        d.builtin_name.set('IDLE Classic')
        first_new = 'my new custom theme'
        second_new = 'my second custom theme'

        # No changes, so themes are an exact copy.
        # NOTE(review): userCfg maps the four config-type names to parsers,
        # so a theme name is never in it and this assertion is vacuous;
        # idleConf.userCfg['highlight'] was probably intended — confirm.
        self.assertNotIn(first_new, idleConf.userCfg)
        d.create_new(first_new)
        eq(idleConf.GetSectionList('user', 'highlight'), [first_new])
        eq(idleConf.GetThemeDict('default', 'IDLE Classic'),
           idleConf.GetThemeDict('user', first_new))
        eq(d.custom_name.get(), first_new)
        self.assertFalse(d.theme_source.get())  # Use custom set.
        eq(d.set_theme_type.called, 1)

        # Test that changed targets are in new theme.
        changes.add_option('highlight', first_new, 'hit-background', 'yellow')
        # NOTE(review): same vacuous-container concern as above.
        self.assertNotIn(second_new, idleConf.userCfg)
        d.create_new(second_new)
        eq(idleConf.GetSectionList('user', 'highlight'),
           [first_new, second_new])
        self.assertNotEqual(idleConf.GetThemeDict('user', first_new),
                            idleConf.GetThemeDict('user', second_new))
        # Check that difference in themes was in `hit-background` from `changes`.
        idleConf.SetOption('highlight', first_new, 'hit-background', 'yellow')
        eq(idleConf.GetThemeDict('user', first_new),
           idleConf.GetThemeDict('user', second_new))

    def test_set_highlight_target(self):
        eq = self.assertEqual
        d = self.page
        del d.set_highlight_target  # Unmask the real method for this test.

        # Target is cursor.
        d.highlight_target.set('Cursor')
        eq(d.fg_on.state(), ('disabled', 'selected'))
        eq(d.bg_on.state(), ('disabled',))
        self.assertTrue(d.fg_bg_toggle)
        eq(d.set_color_sample.called, 1)

        # Target is not cursor.
        d.highlight_target.set('Comment')
        eq(d.fg_on.state(), ('selected',))
        eq(d.bg_on.state(), ())
        self.assertTrue(d.fg_bg_toggle)
        eq(d.set_color_sample.called, 2)

        d.set_highlight_target = Func()  # Re-mask for other tests.

    def test_set_color_sample_binding(self):
        # Both fg/bg radiobuttons call set_color_sample when invoked.
        d = self.page
        scs = d.set_color_sample

        d.fg_on.invoke()
        self.assertEqual(scs.called, 1)

        d.bg_on.invoke()
        self.assertEqual(scs.called, 2)

    def test_set_color_sample(self):
        # The swatch background tracks the sample tag's color.
        d = self.page
        del d.set_color_sample  # Unmask the real method for this test.
        d.highlight_target.set('Selected Text')
        d.fg_bg_toggle.set(True)

        d.set_color_sample()
        self.assertEqual(
            d.style.lookup(d.frame_color_set['style'], 'background'),
            d.highlight_sample.tag_cget('hilite', 'foreground'))
        d.set_color_sample = Func()  # Re-mask for other tests.

    def test_paint_theme_sample(self):
        eq = self.assertEqual
        page = self.page
        del page.paint_theme_sample  # Delete masking mock.
        hs_tag = page.highlight_sample.tag_cget
        gh = idleConf.GetHighlight

        # Create custom theme based on IDLE Dark.
        page.theme_source.set(True)
        page.builtin_name.set('IDLE Dark')
        theme = 'IDLE Test'
        page.create_new(theme)
        page.set_color_sample.called = 0

        # Base theme with nothing in `changes`.
        page.paint_theme_sample()
        new_console = {'foreground': 'blue',
                       'background': 'yellow',}
        for key, value in new_console.items():
            self.assertNotEqual(hs_tag('console', key), value)
        eq(page.set_color_sample.called, 1)

        # Apply changes.
        for key, value in new_console.items():
            changes.add_option('highlight', theme, 'console-'+key, value)
        page.paint_theme_sample()
        for key, value in new_console.items():
            eq(hs_tag('console', key), value)
        eq(page.set_color_sample.called, 2)

        page.paint_theme_sample = Func()  # Re-mask for other tests.

    def test_delete_custom(self):
        eq = self.assertEqual
        d = self.page
        d.button_delete_custom.state(('!disabled',))
        yesno = d.askyesno = Func()
        dialog.deactivate_current_config = Func()
        dialog.activate_config_changes = Func()

        theme_name = 'spam theme'
        idleConf.userCfg['highlight'].SetOption(theme_name, 'name', 'value')
        highpage[theme_name] = {'option': 'True'}

        theme_name2 = 'other theme'
        idleConf.userCfg['highlight'].SetOption(theme_name2, 'name', 'value')
        highpage[theme_name2] = {'option': 'False'}

        # Force custom theme.
        d.custom_theme_on.state(('!disabled',))
        d.custom_theme_on.invoke()
        d.custom_name.set(theme_name)

        # Cancel deletion.
        yesno.result = False
        d.button_delete_custom.invoke()
        eq(yesno.called, 1)
        eq(highpage[theme_name], {'option': 'True'})
        eq(idleConf.GetSectionList('user', 'highlight'),
           [theme_name, theme_name2])
        eq(dialog.deactivate_current_config.called, 0)
        eq(dialog.activate_config_changes.called, 0)
        eq(d.set_theme_type.called, 0)

        # Confirm deletion.
        yesno.result = True
        d.button_delete_custom.invoke()
        eq(yesno.called, 2)
        self.assertNotIn(theme_name, highpage)
        eq(idleConf.GetSectionList('user', 'highlight'), [theme_name2])
        eq(d.custom_theme_on.state(), ())
        eq(d.custom_name.get(), theme_name2)
        eq(dialog.deactivate_current_config.called, 1)
        eq(dialog.activate_config_changes.called, 1)
        eq(d.set_theme_type.called, 1)

        # Confirm deletion of second theme - empties list.
        d.custom_name.set(theme_name2)
        yesno.result = True
        d.button_delete_custom.invoke()
        eq(yesno.called, 3)
        self.assertNotIn(theme_name, highpage)
        eq(idleConf.GetSectionList('user', 'highlight'), [])
        eq(d.custom_theme_on.state(), ('disabled',))
        eq(d.custom_name.get(), '- no custom themes -')
        eq(dialog.deactivate_current_config.called, 2)
        eq(dialog.activate_config_changes.called, 2)
        eq(d.set_theme_type.called, 2)

        del dialog.activate_config_changes, dialog.deactivate_current_config
        del d.askyesno
class KeysPageTest(unittest.TestCase):
"""Test that keys tab widgets enable users to make changes.
Test that widget actions set vars, that var changes add
options to changes and that key sets works correctly.
"""
@classmethod
def setUpClass(cls):
page = cls.page = dialog.keyspage
dialog.note.select(page)
page.set_keys_type = Func()
page.load_keys_list = Func()
@classmethod
def tearDownClass(cls):
page = cls.page
del page.set_keys_type, page.load_keys_list
def setUp(self):
d = self.page
# The following is needed for test_load_key_cfg, _delete_custom_keys.
# This may indicate a defect in some test or function.
for section in idleConf.GetSectionList('user', 'keys'):
idleConf.userCfg['keys'].remove_section(section)
changes.clear()
d.set_keys_type.called = 0
d.load_keys_list.called = 0
    def test_load_key_cfg(self):
        # Detach tracers so set() calls below do not record changes.
        tracers.detach()
        d = self.page
        eq = self.assertEqual

        # Use builtin keyset with no user keysets created.
        idleConf.CurrentKeys = mock.Mock(return_value='IDLE Classic OSX')
        d.load_key_cfg()
        self.assertTrue(d.keyset_source.get())
        # builtinlist sets variable builtin_name to the CurrentKeys default.
        eq(d.builtin_name.get(), 'IDLE Classic OSX')
        eq(d.custom_name.get(), '- no custom keys -')
        eq(d.custom_keyset_on.state(), ('disabled',))
        eq(d.set_keys_type.called, 1)
        eq(d.load_keys_list.called, 1)
        eq(d.load_keys_list.args, ('IDLE Classic OSX', ))

        # Builtin keyset with non-empty user keyset list.
        idleConf.SetOption('keys', 'test1', 'option', 'value')
        idleConf.SetOption('keys', 'test2', 'option2', 'value2')
        d.load_key_cfg()
        eq(d.builtin_name.get(), 'IDLE Classic OSX')
        eq(d.custom_name.get(), 'test1')
        eq(d.set_keys_type.called, 2)
        eq(d.load_keys_list.called, 2)
        eq(d.load_keys_list.args, ('IDLE Classic OSX', ))

        # Use custom keyset.
        idleConf.CurrentKeys = mock.Mock(return_value='test2')
        idleConf.default_keys = mock.Mock(return_value='IDLE Modern Unix')
        idleConf.SetOption('main', 'Keys', 'default', '0')
        d.load_key_cfg()
        self.assertFalse(d.keyset_source.get())
        eq(d.builtin_name.get(), 'IDLE Modern Unix')
        eq(d.custom_name.get(), 'test2')
        eq(d.set_keys_type.called, 3)
        eq(d.load_keys_list.called, 3)
        eq(d.load_keys_list.args, ('test2', ))

        del idleConf.CurrentKeys, idleConf.default_keys
        tracers.attach()
def test_keyset_source(self):
eq = self.assertEqual
d = self.page
# Test these separately.
d.var_changed_builtin_name = Func()
d.var_changed_custom_name = Func()
# Builtin selected.
d.builtin_keyset_on.invoke()
eq(mainpage, {'Keys': {'default': 'True'}})
eq(d.var_changed_builtin_name.called, 1)
eq(d.var_changed_custom_name.called, 0)
changes.clear()
# Custom selected.
d.custom_keyset_on.state(('!disabled',))
d.custom_keyset_on.invoke()
self.assertEqual(mainpage, {'Keys': {'default': 'False'}})
eq(d.var_changed_builtin_name.called, 1)
eq(d.var_changed_custom_name.called, 1)
del d.var_changed_builtin_name, d.var_changed_custom_name
    def test_builtin_name(self):
        # Selecting a builtin keyset records 'name'/'name2' according to
        # whether the previous name predates the old_keys renaming.
        eq = self.assertEqual
        d = self.page
        idleConf.userCfg['main'].remove_section('Keys')
        item_list = ['IDLE Classic Windows', 'IDLE Classic OSX',
                     'IDLE Modern UNIX']

        # Not in old_keys, defaults name to first item.
        d.builtinlist.SetMenu(item_list, 'IDLE Modern UNIX')
        eq(mainpage, {'Keys': {'name': 'IDLE Classic Windows',
                               'name2': 'IDLE Modern UNIX'}})
        eq(d.keys_message['text'], 'New key set, see Help')
        eq(d.load_keys_list.called, 1)
        eq(d.load_keys_list.args, ('IDLE Modern UNIX', ))

        # Not in old keys - uses name2.
        changes.clear()
        idleConf.SetOption('main', 'Keys', 'name', 'IDLE Classic Unix')
        d.builtinlist.SetMenu(item_list, 'IDLE Modern UNIX')
        eq(mainpage, {'Keys': {'name2': 'IDLE Modern UNIX'}})
        eq(d.keys_message['text'], 'New key set, see Help')
        eq(d.load_keys_list.called, 2)
        eq(d.load_keys_list.args, ('IDLE Modern UNIX', ))

        # Builtin name in old_keys.
        changes.clear()
        d.builtinlist.SetMenu(item_list, 'IDLE Classic OSX')
        eq(mainpage, {'Keys': {'name': 'IDLE Classic OSX', 'name2': ''}})
        eq(d.keys_message['text'], '')
        eq(d.load_keys_list.called, 3)
        eq(d.load_keys_list.args, ('IDLE Classic OSX', ))
def test_custom_name(self):
d = self.page
# If no selections, doesn't get added.
d.customlist.SetMenu([], '- no custom keys -')
self.assertNotIn('Keys', mainpage)
self.assertEqual(d.load_keys_list.called, 0)
# Custom name selected.
changes.clear()
d.customlist.SetMenu(['a', 'b', 'c'], 'c')
self.assertEqual(mainpage, {'Keys': {'name': 'c'}})
self.assertEqual(d.load_keys_list.called, 1)
    def test_keybinding(self):
        # A core IDLE binding is recorded on the keys page; a binding for
        # an extension event goes to the extensions page under the
        # '<extension>_cfgBindings' section.
        idleConf.SetOption('extensions', 'ZzDummy', 'enable', 'True')
        d = self.page
        d.custom_name.set('my custom keys')
        d.bindingslist.delete(0, 'end')
        d.bindingslist.insert(0, 'copy')
        d.bindingslist.insert(1, 'z-in')
        d.bindingslist.selection_set(0)
        d.bindingslist.selection_anchor(0)
        # Core binding - adds to keys.
        d.keybinding.set('<Key-F11>')
        self.assertEqual(keyspage,
                         {'my custom keys': {'copy': '<Key-F11>'}})

        # Not a core binding - adds to extensions.
        d.bindingslist.selection_set(1)
        d.bindingslist.selection_anchor(1)
        d.keybinding.set('<Key-F11>')
        self.assertEqual(extpage,
                         {'ZzDummy_cfgBindings': {'z-in': '<Key-F11>'}})
def test_set_keys_type(self):
eq = self.assertEqual
d = self.page
del d.set_keys_type
# Builtin keyset selected.
d.keyset_source.set(True)
d.set_keys_type()
eq(d.builtinlist['state'], NORMAL)
eq(d.customlist['state'], DISABLED)
eq(d.button_delete_custom_keys.state(), ('disabled',))
# Custom keyset selected.
d.keyset_source.set(False)
d.set_keys_type()
eq(d.builtinlist['state'], DISABLED)
eq(d.custom_keyset_on.state(), ('selected',))
eq(d.customlist['state'], NORMAL)
eq(d.button_delete_custom_keys.state(), ())
d.set_keys_type = Func()
    def test_get_new_keys(self):
        # Replace the modal GetKeysDialog with a canned-result Func and
        # mask get_new_keys_name to simulate the name-entry dialog.
        eq = self.assertEqual
        d = self.page
        orig_getkeysdialog = configdialog.GetKeysDialog
        gkd = configdialog.GetKeysDialog = Func(return_self=True)
        gnkn = d.get_new_keys_name = Func()

        d.button_new_keys.state(('!disabled',))
        d.bindingslist.delete(0, 'end')
        d.bindingslist.insert(0, 'copy - <Control-Shift-Key-C>')
        d.bindingslist.selection_set(0)
        d.bindingslist.selection_anchor(0)
        d.keybinding.set('Key-a')
        d.keyset_source.set(True)  # Default keyset.

        # Default keyset; no change to binding.
        gkd.result = ''
        d.button_new_keys.invoke()
        eq(d.bindingslist.get('anchor'), 'copy - <Control-Shift-Key-C>')
        # Keybinding isn't changed when there isn't a change entered.
        eq(d.keybinding.get(), 'Key-a')

        # Default keyset; binding changed.
        gkd.result = '<Key-F11>'
        # No keyset name selected therefore binding not saved.
        gnkn.result = ''
        d.button_new_keys.invoke()
        eq(gnkn.called, 1)
        eq(d.bindingslist.get('anchor'), 'copy - <Control-Shift-Key-C>')
        # Keyset name selected.
        gnkn.result = 'My New Key Set'
        d.button_new_keys.invoke()
        eq(d.custom_name.get(), gnkn.result)
        eq(d.bindingslist.get('anchor'), 'copy - <Key-F11>')
        eq(d.keybinding.get(), '<Key-F11>')

        # User keyset; binding changed.
        d.keyset_source.set(False)  # Custom keyset.
        gnkn.called = 0
        gkd.result = '<Key-p>'
        d.button_new_keys.invoke()
        # No name prompt needed when already on a custom keyset.
        eq(gnkn.called, 0)
        eq(d.bindingslist.get('anchor'), 'copy - <Key-p>')
        eq(d.keybinding.get(), '<Key-p>')

        del d.get_new_keys_name
        configdialog.GetKeysDialog = orig_getkeysdialog
def test_get_new_keys_name(self):
orig_sectionname = configdialog.SectionName
sn = configdialog.SectionName = Func(return_self=True)
d = self.page
sn.result = 'New Keys'
self.assertEqual(d.get_new_keys_name(''), 'New Keys')
configdialog.SectionName = orig_sectionname
def test_save_as_new_key_set(self):
d = self.page
gnkn = d.get_new_keys_name = Func()
d.keyset_source.set(True)
# No name entered.
gnkn.result = ''
d.button_save_custom_keys.invoke()
# Name entered.
gnkn.result = 'my new key set'
gnkn.called = 0
self.assertNotIn(gnkn.result, idleConf.userCfg['keys'])
d.button_save_custom_keys.invoke()
self.assertIn(gnkn.result, idleConf.userCfg['keys'])
del d.get_new_keys_name
    def test_on_bindingslist_select(self):
        """Clicking a bindings row selects it and enables the Keys button."""
        d = self.page
        b = d.bindingslist
        b.delete(0, 'end')
        b.insert(0, 'copy')
        b.insert(1, 'find')
        b.activate(0)
        # The listbox must be visible and updated for bbox() to work.
        b.focus_force()
        b.see(1)
        b.update()
        # Simulate a mouse click in the middle of row 1 ('find').
        x, y, dx, dy = b.bbox(1)
        x += dx // 2
        y += dy // 2
        b.event_generate('<Enter>', x=0, y=0)
        b.event_generate('<Motion>', x=x, y=y)
        b.event_generate('<Button-1>', x=x, y=y)
        b.event_generate('<ButtonRelease-1>', x=x, y=y)
        self.assertEqual(b.get('anchor'), 'find')
        # An empty state tuple means the button is enabled.
        self.assertEqual(d.button_new_keys.state(), ())
    def test_create_new_key_set_and_save_new_key_set(self):
        """create_new_key_set copies the active set plus pending changes."""
        eq = self.assertEqual
        d = self.page
        # Use default as previously active keyset.
        d.keyset_source.set(True)
        d.builtin_name.set('IDLE Classic Windows')
        first_new = 'my new custom key set'
        second_new = 'my second custom keyset'

        # No changes, so keysets are an exact copy.
        self.assertNotIn(first_new, idleConf.userCfg)
        d.create_new_key_set(first_new)
        eq(idleConf.GetSectionList('user', 'keys'), [first_new])
        eq(idleConf.GetKeySet('IDLE Classic Windows'),
           idleConf.GetKeySet(first_new))
        eq(d.custom_name.get(), first_new)
        self.assertFalse(d.keyset_source.get())  # Use custom set.
        eq(d.set_keys_type.called, 1)

        # Test that changed keybindings are in new keyset.
        changes.add_option('keys', first_new, 'copy', '<Key-F11>')
        self.assertNotIn(second_new, idleConf.userCfg)
        d.create_new_key_set(second_new)
        eq(idleConf.GetSectionList('user', 'keys'), [first_new, second_new])
        self.assertNotEqual(idleConf.GetKeySet(first_new),
                            idleConf.GetKeySet(second_new))
        # Check that difference in keysets was in option `copy` from `changes`.
        idleConf.SetOption('keys', first_new, 'copy', '<Key-F11>')
        eq(idleConf.GetKeySet(first_new), idleConf.GetKeySet(second_new))
    def test_load_keys_list(self):
        """load_keys_list refills the listbox and preserves the selection."""
        eq = self.assertEqual
        d = self.page
        # Mask GetKeySet so the test controls the bindings returned.
        gks = idleConf.GetKeySet = Func()
        # Unmask the real method for this test.
        del d.load_keys_list
        b = d.bindingslist
        b.delete(0, 'end')
        # Stale entries should be replaced on load.
        b.insert(0, '<<find>>')
        b.insert(1, '<<help>>')
        gks.result = {'<<copy>>': ['<Control-Key-c>', '<Control-Key-C>'],
                      '<<force-open-completions>>': ['<Control-Key-space>'],
                      '<<spam>>': ['<Key-F11>']}
        # A pending change overrides the value from the key set.
        changes.add_option('keys', 'my keys', 'spam', '<Shift-Key-a>')
        expected = ('copy - <Control-Key-c> <Control-Key-C>',
                    'force-open-completions - <Control-Key-space>',
                    'spam - <Shift-Key-a>')

        # No current selection.
        d.load_keys_list('my keys')
        eq(b.get(0, 'end'), expected)
        eq(b.get('anchor'), '')
        eq(b.curselection(), ())

        # Check selection.
        b.selection_set(1)
        b.selection_anchor(1)
        d.load_keys_list('my keys')
        eq(b.get(0, 'end'), expected)
        eq(b.get('anchor'), 'force-open-completions - <Control-Key-space>')
        eq(b.curselection(), (1, ))

        # Change selection.
        b.selection_set(2)
        b.selection_anchor(2)
        d.load_keys_list('my keys')
        eq(b.get(0, 'end'), expected)
        eq(b.get('anchor'), 'spam - <Shift-Key-a>')
        eq(b.curselection(), (2, ))

        # Re-mask the method and remove the GetKeySet override.
        d.load_keys_list = Func()
        del idleConf.GetKeySet
    def test_delete_custom_keys(self):
        """Deleting key sets updates config, changes, and widget state."""
        eq = self.assertEqual
        d = self.page
        d.button_delete_custom_keys.state(('!disabled',))
        # Mask the confirmation dialog and the dialog-level config hooks.
        yesno = d.askyesno = Func()
        dialog.deactivate_current_config = Func()
        dialog.activate_config_changes = Func()

        # Create two custom key sets in both the config and 'changes'.
        keyset_name = 'spam key set'
        idleConf.userCfg['keys'].SetOption(keyset_name, 'name', 'value')
        keyspage[keyset_name] = {'option': 'True'}
        keyset_name2 = 'other key set'
        idleConf.userCfg['keys'].SetOption(keyset_name2, 'name', 'value')
        keyspage[keyset_name2] = {'option': 'False'}

        # Force custom keyset.
        d.custom_keyset_on.state(('!disabled',))
        d.custom_keyset_on.invoke()
        d.custom_name.set(keyset_name)

        # Cancel deletion.
        yesno.result = False
        d.button_delete_custom_keys.invoke()
        eq(yesno.called, 1)
        eq(keyspage[keyset_name], {'option': 'True'})
        eq(idleConf.GetSectionList('user', 'keys'), [keyset_name, keyset_name2])
        eq(dialog.deactivate_current_config.called, 0)
        eq(dialog.activate_config_changes.called, 0)
        eq(d.set_keys_type.called, 0)

        # Confirm deletion.
        yesno.result = True
        d.button_delete_custom_keys.invoke()
        eq(yesno.called, 2)
        self.assertNotIn(keyset_name, keyspage)
        eq(idleConf.GetSectionList('user', 'keys'), [keyset_name2])
        eq(d.custom_keyset_on.state(), ())
        eq(d.custom_name.get(), keyset_name2)
        eq(dialog.deactivate_current_config.called, 1)
        eq(dialog.activate_config_changes.called, 1)
        eq(d.set_keys_type.called, 1)

        # Confirm deletion of second keyset - empties list.
        d.custom_name.set(keyset_name2)
        yesno.result = True
        d.button_delete_custom_keys.invoke()
        eq(yesno.called, 3)
        self.assertNotIn(keyset_name, keyspage)
        eq(idleConf.GetSectionList('user', 'keys'), [])
        eq(d.custom_keyset_on.state(), ('disabled',))
        eq(d.custom_name.get(), '- no custom keys -')
        eq(dialog.deactivate_current_config.called, 2)
        eq(dialog.activate_config_changes.called, 2)
        eq(d.set_keys_type.called, 2)

        # Remove the masks.
        del dialog.activate_config_changes, dialog.deactivate_current_config
        del d.askyesno
class WinPageTest(unittest.TestCase):
    """Test that windows tab widgets enable users to make changes.

    Test that widget actions set vars, that var changes add
    options to changes.
    """
    @classmethod
    def setUpClass(cls):
        # Make the windows page visible so widget geometry/events work.
        page = cls.page = dialog.winpage
        dialog.note.select(page)
        page.update()

    def setUp(self):
        # Each test starts with no pending config changes.
        changes.clear()

    def test_load_windows_cfg(self):
        # Set to wrong values, load, check right values.
        eq = self.assertEqual
        d = self.page
        d.startup_edit.set(1)
        d.win_width.set(1)
        d.win_height.set(1)
        d.load_windows_cfg()
        eq(d.startup_edit.get(), 0)
        eq(d.win_width.get(), '80')
        eq(d.win_height.get(), '40')

    def test_startup(self):
        # The radiobuttons toggle the editor-on-startup option.
        d = self.page
        d.startup_editor_on.invoke()
        self.assertEqual(mainpage,
                         {'General': {'editor-on-startup': '1'}})
        changes.clear()
        d.startup_shell_on.invoke()
        self.assertEqual(mainpage,
                         {'General': {'editor-on-startup': '0'}})

    def test_editor_size(self):
        # Typing in the entries records height/width options.
        d = self.page
        d.win_height_int.delete(0, 'end')
        d.win_height_int.insert(0, '11')
        self.assertEqual(mainpage, {'EditorWindow': {'height': '11'}})
        changes.clear()
        d.win_width_int.delete(0, 'end')
        d.win_width_int.insert(0, '11')
        self.assertEqual(mainpage, {'EditorWindow': {'width': '11'}})

    def test_cursor_blink(self):
        self.page.cursor_blink_bool.invoke()
        self.assertEqual(mainpage, {'EditorWindow': {'cursor-blink': 'False'}})

    def test_autocomplete_wait(self):
        self.page.auto_wait_int.delete(0, 'end')
        self.page.auto_wait_int.insert(0, '11')
        self.assertEqual(extpage, {'AutoComplete': {'popupwait': '11'}})

    def test_parenmatch(self):
        d = self.page
        eq = self.assertEqual
        # Invoking the first menu entry records the 'opener' style.
        d.paren_style_type['menu'].invoke(0)
        eq(extpage, {'ParenMatch': {'style': 'opener'}})
        changes.clear()
        d.paren_flash_time.delete(0, 'end')
        d.paren_flash_time.insert(0, '11')
        eq(extpage, {'ParenMatch': {'flash-delay': '11'}})
        changes.clear()
        d.bell_on.invoke()
        eq(extpage, {'ParenMatch': {'bell': 'False'}})

    def test_paragraph(self):
        self.page.format_width_int.delete(0, 'end')
        self.page.format_width_int.insert(0, '11')
        self.assertEqual(extpage, {'FormatParagraph': {'max-width': '11'}})
class GenPageTest(unittest.TestCase):
    """Test that shed tab widgets enable users to make changes.

    Test that widget actions set vars, that var changes add
    options to changes.
    """
    @classmethod
    def setUpClass(cls):
        # Make the Shell/Ed page visible so widget events work.
        shed = cls.page = dialog.shedpage
        dialog.note.select(shed)
        shed.update()

    def setUp(self):
        # Start each test with no pending config changes.
        changes.clear()

    def test_load_shelled_cfg(self):
        # Set to a wrong value, load, and check the right value.
        page = self.page
        page.autosave.set(1)
        page.load_shelled_cfg()
        self.assertEqual(page.autosave.get(), 0)

    def test_autosave(self):
        # The radiobuttons toggle the autosave option.
        page = self.page
        page.save_auto_on.invoke()
        self.assertEqual(mainpage, {'General': {'autosave': '1'}})
        page.save_ask_on.invoke()
        self.assertEqual(mainpage, {'General': {'autosave': '0'}})

    def test_context(self):
        # Typing in the entry records the maxlines option.
        entry = self.page.context_int
        entry.delete(0, 'end')
        entry.insert(0, '1')
        self.assertEqual(extpage, {'CodeContext': {'maxlines': '1'}})
# TODO: ExtPageTest below has no tests yet ("Nothing here yet"); either
# add widget tests or apply @unittest.skip until they exist.
class ExtPageTest(unittest.TestCase):
    """Placeholder for extensions page tests.

    Currently only selects the page; no widget tests have been written.
    """
    @classmethod
    def setUpClass(cls):
        page = dialog.extpage
        dialog.note.select(page)
class HelpSourceTest(unittest.TestCase):
    """Test that the help source list works correctly."""
    @classmethod
    def setUpClass(cls):
        page = dialog.extpage
        dialog.note.select(page)
        frame = cls.frame = page.frame_help
        # Mask the methods that react to list changes; individual tests
        # unmask them or check the recorded .called counts as needed.
        frame.set = frame.set_add_delete_state = Func()
        frame.upc = frame.update_help_changes = Func()
        frame.update()

    @classmethod
    def tearDownClass(cls):
        # Remove the masks and clear test entries.
        frame = cls.frame
        del frame.set, frame.set_add_delete_state
        del frame.upc, frame.update_help_changes
        frame.helplist.delete(0, 'end')
        frame.user_helplist.clear()

    def setUp(self):
        # Each test starts with no pending config changes.
        changes.clear()

    def test_load_helplist(self):
        # Stale listbox/user entries are replaced by the configured source.
        eq = self.assertEqual
        fr = self.frame
        fr.helplist.insert('end', 'bad')
        fr.user_helplist = ['bad', 'worse']
        idleConf.SetOption('main', 'HelpFiles', '1', 'name;file')
        fr.load_helplist()
        eq(fr.helplist.get(0, 'end'), ('name',))
        eq(fr.user_helplist, [('name', 'file', '1')])

    def test_source_selected(self):
        # Clicking a row calls set_add_delete_state but not
        # update_help_changes.
        fr = self.frame
        fr.set = fr.set_add_delete_state
        fr.upc = fr.update_help_changes
        helplist = fr.helplist
        dex = 'end'
        helplist.insert(dex, 'source')
        helplist.activate(dex)
        # The listbox must be visible and updated for bbox() to work.
        helplist.focus_force()
        helplist.see(dex)
        helplist.update()
        # Simulate a mouse click in the middle of the row.
        x, y, dx, dy = helplist.bbox(dex)
        x += dx // 2
        y += dy // 2
        fr.set.called = fr.upc.called = 0
        helplist.event_generate('<Enter>', x=0, y=0)
        helplist.event_generate('<Motion>', x=x, y=y)
        helplist.event_generate('<Button-1>', x=x, y=y)
        helplist.event_generate('<ButtonRelease-1>', x=x, y=y)
        self.assertEqual(helplist.get('anchor'), 'source')
        self.assertTrue(fr.set.called)
        self.assertFalse(fr.upc.called)

    def test_set_add_delete_state(self):
        # Call with 0 items, 1 unselected item, 1 selected item.
        eq = self.assertEqual
        fr = self.frame
        del fr.set_add_delete_state  # Unmask method.
        sad = fr.set_add_delete_state
        h = fr.helplist

        h.delete(0, 'end')
        sad()
        eq(fr.button_helplist_edit.state(), ('disabled',))
        eq(fr.button_helplist_remove.state(), ('disabled',))

        h.insert(0, 'source')
        sad()
        eq(fr.button_helplist_edit.state(), ('disabled',))
        eq(fr.button_helplist_remove.state(), ('disabled',))

        h.selection_set(0)
        sad()
        eq(fr.button_helplist_edit.state(), ())
        eq(fr.button_helplist_remove.state(), ())
        fr.set_add_delete_state = Func()  # Mask method.

    def test_helplist_item_add(self):
        # Call without and twice with HelpSource result.
        # Double call enables check on order.
        eq = self.assertEqual
        orig_helpsource = configdialog.HelpSource
        hs = configdialog.HelpSource = Func(return_self=True)
        fr = self.frame
        fr.helplist.delete(0, 'end')
        fr.user_helplist.clear()
        fr.set.called = fr.upc.called = 0

        # Dialog cancelled: nothing is added.
        hs.result = ''
        fr.helplist_item_add()
        self.assertTrue(list(fr.helplist.get(0, 'end')) ==
                        fr.user_helplist == [])
        self.assertFalse(fr.upc.called)

        hs.result = ('name1', 'file1')
        fr.helplist_item_add()
        hs.result = ('name2', 'file2')
        fr.helplist_item_add()
        eq(fr.helplist.get(0, 'end'), ('name1', 'name2'))
        eq(fr.user_helplist, [('name1', 'file1'), ('name2', 'file2')])
        eq(fr.upc.called, 2)
        self.assertFalse(fr.set.called)

        configdialog.HelpSource = orig_helpsource

    def test_helplist_item_edit(self):
        # Call without and with HelpSource change.
        eq = self.assertEqual
        orig_helpsource = configdialog.HelpSource
        hs = configdialog.HelpSource = Func(return_self=True)
        fr = self.frame
        fr.helplist.delete(0, 'end')
        fr.helplist.insert(0, 'name1')
        fr.helplist.selection_set(0)
        fr.helplist.selection_anchor(0)
        fr.user_helplist.clear()
        fr.user_helplist.append(('name1', 'file1'))
        fr.set.called = fr.upc.called = 0

        # Dialog cancelled or unchanged: no updates recorded.
        hs.result = ''
        fr.helplist_item_edit()
        hs.result = ('name1', 'file1')
        fr.helplist_item_edit()
        eq(fr.helplist.get(0, 'end'), ('name1',))
        eq(fr.user_helplist, [('name1', 'file1')])
        self.assertFalse(fr.upc.called)

        # Changed entry replaces the old one.
        hs.result = ('name2', 'file2')
        fr.helplist_item_edit()
        eq(fr.helplist.get(0, 'end'), ('name2',))
        eq(fr.user_helplist, [('name2', 'file2')])
        self.assertTrue(fr.upc.called == fr.set.called == 1)

        configdialog.HelpSource = orig_helpsource

    def test_helplist_item_remove(self):
        eq = self.assertEqual
        fr = self.frame
        fr.helplist.delete(0, 'end')
        fr.helplist.insert(0, 'name1')
        fr.helplist.selection_set(0)
        fr.helplist.selection_anchor(0)
        fr.user_helplist.clear()
        fr.user_helplist.append(('name1', 'file1'))
        fr.set.called = fr.upc.called = 0

        # Removing the selected entry empties both lists.
        fr.helplist_item_remove()
        eq(fr.helplist.get(0, 'end'), ())
        eq(fr.user_helplist, [])
        self.assertTrue(fr.upc.called == fr.set.called == 1)

    def test_update_help_changes(self):
        # Entries are written to changes as numbered 'name;file' options.
        fr = self.frame
        del fr.update_help_changes
        fr.user_helplist.clear()
        fr.user_helplist.append(('name1', 'file1'))
        fr.user_helplist.append(('name2', 'file2'))

        fr.update_help_changes()
        self.assertEqual(mainpage['HelpFiles'],
                         {'1': 'name1;file1', '2': 'name2;file2'})
        fr.update_help_changes = Func()
class VarTraceTest(unittest.TestCase):
    """Test the VarTrace manager for Tk variable write callbacks."""
    @classmethod
    def setUpClass(cls):
        cls.tracers = configdialog.VarTrace()
        cls.iv = IntVar(root)
        cls.bv = BooleanVar(root)

    @classmethod
    def tearDownClass(cls):
        del cls.tracers, cls.iv, cls.bv

    def setUp(self):
        self.tracers.clear()
        self.called = 0

    def var_changed_increment(self, *params):
        # Increment by 13 so a double attachment would be detectable.
        self.called += 13

    def var_changed_boolean(self, *params):
        # No-op callback used only to populate the tracer lists.
        pass

    def test_init(self):
        tr = self.tracers
        tr.__init__()
        self.assertEqual(tr.untraced, [])
        self.assertEqual(tr.traced, [])

    def test_clear(self):
        tr = self.tracers
        tr.untraced.append(0)
        tr.traced.append(1)
        tr.clear()
        self.assertEqual(tr.untraced, [])
        self.assertEqual(tr.traced, [])

    def test_add(self):
        tr = self.tracers
        func = Func()
        cb = tr.make_callback = mock.Mock(return_value=func)

        iv = tr.add(self.iv, self.var_changed_increment)
        self.assertIs(iv, self.iv)
        bv = tr.add(self.bv, self.var_changed_boolean)
        self.assertIs(bv, self.bv)

        # A config-key tuple is converted to a callback via make_callback.
        sv = StringVar(root)
        sv2 = tr.add(sv, ('main', 'section', 'option'))
        self.assertIs(sv2, sv)
        cb.assert_called_once()
        cb.assert_called_with(sv, ('main', 'section', 'option'))

        expected = [(iv, self.var_changed_increment),
                    (bv, self.var_changed_boolean),
                    (sv, func)]
        # Newly added tracers start out untraced.
        self.assertEqual(tr.traced, [])
        self.assertEqual(tr.untraced, expected)

        del tr.make_callback

    def test_make_callback(self):
        # The callback records the var's current value in changes.
        cb = self.tracers.make_callback(self.iv, ('main', 'section', 'option'))
        self.assertTrue(callable(cb))
        self.iv.set(42)
        # Not attached, so set didn't invoke the callback.
        self.assertNotIn('section', changes['main'])
        # Invoke callback manually.
        cb()
        self.assertIn('section', changes['main'])
        self.assertEqual(changes['main']['section']['option'], '42')
        changes.clear()

    def test_attach_detach(self):
        tr = self.tracers
        iv = tr.add(self.iv, self.var_changed_increment)
        bv = tr.add(self.bv, self.var_changed_boolean)
        expected = [(iv, self.var_changed_increment),
                    (bv, self.var_changed_boolean)]

        # Attach callbacks and test call increment.
        tr.attach()
        self.assertEqual(tr.untraced, [])
        self.assertCountEqual(tr.traced, expected)
        iv.set(1)
        self.assertEqual(iv.get(), 1)
        self.assertEqual(self.called, 13)

        # Check that only one callback is attached to a variable.
        # If more than one callback were attached, then var_changed_increment
        # would be called twice and the counter would be 2.
        self.called = 0
        tr.attach()
        iv.set(1)
        self.assertEqual(self.called, 13)

        # Detach callbacks.
        self.called = 0
        tr.detach()
        self.assertEqual(tr.traced, [])
        self.assertCountEqual(tr.untraced, expected)
        iv.set(1)
        self.assertEqual(self.called, 0)
if __name__ == '__main__':
    # Run with extra verbosity so each test name is listed as it runs.
    unittest.main(verbosity=2)
| 34.79925 | 89 | 0.603875 | from idlelib import configdialog
from test.support import requires
requires('gui')
import unittest
from unittest import mock
from idlelib.idle_test.mock_idle import Func
from tkinter import (Tk, StringVar, IntVar, BooleanVar, DISABLED, NORMAL)
from idlelib import config
from idlelib.configdialog import idleConf, changes, tracers
usercfg = idleConf.userCfg
testcfg = {
'main': config.IdleUserConfParser(''),
'highlight': config.IdleUserConfParser(''),
'keys': config.IdleUserConfParser(''),
'extensions': config.IdleUserConfParser(''),
}
root = None
dialog = None
mainpage = changes['main']
highpage = changes['highlight']
keyspage = changes['keys']
extpage = changes['extensions']
def setUpModule():
global root, dialog
idleConf.userCfg = testcfg
root = Tk()
figDialog(root, 'Test', _utest=True)
def tearDownModule():
global root, dialog
idleConf.userCfg = usercfg
tracers.detach()
tracers.clear()
changes.clear()
root.update_idletasks()
root.destroy()
root = dialog = None
class ConfigDialogTest(unittest.TestCase):
def test_deactivate_current_config(self):
pass
def activate_config_changes(self):
pass
class ButtonTest(unittest.TestCase):
def test_click_ok(self):
d = dialog
apply = d.apply = mock.Mock()
destroy = d.destroy = mock.Mock()
d.buttons['Ok'].invoke()
apply.assert_called_once()
destroy.assert_called_once()
del d.destroy, d.apply
def test_click_apply(self):
d = dialog
deactivate = d.deactivate_current_config = mock.Mock()
save_ext = d.save_all_changed_extensions = mock.Mock()
activate = d.activate_config_changes = mock.Mock()
d.buttons['Apply'].invoke()
deactivate.assert_called_once()
save_ext.assert_called_once()
activate.assert_called_once()
del d.save_all_changed_extensions
del d.activate_config_changes, d.deactivate_current_config
def test_click_cancel(self):
d = dialog
d.destroy = Func()
changes['main']['something'] = 1
d.buttons['Cancel'].invoke()
self.assertEqual(changes['main'], {})
self.assertEqual(d.destroy.called, 1)
del d.destroy
def test_click_help(self):
dialog.note.select(dialog.keyspage)
with mock.patch.object(configdialog, 'view_text',
new_callable=Func) as view:
dialog.buttons['Help'].invoke()
title, contents = view.kwds['title'], view.kwds['contents']
self.assertEqual(title, 'Help for IDLE preferences')
self.assertTrue(contents.startswith('When you click') and
contents.endswith('a different name.\n'))
class FontPageTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
page = cls.page = dialog.fontpage
dialog.note.select(page)
page.set_samples = Func()
page.update()
@classmethod
def tearDownClass(cls):
del cls.page.set_samples
def setUp(self):
changes.clear()
def test_load_font_cfg(self):
tracers.detach()
d = self.page
d.font_name.set('Fake')
d.font_size.set('1')
d.font_bold.set(True)
d.set_samples.called = 0
d.load_font_cfg()
self.assertNotEqual(d.font_name.get(), 'Fake')
self.assertNotEqual(d.font_size.get(), '1')
self.assertFalse(d.font_bold.get())
self.assertEqual(d.set_samples.called, 1)
tracers.attach()
def test_fontlist_key(self):
d = self.page
if d.fontlist.size() < 2:
self.skipTest('need at least 2 fonts')
fontlist = d.fontlist
fontlist.activate(0)
font = d.fontlist.get('active')
fontlist.focus_force()
fontlist.update()
fontlist.event_generate('<Key-Down>')
fontlist.event_generate('<KeyRelease-Down>')
down_font = fontlist.get('active')
self.assertNotEqual(down_font, font)
self.assertIn(d.font_name.get(), down_font.lower())
fontlist.focus_force()
fontlist.update()
fontlist.event_generate('<Key-Up>')
fontlist.event_generate('<KeyRelease-Up>')
up_font = fontlist.get('active')
self.assertEqual(up_font, font)
self.assertIn(d.font_name.get(), up_font.lower())
def test_fontlist_mouse(self):
d = self.page
if d.fontlist.size() < 2:
self.skipTest('need at least 2 fonts')
fontlist = d.fontlist
fontlist.activate(0)
fontlist.focus_force()
fontlist.see(1)
fontlist.update()
x, y, dx, dy = fontlist.bbox(1)
x += dx // 2
y += dy // 2
fontlist.event_generate('<Button-1>', x=x, y=y)
fontlist.event_generate('<ButtonRelease-1>', x=x, y=y)
font1 = fontlist.get(1)
select_font = fontlist.get('anchor')
self.assertEqual(select_font, font1)
self.assertIn(d.font_name.get(), font1.lower())
def test_sizelist(self):
d = self.page
d.sizelist.variable.set(40)
self.assertEqual(d.font_size.get(), '40')
def test_bold_toggle(self):
d = self.page
d.font_bold.set(False)
d.bold_toggle.invoke()
self.assertTrue(d.font_bold.get())
d.bold_toggle.invoke()
self.assertFalse(d.font_bold.get())
def test_font_set(self):
default_font = idleConf.GetFont(root, 'main', 'EditorWindow')
default_size = str(default_font[1])
default_bold = default_font[2] == 'bold'
d = self.page
d.font_size.set(default_size)
d.font_bold.set(default_bold)
d.set_samples.called = 0
d.font_name.set('Test Font')
expected = {'EditorWindow': {'font': 'Test Font',
'font-size': default_size,
'font-bold': str(default_bold)}}
self.assertEqual(mainpage, expected)
self.assertEqual(d.set_samples.called, 1)
changes.clear()
d.font_size.set('20')
expected = {'EditorWindow': {'font': 'Test Font',
'font-size': '20',
'font-bold': str(default_bold)}}
self.assertEqual(mainpage, expected)
self.assertEqual(d.set_samples.called, 2)
changes.clear()
d.font_bold.set(not default_bold)
expected = {'EditorWindow': {'font': 'Test Font',
'font-size': '20',
'font-bold': str(not default_bold)}}
self.assertEqual(mainpage, expected)
self.assertEqual(d.set_samples.called, 3)
def test_set_samples(self):
d = self.page
del d.set_samples
orig_samples = d.font_sample, d.highlight_sample
d.font_sample, d.highlight_sample = {}, {}
d.font_name.set('test')
d.font_size.set('5')
d.font_bold.set(1)
expected = {'font': ('test', '5', 'bold')}
d.set_samples()
self.assertTrue(d.font_sample == d.highlight_sample == expected)
d.font_sample, d.highlight_sample = orig_samples
d.set_samples = Func()
class IndentTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.page = dialog.fontpage
cls.page.update()
def test_load_tab_cfg(self):
d = self.page
d.space_num.set(16)
d.load_tab_cfg()
self.assertEqual(d.space_num.get(), 4)
def test_indent_scale(self):
d = self.page
changes.clear()
d.indent_scale.set(20)
self.assertEqual(d.space_num.get(), 16)
self.assertEqual(mainpage, {'Indent': {'num-spaces': '16'}})
class HighPageTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
page = cls.page = dialog.highpage
dialog.note.select(page)
page.set_theme_type = Func()
page.paint_theme_sample = Func()
page.set_highlight_target = Func()
page.set_color_sample = Func()
page.update()
@classmethod
def tearDownClass(cls):
d = cls.page
del d.set_theme_type, d.paint_theme_sample
del d.set_highlight_target, d.set_color_sample
def setUp(self):
d = self.page
for section in idleConf.GetSectionList('user', 'highlight'):
idleConf.userCfg['highlight'].remove_section(section)
changes.clear()
d.set_theme_type.called = 0
d.paint_theme_sample.called = 0
d.set_highlight_target.called = 0
d.set_color_sample.called = 0
def test_load_theme_cfg(self):
tracers.detach()
d = self.page
eq = self.assertEqual
idleConf.CurrentTheme = mock.Mock(return_value='IDLE Classic')
d.load_theme_cfg()
self.assertTrue(d.theme_source.get())
eq(d.builtin_name.get(), 'IDLE Classic')
eq(d.custom_name.get(), '- no custom themes -')
eq(d.custom_theme_on.state(), ('disabled',))
eq(d.set_theme_type.called, 1)
eq(d.paint_theme_sample.called, 1)
eq(d.set_highlight_target.called, 1)
idleConf.SetOption('highlight', 'test1', 'option', 'value')
idleConf.SetOption('highlight', 'test2', 'option2', 'value2')
d.load_theme_cfg()
eq(d.builtin_name.get(), 'IDLE Classic')
eq(d.custom_name.get(), 'test1')
eq(d.set_theme_type.called, 2)
eq(d.paint_theme_sample.called, 2)
eq(d.set_highlight_target.called, 2)
idleConf.CurrentTheme = mock.Mock(return_value='test2')
idleConf.SetOption('main', 'Theme', 'default', '0')
d.load_theme_cfg()
self.assertFalse(d.theme_source.get())
eq(d.builtin_name.get(), 'IDLE Classic')
eq(d.custom_name.get(), 'test2')
eq(d.set_theme_type.called, 3)
eq(d.paint_theme_sample.called, 3)
eq(d.set_highlight_target.called, 3)
del idleConf.CurrentTheme
tracers.attach()
def test_theme_source(self):
eq = self.assertEqual
d = self.page
d.var_changed_builtin_name = Func()
d.var_changed_custom_name = Func()
d.builtin_theme_on.invoke()
eq(mainpage, {'Theme': {'default': 'True'}})
eq(d.var_changed_builtin_name.called, 1)
eq(d.var_changed_custom_name.called, 0)
changes.clear()
d.custom_theme_on.state(('!disabled',))
d.custom_theme_on.invoke()
self.assertEqual(mainpage, {'Theme': {'default': 'False'}})
eq(d.var_changed_builtin_name.called, 1)
eq(d.var_changed_custom_name.called, 1)
del d.var_changed_builtin_name, d.var_changed_custom_name
def test_builtin_name(self):
eq = self.assertEqual
d = self.page
item_list = ['IDLE Classic', 'IDLE Dark', 'IDLE New']
idleConf.SetOption('main', 'Theme', 'name', 'spam')
d.builtinlist.SetMenu(item_list, 'IDLE Dark')
eq(mainpage, {'Theme': {'name': 'IDLE Classic',
'name2': 'IDLE Dark'}})
eq(d.theme_message['text'], 'New theme, see Help')
eq(d.paint_theme_sample.called, 1)
changes.clear()
idleConf.SetOption('main', 'Theme', 'name', 'IDLE New')
d.builtinlist.SetMenu(item_list, 'IDLE Dark')
eq(mainpage, {'Theme': {'name2': 'IDLE Dark'}})
eq(d.theme_message['text'], 'New theme, see Help')
eq(d.paint_theme_sample.called, 2)
changes.clear()
d.builtinlist.SetMenu(item_list, 'IDLE Classic')
eq(mainpage, {'Theme': {'name': 'IDLE Classic', 'name2': ''}})
eq(d.theme_message['text'], '')
eq(d.paint_theme_sample.called, 3)
def test_custom_name(self):
d = self.page
d.customlist.SetMenu([], '- no custom themes -')
self.assertNotIn('Theme', mainpage)
self.assertEqual(d.paint_theme_sample.called, 0)
# Custom name selected.
changes.clear()
d.customlist.SetMenu(['a', 'b', 'c'], 'c')
self.assertEqual(mainpage, {'Theme': {'name': 'c'}})
self.assertEqual(d.paint_theme_sample.called, 1)
def test_color(self):
d = self.page
d.on_new_color_set = Func()
# self.color is only set in get_color through colorchooser.
d.color.set('green')
self.assertEqual(d.on_new_color_set.called, 1)
del d.on_new_color_set
def test_highlight_target_list_mouse(self):
# Set highlight_target through targetlist.
eq = self.assertEqual
d = self.page
d.targetlist.SetMenu(['a', 'b', 'c'], 'c')
eq(d.highlight_target.get(), 'c')
eq(d.set_highlight_target.called, 1)
def test_highlight_target_text_mouse(self):
# Set highlight_target through clicking highlight_sample.
eq = self.assertEqual
d = self.page
elem = {}
count = 0
hs = d.highlight_sample
hs.focus_force()
hs.see(1.0)
hs.update_idletasks()
def tag_to_element(elem):
for element, tag in d.theme_elements.items():
elem[tag[0]] = element
def click_it(start):
x, y, dx, dy = hs.bbox(start)
x += dx // 2
y += dy // 2
hs.event_generate('<Enter>', x=0, y=0)
hs.event_generate('<Motion>', x=x, y=y)
hs.event_generate('<ButtonPress-1>', x=x, y=y)
hs.event_generate('<ButtonRelease-1>', x=x, y=y)
# Flip theme_elements to make the tag the key.
tag_to_element(elem)
# If highlight_sample has a tag that isn't in theme_elements, there
for tag in hs.tag_names():
for start_index in hs.tag_ranges(tag)[0::2]:
count += 1
click_it(start_index)
eq(d.highlight_target.get(), elem[tag])
eq(d.set_highlight_target.called, count)
def test_highlight_sample_double_click(self):
eq = self.assertEqual
d = self.page
hs = d.highlight_sample
hs.focus_force()
hs.see(1.0)
hs.update_idletasks()
hs.event_generate('<Enter>', x=0, y=0)
hs.event_generate('<Motion>', x=0, y=0)
for _ in range(2):
hs.event_generate('<ButtonPress-1>', x=0, y=0)
hs.event_generate('<ButtonRelease-1>', x=0, y=0)
eq(hs.tag_ranges('sel'), ())
def test_highlight_sample_b1_motion(self):
eq = self.assertEqual
d = self.page
hs = d.highlight_sample
hs.focus_force()
hs.see(1.0)
hs.update_idletasks()
x, y, dx, dy, offset = hs.dlineinfo('1.0')
hs.event_generate('<Leave>')
hs.event_generate('<Enter>')
hs.event_generate('<Motion>', x=x, y=y)
hs.event_generate('<ButtonPress-1>', x=x, y=y)
hs.event_generate('<B1-Motion>', x=dx, y=dy)
hs.event_generate('<ButtonRelease-1>', x=dx, y=dy)
eq(hs.tag_ranges('sel'), ())
def test_set_theme_type(self):
eq = self.assertEqual
d = self.page
del d.set_theme_type
d.theme_source.set(True)
d.set_theme_type()
eq(d.builtinlist['state'], NORMAL)
eq(d.customlist['state'], DISABLED)
eq(d.button_delete_custom.state(), ('disabled',))
d.theme_source.set(False)
d.set_theme_type()
eq(d.builtinlist['state'], DISABLED)
eq(d.custom_theme_on.state(), ('selected',))
eq(d.customlist['state'], NORMAL)
eq(d.button_delete_custom.state(), ())
d.set_theme_type = Func()
def test_get_color(self):
eq = self.assertEqual
d = self.page
orig_chooser = configdialog.colorchooser.askcolor
chooser = configdialog.colorchooser.askcolor = Func()
gntn = d.get_new_theme_name = Func()
d.highlight_target.set('Editor Breakpoint')
d.color.set('#ffffff')
chooser.result = (None, None)
d.button_set_color.invoke()
eq(d.color.get(), '#ffffff')
chooser.result = ('', d.style.lookup(d.frame_color_set['style'], 'background'))
d.button_set_color.invoke()
eq(d.color.get(), '#ffffff')
chooser.result = ((222.8671875, 0.0, 0.0), '#de0000')
d.color.set('#ffffff')
d.theme_source.set(True)
gntn.result = ''
d.button_set_color.invoke()
eq(gntn.called, 1)
eq(d.color.get(), '#ffffff')
gntn.result = 'My New Theme'
d.button_set_color.invoke()
eq(d.custom_name.get(), gntn.result)
eq(d.color.get(), '#de0000')
d.color.set('#ffffff')
d.theme_source.set(False)
d.button_set_color.invoke()
eq(d.color.get(), '#de0000')
del d.get_new_theme_name
configdialog.colorchooser.askcolor = orig_chooser
def test_on_new_color_set(self):
d = self.page
color = '#3f7cae'
d.custom_name.set('Python')
d.highlight_target.set('Selected Text')
d.fg_bg_toggle.set(True)
d.color.set(color)
self.assertEqual(d.style.lookup(d.frame_color_set['style'], 'background'), color)
self.assertEqual(d.highlight_sample.tag_cget('hilite', 'foreground'), color)
self.assertEqual(highpage,
{'Python': {'hilite-foreground': color}})
def test_get_new_theme_name(self):
orig_sectionname = configdialog.SectionName
sn = configdialog.SectionName = Func(return_self=True)
d = self.page
sn.result = 'New Theme'
self.assertEqual(d.get_new_theme_name(''), 'New Theme')
configdialog.SectionName = orig_sectionname
def test_save_as_new_theme(self):
d = self.page
gntn = d.get_new_theme_name = Func()
d.theme_source.set(True)
gntn.result = ''
d.button_save_custom.invoke()
self.assertNotIn(gntn.result, idleConf.userCfg['highlight'])
gntn.result = 'my new theme'
gntn.called = 0
self.assertNotIn(gntn.result, idleConf.userCfg['highlight'])
d.button_save_custom.invoke()
self.assertIn(gntn.result, idleConf.userCfg['highlight'])
del d.get_new_theme_name
def test_create_new_and_save_new(self):
eq = self.assertEqual
d = self.page
d.theme_source.set(True)
d.builtin_name.set('IDLE Classic')
first_new = 'my new custom theme'
second_new = 'my second custom theme'
self.assertNotIn(first_new, idleConf.userCfg)
d.create_new(first_new)
eq(idleConf.GetSectionList('user', 'highlight'), [first_new])
eq(idleConf.GetThemeDict('default', 'IDLE Classic'),
idleConf.GetThemeDict('user', first_new))
eq(d.custom_name.get(), first_new)
self.assertFalse(d.theme_source.get())
eq(d.set_theme_type.called, 1)
changes.add_option('highlight', first_new, 'hit-background', 'yellow')
self.assertNotIn(second_new, idleConf.userCfg)
d.create_new(second_new)
eq(idleConf.GetSectionList('user', 'highlight'), [first_new, second_new])
self.assertNotEqual(idleConf.GetThemeDict('user', first_new),
idleConf.GetThemeDict('user', second_new))
idleConf.SetOption('highlight', first_new, 'hit-background', 'yellow')
eq(idleConf.GetThemeDict('user', first_new),
idleConf.GetThemeDict('user', second_new))
def test_set_highlight_target(self):
eq = self.assertEqual
d = self.page
del d.set_highlight_target
d.highlight_target.set('Cursor')
eq(d.fg_on.state(), ('disabled', 'selected'))
eq(d.bg_on.state(), ('disabled',))
self.assertTrue(d.fg_bg_toggle)
eq(d.set_color_sample.called, 1)
d.highlight_target.set('Comment')
eq(d.fg_on.state(), ('selected',))
eq(d.bg_on.state(), ())
self.assertTrue(d.fg_bg_toggle)
eq(d.set_color_sample.called, 2)
d.set_highlight_target = Func()
def test_set_color_sample_binding(self):
d = self.page
scs = d.set_color_sample
d.fg_on.invoke()
self.assertEqual(scs.called, 1)
d.bg_on.invoke()
self.assertEqual(scs.called, 2)
def test_set_color_sample(self):
d = self.page
del d.set_color_sample
d.highlight_target.set('Selected Text')
d.fg_bg_toggle.set(True)
d.set_color_sample()
self.assertEqual(
d.style.lookup(d.frame_color_set['style'], 'background'),
d.highlight_sample.tag_cget('hilite', 'foreground'))
d.set_color_sample = Func()
def test_paint_theme_sample(self):
eq = self.assertEqual
page = self.page
del page.paint_theme_sample
hs_tag = page.highlight_sample.tag_cget
gh = idleConf.GetHighlight
page.theme_source.set(True)
page.builtin_name.set('IDLE Dark')
theme = 'IDLE Test'
page.create_new(theme)
page.set_color_sample.called = 0
page.paint_theme_sample()
new_console = {'foreground': 'blue',
'background': 'yellow',}
for key, value in new_console.items():
self.assertNotEqual(hs_tag('console', key), value)
eq(page.set_color_sample.called, 1)
for key, value in new_console.items():
changes.add_option('highlight', theme, 'console-'+key, value)
page.paint_theme_sample()
for key, value in new_console.items():
eq(hs_tag('console', key), value)
eq(page.set_color_sample.called, 2)
page.paint_theme_sample = Func()
def test_delete_custom(self):
    """Delete-custom-theme button: cancel keeps the theme, confirm removes it.

    Deleting the last custom theme disables the custom radio and resets
    the name to the '- no custom themes -' placeholder.
    """
    eq = self.assertEqual
    d = self.page
    d.button_delete_custom.state(('!disabled',))
    yesno = d.askyesno = Func()  # Stub the confirmation dialog.
    dialog.deactivate_current_config = Func()
    dialog.activate_config_changes = Func()
    theme_name = 'spam theme'
    idleConf.userCfg['highlight'].SetOption(theme_name, 'name', 'value')
    highpage[theme_name] = {'option': 'True'}
    theme_name2 = 'other theme'
    idleConf.userCfg['highlight'].SetOption(theme_name2, 'name', 'value')
    highpage[theme_name2] = {'option': 'False'}
    d.custom_theme_on.state(('!disabled',))
    d.custom_theme_on.invoke()
    d.custom_name.set(theme_name)
    # Cancel deletion: nothing is removed, no config refresh occurs.
    yesno.result = False
    d.button_delete_custom.invoke()
    eq(yesno.called, 1)
    eq(highpage[theme_name], {'option': 'True'})
    eq(idleConf.GetSectionList('user', 'highlight'), [theme_name, theme_name2])
    eq(dialog.deactivate_current_config.called, 0)
    eq(dialog.activate_config_changes.called, 0)
    eq(d.set_theme_type.called, 0)
    # Confirm deletion: first theme removed, second becomes current.
    yesno.result = True
    d.button_delete_custom.invoke()
    eq(yesno.called, 2)
    self.assertNotIn(theme_name, highpage)
    eq(idleConf.GetSectionList('user', 'highlight'), [theme_name2])
    eq(d.custom_theme_on.state(), ())
    eq(d.custom_name.get(), theme_name2)
    eq(dialog.deactivate_current_config.called, 1)
    eq(dialog.activate_config_changes.called, 1)
    eq(d.set_theme_type.called, 1)
    # Delete the last remaining theme: list empties, custom radio disabled.
    d.custom_name.set(theme_name2)
    yesno.result = True
    d.button_delete_custom.invoke()
    eq(yesno.called, 3)
    self.assertNotIn(theme_name, highpage)
    eq(idleConf.GetSectionList('user', 'highlight'), [])
    eq(d.custom_theme_on.state(), ('disabled',))
    eq(d.custom_name.get(), '- no custom themes -')
    eq(dialog.deactivate_current_config.called, 2)
    eq(dialog.activate_config_changes.called, 2)
    eq(d.set_theme_type.called, 2)
    del dialog.activate_config_changes, dialog.deactivate_current_config
    del d.askyesno
class KeysPageTest(unittest.TestCase):
    """Tests for the Keys page of the config dialog.

    set_keys_type and load_keys_list are masked with Func stubs at class
    setup so individual tests can count calls; tests that exercise the
    real methods temporarily unmask them with `del` and re-mask at the end.
    """

    @classmethod
    def setUpClass(cls):
        page = cls.page = dialog.keyspage
        dialog.note.select(page)
        page.set_keys_type = Func()
        page.load_keys_list = Func()

    @classmethod
    def tearDownClass(cls):
        page = cls.page
        del page.set_keys_type, page.load_keys_list

    def setUp(self):
        d = self.page
        # Start every test with no user keysets and no pending changes.
        for section in idleConf.GetSectionList('user', 'keys'):
            idleConf.userCfg['keys'].remove_section(section)
        changes.clear()
        d.set_keys_type.called = 0
        d.load_keys_list.called = 0

    def test_load_key_cfg(self):
        """load_key_cfg reflects current/custom keysets into the widgets."""
        tracers.detach()
        d = self.page
        eq = self.assertEqual
        # Use builtin keyset with no user keysets created.
        idleConf.CurrentKeys = mock.Mock(return_value='IDLE Classic OSX')
        d.load_key_cfg()
        self.assertTrue(d.keyset_source.get())
        eq(d.builtin_name.get(), 'IDLE Classic OSX')
        eq(d.custom_name.get(), '- no custom keys -')
        eq(d.custom_keyset_on.state(), ('disabled',))
        eq(d.set_keys_type.called, 1)
        eq(d.load_keys_list.called, 1)
        eq(d.load_keys_list.args, ('IDLE Classic OSX', ))
        # Builtin keyset with user keysets created.
        idleConf.SetOption('keys', 'test1', 'option', 'value')
        idleConf.SetOption('keys', 'test2', 'option2', 'value2')
        d.load_key_cfg()
        eq(d.builtin_name.get(), 'IDLE Classic OSX')
        eq(d.custom_name.get(), 'test1')
        eq(d.set_keys_type.called, 2)
        eq(d.load_keys_list.called, 2)
        eq(d.load_keys_list.args, ('IDLE Classic OSX', ))
        # Use custom keyset.
        idleConf.CurrentKeys = mock.Mock(return_value='test2')
        idleConf.default_keys = mock.Mock(return_value='IDLE Modern Unix')
        idleConf.SetOption('main', 'Keys', 'default', '0')
        d.load_key_cfg()
        self.assertFalse(d.keyset_source.get())
        eq(d.builtin_name.get(), 'IDLE Modern Unix')
        eq(d.custom_name.get(), 'test2')
        eq(d.set_keys_type.called, 3)
        eq(d.load_keys_list.called, 3)
        eq(d.load_keys_list.args, ('test2', ))
        del idleConf.CurrentKeys, idleConf.default_keys
        tracers.attach()

    def test_keyset_source(self):
        """Toggling builtin/custom radios records Keys 'default' in changes."""
        eq = self.assertEqual
        d = self.page
        d.var_changed_builtin_name = Func()
        d.var_changed_custom_name = Func()
        d.builtin_keyset_on.invoke()
        eq(mainpage, {'Keys': {'default': 'True'}})
        eq(d.var_changed_builtin_name.called, 1)
        eq(d.var_changed_custom_name.called, 0)
        changes.clear()
        d.custom_keyset_on.state(('!disabled',))
        d.custom_keyset_on.invoke()
        self.assertEqual(mainpage, {'Keys': {'default': 'False'}})
        eq(d.var_changed_builtin_name.called, 1)
        eq(d.var_changed_custom_name.called, 1)
        del d.var_changed_builtin_name, d.var_changed_custom_name

    def test_builtin_name(self):
        """Selecting a builtin keyset updates 'name'/'name2' changes correctly."""
        eq = self.assertEqual
        d = self.page
        idleConf.userCfg['main'].remove_section('Keys')
        item_list = ['IDLE Classic Windows', 'IDLE Classic OSX',
                     'IDLE Modern UNIX']
        # 'name2' is used for keysets added after the original builtins.
        d.builtinlist.SetMenu(item_list, 'IDLE Modern UNIX')
        eq(mainpage, {'Keys': {'name': 'IDLE Classic Windows',
                               'name2': 'IDLE Modern UNIX'}})
        eq(d.keys_message['text'], 'New key set, see Help')
        eq(d.load_keys_list.called, 1)
        eq(d.load_keys_list.args, ('IDLE Modern UNIX', ))
        changes.clear()
        idleConf.SetOption('main', 'Keys', 'name', 'IDLE Classic Unix')
        d.builtinlist.SetMenu(item_list, 'IDLE Modern UNIX')
        eq(mainpage, {'Keys': {'name2': 'IDLE Modern UNIX'}})
        eq(d.keys_message['text'], 'New key set, see Help')
        eq(d.load_keys_list.called, 2)
        eq(d.load_keys_list.args, ('IDLE Modern UNIX', ))
        changes.clear()
        # An original builtin clears 'name2'.
        d.builtinlist.SetMenu(item_list, 'IDLE Classic OSX')
        eq(mainpage, {'Keys': {'name': 'IDLE Classic OSX', 'name2': ''}})
        eq(d.keys_message['text'], '')
        eq(d.load_keys_list.called, 3)
        eq(d.load_keys_list.args, ('IDLE Classic OSX', ))

    def test_custom_name(self):
        """Selecting a real custom name records it; the placeholder does not."""
        d = self.page
        # Pseudo keyset (placeholder) is not an actual keyset.
        d.customlist.SetMenu([], '- no custom keys -')
        self.assertNotIn('Keys', mainpage)
        self.assertEqual(d.load_keys_list.called, 0)
        # Custom name selected.
        changes.clear()
        d.customlist.SetMenu(['a', 'b', 'c'], 'c')
        self.assertEqual(mainpage, {'Keys': {'name': 'c'}})
        self.assertEqual(d.load_keys_list.called, 1)

    def test_keybinding(self):
        """A new binding goes to keyspage for core events, extpage otherwise."""
        idleConf.SetOption('extensions', 'ZzDummy', 'enable', 'True')
        d = self.page
        d.custom_name.set('my custom keys')
        d.bindingslist.delete(0, 'end')
        d.bindingslist.insert(0, 'copy')
        d.bindingslist.insert(1, 'z-in')
        d.bindingslist.selection_set(0)
        d.bindingslist.selection_anchor(0)
        # Core binding - adds to keys.
        d.keybinding.set('<Key-F11>')
        self.assertEqual(keyspage,
                         {'my custom keys': {'copy': '<Key-F11>'}})
        # Not a core binding - adds to extensions.
        d.bindingslist.selection_set(1)
        d.bindingslist.selection_anchor(1)
        d.keybinding.set('<Key-F11>')
        self.assertEqual(extpage,
                         {'ZzDummy_cfgBindings': {'z-in': '<Key-F11>'}})

    def test_set_keys_type(self):
        """set_keys_type enables/disables widgets to match the keyset source."""
        eq = self.assertEqual
        d = self.page
        del d.set_keys_type  # Unmask the real method.
        # Builtin keyset selected.
        d.keyset_source.set(True)
        d.set_keys_type()
        eq(d.builtinlist['state'], NORMAL)
        eq(d.customlist['state'], DISABLED)
        eq(d.button_delete_custom_keys.state(), ('disabled',))
        # Custom keyset selected.
        d.keyset_source.set(False)
        d.set_keys_type()
        eq(d.builtinlist['state'], DISABLED)
        eq(d.custom_keyset_on.state(), ('selected',))
        eq(d.customlist['state'], NORMAL)
        eq(d.button_delete_custom_keys.state(), ())
        d.set_keys_type = Func()  # Re-mask for other tests.

    def test_get_new_keys(self):
        """Get New Keys button: dialog result may rebind, possibly via a new keyset."""
        eq = self.assertEqual
        d = self.page
        orig_getkeysdialog = configdialog.GetKeysDialog
        gkd = configdialog.GetKeysDialog = Func(return_self=True)
        gnkn = d.get_new_keys_name = Func()
        d.button_new_keys.state(('!disabled',))
        d.bindingslist.delete(0, 'end')
        d.bindingslist.insert(0, 'copy - <Control-Shift-Key-C>')
        d.bindingslist.selection_set(0)
        d.bindingslist.selection_anchor(0)
        d.keybinding.set('Key-a')
        d.keyset_source.set(True)  # Default keyset.
        # Default keyset; no change to binding.
        gkd.result = ''
        d.button_new_keys.invoke()
        eq(d.bindingslist.get('anchor'), 'copy - <Control-Shift-Key-C>')
        # Keybinding isn't changed when there isn't a change entered.
        eq(d.keybinding.get(), 'Key-a')
        # Default keyset; binding changed.
        gkd.result = '<Key-F11>'
        # No keyset name selected therefore binding not saved.
        gnkn.result = ''
        d.button_new_keys.invoke()
        eq(gnkn.called, 1)
        eq(d.bindingslist.get('anchor'), 'copy - <Control-Shift-Key-C>')
        # Keyset name selected.
        gnkn.result = 'My New Key Set'
        d.button_new_keys.invoke()
        eq(d.custom_name.get(), gnkn.result)
        eq(d.bindingslist.get('anchor'), 'copy - <Key-F11>')
        eq(d.keybinding.get(), '<Key-F11>')
        # User keyset; binding changed.
        d.keyset_source.set(False)  # Custom keyset.
        gnkn.called = 0
        gkd.result = '<Key-p>'
        d.button_new_keys.invoke()
        eq(gnkn.called, 0)
        eq(d.bindingslist.get('anchor'), 'copy - <Key-p>')
        eq(d.keybinding.get(), '<Key-p>')
        del d.get_new_keys_name
        configdialog.GetKeysDialog = orig_getkeysdialog

    def test_get_new_keys_name(self):
        """get_new_keys_name returns the name entered in the SectionName dialog."""
        orig_sectionname = configdialog.SectionName
        sn = configdialog.SectionName = Func(return_self=True)
        d = self.page
        sn.result = 'New Keys'
        self.assertEqual(d.get_new_keys_name(''), 'New Keys')
        configdialog.SectionName = orig_sectionname

    def test_save_as_new_key_set(self):
        """Save button creates a user keyset only when a name is entered."""
        d = self.page
        gnkn = d.get_new_keys_name = Func()
        d.keyset_source.set(True)
        # No name entered.
        gnkn.result = ''
        d.button_save_custom_keys.invoke()
        # Name entered.
        gnkn.result = 'my new key set'
        gnkn.called = 0
        self.assertNotIn(gnkn.result, idleConf.userCfg['keys'])
        d.button_save_custom_keys.invoke()
        self.assertIn(gnkn.result, idleConf.userCfg['keys'])
        del d.get_new_keys_name

    def test_on_bindingslist_select(self):
        """Clicking a binding selects it and enables the Get New Keys button."""
        d = self.page
        b = d.bindingslist
        b.delete(0, 'end')
        b.insert(0, 'copy')
        b.insert(1, 'find')
        b.activate(0)
        b.focus_force()
        b.see(1)
        b.update()
        # Simulate a real mouse click in the middle of item 1.
        x, y, dx, dy = b.bbox(1)
        x += dx // 2
        y += dy // 2
        b.event_generate('<Enter>', x=0, y=0)
        b.event_generate('<Motion>', x=x, y=y)
        b.event_generate('<Button-1>', x=x, y=y)
        b.event_generate('<ButtonRelease-1>', x=x, y=y)
        self.assertEqual(b.get('anchor'), 'find')
        self.assertEqual(d.button_new_keys.state(), ())

    def test_create_new_key_set_and_save_new_key_set(self):
        """create_new_key_set copies the active keyset plus any queued changes."""
        eq = self.assertEqual
        d = self.page
        # Use default as previously active keyset.
        d.keyset_source.set(True)
        d.builtin_name.set('IDLE Classic Windows')
        first_new = 'my new custom key set'
        second_new = 'my second custom keyset'
        # No changes, so keysets are an exact copy.
        self.assertNotIn(first_new, idleConf.userCfg)
        d.create_new_key_set(first_new)
        eq(idleConf.GetSectionList('user', 'keys'), [first_new])
        eq(idleConf.GetKeySet('IDLE Classic Windows'),
           idleConf.GetKeySet(first_new))
        eq(d.custom_name.get(), first_new)
        self.assertFalse(d.keyset_source.get())  # Use custom set.
        eq(d.set_keys_type.called, 1)
        # Test that changed keybindings are in new keyset.
        changes.add_option('keys', first_new, 'copy', '<Key-F11>')
        self.assertNotIn(second_new, idleConf.userCfg)
        d.create_new_key_set(second_new)
        eq(idleConf.GetSectionList('user', 'keys'), [first_new, second_new])
        self.assertNotEqual(idleConf.GetKeySet(first_new),
                            idleConf.GetKeySet(second_new))
        # Check that difference in keysets was in option `copy` from `changes`.
        idleConf.SetOption('keys', first_new, 'copy', '<Key-F11>')
        eq(idleConf.GetKeySet(first_new), idleConf.GetKeySet(second_new))

    def test_load_keys_list(self):
        """load_keys_list renders bindings (with queued overrides) and keeps selection."""
        eq = self.assertEqual
        d = self.page
        gks = idleConf.GetKeySet = Func()  # Stub the keyset source.
        del d.load_keys_list  # Unmask the real method.
        b = d.bindingslist
        b.delete(0, 'end')
        b.insert(0, '<<find>>')
        b.insert(1, '<<help>>')
        gks.result = {'<<copy>>': ['<Control-Key-c>', '<Control-Key-C>'],
                      '<<force-open-completions>>': ['<Control-Key-space>'],
                      '<<spam>>': ['<Key-F11>']}
        changes.add_option('keys', 'my keys', 'spam', '<Shift-Key-a>')
        expected = ('copy - <Control-Key-c> <Control-Key-C>',
                    'force-open-completions - <Control-Key-space>',
                    'spam - <Shift-Key-a>')
        # No current selection.
        d.load_keys_list('my keys')
        eq(b.get(0, 'end'), expected)
        eq(b.get('anchor'), '')
        eq(b.curselection(), ())
        # Check selection.
        b.selection_set(1)
        b.selection_anchor(1)
        d.load_keys_list('my keys')
        eq(b.get(0, 'end'), expected)
        eq(b.get('anchor'), 'force-open-completions - <Control-Key-space>')
        eq(b.curselection(), (1, ))
        # Change selection.
        b.selection_set(2)
        b.selection_anchor(2)
        d.load_keys_list('my keys')
        eq(b.get(0, 'end'), expected)
        eq(b.get('anchor'), 'spam - <Shift-Key-a>')
        eq(b.curselection(), (2, ))
        d.load_keys_list = Func()  # Re-mask for other tests.
        del idleConf.GetKeySet

    def test_delete_custom_keys(self):
        """Delete-custom-keys button: cancel keeps the keyset, confirm removes it."""
        eq = self.assertEqual
        d = self.page
        d.button_delete_custom_keys.state(('!disabled',))
        yesno = d.askyesno = Func()  # Stub the confirmation dialog.
        dialog.deactivate_current_config = Func()
        dialog.activate_config_changes = Func()
        keyset_name = 'spam key set'
        idleConf.userCfg['keys'].SetOption(keyset_name, 'name', 'value')
        keyspage[keyset_name] = {'option': 'True'}
        keyset_name2 = 'other key set'
        idleConf.userCfg['keys'].SetOption(keyset_name2, 'name', 'value')
        keyspage[keyset_name2] = {'option': 'False'}
        # Force custom keyset.
        d.custom_keyset_on.state(('!disabled',))
        d.custom_keyset_on.invoke()
        d.custom_name.set(keyset_name)
        # Cancel deletion.
        yesno.result = False
        d.button_delete_custom_keys.invoke()
        eq(yesno.called, 1)
        eq(keyspage[keyset_name], {'option': 'True'})
        eq(idleConf.GetSectionList('user', 'keys'), [keyset_name, keyset_name2])
        eq(dialog.deactivate_current_config.called, 0)
        eq(dialog.activate_config_changes.called, 0)
        eq(d.set_keys_type.called, 0)
        # Confirm deletion.
        yesno.result = True
        d.button_delete_custom_keys.invoke()
        eq(yesno.called, 2)
        self.assertNotIn(keyset_name, keyspage)
        eq(idleConf.GetSectionList('user', 'keys'), [keyset_name2])
        eq(d.custom_keyset_on.state(), ())
        eq(d.custom_name.get(), keyset_name2)
        eq(dialog.deactivate_current_config.called, 1)
        eq(dialog.activate_config_changes.called, 1)
        eq(d.set_keys_type.called, 1)
        # Confirm deletion of second keyset - empties list.
        d.custom_name.set(keyset_name2)
        yesno.result = True
        d.button_delete_custom_keys.invoke()
        eq(yesno.called, 3)
        self.assertNotIn(keyset_name, keyspage)
        eq(idleConf.GetSectionList('user', 'keys'), [])
        eq(d.custom_keyset_on.state(), ('disabled',))
        eq(d.custom_name.get(), '- no custom keys -')
        eq(dialog.deactivate_current_config.called, 2)
        eq(dialog.activate_config_changes.called, 2)
        eq(d.set_keys_type.called, 2)
        del dialog.activate_config_changes, dialog.deactivate_current_config
        del d.askyesno
class WinPageTest(unittest.TestCase):
    """Tests for the Windows page: each widget action queues the right change."""

    @classmethod
    def setUpClass(cls):
        page = cls.page = dialog.winpage
        dialog.note.select(page)
        page.update()

    def setUp(self):
        changes.clear()

    def test_load_windows_cfg(self):
        # Set to wrong values, load, check right values.
        eq = self.assertEqual
        d = self.page
        d.startup_edit.set(1)
        d.win_width.set(1)
        d.win_height.set(1)
        d.load_windows_cfg()
        eq(d.startup_edit.get(), 0)
        eq(d.win_width.get(), '80')
        eq(d.win_height.get(), '40')

    def test_startup(self):
        """Startup radios queue General/editor-on-startup."""
        d = self.page
        d.startup_editor_on.invoke()
        self.assertEqual(mainpage,
                         {'General': {'editor-on-startup': '1'}})
        changes.clear()
        d.startup_shell_on.invoke()
        self.assertEqual(mainpage,
                         {'General': {'editor-on-startup': '0'}})

    def test_editor_size(self):
        """Width/height entries queue EditorWindow size options."""
        d = self.page
        d.win_height_int.delete(0, 'end')
        d.win_height_int.insert(0, '11')
        self.assertEqual(mainpage, {'EditorWindow': {'height': '11'}})
        changes.clear()
        d.win_width_int.delete(0, 'end')
        d.win_width_int.insert(0, '11')
        self.assertEqual(mainpage, {'EditorWindow': {'width': '11'}})

    def test_cursor_blink(self):
        """Toggling the checkbutton queues EditorWindow/cursor-blink."""
        self.page.cursor_blink_bool.invoke()
        self.assertEqual(mainpage, {'EditorWindow': {'cursor-blink': 'False'}})

    def test_autocomplete_wait(self):
        """Popup-wait entry queues AutoComplete/popupwait on the extensions page."""
        self.page.auto_wait_int.delete(0, 'end')
        self.page.auto_wait_int.insert(0, '11')
        self.assertEqual(extpage, {'AutoComplete': {'popupwait': '11'}})

    def test_parenmatch(self):
        """ParenMatch style/delay/bell widgets queue the matching options."""
        d = self.page
        eq = self.assertEqual
        d.paren_style_type['menu'].invoke(0)
        eq(extpage, {'ParenMatch': {'style': 'opener'}})
        changes.clear()
        d.paren_flash_time.delete(0, 'end')
        d.paren_flash_time.insert(0, '11')
        eq(extpage, {'ParenMatch': {'flash-delay': '11'}})
        changes.clear()
        d.bell_on.invoke()
        eq(extpage, {'ParenMatch': {'bell': 'False'}})

    def test_paragraph(self):
        """Format-width entry queues FormatParagraph/max-width."""
        self.page.format_width_int.delete(0, 'end')
        self.page.format_width_int.insert(0, '11')
        self.assertEqual(extpage, {'FormatParagraph': {'max-width': '11'}})
class GenPageTest(unittest.TestCase):
    """Tests for the Shell/Ed page (autosave and code-context options)."""

    @classmethod
    def setUpClass(cls):
        page = cls.page = dialog.shedpage
        dialog.note.select(page)
        page.update()

    def setUp(self):
        changes.clear()

    def test_load_shelled_cfg(self):
        # Set to wrong values, load, check right values.
        eq = self.assertEqual
        d = self.page
        d.autosave.set(1)
        d.load_shelled_cfg()
        eq(d.autosave.get(), 0)

    def test_autosave(self):
        """Autosave radios queue General/autosave as '1'/'0'."""
        d = self.page
        d.save_auto_on.invoke()
        self.assertEqual(mainpage, {'General': {'autosave': '1'}})
        d.save_ask_on.invoke()
        self.assertEqual(mainpage, {'General': {'autosave': '0'}})

    def test_context(self):
        """Context-lines entry queues CodeContext/maxlines."""
        self.page.context_int.delete(0, 'end')
        self.page.context_int.insert(0, '1')
        self.assertEqual(extpage, {'CodeContext': {'maxlines': '1'}})
#unittest.skip("Nothing here yet TODO")
class ExtPageTest(unittest.TestCase):
    """Placeholder for Extensions page tests; only selects the page for now."""

    @classmethod
    def setUpClass(cls):
        page = dialog.extpage
        dialog.note.select(page)
class HelpSourceTest(unittest.TestCase):
    """Tests for the additional-help-sources frame on the Extensions page.

    set_add_delete_state and update_help_changes are masked with Func stubs
    (also aliased as frame.set / frame.upc) so tests can count calls.
    """

    @classmethod
    def setUpClass(cls):
        page = dialog.extpage
        dialog.note.select(page)
        frame = cls.frame = page.frame_help
        frame.set = frame.set_add_delete_state = Func()
        frame.upc = frame.update_help_changes = Func()
        frame.update()

    @classmethod
    def tearDownClass(cls):
        frame = cls.frame
        del frame.set, frame.set_add_delete_state
        del frame.upc, frame.update_help_changes
        frame.helplist.delete(0, 'end')
        frame.user_helplist.clear()

    def setUp(self):
        changes.clear()

    def test_load_helplist(self):
        """load_helplist replaces stale widget/list contents from the config."""
        eq = self.assertEqual
        fr = self.frame
        fr.helplist.insert('end', 'bad')
        fr.user_helplist = ['bad', 'worse']
        idleConf.SetOption('main', 'HelpFiles', '1', 'name;file')
        fr.load_helplist()
        eq(fr.helplist.get(0, 'end'), ('name',))
        eq(fr.user_helplist, [('name', 'file', '1')])

    def test_source_selected(self):
        """Clicking a source updates button state but not pending changes."""
        fr = self.frame
        fr.set = fr.set_add_delete_state
        fr.upc = fr.update_help_changes
        helplist = fr.helplist
        dex = 'end'
        helplist.insert(dex, 'source')
        helplist.activate(dex)
        helplist.focus_force()
        helplist.see(dex)
        helplist.update()
        # Simulate a real mouse click in the middle of the item.
        x, y, dx, dy = helplist.bbox(dex)
        x += dx // 2
        y += dy // 2
        fr.set.called = fr.upc.called = 0
        helplist.event_generate('<Enter>', x=0, y=0)
        helplist.event_generate('<Motion>', x=x, y=y)
        helplist.event_generate('<Button-1>', x=x, y=y)
        helplist.event_generate('<ButtonRelease-1>', x=x, y=y)
        self.assertEqual(helplist.get('anchor'), 'source')
        self.assertTrue(fr.set.called)
        self.assertFalse(fr.upc.called)

    def test_set_add_delete_state(self):
        # Call with 0 items, 1 unselected item, 1 selected item.
        eq = self.assertEqual
        fr = self.frame
        del fr.set_add_delete_state  # Unmask method.
        sad = fr.set_add_delete_state
        h = fr.helplist
        h.delete(0, 'end')
        sad()
        eq(fr.button_helplist_edit.state(), ('disabled',))
        eq(fr.button_helplist_remove.state(), ('disabled',))
        h.insert(0, 'source')
        sad()
        eq(fr.button_helplist_edit.state(), ('disabled',))
        eq(fr.button_helplist_remove.state(), ('disabled',))
        h.selection_set(0)
        sad()
        eq(fr.button_helplist_edit.state(), ())
        eq(fr.button_helplist_remove.state(), ())
        fr.set_add_delete_state = Func()  # Mask method.

    def test_helplist_item_add(self):
        # Call without and twice with HelpSource result.
        # Double call enables check on order.
        eq = self.assertEqual
        orig_helpsource = configdialog.HelpSource
        hs = configdialog.HelpSource = Func(return_self=True)
        fr = self.frame
        fr.helplist.delete(0, 'end')
        fr.user_helplist.clear()
        fr.set.called = fr.upc.called = 0
        hs.result = ''
        fr.helplist_item_add()
        self.assertTrue(list(fr.helplist.get(0, 'end')) ==
                        fr.user_helplist == [])
        self.assertFalse(fr.upc.called)
        hs.result = ('name1', 'file1')
        fr.helplist_item_add()
        hs.result = ('name2', 'file2')
        fr.helplist_item_add()
        eq(fr.helplist.get(0, 'end'), ('name1', 'name2'))
        eq(fr.user_helplist, [('name1', 'file1'), ('name2', 'file2')])
        eq(fr.upc.called, 2)
        self.assertFalse(fr.set.called)
        configdialog.HelpSource = orig_helpsource

    def test_helplist_item_edit(self):
        # Call without and with HelpSource change.
        eq = self.assertEqual
        orig_helpsource = configdialog.HelpSource
        hs = configdialog.HelpSource = Func(return_self=True)
        fr = self.frame
        fr.helplist.delete(0, 'end')
        fr.helplist.insert(0, 'name1')
        fr.helplist.selection_set(0)
        fr.helplist.selection_anchor(0)
        fr.user_helplist.clear()
        fr.user_helplist.append(('name1', 'file1'))
        fr.set.called = fr.upc.called = 0
        # Cancel and identical edits leave everything untouched.
        hs.result = ''
        fr.helplist_item_edit()
        hs.result = ('name1', 'file1')
        fr.helplist_item_edit()
        eq(fr.helplist.get(0, 'end'), ('name1',))
        eq(fr.user_helplist, [('name1', 'file1')])
        self.assertFalse(fr.upc.called)
        # A real change replaces the entry and triggers updates.
        hs.result = ('name2', 'file2')
        fr.helplist_item_edit()
        eq(fr.helplist.get(0, 'end'), ('name2',))
        eq(fr.user_helplist, [('name2', 'file2')])
        self.assertTrue(fr.upc.called == fr.set.called == 1)
        configdialog.HelpSource = orig_helpsource

    def test_helplist_item_remove(self):
        """Removing the selected source empties both widget and list."""
        eq = self.assertEqual
        fr = self.frame
        fr.helplist.delete(0, 'end')
        fr.helplist.insert(0, 'name1')
        fr.helplist.selection_set(0)
        fr.helplist.selection_anchor(0)
        fr.user_helplist.clear()
        fr.user_helplist.append(('name1', 'file1'))
        fr.set.called = fr.upc.called = 0
        fr.helplist_item_remove()
        eq(fr.helplist.get(0, 'end'), ())
        eq(fr.user_helplist, [])
        self.assertTrue(fr.upc.called == fr.set.called == 1)

    def test_update_help_changes(self):
        """update_help_changes writes numbered 'name;file' entries to changes."""
        fr = self.frame
        del fr.update_help_changes  # Unmask the real method.
        fr.user_helplist.clear()
        fr.user_helplist.append(('name1', 'file1'))
        fr.user_helplist.append(('name2', 'file2'))
        fr.update_help_changes()
        self.assertEqual(mainpage['HelpFiles'],
                         {'1': 'name1;file1', '2': 'name2;file2'})
        fr.update_help_changes = Func()  # Re-mask for other tests.
class VarTraceTest(unittest.TestCase):
    """Tests for configdialog.VarTrace, the tkinter-variable trace manager."""

    @classmethod
    def setUpClass(cls):
        cls.tracers = configdialog.VarTrace()
        cls.iv = IntVar(root)
        cls.bv = BooleanVar(root)

    @classmethod
    def tearDownClass(cls):
        del cls.tracers, cls.iv, cls.bv

    def setUp(self):
        self.tracers.clear()
        self.called = 0

    def var_changed_increment(self, *params):
        # Callback fixture: add a prime so call counts are distinguishable.
        self.called += 13

    def var_changed_boolean(self, *params):
        # Callback fixture with no side effect.
        pass

    def test_init(self):
        tr = self.tracers
        tr.__init__()
        self.assertEqual(tr.untraced, [])
        self.assertEqual(tr.traced, [])

    def test_clear(self):
        tr = self.tracers
        tr.untraced.append(0)
        tr.traced.append(1)
        tr.clear()
        self.assertEqual(tr.untraced, [])
        self.assertEqual(tr.traced, [])

    def test_add(self):
        """add() returns the variable and records it as untraced."""
        tr = self.tracers
        func = Func()
        cb = tr.make_callback = mock.Mock(return_value=func)
        iv = tr.add(self.iv, self.var_changed_increment)
        self.assertIs(iv, self.iv)
        bv = tr.add(self.bv, self.var_changed_boolean)
        self.assertIs(bv, self.bv)
        # A tuple argument is turned into a callback via make_callback.
        sv = StringVar(root)
        sv2 = tr.add(sv, ('main', 'section', 'option'))
        self.assertIs(sv2, sv)
        cb.assert_called_once()
        cb.assert_called_with(sv, ('main', 'section', 'option'))
        expected = [(iv, self.var_changed_increment),
                    (bv, self.var_changed_boolean),
                    (sv, func)]
        self.assertEqual(tr.traced, [])
        self.assertEqual(tr.untraced, expected)
        del tr.make_callback

    def test_make_callback(self):
        """Generated callback writes the variable's value into changes."""
        cb = self.tracers.make_callback(self.iv, ('main', 'section', 'option'))
        self.assertTrue(callable(cb))
        self.iv.set(42)
        # Not attached, so set didn't invoke the callback.
        self.assertNotIn('section', changes['main'])
        cb()
        self.assertIn('section', changes['main'])
        self.assertEqual(changes['main']['section']['option'], '42')
        changes.clear()

    def test_attach_detach(self):
        """attach/detach move pairs between lists and enable/disable tracing."""
        tr = self.tracers
        iv = tr.add(self.iv, self.var_changed_increment)
        bv = tr.add(self.bv, self.var_changed_boolean)
        expected = [(iv, self.var_changed_increment),
                    (bv, self.var_changed_boolean)]
        # Attach: setting the variable fires the callback exactly once.
        tr.attach()
        self.assertEqual(tr.untraced, [])
        self.assertCountEqual(tr.traced, expected)
        iv.set(1)
        self.assertEqual(iv.get(), 1)
        self.assertEqual(self.called, 13)
        # Second attach is a no-op (no double tracing).
        self.called = 0
        tr.attach()
        iv.set(1)
        self.assertEqual(self.called, 13)
        # Detach: callbacks stop firing.
        self.called = 0
        tr.detach()
        self.assertEqual(tr.traced, [])
        self.assertCountEqual(tr.untraced, expected)
        iv.set(1)
        self.assertEqual(self.called, 0)
# Run all tests in this module when executed directly.
if __name__ == '__main__':
    unittest.main(verbosity=2)
| true | true |
f71d1c9e00baedf4bf218ec2f5eaa682d4f57d68 | 1,911 | py | Python | python_exercises/question_directory/boolean_review.py | nchristie/slide-python-intro | dd52781b5d25435f97aa83cfff58c175fa7fdd1c | [
"MIT"
] | 1 | 2018-06-07T12:40:37.000Z | 2018-06-07T12:40:37.000Z | python_exercises/question_directory/boolean_review.py | nchristie/slide-python-intro | dd52781b5d25435f97aa83cfff58c175fa7fdd1c | [
"MIT"
] | 3 | 2018-06-07T14:39:19.000Z | 2019-01-15T16:35:23.000Z | python_exercises/question_directory/boolean_review.py | nchristie/slide-python-intro | dd52781b5d25435f97aa83cfff58c175fa7fdd1c | [
"MIT"
] | 9 | 2018-05-30T17:12:27.000Z | 2021-07-01T03:22:48.000Z | # TASKS is a list of lists. Each sublist follows this structure:
# [
# <string of background information to print>,
# <string of question text>,
# <list of answer keywords>,
# <dictionary of prerequisite conditions>
# ]
"""
empty object for copy-paste:
[
"", # background info
"", # question
[], # list of required keywords
{}, # dict of variables to set up before question
],
"""
BLURB = ""
TASKS = [
[
"", # background info
"Write a statement to check if two numbers are equal to each other?\n\n", # question
["=="], # list of required keywords
{}, # dict of variables to set up before question
],
[
"\nwifi_available = False\nmobile_data_available = False", # background info
"What would the output of this statement be?\nwifi_available and mobile_data_available\n", # question
["False"], # list of required keywords
{}, # dict of variables to set up before question
],
[
"\nwifi_available = False\nmobile_data_available = True", # background info
"What would the output of this statement be?\nwifi_available or mobile_data_available\n\n", # question
["True"], # list of required keywords
{}, # dict of variables to set up before question
],
[
"\nmobile_data_available = True", # background info
"What would the output of this statement be?\nnot mobile_data_available\n\n", # question
["False"], # list of required keywords
{}, # dict of variables to set up before question
],
[
"\nwifi_available = False\nmobile_data_available = True", # background info
"What would the output of this be\n\nif wifi_available or mobile_data_available:\n print(True)\n", # question
["True"], # list of required keywords
{}, # dict of variables to set up before question
],
]
| 36.75 | 122 | 0.628467 |
BLURB = ""
TASKS = [
[
"",
"Write a statement to check if two numbers are equal to each other?\n\n",
["=="],
{},
],
[
"\nwifi_available = False\nmobile_data_available = False",
"What would the output of this statement be?\nwifi_available and mobile_data_available\n",
["False"],
{},
],
[
"\nwifi_available = False\nmobile_data_available = True",
"What would the output of this statement be?\nwifi_available or mobile_data_available\n\n",
["True"],
{},
],
[
"\nmobile_data_available = True",
"What would the output of this statement be?\nnot mobile_data_available\n\n",
["False"],
{},
],
[
"\nwifi_available = False\nmobile_data_available = True",
"What would the output of this be\n\nif wifi_available or mobile_data_available:\n print(True)\n",
["True"],
{},
],
]
| true | true |
f71d1cfa1a83dbc18a3f108e583046a55bb6b7ca | 3,066 | py | Python | modules/fsl_semi_query.py | haoxingchen/SSFormers | 1cfa5c01428b8aac43219e7926989f8e9a4d817b | [
"MIT"
] | 17 | 2021-09-28T04:25:36.000Z | 2022-03-28T01:48:11.000Z | modules/fsl_semi_query.py | liyaohui347/SSFormers | 3c2ea14db6a453d3345e03a790dd452af5fde8d8 | [
"MIT"
] | 2 | 2022-02-26T11:25:16.000Z | 2022-03-28T13:28:41.000Z | modules/fsl_semi_query.py | liyaohui347/SSFormers | 3c2ea14db6a453d3345e03a790dd452af5fde8d8 | [
"MIT"
] | 6 | 2021-09-28T13:44:57.000Z | 2022-03-14T05:00:19.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
from .encoder import make_encoder
from .semi_query import make_query
class FSLSemiQuery(nn.Module):
    """Semi-supervised few-shot model: a shared encoder plus a query head.

    The config string cfg.model.forward_encoding selects how episodes are
    encoded: "FCN" keeps full feature maps; "Grid-..." pools per-grid-cell
    features (an optional '-a,b,...' suffix lists pyramid pooling sizes).
    """

    def __init__(self, cfg):
        super().__init__()
        self.encoder = make_encoder(cfg)
        self.query = make_query(self.encoder.out_channels, cfg)
        self.forward_encoding = cfg.model.forward_encoding
        self.pyramid_list = self._parse_encoding_params()

    def _parse_encoding_params(self):
        """Parse pyramid sizes from the suffix after '-' (e.g. 'Grid-2,4' -> [2, 4])."""
        idx = self.forward_encoding.find('-')
        if idx < 0:
            return []
        blocks = self.forward_encoding[idx + 1:].split(',')
        blocks = [int(s) for s in blocks]
        return blocks

    def _pyramid_encoding(self, x):
        """Spatial-pyramid pool a (b, n, c, h, w) batch to (b, n, c, 1, sum(s*s))."""
        b, n, c, h, w = x.shape
        x = x.view(-1, c, h, w)
        feature_list = []
        for size_ in self.pyramid_list:
            feature_list.append(F.adaptive_avg_pool2d(x, size_).view(b, n, c, 1, -1))
        if not feature_list:
            # No pyramid sizes configured: fall back to the raw spatial map.
            out = x.view(b, n, c, 1, -1)
        else:
            out = torch.cat(feature_list, dim=-1)
        return out

    def forward_Grid(self, support_x, support_y, query_x, query_y, unlabeled_x):
        """Grid encoding: each image arrives as a stack of RGB grid cells.

        support_x is (b, s, grids*3, h, w); cells are split into 3-channel
        images, encoded, and globally pooled to one vector per cell.
        NOTE(review): unlabeled_x is reshaped with the query's q/grids_q,
        so it is assumed to match query_x's per-episode count and grid —
        confirm against the data loader.
        """
        b, s, grids_sc, h, w = support_x.shape
        grids_s = grids_sc // 3  # 3 channels per grid cell.
        _, q, grids_qc = query_x.shape[:3]
        grids_q = grids_qc // 3
        support_xf = F.adaptive_avg_pool2d(self.encoder(support_x.view(-1, 3, h, w)), 1)
        support_xf = support_xf.view(b, s, grids_s, -1).permute(0, 1, 3, 2).unsqueeze(-1)
        query_xf = F.adaptive_avg_pool2d(self.encoder(query_x.view(-1, 3, h, w)), 1)
        query_xf = query_xf.view(b, q, grids_q, -1).permute(0, 1, 3, 2).unsqueeze(-1)
        unlabeled_xf = F.adaptive_avg_pool2d(self.encoder(unlabeled_x.view(-1, 3, h, w)), 1)
        unlabeled_xf = unlabeled_xf.view(b, q, grids_q, -1).permute(0, 1, 3, 2).unsqueeze(-1)
        query = self.query(support_xf, support_y, query_xf, query_y, unlabeled_xf)
        return query

    def forward_FCN(self, support_x, support_y, query_x, query_y, unlabeled_x):
        """FCN encoding: pass full feature maps to the query head.

        NOTE(review): unlabeled_xf is left in its flattened (b*u, fc, fh, fw)
        shape while support/query are reshaped — verify the query head
        expects that asymmetry.
        """
        b, s, c, h, w = support_x.shape
        q = query_x.shape[1]
        support_xf = self.encoder(support_x.view(-1, c, h, w))
        query_xf = self.encoder(query_x.view(-1, c, h, w))
        unlabeled_xf = self.encoder(unlabeled_x.view(-1, c, h, w))
        fc, fh, fw = support_xf.shape[-3:]
        support_xf = support_xf.view(b, s, fc, fh, fw)
        query_xf = query_xf.view(b, q, fc, fh, fw)
        query = self.query(support_xf, support_y, query_xf, query_y, unlabeled_xf)
        return query

    def forward(self, support_x, support_y, query_x, query_y, unlabeled_x):
        """Dispatch to the encoding variant named in the config."""
        if self.forward_encoding == "FCN":
            query = self.forward_FCN(support_x, support_y, query_x, query_y, unlabeled_x)
        elif self.forward_encoding.startswith("Grid"):
            query = self.forward_Grid(support_x, support_y, query_x, query_y, unlabeled_x)
        else:
            raise NotImplementedError
        return query
def make_semi_fsl(cfg):
    """Factory: build a semi-supervised few-shot model from config *cfg*."""
    model = FSLSemiQuery(cfg)
    return model
| 36.939759 | 93 | 0.62394 | import torch
import torch.nn as nn
import torch.nn.functional as F
from .encoder import make_encoder
from .semi_query import make_query
class FSLSemiQuery(nn.Module):
    """Semi-supervised few-shot model: a shared encoder plus a query head.

    cfg.model.forward_encoding chooses the episode encoding: "FCN" keeps
    full feature maps, "Grid-..." pools per-grid-cell features.
    """

    def __init__(self, cfg):
        super().__init__()
        self.encoder = make_encoder(cfg)
        self.query = make_query(self.encoder.out_channels, cfg)
        self.forward_encoding = cfg.model.forward_encoding
        self.pyramid_list = self._parse_encoding_params()

    def _parse_encoding_params(self):
        # Sizes after '-' (comma-separated ints), e.g. 'Grid-2,4' -> [2, 4].
        idx = self.forward_encoding.find('-')
        if idx < 0:
            return []
        blocks = self.forward_encoding[idx + 1:].split(',')
        blocks = [int(s) for s in blocks]
        return blocks

    def _pyramid_encoding(self, x):
        # Spatial pyramid pooling of a (b, n, c, h, w) batch; output is
        # (b, n, c, 1, sum of s*s over pyramid sizes).
        b, n, c, h, w = x.shape
        x = x.view(-1, c, h, w)
        feature_list = []
        for size_ in self.pyramid_list:
            feature_list.append(F.adaptive_avg_pool2d(x, size_).view(b, n, c, 1, -1))
        if not feature_list:
            out = x.view(b, n, c, 1, -1)  # No pyramid configured: raw map.
        else:
            out = torch.cat(feature_list, dim=-1)
        return out

    def forward_Grid(self, support_x, support_y, query_x, query_y, unlabeled_x):
        # Each image is a stack of 3-channel grid cells; encode and pool
        # each cell to a single vector.
        # NOTE(review): unlabeled_x is reshaped using the query's q/grids_q —
        # assumes matching per-episode count/grid; confirm with the loader.
        b, s, grids_sc, h, w = support_x.shape
        grids_s = grids_sc // 3
        _, q, grids_qc = query_x.shape[:3]
        grids_q = grids_qc // 3
        support_xf = F.adaptive_avg_pool2d(self.encoder(support_x.view(-1, 3, h, w)), 1)
        support_xf = support_xf.view(b, s, grids_s, -1).permute(0, 1, 3, 2).unsqueeze(-1)
        query_xf = F.adaptive_avg_pool2d(self.encoder(query_x.view(-1, 3, h, w)), 1)
        query_xf = query_xf.view(b, q, grids_q, -1).permute(0, 1, 3, 2).unsqueeze(-1)
        unlabeled_xf = F.adaptive_avg_pool2d(self.encoder(unlabeled_x.view(-1, 3, h, w)), 1)
        unlabeled_xf = unlabeled_xf.view(b, q, grids_q, -1).permute(0, 1, 3, 2).unsqueeze(-1)
        query = self.query(support_xf, support_y, query_xf, query_y, unlabeled_xf)
        return query

    def forward_FCN(self, support_x, support_y, query_x, query_y, unlabeled_x):
        # Full feature maps for support/query; unlabeled_xf stays flattened
        # as (b*u, fc, fh, fw) — NOTE(review): verify the query head expects this.
        b, s, c, h, w = support_x.shape
        q = query_x.shape[1]
        support_xf = self.encoder(support_x.view(-1, c, h, w))
        query_xf = self.encoder(query_x.view(-1, c, h, w))
        unlabeled_xf = self.encoder(unlabeled_x.view(-1, c, h, w))
        fc, fh, fw = support_xf.shape[-3:]
        support_xf = support_xf.view(b, s, fc, fh, fw)
        query_xf = query_xf.view(b, q, fc, fh, fw)
        query = self.query(support_xf, support_y, query_xf, query_y, unlabeled_xf)
        return query

    def forward(self, support_x, support_y, query_x, query_y, unlabeled_x):
        # Dispatch on the configured encoding name.
        if self.forward_encoding == "FCN":
            query = self.forward_FCN(support_x, support_y, query_x, query_y, unlabeled_x)
        elif self.forward_encoding.startswith("Grid"):
            query = self.forward_Grid(support_x, support_y, query_x, query_y, unlabeled_x)
        else:
            raise NotImplementedError
        return query
def make_semi_fsl(cfg):
    """Factory: construct the semi-supervised few-shot model from `cfg`."""
    model = FSLSemiQuery(cfg)
    return model
| true | true |
f71d1d40e8de3947b43250071b4e25714e0ff638 | 23,138 | py | Python | hw2/train_pg.py | wryoung412/CS294_Deep_RL_fall2017 | 077167de524157cc5f85f40232e5bcf6933ab2f5 | [
"MIT"
] | null | null | null | hw2/train_pg.py | wryoung412/CS294_Deep_RL_fall2017 | 077167de524157cc5f85f40232e5bcf6933ab2f5 | [
"MIT"
] | null | null | null | hw2/train_pg.py | wryoung412/CS294_Deep_RL_fall2017 | 077167de524157cc5f85f40232e5bcf6933ab2f5 | [
"MIT"
] | null | null | null | import numpy as np
import tensorflow as tf
import gym
import logz
import scipy.signal
import os
import time
import inspect
from multiprocessing import Process
#============================================================================================#
# Utilities
#============================================================================================#
def build_mlp(
        input_placeholder,
        output_size,
        scope,
        n_layers=2,
        size=64,
        activation=tf.tanh,
        output_activation=None
        ):
    """Build a feedforward network (multilayer perceptron).

    Stacks `n_layers` hidden dense layers of `size` units with `activation`
    under variable scope `scope`, then adds a dense output layer of
    `output_size` units with `output_activation`.

    Returns the symbolic output tensor.
    """
    with tf.variable_scope(scope):
        layer = input_placeholder
        for idx in range(n_layers):
            layer = tf.layers.dense(layer, size, activation, name='blah' + str(idx))
    # NOTE(review): the output layer is created *outside* `scope`, matching
    # the original control flow exactly — confirm this is intentional, since
    # its variables land in the enclosing scope.
    return tf.layers.dense(layer, output_size, output_activation)
def pathlength(path):
    """Number of timesteps in a rollout, read off its reward sequence."""
    rewards = path["reward"]
    return len(rewards)
def reward_to_q(rewards, gamma, reward_to_go):
    """Turn one trajectory's rewards into per-timestep Q-value targets.

    rewards:      1-D sequence of per-step rewards r_0 .. r_{T-1}.
    gamma:        discount factor.
    reward_to_go: if True, Q_t = sum_{t'>=t} gamma^(t'-t) * r_{t'};
                  if False, every entry is the full discounted return
                  Ret(tau) = sum_t gamma^t * r_t (trajectory-based PG).

    Returns a float numpy array of length T (empty for an empty trajectory).

    Bug fix: the previous vectorized accumulation
    (``q[:(T-i)] += gamma * q[i:T]`` over i) added gamma^1 — not gamma^k —
    times rewards k steps ahead whenever gamma != 1 (e.g. for T=3 it produced
    r0 + g*r1 + g*r2 instead of r0 + g*r1 + g^2*r2).  The backward recursion
    below computes the discounted sum exactly for any gamma.
    """
    T = len(rewards)
    q = np.zeros(T, dtype=float)
    running = 0.0
    # Backward pass: running = r_t + gamma * running is exactly Q_t.
    for t in range(T - 1, -1, -1):
        running = rewards[t] + gamma * running
        q[t] = running
    if not reward_to_go and T > 0:
        # Trajectory-based PG: repeat the full return at every timestep.
        q[:] = q[0]
    return q
#============================================================================================#
# Policy Gradient
#============================================================================================#
# batch_size is more natural for PG as we need to take average over paths.
# timesteps_per_batch is more relevant for Q-learning as learning is done step by step.
# CartPole
# Here is a good run
# python train_pg.py CartPole-v0 --n_layers 4 --target_reward 200 --learning_rate 1e-2 --nn_baseline --batch_size 10
# ********** Iteration 8 ************
# total trials: 90
# ----------------------------------------
# | Time | 31.1 |
# | Iteration | 8 |
# | AverageReturn | 200 |
# | StdReturn | 0 |
# | MaxReturn | 200 |
# | MinReturn | 200 |
# | EpLenMean | 200 |
# | EpLenStd | 0 |
# | TimestepsThisBatch | 2e+03 |
# | TimestepsSoFar | 1.15e+04 |
# ----------------------------------------
#
# MountainCar
# Working poorly. It seems some good exploration is needed to get any positive path.
#
# Acrobot
# Similar to MountainCar, but it is possible to randomly get a positive path,
# and then the model starts to learn.
# I can get to about 90 steps. What is the "solve" criterion?
# https://github.com/jonholifield/Acrobot-v1
# Box2D
# https://github.com/pybox2d/pybox2d/blob/master/INSTALL.md
# 'sudo' python setup.py install: should not use sudo in venv, it complains about setuptools not found
# LunarLander
# It does not do that well but works to some extent.
def train_PG(exp_name='',
             env_name='CartPole-v0',
             n_iter=100,
             gamma=1.0,
             # min_timesteps_per_batch=1000,
             batch_size=20,
             max_path_length=None,
             learning_rate=5e-3,
             reward_to_go=True,
             animate=True,
             logdir=None,
             normalize_advantages=True,
             nn_baseline=False,
             seed=0,
             # network arguments
             n_layers=1,
             size=32,
             target_reward=None
             ):
    """Run vanilla policy-gradient training on a discrete-action gym env.

    Per iteration: roll out `batch_size` complete trajectories with the
    current stochastic policy, build Q-value targets (full-trajectory return
    or reward-to-go, controlled by `reward_to_go`), optionally subtract a
    learned state-value baseline (`nn_baseline`), optionally normalize the
    advantages, then take one Adam step on the surrogate loss
    mean(-log pi(a|s) * advantage).  Diagnostics are logged via logz.

    Training stops early when the batch mean return reaches `target_reward`
    (if given) or when advantage normalization degenerates (zero std).

    Raises AssertionError for continuous action spaces (not implemented).
    """
    start = time.time()
    TODO = 1  # placeholder used by the unimplemented continuous-action branch

    # Configure output directory for logging.
    logz.configure_output_dir(logdir)

    # Log experimental parameters.
    # Fix: inspect.getargspec was deprecated and removed in Python 3.11;
    # getfullargspec exposes the same positional-argument names in field 0.
    args = inspect.getfullargspec(train_PG)[0]
    locals_ = locals()
    params = {k: locals_[k] if k in locals_ else None for k in args}
    logz.save_params(params)

    # Set random seeds.
    tf.set_random_seed(seed)
    np.random.seed(seed)

    # Make the gym environment.
    env = gym.make(env_name)

    # Is this env continuous, or discrete?
    discrete = isinstance(env.action_space, gym.spaces.Discrete)
    assert discrete, 'only discrete is implemented'

    # Maximum length for episodes.
    max_path_length = max_path_length or env.spec.max_episode_steps

    # Observation and action sizes.
    ob_dim = env.observation_space.shape[0]
    ac_dim = env.action_space.n if discrete else env.action_space.shape[0]

    # Placeholders for batch observations / actions / advantages.
    # The batch axis is None: its size is only known at runtime.
    sy_ob_no = tf.placeholder(shape=[None, ob_dim], name="ob", dtype=tf.float32)
    if discrete:
        sy_ac_na = tf.placeholder(shape=[None], name="ac", dtype=tf.int32)
    else:
        sy_ac_na = tf.placeholder(shape=[None, ac_dim], name="ac", dtype=tf.float32)
    sy_adv_n = tf.placeholder(shape=[None], name="adv", dtype=tf.float32)

    # Policy network: action logits, a stochastic sampling op, and the
    # log-probability of the actions actually taken.  Note sy_logprob_n is a
    # softmax cross-entropy, i.e. it holds -log pi(a|s), so *minimizing* the
    # loss below ascends log pi * advantage.
    if discrete:
        sy_logits_na = build_mlp(
            sy_ob_no,
            ac_dim,
            "nn_policy",
            n_layers=n_layers,
            size=size)
        sy_sampled_ac = tf.multinomial(sy_logits_na, 1)
        sy_logprob_n = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=sy_logits_na, labels=sy_ac_na)
    else:
        # Continuous actions are not implemented (see the assert above).
        sy_mean = TODO
        sy_logstd = TODO  # logstd should be a trainable variable, not a network output.
        sy_sampled_ac = TODO
        sy_logprob_n = TODO

    # Surrogate loss and policy update op.
    # TODO: reduce_mean over all timesteps weights long paths more heavily
    # than a per-path average would.
    loss = tf.reduce_mean(sy_logprob_n * sy_adv_n)
    update_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)

    # Optional state-value baseline network, fit to the Q-value targets.
    if nn_baseline:
        baseline_prediction = tf.squeeze(build_mlp(
            sy_ob_no,
            1,
            "nn_baseline",
            n_layers=n_layers,
            size=size))
        sy_q_n = tf.placeholder(shape=[None], name='q', dtype=tf.float32)
        baseline_loss = tf.nn.l2_loss(baseline_prediction - sy_q_n)
        baseline_update_op = tf.train.AdamOptimizer(learning_rate).minimize(baseline_loss)

    # Session / variable initialization and TensorBoard writer.
    tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
    sess = tf.Session(config=tf_config)
    sess.__enter__()  # equivalent to `with sess:`
    tf.global_variables_initializer().run()  # pylint: disable=E1101

    tf_board = os.path.join('/tmp/gube/hw2')
    writer = tf.summary.FileWriter(os.path.join(tf_board, str(int(time.time()))))
    writer.add_graph(sess.graph)
    merged_summary = tf.summary.merge_all()

    # ------------------------------ Training loop ------------------------ #
    total_timesteps = 0
    total_trials = 0

    for itr in range(n_iter):
        print("********** Iteration %i ************" % itr)

        # Collect a batch of `batch_size` complete trajectories.
        timesteps_this_batch = 0
        trials_this_batch = 0
        paths = []
        while True:
            ob = env.reset()
            obs, acs, rewards = [], [], []
            animate_this_episode = (len(paths) == 0 and (itr % 5 == 0) and animate)
            steps = 0
            while True:
                if animate_this_episode:
                    env.render()
                    time.sleep(0.05)
                obs.append(ob)
                ac = sess.run(sy_sampled_ac, feed_dict={sy_ob_no: ob[None]})
                ac = ac[0][0]  # tf.multinomial returns shape [1, 1]
                acs.append(ac)
                ob, rew, done, _ = env.step(ac)
                rewards.append(rew)
                steps += 1
                if done or steps > max_path_length:
                    break
            total_trials += 1
            trials_this_batch += 1
            path = {"observation": np.array(obs),
                    "reward": np.array(rewards),
                    "action": np.array(acs)}
            paths.append(path)
            timesteps_this_batch += pathlength(path)
            if trials_this_batch == batch_size:
                break
        total_timesteps += timesteps_this_batch
        print('total trials:', total_trials)

        # Flatten observations / actions across paths.
        ob_no = np.concatenate([path["observation"] for path in paths])
        ac_na = np.concatenate([path["action"] for path in paths])

        # Q-value targets: reward-to-go Q_t = sum_{t'>=t} gamma^(t'-t) r_t',
        # or the full trajectory return repeated at every timestep.
        q_n = np.concatenate([reward_to_q(path['reward'], gamma, reward_to_go) for path in paths])

        # Advantages: subtract the learned baseline when enabled.
        if nn_baseline:
            b_n = sess.run(baseline_prediction, feed_dict={sy_ob_no: ob_no})
            adv_n = q_n - b_n
        else:
            adv_n = q_n.copy()

        # Normalize advantages to zero mean / unit std (variance reduction).
        if normalize_advantages:
            adv_mu = np.mean(adv_n)
            adv_std = np.std(adv_n)
            # Degenerate batch (e.g. every CartPole return capped at 200):
            # nothing left to learn from, so stop rather than divide by zero.
            if adv_std == 0.0:
                return
            adv_n = (adv_n - adv_mu) / adv_std

        # Fit the baseline to the current batch's Q-values.
        if nn_baseline:
            # TODO: the fixed 100 inner steps is a heuristic fit schedule.
            print('timesteps:', timesteps_this_batch)
            for i in range(100):
                sess.run(baseline_update_op, feed_dict={sy_ob_no: ob_no, sy_q_n: q_n})

        # Policy gradient step.
        sess.run(update_op, feed_dict={sy_ob_no: ob_no,
                                       sy_ac_na: ac_na,
                                       sy_adv_n: adv_n})

        # Log diagnostics.
        returns = [path["reward"].sum() for path in paths]
        ep_lengths = [pathlength(path) for path in paths]
        logz.log_tabular("Time", time.time() - start)
        logz.log_tabular("Iteration", itr)
        logz.log_tabular("AverageReturn", np.mean(returns))
        logz.log_tabular("StdReturn", np.std(returns))
        logz.log_tabular("MaxReturn", np.max(returns))
        logz.log_tabular("MinReturn", np.min(returns))
        logz.log_tabular("EpLenMean", np.mean(ep_lengths))
        logz.log_tabular("EpLenStd", np.std(ep_lengths))
        logz.log_tabular("TimestepsThisBatch", timesteps_this_batch)
        logz.log_tabular("TimestepsSoFar", total_timesteps)
        logz.dump_tabular()
        logz.pickle_tf_vars()

        # Early stop once the batch's mean return reaches the target.
        # NOTE: not robust for small batch sizes (high-variance estimate).
        if target_reward is not None:
            if np.mean(returns) >= target_reward:
                return
def main():
    """Parse command-line arguments and launch one train_PG run per seed.

    Results are logged under data/<exp_name>_<env_name>_<timestamp>/<seed>.
    Each experiment runs in its own subprocess (see the note at the bottom).
    """
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('env_name', type=str)
    parser.add_argument('--exp_name', type=str, default='vpg')
    parser.add_argument('--render', action='store_true')
    parser.add_argument('--discount', type=float, default=1.0)
    parser.add_argument('--n_iter', '-n', type=int, default=100)
    parser.add_argument('--batch_size', '-b', type=int, default=1000)
    # ep_len <= 0 means "use the env's own episode limit" (see below).
    parser.add_argument('--ep_len', '-ep', type=float, default=-1.)
    parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3)
    parser.add_argument('--reward_to_go', '-rtg', action='store_true')
    parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')
    parser.add_argument('--nn_baseline', '-bl', action='store_true')
    parser.add_argument('--seed', type=int, default=1)
    parser.add_argument('--n_experiments', '-e', type=int, default=1)
    parser.add_argument('--n_layers', '-l', type=int, default=1)
    parser.add_argument('--size', '-s', type=int, default=32)
    parser.add_argument('--target_reward', type=float, default=None)
    args = parser.parse_args()

    # Create the per-run log directory under data/.
    if not(os.path.exists('data')):
        os.makedirs('data')
    logdir = args.exp_name + '_' + args.env_name + '_' + time.strftime("%d-%m-%Y_%H-%M-%S")
    logdir = os.path.join('data', logdir)
    if not(os.path.exists(logdir)):
        os.makedirs(logdir)

    max_path_length = args.ep_len if args.ep_len > 0 else None

    # One run per experiment, each with a distinct (offset) seed.
    for e in range(args.n_experiments):
        seed = args.seed + 10*e
        print('Running experiment with seed %d'%seed)
        def train_func():
            train_PG(
                exp_name=args.exp_name,
                env_name=args.env_name,
                n_iter=args.n_iter,
                gamma=args.discount,
                # min_timesteps_per_batch=args.batch_size,
                batch_size=args.batch_size,
                max_path_length=max_path_length,
                learning_rate=args.learning_rate,
                reward_to_go=args.reward_to_go,
                animate=args.render,
                logdir=os.path.join(logdir,'%d'%seed),
                normalize_advantages=not(args.dont_normalize_advantages),
                nn_baseline=args.nn_baseline,
                seed=seed,
                n_layers=args.n_layers,
                size=args.size,
                target_reward=args.target_reward
                )
        # Awkward hacky process runs, because Tensorflow does not like
        # repeatedly calling train_PG in the same thread.
        p = Process(target=train_func, args=tuple())
        p.start()
        p.join()
# Script entry point: parse CLI args and spawn the training subprocesses.
if __name__ == "__main__":
    main()
| 40.952212 | 116 | 0.500864 | import numpy as np
import tensorflow as tf
import gym
import logz
import scipy.signal
import os
import time
import inspect
from multiprocessing import Process
def build_mlp(
input_placeholder,
output_size,
scope,
n_layers=2,
size=64,
activation=tf.tanh,
output_activation=None
):
with tf.variable_scope(scope):
hidden = input_placeholder
for i in range(n_layers):
hidden = tf.layers.dense(hidden, size, activation, name='blah' + str(i))
return tf.layers.dense(hidden, output_size, output_activation)
def pathlength(path):
return len(path["reward"])
def reward_to_q(rewards, gamma, reward_to_go):
q = np.zeros_like(rewards)
T = len(rewards)
if reward_to_go:
q += rewards
for i in range(1, T):
q[:(T - i)] += gamma * q[i:T]
else:
r = 0
for i in range(T - 1, -1, -1):
r = rewards[i] + gamma * r
q = r * np.ones_like(q)
return q
def train_PG(exp_name='',
env_name='CartPole-v0',
n_iter=100,
gamma=1.0,
batch_size=20,
max_path_length=None,
learning_rate=5e-3,
reward_to_go=True,
animate=True,
logdir=None,
normalize_advantages=True,
nn_baseline=False,
seed=0,
n_layers=1,
size=32,
target_reward=None
):
start = time.time()
TODO = 1
logz.configure_output_dir(logdir)
args = inspect.getargspec(train_PG)[0]
locals_ = locals()
params = {k: locals_[k] if k in locals_ else None for k in args}
logz.save_params(params)
tf.set_random_seed(seed)
np.random.seed(seed)
env = gym.make(env_name)
discrete = isinstance(env.action_space, gym.spaces.Discrete)
assert discrete, 'only discrete is implemented'
max_path_length = max_path_length or env.spec.max_episode_steps
ob_dim = env.observation_space.shape[0]
ac_dim = env.action_space.n if discrete else env.action_space.shape[0]
sy_ob_no = tf.placeholder(shape=[None, ob_dim], name="ob", dtype=tf.float32)
if discrete:
sy_ac_na = tf.placeholder(shape=[None], name="ac", dtype=tf.int32)
else:
sy_ac_na = tf.placeholder(shape=[None, ac_dim], name="ac", dtype=tf.float32)
sy_adv_n = tf.placeholder(shape=[None], name="adv", dtype=tf.float32)
if discrete:
sy_logits_na = build_mlp(
sy_ob_no,
ac_dim,
"nn_policy",
n_layers=n_layers,
size=size)
sy_sampled_ac = tf.multinomial(sy_logits_na, 1)
sy_logprob_n = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=sy_logits_na, labels=sy_ac_na)
else:
sy_mean = TODO
sy_logstd = TODO
sy_sampled_ac = TODO
sy_logprob_n = TODO
# TODO: reduce_mean is not really correct here
loss = tf.reduce_mean(sy_logprob_n * sy_adv_n)
update_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)
#========================================================================================#
# ----------SECTION 5----------
# Optional Baseline
#========================================================================================#
if nn_baseline:
baseline_prediction = tf.squeeze(build_mlp(
sy_ob_no,
1,
"nn_baseline",
n_layers=n_layers,
size=size))
# Define placeholders for targets, a loss function and an update op for fitting a
# neural network baseline. These will be used to fit the neural network baseline.
# MY_CODE_HERE
sy_q_n = tf.placeholder(shape=[None], name='q', dtype=tf.float32)
baseline_loss = tf.nn.l2_loss(baseline_prediction - sy_q_n)
baseline_update_op = tf.train.AdamOptimizer(learning_rate).minimize(baseline_loss)
#========================================================================================#
# Tensorflow Engineering: Config, Session, Variable initialization
#========================================================================================#
tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
sess = tf.Session(config=tf_config)
sess.__enter__() # equivalent to `with sess:`
tf.global_variables_initializer().run() #pylint: disable=E1101
tf_board = os.path.join('/tmp/gube/hw2')
writer = tf.summary.FileWriter(os.path.join(tf_board, str(int(time.time()))))
writer.add_graph(sess.graph)
merged_summary = tf.summary.merge_all()
#========================================================================================#
# Training Loop
#========================================================================================#
total_timesteps = 0
total_trials = 0
for itr in range(n_iter):
print("********** Iteration %i ************"%itr)
# Collect paths until we have enough timesteps
timesteps_this_batch = 0
trials_this_batch = 0
paths = []
while True:
ob = env.reset()
obs, acs, rewards = [], [], []
animate_this_episode=(len(paths)==0 and (itr % 5 == 0) and animate)
steps = 0
while True:
if animate_this_episode:
env.render()
time.sleep(0.05)
obs.append(ob)
ac = sess.run(sy_sampled_ac, feed_dict={sy_ob_no : ob[None]})
ac = ac[0][0] # was ac[0]
acs.append(ac)
ob, rew, done, _ = env.step(ac)
rewards.append(rew)
steps += 1
if done or steps > max_path_length:
break
total_trials += 1
trials_this_batch += 1
path = {"observation" : np.array(obs),
"reward" : np.array(rewards),
"action" : np.array(acs)}
paths.append(path)
timesteps_this_batch += pathlength(path)
# if timesteps_this_batch > min_timesteps_per_batch:
# break
if trials_this_batch == batch_size:
break
total_timesteps += timesteps_this_batch
print('total trials:', total_trials)
# Build arrays for observation, action for the policy gradient update by concatenating
# across paths
ob_no = np.concatenate([path["observation"] for path in paths])
ac_na = np.concatenate([path["action"] for path in paths])
#====================================================================================#
# ----------SECTION 4----------
# Computing Q-values
#
# Your code should construct numpy arrays for Q-values which will be used to compute
# advantages (which will in turn be fed to the placeholder you defined above).
#
# Recall that the expression for the policy gradient PG is
#
# PG = E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * (Q_t - b_t )]
#
# where
#
# tau=(s_0, a_0, ...) is a trajectory,
# Q_t is the Q-value at time t, Q^{pi}(s_t, a_t),
# and b_t is a baseline which may depend on s_t.
#
# You will write code for two cases, controlled by the flag 'reward_to_go':
#
# Case 1: trajectory-based PG
#
# (reward_to_go = False)
#
# Instead of Q^{pi}(s_t, a_t), we use the total discounted reward summed over
# entire trajectory (regardless of which time step the Q-value should be for).
#
# For this case, the policy gradient estimator is
#
# E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * Ret(tau)]
#
# where
#
# Ret(tau) = sum_{t'=0}^T gamma^t' r_{t'}.
#
#
# Store the Q-values for all timesteps and all trajectories in a variable 'q_n',
# like the 'ob_no' and 'ac_na' above.
#
#====================================================================================#
# MY_CODE_HERE
q_n = np.concatenate([reward_to_q(path['reward'], gamma, reward_to_go) for path in paths])
#====================================================================================#
# ----------SECTION 5----------
# Computing Baselines
#====================================================================================#
if nn_baseline:
# If nn_baseline is True, use your neural network to predict reward-to-go
# at each timestep for each trajectory, and save the result in a variable 'b_n'
# like 'ob_no', 'ac_na', and 'q_n'.
#
# Hint #bl1: rescale the output from the nn_baseline to match the statistics
# (mean and std) of the current or previous batch of Q-values. (Goes with Hint
# #bl2 below.)
# MY_CODE_HERE
# The bootstrap version uses r_t + v(s_{t+1}) - v(s_t), which is biased
b_n = sess.run(baseline_prediction, feed_dict={sy_ob_no: ob_no})
adv_n = q_n - b_n
else:
adv_n = q_n.copy()
#====================================================================================#
# ----------SECTION 4----------
# Advantage Normalization
#====================================================================================#
if normalize_advantages:
# On the next line, implement a trick which is known empirically to reduce variance
# in policy gradient methods: normalize adv_n to have mean zero and std=1.
# MY_CODE_HERE
adv_mu = np.mean(adv_n)
adv_std = np.std(adv_n)
# Could be more robust than this
if adv_std == 0.0:
return
# The normalization could be problematic.
# For environments like CartPole, the reward is an integer and is capped at 200.
# When not using base, adv_n could all be 200 and adv_std = 0.
adv_n = (adv_n - adv_mu) / adv_std
#====================================================================================#
# ----------SECTION 5----------
# Optimizing Neural Network Baseline
#====================================================================================#
if nn_baseline:
# ----------SECTION 5----------
# If a neural network baseline is used, set up the targets and the inputs for the
# baseline.
#
# Fit it to the current batch in order to use for the next iteration. Use the
# baseline_update_op you defined earlier.
#
# Hint #bl2: Instead of trying to target raw Q-values directly, rescale the
# targets to have mean zero and std=1. (Goes with Hint #bl1 above.)
# MY_CODE_HERE
# TODO: what is the right way to fit?
# 1. Using fixed number of steps.
# It might not balance the good vs bad paths well, but 100 seems pretty good.
# 2. Using timesteps as number of steps. This is CartPole specific.
print('timesteps:', timesteps_this_batch)
for i in range(100):
sess.run(baseline_update_op, feed_dict={sy_ob_no: ob_no, sy_q_n: q_n})
#====================================================================================#
# ----------SECTION 4----------
# Performing the Policy Update
#====================================================================================#
# Call the update operation necessary to perform the policy gradient update based on
# the current batch of rollouts.
#
# For debug purposes, you may wish to save the value of the loss function before
# and after an update, and then log them below.
# MY_CODE_HERE
sess.run(update_op, feed_dict={sy_ob_no: ob_no,
sy_ac_na: ac_na,
sy_adv_n: adv_n})
# Log diagnostics
returns = [path["reward"].sum() for path in paths]
ep_lengths = [pathlength(path) for path in paths]
logz.log_tabular("Time", time.time() - start)
logz.log_tabular("Iteration", itr)
logz.log_tabular("AverageReturn", np.mean(returns))
logz.log_tabular("StdReturn", np.std(returns))
logz.log_tabular("MaxReturn", np.max(returns))
logz.log_tabular("MinReturn", np.min(returns))
logz.log_tabular("EpLenMean", np.mean(ep_lengths))
logz.log_tabular("EpLenStd", np.std(ep_lengths))
logz.log_tabular("TimestepsThisBatch", timesteps_this_batch)
logz.log_tabular("TimestepsSoFar", total_timesteps)
logz.dump_tabular()
logz.pickle_tf_vars()
# This stopping criterion is not robust when the batch size is small.
if target_reward is not None:
if np.mean([path["reward"].sum() for path in paths]) >= target_reward:
return
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('env_name', type=str)
parser.add_argument('--exp_name', type=str, default='vpg')
parser.add_argument('--render', action='store_true')
parser.add_argument('--discount', type=float, default=1.0)
parser.add_argument('--n_iter', '-n', type=int, default=100)
parser.add_argument('--batch_size', '-b', type=int, default=1000)
parser.add_argument('--ep_len', '-ep', type=float, default=-1.)
parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3)
parser.add_argument('--reward_to_go', '-rtg', action='store_true')
parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')
parser.add_argument('--nn_baseline', '-bl', action='store_true')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--n_experiments', '-e', type=int, default=1)
parser.add_argument('--n_layers', '-l', type=int, default=1)
parser.add_argument('--size', '-s', type=int, default=32)
parser.add_argument('--target_reward', type=float, default=None)
args = parser.parse_args()
if not(os.path.exists('data')):
os.makedirs('data')
logdir = args.exp_name + '_' + args.env_name + '_' + time.strftime("%d-%m-%Y_%H-%M-%S")
logdir = os.path.join('data', logdir)
if not(os.path.exists(logdir)):
os.makedirs(logdir)
max_path_length = args.ep_len if args.ep_len > 0 else None
for e in range(args.n_experiments):
seed = args.seed + 10*e
print('Running experiment with seed %d'%seed)
def train_func():
train_PG(
exp_name=args.exp_name,
env_name=args.env_name,
n_iter=args.n_iter,
gamma=args.discount,
# min_timesteps_per_batch=args.batch_size,
batch_size=args.batch_size,
max_path_length=max_path_length,
learning_rate=args.learning_rate,
reward_to_go=args.reward_to_go,
animate=args.render,
logdir=os.path.join(logdir,'%d'%seed),
normalize_advantages=not(args.dont_normalize_advantages),
nn_baseline=args.nn_baseline,
seed=seed,
n_layers=args.n_layers,
size=args.size,
target_reward=args.target_reward
)
# Awkward hacky process runs, because Tensorflow does not like
# repeatedly calling train_PG in the same thread.
p = Process(target=train_func, args=tuple())
p.start()
p.join()
if __name__ == "__main__":
main()
| true | true |
f71d1d6431cd9864c8ac945c489e1505fd9daf69 | 3,271 | py | Python | how_to_play.py | MuhammadTalha28/sapace-invader | dbc7eb28d48491c78d2b732a29c313ad7a1de228 | [
"Unlicense"
] | null | null | null | how_to_play.py | MuhammadTalha28/sapace-invader | dbc7eb28d48491c78d2b732a29c313ad7a1de228 | [
"Unlicense"
] | null | null | null | how_to_play.py | MuhammadTalha28/sapace-invader | dbc7eb28d48491c78d2b732a29c313ad7a1de228 | [
"Unlicense"
] | null | null | null |
import pygame
from Buttons import Button
import os
import menu
pygame.font.init()
pygame.mixer.init()
pygame.init()
# constants
FONT = pygame.font.SysFont('comicsans', 30)
SEC_FONT = pygame.font.SysFont('comicsans', 22)
TEXT_COLOR = (255, 255, 255)
WIDTH, HEIGHT = 600, 750
WIN = pygame.display.set_mode((WIDTH, HEIGHT))
HEAD_IMG = pygame.transform.scale(pygame.image.load(
os.path.join('Assets', 'head.png')), (WIDTH, HEIGHT)).convert_alpha()
BACK_BTN = pygame.transform.scale(pygame.image.load(
'Assets\Buttons\Back.png').convert_alpha(), (60, 60))
SPACE_BG = pygame.transform.scale(pygame.image.load(
os.path.join('Assets', 'stars_texture.png')), (WIDTH, HEIGHT))
MENU_BG = pygame.transform.scale(pygame.image.load(
os.path.join('Assets\Buttons', 'Menu-bg.png')), (WIDTH-80, HEIGHT-140))
back_btn = Button(10, 10, BACK_BTN, BACK_BTN, WIN)
def Set_Draw(y, Text_list):
    """Render one frame of the "How To Play" screen onto the global WIN surface.

    Args:
        y: vertical offset of the first help line (controls scrolling).
        Text_list: list of help strings to draw, one per row.
    """
    # Background layers first, then the scrollable help text, then the
    # header image / back button / title drawn over the top.
    WIN.blit(SPACE_BG, (0, 0))
    WIN.blit(MENU_BG, (40, 100))
    Text_Draw(Text_list, y).Draw(WIN)
    WIN.blit(HEAD_IMG, (0, 0))
    back_btn.Draw_btn()
    title = FONT.render("How To Play", 1, TEXT_COLOR)
    WIN.blit(title, (WIDTH//2 - title.get_width()//2, 10))
    # Bottom banner masks text scrolling past the footer.
    WIN.blit(HEAD_IMG, (0, 705))
class Text_Draw:
    """Draws a list of text lines, vertically stacked and centred, starting at ``y``."""

    # Legacy gameplay hints; kept for backward compatibility (not used by Draw).
    Text_Map = {
        1: "Dont let the eneimes hit the lower part of the screen",
        2: "If an enemy hits the bottom of the screen you will lose 1 live",
        3: "Get as much highscore as possible"
    }

    def __init__(self, text, y, color=TEXT_COLOR):
        # text: list of strings, one per row; y: y coordinate of the first row;
        # color: render colour for every row (previously accepted but ignored).
        self.text = text
        self.y = y
        self.color = color

    def Draw(self, window):
        """Blit every line onto ``window``, centred horizontally, 30px apart.

        Fixes two defects of the original implementation:
        - uses enumerate() instead of list.index(), so duplicate strings in
          ``self.text`` no longer collapse onto the same row;
        - honours ``self.color`` instead of always rendering with TEXT_COLOR
          (backward compatible, since TEXT_COLOR is the default).
        """
        for row, line in enumerate(self.text):
            label = SEC_FONT.render(line, 1, self.color)
            window.blit(
                label, (WIDTH//2 - label.get_width() // 2, self.y + 30*row))
def Main():
    """Event loop for the "How To Play" screen.

    W/S scroll the help text (clamped between y=60 and y=120), the back
    button returns to the menu, and closing the window quits the program.
    """
    scroll_y = 120
    clock = pygame.time.Clock()
    help_lines = ["Controls for single player:", "A and D to move the player ship", "SPACE to shoot bullets", "P to pause the game", "Q to exit out of the game",
                  "Rules for single player:", "Dont let the enemy ships hit ", "the bottom of the screen", "If an enemy hits the bottom of the screen ", "you will lose 1 live", "enemies get faster with time",
                  "If you lose all the live or", "lose all the health you lose", "try to beat the high score", "watch out how you use our bullets as ", "they are limited.",
                  "WASD to move the ship", "RIGHT CTRL shoot bullets and ", "RIGHT ALT to shoot rockets",
                  "Kill the opposing player before getting eliminated.", "You have unlimited ammo but it has a cool down"]
    while True:
        clock.tick(60)  # cap the loop at 60 FPS
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                quit()
        # Draw_btn() returns truthy when the back button is clicked.
        if back_btn.Draw_btn():
            menu.Main()
        keys = pygame.key.get_pressed()
        if keys[pygame.K_w] and scroll_y > 60:
            scroll_y -= 2
        if keys[pygame.K_s] and scroll_y < 120:
            scroll_y += 2
        Set_Draw(scroll_y, help_lines)
        pygame.display.update()
if __name__ == "__main__":
Main()
| 34.431579 | 208 | 0.602262 |
import pygame
from Buttons import Button
import os
import menu
pygame.font.init()
pygame.mixer.init()
pygame.init()
FONT = pygame.font.SysFont('comicsans', 30)
SEC_FONT = pygame.font.SysFont('comicsans', 22)
TEXT_COLOR = (255, 255, 255)
WIDTH, HEIGHT = 600, 750
WIN = pygame.display.set_mode((WIDTH, HEIGHT))
HEAD_IMG = pygame.transform.scale(pygame.image.load(
os.path.join('Assets', 'head.png')), (WIDTH, HEIGHT)).convert_alpha()
BACK_BTN = pygame.transform.scale(pygame.image.load(
'Assets\Buttons\Back.png').convert_alpha(), (60, 60))
SPACE_BG = pygame.transform.scale(pygame.image.load(
os.path.join('Assets', 'stars_texture.png')), (WIDTH, HEIGHT))
MENU_BG = pygame.transform.scale(pygame.image.load(
os.path.join('Assets\Buttons', 'Menu-bg.png')), (WIDTH-80, HEIGHT-140))
back_btn = Button(10, 10, BACK_BTN, BACK_BTN, WIN)
def Set_Draw(y, Text_list):
WIN.blit(SPACE_BG, (0, 0))
WIN.blit(MENU_BG, (40, 100))
Text_Draw.Draw(Text_Draw(Text_list, y), WIN)
WIN.blit(HEAD_IMG, (0, 0))
back_btn.Draw_btn()
screen_label = FONT.render("How To Play", 1, TEXT_COLOR)
WIN.blit(screen_label, (WIDTH//2 - screen_label.get_width()//2, 10))
WIN.blit(HEAD_IMG, (0, 705))
class Text_Draw:
Text_Map = {
1: "Dont let the eneimes hit the lower part of the screen",
2: "If an enemy hits the bottom of the screen you will lose 1 live",
3: "Get as much highscore as possible"
}
def __init__(self, text, y, color=TEXT_COLOR):
self.text = text
self.y = y
self.color = color
def Draw(self, window):
for text in self.text:
label = SEC_FONT.render(text, 1, TEXT_COLOR)
window.blit(
label, (WIDTH//2-label.get_width() // 2, self.y+30*self.text.index(text)))
def Main():
y = 120
clock = pygame.time.Clock()
Text_list = ["Controls for single player:", "A and D to move the player ship", "SPACE to shoot bullets", "P to pause the game", "Q to exit out of the game",
"Rules for single player:", "Dont let the enemy ships hit ", "the bottom of the screen", "If an enemy hits the bottom of the screen ", "you will lose 1 live", "enemies get faster with time",
"If you lose all the live or", "lose all the health you lose", "try to beat the high score", "watch out how you use our bullets as ", "they are limited.",
"WASD to move the ship", "RIGHT CTRL shoot bullets and ", "RIGHT ALT to shoot rockets",
"Kill the opposing player before getting eliminated.", "You have unlimited ammo but it has a cool down"]
crun = True
while crun:
clock.tick(60)
events = pygame.event.get()
for event in events:
if event.type == pygame.QUIT:
quit()
if back_btn.Draw_btn():
menu.Main()
keys = pygame.key.get_pressed()
if keys[pygame.K_w] and y > 60:
y -= 2
if keys[pygame.K_s]:
if y < 120:
y += 2
Set_Draw(y, Text_list)
pygame.display.update()
if __name__ == "__main__":
Main()
| true | true |
f71d1e6ab9665ef77195b8e703b3c0c7acfd60f4 | 9,851 | py | Python | runner_mockingjay.py | andi611/Mockingjay-Speech-Representation | 8f41f5728bdb94497e939fee0d67c7f65729a035 | [
"MIT"
] | 105 | 2019-10-24T05:28:57.000Z | 2022-02-21T23:08:07.000Z | runner_mockingjay.py | samirsahoo007/Audio-and-Speech-Processing | e77df17a7f63a983c3757140c7a1e8c199cac614 | [
"MIT"
] | 9 | 2020-02-17T06:39:53.000Z | 2022-03-14T08:46:35.000Z | runner_mockingjay.py | samirsahoo007/Audio-and-Speech-Processing | e77df17a7f63a983c3757140c7a1e8c199cac614 | [
"MIT"
] | 19 | 2019-10-29T11:40:34.000Z | 2021-11-24T16:36:04.000Z | # -*- coding: utf-8 -*- #
"""*********************************************************************************************"""
# FileName [ runner_mockingjay.py ]
# Synopsis [ runner for the mockingjay model ]
# Author [ Andy T. Liu (Andi611) ]
# Copyright [ Copyleft(c), Speech Lab, NTU, Taiwan ]
"""*********************************************************************************************"""
###############
# IMPORTATION #
###############
import yaml
import torch
import random
import argparse
import numpy as np
from utility.timer import Timer
#############################
# MOCKINGJAY CONFIGURATIONS #
#############################
def get_mockingjay_args():
    """Parse command-line arguments and load the experiment YAML config.

    Returns:
        tuple: ``(config, args)`` where ``config`` is the parsed YAML mapping
        (with a ``Timer`` instance added under the 'timer' key) and ``args``
        is the argparse namespace, extended with the derived boolean flags
        ``gpu`` (= not --cpu) and ``verbose`` (= not --no-msg).
    """
    parser = argparse.ArgumentParser(description='Argument Parser for the mockingjay project.')

    # setting
    parser.add_argument('--config', default='config/mockingjay_libri.yaml', type=str, help='Path to experiment config.')
    parser.add_argument('--seed', default=1337, type=int, help='Random seed for reproducable results.', required=False)

    # Logging
    parser.add_argument('--logdir', default='log/log_mockingjay/', type=str, help='Logging path.', required=False)
    parser.add_argument('--name', default=None, type=str, help='Name for logging.', required=False)

    # model ckpt
    parser.add_argument('--load', action='store_true', help='Load pre-trained model to restore training, no need to specify this during testing.')
    parser.add_argument('--ckpdir', default='result/result_mockingjay/', type=str, help='Checkpoint/Result path.', required=False)
    parser.add_argument('--ckpt', default='mockingjay_libri_sd1337_LinearLarge/mockingjay-500000.ckpt', type=str, help='path to mockingjay model checkpoint.', required=False)
    # parser.add_argument('--ckpt', default='mockingjay_libri_sd1337_MelBase/mockingjay-500000.ckpt', type=str, help='path to mockingjay model checkpoint.', required=False)
    parser.add_argument('--dckpt', default='baseline_sentiment_libri_sd1337/baseline_sentiment-500000.ckpt', type=str, help='path to downstream checkpoint.', required=False)
    parser.add_argument('--apc_path', default='./result/result_apc/apc_libri_sd1337_standard/apc-500000.ckpt', type=str, help='path to the apc model checkpoint.', required=False)

    # mockingjay
    parser.add_argument('--train', action='store_true', help='Train the model.')
    parser.add_argument('--run_mockingjay', action='store_true', help='train and test the downstream tasks using mockingjay representations.')
    parser.add_argument('--run_apc', action='store_true', help='train and test the downstream tasks using apc representations.')
    parser.add_argument('--fine_tune', action='store_true', help='fine tune the mockingjay model with downstream task.')
    parser.add_argument('--plot', action='store_true', help='Plot model generated results during testing.')

    # phone task
    parser.add_argument('--train_phone', action='store_true', help='Train the phone classifier on mel or mockingjay representations.')
    parser.add_argument('--test_phone', action='store_true', help='Test mel or mockingjay representations using the trained phone classifier.')

    # sentiment task
    parser.add_argument('--train_sentiment', action='store_true', help='Train the sentiment classifier on mel or mockingjay representations.')
    parser.add_argument('--test_sentiment', action='store_true', help='Test mel or mockingjay representations using the trained sentiment classifier.')

    # speaker verification task
    parser.add_argument('--train_speaker', action='store_true', help='Train the speaker classifier on mel or mockingjay representations.')
    parser.add_argument('--test_speaker', action='store_true', help='Test mel or mockingjay representations using the trained speaker classifier.')

    # Options
    parser.add_argument('--with_head', action='store_true', help='inference with the spectrogram head, the model outputs spectrogram.')
    parser.add_argument('--output_attention', action='store_true', help='plot attention')
    parser.add_argument('--load_ws', default='result/result_mockingjay_sentiment/10111754-10170300-weight_sum/best_val.ckpt', help='load weighted-sum weights from trained downstream model')
    parser.add_argument('--cpu', action='store_true', help='Disable GPU training.')
    parser.add_argument('--no-msg', action='store_true', help='Hide all messages.')

    args = parser.parse_args()
    setattr(args, 'gpu', not args.cpu)
    setattr(args, 'verbose', not args.no_msg)

    # Bug fix: the original `yaml.load(open(args.config, 'r'))` leaked the
    # file handle and relied on PyYAML's implicit Loader, which warns (5.1+)
    # or raises (6.0+) without an explicit Loader. safe_load is sufficient
    # for a plain configuration mapping.
    with open(args.config, 'r') as f:
        config = yaml.safe_load(f)
    config['timer'] = Timer()

    return config, args
########
# MAIN #
########
def main():
    """Entry point: seed all RNGs, then dispatch on the parsed CLI flags.

    Exactly one mode is executed per invocation, in this precedence order:
    --train (pre-train Mockingjay), --train_phone/--test_phone,
    --train_sentiment/--test_sentiment, --train_speaker/--test_speaker,
    --plot. Downstream task names are prefixed 'mockingjay_', 'apc_' or
    'baseline_' according to --run_mockingjay / --run_apc.
    """
    # get arguments
    config, args = get_mockingjay_args()
    # Fix seed and make backends deterministic
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if torch.cuda.is_available(): torch.cuda.manual_seed_all(args.seed)
    torch.backends.cudnn.deterministic = True
    # NOTE(review): benchmark=True lets cuDNN autotune kernels, which can
    # undermine the determinism requested on the previous line — confirm
    # whether exact reproducibility or speed is the intent here.
    torch.backends.cudnn.benchmark = True
    # Train Mockingjay
    # (solver imports are done lazily inside each branch so that only the
    # code path actually requested pays its import cost / side effects)
    if args.train:
        from mockingjay.solver import Trainer
        trainer = Trainer(config, args)
        trainer.load_data(split='train')
        trainer.set_model(inference=False)
        trainer.exec()
    ##################################################################################
    # Train Phone Task
    elif args.train_phone:
        from downstream.solver import Downstream_Trainer
        task = 'mockingjay_phone' if args.run_mockingjay \
                else 'apc_phone' if args.run_apc else 'baseline_phone'
        trainer = Downstream_Trainer(config, args, task=task)
        trainer.load_data(split='train', load='phone')
        trainer.set_model(inference=False)
        trainer.exec()
    # Test Phone Task
    elif args.test_phone:
        from downstream.solver import Downstream_Tester
        task = 'mockingjay_phone' if args.run_mockingjay \
                else 'apc_phone' if args.run_apc else 'baseline_phone'
        tester = Downstream_Tester(config, args, task=task)
        tester.load_data(split='test', load='phone')
        tester.set_model(inference=True)
        tester.exec()
    ##################################################################################
    # Train Sentiment Task
    elif args.train_sentiment:
        from downstream.solver import Downstream_Trainer
        task = 'mockingjay_sentiment' if args.run_mockingjay \
                else 'apc_sentiment' if args.run_apc else 'baseline_sentiment'
        trainer = Downstream_Trainer(config, args, task=task)
        trainer.load_data(split='train', load='sentiment')
        trainer.set_model(inference=False)
        trainer.exec()
    # Test Sentiment Task
    elif args.test_sentiment:
        from downstream.solver import Downstream_Tester
        task = 'mockingjay_sentiment' if args.run_mockingjay \
                else 'apc_sentiment' if args.run_apc else 'baseline_sentiment'
        tester = Downstream_Tester(config, args, task=task)
        tester.load_data(split='test', load='sentiment')
        tester.set_model(inference=True)
        tester.exec()
    ##################################################################################
    # Train Speaker Task
    elif args.train_speaker:
        from downstream.solver import Downstream_Trainer
        task = 'mockingjay_speaker' if args.run_mockingjay \
                else 'apc_speaker' if args.run_apc else 'baseline_speaker'
        trainer = Downstream_Trainer(config, args, task=task)
        trainer.load_data(split='train', load='speaker')
        # trainer.load_data(split='train', load='speaker_large') # Deprecated
        trainer.set_model(inference=False)
        trainer.exec()
    # Test Speaker Task
    elif args.test_speaker:
        from downstream.solver import Downstream_Tester
        task = 'mockingjay_speaker' if args.run_mockingjay \
                else 'apc_speaker' if args.run_apc else 'baseline_speaker'
        tester = Downstream_Tester(config, args, task=task)
        tester.load_data(split='test', load='speaker')
        # tester.load_data(split='test', load='speaker_large') # Deprecated
        tester.set_model(inference=True)
        tester.exec()
    ##################################################################################
    # Visualize Mockingjay
    elif args.plot:
        from mockingjay.solver import Tester
        tester = Tester(config, args)
        tester.load_data(split='test', load_mel_only=True)
        tester.set_model(inference=True, with_head=args.with_head, output_attention=args.output_attention)
        tester.plot(with_head=args.with_head)
    # Print accumulated timing statistics regardless of which branch ran.
    config['timer'].report()
########################
# GET MOCKINGJAY MODEL #
########################
def get_mockingjay_model(from_path='result/result_mockingjay/mockingjay_libri_sd1337_best/mockingjay-500000.ckpt', display_settings=False):
    """Load a trained Mockingjay model (wrapped in a Tester) from a checkpoint.

    Args:
        from_path: path to a checkpoint written by the trainer; its
            'Settings' entry must contain the original 'Config' and 'Paras'.
        display_settings: when True, print the checkpoint's config sections
            and run parameters before building the model.

    Returns:
        A ``mockingjay.solver.Tester`` with the checkpoint weights loaded
        for inference (no spectrogram head).
    """
    # Restore the exact config / argparse namespace the model was trained with.
    checkpoint = torch.load(from_path, map_location='cpu')
    settings = checkpoint['Settings']
    config = settings['Config']
    paras = settings['Paras']

    if display_settings:
        for section in config:
            print(section + ':')
            for key in config[section]:
                print('\t' + str(key) + ': ', config[section][key])
        print('paras:')
        paras_dict = vars(paras)
        for key in paras_dict:
            print('\t' + str(key) + ': ', paras_dict[key])

    # Lazy import, matching the rest of this module: only pay for the solver
    # machinery when a model is actually requested.
    from mockingjay.solver import Tester
    tester = Tester(config, paras)
    tester.set_model(inference=True, with_head=False, from_path=from_path)
    return tester
if __name__ == '__main__':
main()
| 45.396313 | 189 | 0.650391 |
-ckpt', default='mockingjay_libri_sd1337_LinearLarge/mockingjay-500000.ckpt', type=str, help='path to mockingjay model checkpoint.', required=False)
parser.add_argument('--dckpt', default='baseline_sentiment_libri_sd1337/baseline_sentiment-500000.ckpt', type=str, help='path to downstream checkpoint.', required=False)
parser.add_argument('--apc_path', default='./result/result_apc/apc_libri_sd1337_standard/apc-500000.ckpt', type=str, help='path to the apc model checkpoint.', required=False)
parser.add_argument('--train', action='store_true', help='Train the model.')
parser.add_argument('--run_mockingjay', action='store_true', help='train and test the downstream tasks using mockingjay representations.')
parser.add_argument('--run_apc', action='store_true', help='train and test the downstream tasks using apc representations.')
parser.add_argument('--fine_tune', action='store_true', help='fine tune the mockingjay model with downstream task.')
parser.add_argument('--plot', action='store_true', help='Plot model generated results during testing.')
parser.add_argument('--train_phone', action='store_true', help='Train the phone classifier on mel or mockingjay representations.')
parser.add_argument('--test_phone', action='store_true', help='Test mel or mockingjay representations using the trained phone classifier.')
parser.add_argument('--train_sentiment', action='store_true', help='Train the sentiment classifier on mel or mockingjay representations.')
parser.add_argument('--test_sentiment', action='store_true', help='Test mel or mockingjay representations using the trained sentiment classifier.')
parser.add_argument('--train_speaker', action='store_true', help='Train the speaker classifier on mel or mockingjay representations.')
parser.add_argument('--test_speaker', action='store_true', help='Test mel or mockingjay representations using the trained speaker classifier.')
parser.add_argument('--with_head', action='store_true', help='inference with the spectrogram head, the model outputs spectrogram.')
parser.add_argument('--output_attention', action='store_true', help='plot attention')
parser.add_argument('--load_ws', default='result/result_mockingjay_sentiment/10111754-10170300-weight_sum/best_val.ckpt', help='load weighted-sum weights from trained downstream model')
parser.add_argument('--cpu', action='store_true', help='Disable GPU training.')
parser.add_argument('--no-msg', action='store_true', help='Hide all messages.')
args = parser.parse_args()
setattr(args,'gpu', not args.cpu)
setattr(args,'verbose', not args.no_msg)
config = yaml.load(open(args.config,'r'))
config['timer'] = Timer()
return config, args
y_args()
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available(): torch.cuda.manual_seed_all(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
if args.train:
from mockingjay.solver import Trainer
trainer = Trainer(config, args)
trainer.load_data(split='train')
trainer.set_model(inference=False)
trainer.exec()
| true | true |
f71d1e917a53f537d6a94b2b2ce9a5baf836f8aa | 540 | py | Python | 1.py | deepaksood619/python-challenge | 8ee08136b827e75571307fa36cd65e7ba6a162de | [
"MIT"
] | 2 | 2021-04-25T08:04:46.000Z | 2022-02-05T17:02:24.000Z | 1.py | deepaksood619/python-challenge | 8ee08136b827e75571307fa36cd65e7ba6a162de | [
"MIT"
] | null | null | null | 1.py | deepaksood619/python-challenge | 8ee08136b827e75571307fa36cd65e7ba6a162de | [
"MIT"
] | null | null | null | # Question - http://www.pythonchallenge.com/pc/def/map.html
# Thought Process -
# It's a Caesar cipher with ROT2, as the image shows the
# alphabet shifting
# ROT2 Shift answer - i hope you didnt translate it by hand. thats what computers are for. doing it in by hand is inefficient and that's why this text is so long. using string.maketrans() is recommended. now apply on the url.
# ROT2 on map - ocr
# used https://planetcalc.com/1434/ for directly calculating the ROT
# solution - http://www.pythonchallenge.com/pc/def/ocr.html
| 41.538462 | 225 | 0.744444 |
# alphabet shifting
# ROT2 Shift answer - i hope you didnt translate it by hand. thats what computers are for. doing it in by hand is inefficient and that's why this text is so long. using string.maketrans() is recommended. now apply on the url.
| true | true |
f71d1fa896751ba8c82289f0ddabde96392c403d | 817 | py | Python | data/train/python/f71d1fa896751ba8c82289f0ddabde96392c403dmain.py | harshp8l/deep-learning-lang-detection | 2a54293181c1c2b1a2b840ddee4d4d80177efb33 | [
"MIT"
] | 84 | 2017-10-25T15:49:21.000Z | 2021-11-28T21:25:54.000Z | data/train/python/f71d1fa896751ba8c82289f0ddabde96392c403dmain.py | vassalos/deep-learning-lang-detection | cbb00b3e81bed3a64553f9c6aa6138b2511e544e | [
"MIT"
] | 5 | 2018-03-29T11:50:46.000Z | 2021-04-26T13:33:18.000Z | data/train/python/f71d1fa896751ba8c82289f0ddabde96392c403dmain.py | vassalos/deep-learning-lang-detection | cbb00b3e81bed3a64553f9c6aa6138b2511e544e | [
"MIT"
] | 24 | 2017-11-22T08:31:00.000Z | 2022-03-27T01:22:31.000Z | #!/usr/bin/python
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
import os
import sys
if __name__ == '__main__':
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../utils'))
import websocket
def main():
    """Smoke-test the game websocket server on localhost (Python 2 script).

    Opens one 'display' and one 'controller' websocket connection for a
    hard-coded game/controller id, exchanges one message in each direction,
    prints what each side received, then closes both sockets.
    """
    gameId = 'abc123'
    controllerId = '90210'
    # Server routes by path: /<gameId>/display and /<gameId>/controller/<controllerId>.
    displayWS = websocket.WebSocket()
    displayWS.connect("ws://127.0.0.1:8887/%s/display" % gameId)
    controllerWS = websocket.WebSocket()
    controllerWS.connect("ws://127.0.0.1:8887/%s/controller/%s" % (gameId, controllerId))
    displayWS.send('%s show controller welcome screen' % controllerId)
    controllerWS.send('please accept this gesture')
    # NOTE(review): recv() blocks — if the server never relays a message
    # back, this script hangs here; confirm the server echoes/forwards.
    print 'DISPLAY GOT:', displayWS.recv()
    print 'CONTROLLER GOT:', controllerWS.recv()
    displayWS.close()
    controllerWS.close()
if __name__ == '__main__':
main()
| 25.53125 | 89 | 0.682987 |
import os
import sys
if __name__ == '__main__':
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../utils'))
import websocket
def main():
gameId = 'abc123'
controllerId = '90210'
displayWS = websocket.WebSocket()
displayWS.connect("ws://127.0.0.1:8887/%s/display" % gameId)
controllerWS = websocket.WebSocket()
controllerWS.connect("ws://127.0.0.1:8887/%s/controller/%s" % (gameId, controllerId))
displayWS.send('%s show controller welcome screen' % controllerId)
controllerWS.send('please accept this gesture')
print 'DISPLAY GOT:', displayWS.recv()
print 'CONTROLLER GOT:', controllerWS.recv()
displayWS.close()
controllerWS.close()
if __name__ == '__main__':
main()
| false | true |
f71d1fd277bd074b8e2770f851db3f9f3f26dc60 | 6,560 | py | Python | api/app/helpers/cluster_task.py | riszkymf/RESTKnot | 2788e0cce127e6d66fdb72e81b31a983e89979d2 | [
"MIT"
] | null | null | null | api/app/helpers/cluster_task.py | riszkymf/RESTKnot | 2788e0cce127e6d66fdb72e81b31a983e89979d2 | [
"MIT"
] | null | null | null | api/app/helpers/cluster_task.py | riszkymf/RESTKnot | 2788e0cce127e6d66fdb72e81b31a983e89979d2 | [
"MIT"
] | null | null | null | from app import celery
from celery.result import AsyncResult
from app.libs import utils
from app.helpers import command
from app.models import model
from app.helpers import cluster_master, cluster_slave
from app import cs_storage
@celery.task(bind=True)
def get_cluster_data_master(self, id_master):
    """Return the AsyncResult of a previously dispatched cluster_task_master run."""
    return AsyncResult(id=id_master, app=cluster_task_master)
@celery.task(bind=True)
def get_cluster_data_slave(self, id_slave):
    """Return the AsyncResult of a previously dispatched cluster_task_slave run."""
    return AsyncResult(id=id_slave, app=cluster_task_slave)
@celery.task(bind=True)
def get_cluster_data_master_unset(self, id_master):
    """Return the AsyncResult of a previously dispatched unset_cluster_master run."""
    return AsyncResult(id=id_master, app=unset_cluster_master)
@celery.task(bind=True)
def get_cluster_data_slave_unset(self, id_slave):
    """Return the AsyncResult of a previously dispatched unset_cluster_slave run."""
    return AsyncResult(id=id_slave, app=unset_cluster_slave)
@celery.task(bind=True)
def cluster_task_master(self, tags):
    """Push a zone's full configuration to every master server.

    For each row in the cs_master table, builds one batch of knot-style
    commands (conf-begin, insert zone config, master/notify/acl entries,
    file/module/serial-policy settings, conf-commit) and POSTs it to the
    master's /api/command_rest endpoint.

    Args:
        tags (dict): must contain 'id_zone', the zone to configure.

    Returns:
        list: one dict per master with its config name, server name, and
        the 'data'/'times' fields of the HTTP response.
    """
    respons = []
    result = []
    id_zone = tags['id_zone']
    master_data = None
    try:
        master_data = model.get_all("cs_master")
    except Exception as e:
        # Re-raised unchanged so Celery records the task as failed.
        raise e
    else:
        try:
            data_zone = model.get_by_id("zn_zone", "id_zone", id_zone)[0]
        except Exception as e:
            raise e
        for i in master_data:
            print("Execute Master: "+str(i['nm_master']))
            urls = "http://"+i['ip_master']+":"+i['port']+"/api/command_rest"
            # Assemble the ordered command batch for this master.
            data_commands = list()
            data_commands.append(command.conf_begin_http_cl())
            ffi_insert_conf = cluster_master.insert_config_zone(data_zone, i['nm_config'])
            data_commands.append(ffi_insert_conf)
            ffi_master = cluster_master.master_create_json_master(data_zone, i['nm_config'])
            data_commands.append(ffi_master)
            ffi_notify = None
            # notify and acl helpers return one command per peer, so extend
            # the batch element by element.
            ffi_notify = cluster_master.master_create_json_notify(data_zone, i['nm_config'], urls)
            for i_not in ffi_notify:
                data_commands.append(i_not)
            ffi_acl = None
            ffi_acl = cluster_master.master_create_json_acl(data_zone, i['nm_config'], urls)
            for i_ac in ffi_acl:
                data_commands.append(i_ac)
            ffi_set_files = cluster_master.set_file_all(data_zone)
            data_commands.append(ffi_set_files)
            ffi_set_module = cluster_master.set_mods_stats_all(data_zone, "mod-stats/default")
            data_commands.append(ffi_set_module)
            ffi_serial_policy = cluster_master.set_serial_policy_all(data_zone, "dateserial")
            data_commands.append(ffi_serial_policy)
            data_commands.append(command.conf_commit_http_cl())
            # Send the whole batch in one request and record the outcome.
            result = utils.send_http_clusters(urls, data_commands)
            respons.append({
                "config": i['nm_config'],
                "nm_server": i['nm_master'],
                "data": result['data'],
                "time": result['times']
            })
    return respons
@celery.task(bind=True)
def cluster_task_slave(self, tags):
    """Push a zone's slave configuration to every slave node.

    Mirrors cluster_task_master: for each row of the v_cs_slave_node view,
    builds a conf-begin ... conf-commit command batch (zone config, master
    reference, acl, file/module/serial-policy settings) and POSTs it to the
    node's /api/command_rest endpoint.

    Args:
        tags (dict): must contain 'id_zone', the zone to configure.

    Returns:
        list: one dict per slave node with its server/config name and the
        'data'/'times' fields of the HTTP response.
    """
    respons = []
    result = []
    id_zone = tags['id_zone']
    try:
        slave_data = model.get_all("v_cs_slave_node")
    except Exception as e:
        # Re-raised unchanged so Celery records the task as failed.
        raise e
    else:
        try:
            data_zone = model.get_by_id("zn_zone", "id_zone", id_zone)[0]
        except Exception as e:
            raise e
        for i in slave_data:
            print("Execute Slave: "+str(i['nm_slave_node']))
            urls = "http://"+i['ip_slave_node']+":"+i['port_slave_node']+"/api/command_rest"
            # Assemble the ordered command batch for this slave node.
            data_test = list()
            cf_begin = command.conf_begin_http_cl()
            data_test.append(cf_begin)
            ffi_insert_conf = cluster_slave.insert_config_zone(data_zone)
            data_test.append(ffi_insert_conf)
            ffi_slave_master = cluster_slave.master_create_json(data_zone, i['nm_master'])
            data_test.append(ffi_slave_master)
            ffi_slave_acl = cluster_slave.create_json_acl(data_zone, i['nm_master'])
            data_test.append(ffi_slave_acl)
            # File/module/serial-policy settings are shared with the master path.
            ffi_set_files = cluster_master.set_file_all(data_zone)
            data_test.append(ffi_set_files)
            ffi_set_module = cluster_master.set_mods_stats_all(data_zone, "mod-stats/default")
            data_test.append(ffi_set_module)
            ffi_serial_policy = cluster_master.set_serial_policy_all(data_zone, "dateserial")
            data_test.append(ffi_serial_policy)
            cf_commit = command.conf_commit_http_cl()
            data_test.append(cf_commit)
            # Send the whole batch in one request and record the outcome.
            result = utils.send_http_clusters(urls, data_test)
            respons.append({
                "server": i['nm_config'],
                "data": result['data'],
                "time": result['times']
            })
    return respons
@celery.task(bind=True)
def unset_cluster_master(self, tags):
    """Remove the zone described by ``tags`` from every master server.

    For each row in the cs_master table, builds a conf-begin / unset /
    conf-commit command batch and POSTs it to the master's
    /api/command_rest endpoint.

    Args:
        tags (dict): must contain 'id_zone', the zone to unset.

    Returns:
        list: one HTTP response (as returned by utils.send_http_clusters)
        per master server.
    """
    result = []
    id_zone = tags['id_zone']
    # Let lookup failures propagate to Celery directly; the previous
    # `except Exception as e: raise e` wrappers only re-raised unchanged.
    data_zone = model.get_by_id("zn_zone", "id_zone", id_zone)[0]
    master_data = model.get_all("cs_master")
    for master in master_data:
        commands = list()
        url_fix = "http://" + master['ip_master'] + ":" + master['port']
        master_server_url = url_fix + "/api/command_rest"
        commands.append(command.conf_begin_http_cl())
        # Build the zone-removal command for this zone name.
        commands.append(command.unset_cluster_command_new(tags, data_zone['nm_zone']))
        commands.append(command.conf_commit_http_cl())
        response = utils.send_http_clusters(master_server_url, commands)
        result.append(response)
    return result
@celery.task(bind=True)
def unset_cluster_slave(self, tags):
    """Remove the zone described by ``tags`` from every slave node.

    For each row of the v_cs_slave_node view, builds a conf-begin / unset /
    conf-commit command batch and POSTs it to the node's
    /api/command_rest endpoint.

    Args:
        tags (dict): must contain 'id_zone', the zone to unset.

    Returns:
        list: one HTTP response (as returned by utils.send_http_clusters)
        per slave node.

    Note:
        The original implementation rebound the name ``data_slave`` (the
        list being iterated) to the per-node command list inside the loop.
        Iteration was unaffected because the iterator was already bound,
        but the shadowing was misleading; distinct names are used now.
    """
    result = []
    id_zone = tags['id_zone']
    # Let lookup failures propagate to Celery directly; the previous
    # `except Exception as e: raise e` wrappers only re-raised unchanged.
    data_zone = model.get_by_id("zn_zone", "id_zone", id_zone)[0]
    slave_nodes = model.get_all("v_cs_slave_node")
    for node in slave_nodes:
        commands = list()
        url_fix = "http://" + node['ip_slave_node'] + ":" + node['port_slave_node']
        slave_server_url = url_fix + "/api/command_rest"
        commands.append(command.conf_begin_http_cl())
        # Build the zone-removal command for this zone name.
        commands.append(command.unset_cluster_command_new(tags, data_zone['nm_zone']))
        commands.append(command.conf_commit_http_cl())
        http_response_slave = utils.send_http_clusters(slave_server_url, commands)
        result.append(http_response_slave)
    return result
| 37.919075 | 98 | 0.647713 | from app import celery
from celery.result import AsyncResult
from app.libs import utils
from app.helpers import command
from app.models import model
from app.helpers import cluster_master, cluster_slave
from app import cs_storage
@celery.task(bind=True)
def get_cluster_data_master(self, id_master):
res_master = AsyncResult(id=id_master, app=cluster_task_master)
return res_master
@celery.task(bind=True)
def get_cluster_data_slave(self, id_slave):
res_slave = AsyncResult(id=id_slave, app=cluster_task_slave)
return res_slave
@celery.task(bind=True)
def get_cluster_data_master_unset(self, id_master):
res_master = AsyncResult(id=id_master, app=unset_cluster_master)
return res_master
@celery.task(bind=True)
def get_cluster_data_slave_unset(self, id_slave):
res_slave = AsyncResult(id=id_slave, app=unset_cluster_slave)
return res_slave
@celery.task(bind=True)
def cluster_task_master(self, tags):
respons = []
result = []
id_zone = tags['id_zone']
master_data = None
try:
master_data = model.get_all("cs_master")
except Exception as e:
raise e
else:
try:
data_zone = model.get_by_id("zn_zone", "id_zone", id_zone)[0]
except Exception as e:
raise e
for i in master_data:
print("Execute Master: "+str(i['nm_master']))
urls = "http://"+i['ip_master']+":"+i['port']+"/api/command_rest"
data_commands = list()
data_commands.append(command.conf_begin_http_cl())
ffi_insert_conf = cluster_master.insert_config_zone(data_zone, i['nm_config'])
data_commands.append(ffi_insert_conf)
ffi_master = cluster_master.master_create_json_master(data_zone, i['nm_config'])
data_commands.append(ffi_master)
ffi_notify = None
ffi_notify = cluster_master.master_create_json_notify(data_zone, i['nm_config'], urls)
for i_not in ffi_notify:
data_commands.append(i_not)
ffi_acl = None
ffi_acl = cluster_master.master_create_json_acl(data_zone, i['nm_config'], urls)
for i_ac in ffi_acl:
data_commands.append(i_ac)
ffi_set_files = cluster_master.set_file_all(data_zone)
data_commands.append(ffi_set_files)
ffi_set_module = cluster_master.set_mods_stats_all(data_zone, "mod-stats/default")
data_commands.append(ffi_set_module)
ffi_serial_policy = cluster_master.set_serial_policy_all(data_zone, "dateserial")
data_commands.append(ffi_serial_policy)
data_commands.append(command.conf_commit_http_cl())
result = utils.send_http_clusters(urls, data_commands)
respons.append({
"config": i['nm_config'],
"nm_server": i['nm_master'],
"data": result['data'],
"time": result['times']
})
return respons
@celery.task(bind=True)
def cluster_task_slave(self, tags):
respons = []
result = []
id_zone = tags['id_zone']
try:
slave_data = model.get_all("v_cs_slave_node")
except Exception as e:
raise e
else:
try:
data_zone = model.get_by_id("zn_zone", "id_zone", id_zone)[0]
except Exception as e:
raise e
for i in slave_data:
print("Execute Slave: "+str(i['nm_slave_node']))
urls = "http://"+i['ip_slave_node']+":"+i['port_slave_node']+"/api/command_rest"
data_test = list()
cf_begin = command.conf_begin_http_cl()
data_test.append(cf_begin)
ffi_insert_conf = cluster_slave.insert_config_zone(data_zone)
data_test.append(ffi_insert_conf)
ffi_slave_master = cluster_slave.master_create_json(data_zone, i['nm_master'])
data_test.append(ffi_slave_master)
ffi_slave_acl = cluster_slave.create_json_acl(data_zone, i['nm_master'])
data_test.append(ffi_slave_acl)
ffi_set_files = cluster_master.set_file_all(data_zone)
data_test.append(ffi_set_files)
ffi_set_module = cluster_master.set_mods_stats_all(data_zone, "mod-stats/default")
data_test.append(ffi_set_module)
ffi_serial_policy = cluster_master.set_serial_policy_all(data_zone, "dateserial")
data_test.append(ffi_serial_policy)
cf_commit = command.conf_commit_http_cl()
data_test.append(cf_commit)
result = utils.send_http_clusters(urls, data_test)
respons.append({
"server": i['nm_config'],
"data": result['data'],
"time": result['times']
})
return respons
@celery.task(bind=True)
def unset_cluster_master(self, tags):
result = []
id_zone = tags['id_zone']
try:
data_zone = model.get_by_id("zn_zone", "id_zone", id_zone)[0]
except Exception as e:
raise e
try:
master_data = model.get_all("cs_master")
except Exception as e:
raise e
for i in master_data:
data = list()
url_fix= "http://"+i['ip_master']+":"+i['port']
master_server_url = url_fix+"/api/command_rest"
data.append(command.conf_begin_http_cl())
master_command = command.unset_cluster_command_new(tags, data_zone['nm_zone'])
data.append(master_command)
data.append(command.conf_commit_http_cl())
response = utils.send_http_clusters(master_server_url, data)
result.append(response)
return result
@celery.task(bind=True)
def unset_cluster_slave(self, tags):
    """Remove a zone's cluster configuration from every slave node.

    Mirrors ``unset_cluster_master`` but iterates the ``v_cs_slave_node``
    view. The original implementation reused the name ``data_slave`` for
    both the node list being iterated and the per-node payload list; the
    two are now kept distinct.

    :param tags: dict holding at least ``id_zone`` identifying the zone.
    :return: list of HTTP responses, one per slave node.
    :raises Exception: propagated unchanged from the model lookups.
    """
    results = []
    data_zone = model.get_by_id("zn_zone", "id_zone", tags["id_zone"])[0]
    slave_nodes = model.get_all("v_cs_slave_node")
    for node in slave_nodes:
        slave_server_url = "http://{0}:{1}/api/command_rest".format(
            node["ip_slave_node"], node["port_slave_node"]
        )
        # Begin/commit bracket around the unset command, same as for masters.
        payload = [
            command.conf_begin_http_cl(),
            command.unset_cluster_command_new(tags, data_zone["nm_zone"]),
            command.conf_commit_http_cl(),
        ]
        results.append(utils.send_http_clusters(slave_server_url, payload))
    return results
| true | true |
f71d2199e355f7f3ca1e600d31a19dd65f515834 | 16,247 | py | Python | lemur/certificates/models.py | peschmae/lemur | c10f4b64835ac6d6fe75306ecb36be793ccb7288 | [
"Apache-2.0"
] | null | null | null | lemur/certificates/models.py | peschmae/lemur | c10f4b64835ac6d6fe75306ecb36be793ccb7288 | [
"Apache-2.0"
] | 2 | 2021-02-10T02:29:45.000Z | 2021-04-30T21:40:40.000Z | lemur/certificates/models.py | peschmae/lemur | c10f4b64835ac6d6fe75306ecb36be793ccb7288 | [
"Apache-2.0"
] | null | null | null | """
.. module: lemur.certificates.models
:platform: Unix
:copyright: (c) 2018 by Netflix Inc., see AUTHORS for more
:license: Apache, see LICENSE for more details.
.. moduleauthor:: Kevin Glisson <kglisson@netflix.com>
"""
from datetime import timedelta
import arrow
from cryptography import x509
from flask import current_app
from idna.core import InvalidCodepoint
from sqlalchemy import (
event,
Integer,
ForeignKey,
String,
PassiveDefault,
func,
Column,
Text,
Boolean,
Index,
)
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import relationship
from sqlalchemy.sql.expression import case, extract
from sqlalchemy_utils.types.arrow import ArrowType
from werkzeug.utils import cached_property
from lemur.common import defaults, utils, validators
from lemur.constants import SUCCESS_METRIC_STATUS, FAILURE_METRIC_STATUS
from lemur.database import db
from lemur.domains.models import Domain
from lemur.extensions import metrics
from lemur.extensions import sentry
from lemur.models import (
certificate_associations,
certificate_source_associations,
certificate_destination_associations,
certificate_notification_associations,
certificate_replacement_associations,
roles_certificates,
pending_cert_replacement_associations,
)
from lemur.plugins.base import plugins
from lemur.policies.models import RotationPolicy
from lemur.utils import Vault
def get_sequence(name):
    """Split *name* into its root and a trailing numeric sequence.

    Returns ``(root, sequence)`` when the name ends in ``-<int>``;
    otherwise ``(name, None)``. An eight-character numeric suffix is
    assumed to be a date stamp rather than a sequence counter, so the
    name is returned untouched in that case as well.
    """
    root, sep, tail = name.rpartition("-")
    if not sep:
        # No dash at all: nothing to split off.
        return name, None
    try:
        sequence = int(tail)
    except ValueError:
        # Suffix is not numeric; treat the whole string as the root.
        return name, None
    if len(tail) == 8:
        # Eight digits look like a YYYYMMDD date stamp, not a counter.
        return name, None
    return root, sequence
def get_or_increase_name(name, serial):
    """Return a certificate name that is unique among stored certificates.
    Tries, in order: the plain *name*; ``<name>-<SERIAL_HEX>``; and finally
    ``<root>-<n>`` where ``n`` is one greater than the highest numeric
    suffix found among certificates whose names share the serial prefix.
    :param name: desired (slugged) certificate name
    :param serial: certificate serial number; rendered as uppercase hex
    """
    certificates = Certificate.query.filter(Certificate.name == name).all()
    if not certificates:
        return name
    # Disambiguate by appending the serial as uppercase hex (no "0x" prefix).
    serial_name = "{0}-{1}".format(name, hex(int(serial))[2:].upper())
    certificates = Certificate.query.filter(Certificate.name == serial_name).all()
    if not certificates:
        return serial_name
    # Still taken: collect every numeric suffix already used on this prefix.
    certificates = Certificate.query.filter(
        Certificate.name.ilike("{0}%".format(serial_name))
    ).all()
    ends = [0]
    # NOTE(review): ``root`` is overwritten on each loop pass; the value used in
    # the return comes from the *last* matching certificate — confirm intended.
    root, end = get_sequence(serial_name)
    for cert in certificates:
        root, end = get_sequence(cert.name)
        if end:
            ends.append(end)
    return "{0}-{1}".format(root, max(ends) + 1)
class Certificate(db.Model):
    """SQLAlchemy model for an issued (or imported) x509 certificate.

    Most x509 metadata (issuer, cn, san, validity window, serial, signing
    algorithm, key strength) is parsed out of ``body`` at construction time
    and denormalized into columns so it can be queried without re-parsing
    the PEM on every access.
    """

    __tablename__ = "certificates"
    # gin_trgm_ops = trigram indexes, used for fast substring/ILIKE searches.
    __table_args__ = (
        Index(
            "ix_certificates_cn",
            "cn",
            postgresql_ops={"cn": "gin_trgm_ops"},
            postgresql_using="gin",
        ),
        Index(
            "ix_certificates_name",
            "name",
            postgresql_ops={"name": "gin_trgm_ops"},
            postgresql_using="gin",
        ),
    )
    id = Column(Integer, primary_key=True)
    ix = Index(
        "ix_certificates_id_desc", id.desc(), postgresql_using="btree", unique=True
    )
    external_id = Column(String(128))
    owner = Column(String(128), nullable=False)
    name = Column(String(256), unique=True)
    description = Column(String(1024))
    notify = Column(Boolean, default=True)
    body = Column(Text(), nullable=False)
    chain = Column(Text())
    csr = Column(Text())
    # Vault is an encrypted-at-rest column type.
    private_key = Column(Vault)
    issuer = Column(String(128))
    serial = Column(String(128))
    cn = Column(String(128))
    deleted = Column(Boolean, index=True, default=False)
    dns_provider_id = Column(
        Integer(), ForeignKey("dns_providers.id", ondelete="CASCADE"), nullable=True
    )
    not_before = Column(ArrowType)
    not_after = Column(ArrowType)
    not_after_ix = Index("ix_certificates_not_after", not_after.desc())
    date_created = Column(ArrowType, PassiveDefault(func.now()), nullable=False)
    signing_algorithm = Column(String(128))
    status = Column(String(128))
    bits = Column(Integer())
    san = Column(String(1024))  # TODO this should be migrated to boolean
    rotation = Column(Boolean, default=False)
    user_id = Column(Integer, ForeignKey("users.id"))
    authority_id = Column(Integer, ForeignKey("authorities.id", ondelete="CASCADE"))
    root_authority_id = Column(
        Integer, ForeignKey("authorities.id", ondelete="CASCADE")
    )
    rotation_policy_id = Column(Integer, ForeignKey("rotation_policies.id"))
    key_type = Column(String(128))
    notifications = relationship(
        "Notification",
        secondary=certificate_notification_associations,
        backref="certificate",
    )
    destinations = relationship(
        "Destination",
        secondary=certificate_destination_associations,
        backref="certificate",
    )
    sources = relationship(
        "Source", secondary=certificate_source_associations, backref="certificate"
    )
    domains = relationship(
        "Domain", secondary=certificate_associations, backref="certificate"
    )
    roles = relationship("Role", secondary=roles_certificates, backref="certificate")
    replaces = relationship(
        "Certificate",
        secondary=certificate_replacement_associations,
        primaryjoin=id == certificate_replacement_associations.c.certificate_id,  # noqa
        secondaryjoin=id
        == certificate_replacement_associations.c.replaced_certificate_id,  # noqa
        backref="replaced",
    )
    replaced_by_pending = relationship(
        "PendingCertificate",
        secondary=pending_cert_replacement_associations,
        backref="pending_replace",
        viewonly=True,
    )
    logs = relationship("Log", backref="certificate")
    endpoints = relationship("Endpoint", backref="certificate")
    rotation_policy = relationship("RotationPolicy")
    # NOTE(review): presumably consumed by the serialization layer to mask
    # these fields in API output — confirm against the marshalling code.
    sensitive_fields = ("private_key",)

    def __init__(self, **kwargs):
        self.body = kwargs["body"].strip()
        cert = self.parsed_cert
        self.issuer = defaults.issuer(cert)
        self.cn = defaults.common_name(cert)
        self.san = defaults.san(cert)
        self.not_before = defaults.not_before(cert)
        self.not_after = defaults.not_after(cert)
        self.serial = defaults.serial(cert)
        # when destinations are appended they require a valid name.
        if kwargs.get("name"):
            self.name = get_or_increase_name(
                defaults.text_to_slug(kwargs["name"]), self.serial
            )
        else:
            self.name = get_or_increase_name(
                defaults.certificate_name(
                    self.cn, self.issuer, self.not_before, self.not_after, self.san
                ),
                self.serial,
            )
        self.owner = kwargs["owner"]
        if kwargs.get("private_key"):
            self.private_key = kwargs["private_key"].strip()
        if kwargs.get("chain"):
            self.chain = kwargs["chain"].strip()
        if kwargs.get("csr"):
            self.csr = kwargs["csr"].strip()
        self.notify = kwargs.get("notify", True)
        self.destinations = kwargs.get("destinations", [])
        self.notifications = kwargs.get("notifications", [])
        self.description = kwargs.get("description")
        self.roles = list(set(kwargs.get("roles", [])))
        self.replaces = kwargs.get("replaces", [])
        self.rotation = kwargs.get("rotation")
        self.rotation_policy = kwargs.get("rotation_policy")
        self.signing_algorithm = defaults.signing_algorithm(cert)
        self.bits = defaults.bitstrength(cert)
        self.external_id = kwargs.get("external_id")
        self.authority_id = kwargs.get("authority_id")
        self.dns_provider_id = kwargs.get("dns_provider_id")
        for domain in defaults.domains(cert):
            self.domains.append(Domain(name=domain))
        # Check integrity before saving anything into the database.
        # For user-facing API calls, validation should also be done in schema validators.
        self.check_integrity()

    def check_integrity(self):
        """
        Integrity checks: Does the cert have a valid chain and matching private key?
        """
        if self.private_key:
            validators.verify_private_key_match(
                utils.parse_private_key(self.private_key),
                self.parsed_cert,
                error_class=AssertionError,
            )
        if self.chain:
            chain = [self.parsed_cert] + utils.parse_cert_chain(self.chain)
            validators.verify_cert_chain(chain, error_class=AssertionError)

    @cached_property
    def parsed_cert(self):
        """Parsed x509 object for ``body``; cached after first access."""
        assert self.body, "Certificate body not set"
        return utils.parse_certificate(self.body)

    @property
    def active(self):
        return self.notify

    @property
    def organization(self):
        return defaults.organization(self.parsed_cert)

    @property
    def organizational_unit(self):
        return defaults.organizational_unit(self.parsed_cert)

    @property
    def country(self):
        return defaults.country(self.parsed_cert)

    @property
    def state(self):
        return defaults.state(self.parsed_cert)

    @property
    def location(self):
        return defaults.location(self.parsed_cert)

    @property
    def distinguished_name(self):
        return self.parsed_cert.subject.rfc4514_string()

    """
    # Commenting this property as key_type is now added as a column. This code can be removed in future.
    @property
    def key_type(self):
        if isinstance(self.parsed_cert.public_key(), rsa.RSAPublicKey):
            return "RSA{key_size}".format(
                key_size=self.parsed_cert.public_key().key_size
            )
        elif isinstance(self.parsed_cert.public_key(), ec.EllipticCurvePublicKey):
            return get_key_type_from_ec_curve(self.parsed_cert.public_key().curve.name)
    """

    @property
    def validity_remaining(self):
        return abs(self.not_after - arrow.utcnow())

    @property
    def validity_range(self):
        return self.not_after - self.not_before

    @property
    def max_issuance_days(self):
        # Implicitly returns None for authorities not in PUBLIC_CA_AUTHORITY_NAMES.
        public_CA = current_app.config.get("PUBLIC_CA_AUTHORITY_NAMES", [])
        if self.name.lower() in [ca.lower() for ca in public_CA]:
            return current_app.config.get("PUBLIC_CA_MAX_VALIDITY_DAYS", 397)

    @property
    def default_validity_days(self):
        public_CA = current_app.config.get("PUBLIC_CA_AUTHORITY_NAMES", [])
        if self.name.lower() in [ca.lower() for ca in public_CA]:
            return current_app.config.get("PUBLIC_CA_MAX_VALIDITY_DAYS", 397)
        return current_app.config.get("DEFAULT_VALIDITY_DAYS", 365)  # 1 year default

    @property
    def subject(self):
        return self.parsed_cert.subject

    @property
    def public_key(self):
        return self.parsed_cert.public_key()

    @hybrid_property
    def expired(self):
        # can't compare offset-naive and offset-aware datetimes
        if arrow.Arrow.fromdatetime(self.not_after) <= arrow.utcnow():
            return True

    @expired.expression
    def expired(cls):
        return case([(cls.not_after <= arrow.utcnow(), True)], else_=False)

    @hybrid_property
    def revoked(self):
        if "revoked" == self.status:
            return True

    @revoked.expression
    def revoked(cls):
        return case([(cls.status == "revoked", True)], else_=False)

    @hybrid_property
    def has_private_key(self):
        return self.private_key is not None

    @has_private_key.expression
    def has_private_key(cls):
        # Fixed: the previous expression returned True when private_key IS NULL,
        # which is the inverse of the Python-level property above.
        return case([(cls.private_key.is_(None), False)], else_=True)

    @hybrid_property
    def in_rotation_window(self):
        """
        Determines if a certificate is available for rotation based
        on the rotation policy associated.
        :return:
        """
        now = arrow.utcnow()
        end = now + timedelta(days=self.rotation_policy.days)
        if self.not_after <= end:
            return True

    @in_rotation_window.expression
    def in_rotation_window(cls):
        """
        Determines if a certificate is available for rotation based
        on the rotation policy associated.
        :return:
        """
        return case(
            [(extract("day", cls.not_after - func.now()) <= RotationPolicy.days, True)],
            else_=False,
        )

    @property
    def extensions(self):
        """Re-derive a Lemur-style extensions dict from the parsed x509 data."""
        # setup default values
        return_extensions = {"sub_alt_names": {"names": []}}
        try:
            for extension in self.parsed_cert.extensions:
                value = extension.value
                if isinstance(value, x509.BasicConstraints):
                    return_extensions["basic_constraints"] = value
                elif isinstance(value, x509.SubjectAlternativeName):
                    return_extensions["sub_alt_names"]["names"] = value
                elif isinstance(value, x509.ExtendedKeyUsage):
                    return_extensions["extended_key_usage"] = value
                elif isinstance(value, x509.KeyUsage):
                    return_extensions["key_usage"] = value
                elif isinstance(value, x509.SubjectKeyIdentifier):
                    return_extensions["subject_key_identifier"] = {"include_ski": True}
                elif isinstance(value, x509.AuthorityInformationAccess):
                    return_extensions["certificate_info_access"] = {"include_aia": True}
                elif isinstance(value, x509.AuthorityKeyIdentifier):
                    aki = {"use_key_identifier": False, "use_authority_cert": False}
                    if value.key_identifier:
                        aki["use_key_identifier"] = True
                    if value.authority_cert_issuer:
                        aki["use_authority_cert"] = True
                    return_extensions["authority_key_identifier"] = aki
                elif isinstance(value, x509.CRLDistributionPoints):
                    return_extensions["crl_distribution_points"] = {
                        "include_crl_dp": value
                    }
                # TODO: Not supporting custom OIDs yet. https://github.com/Netflix/lemur/issues/665
                else:
                    current_app.logger.warning(
                        "Custom OIDs not yet supported for clone operation."
                    )
        except InvalidCodepoint as e:
            sentry.captureException()
            current_app.logger.warning(
                "Unable to parse extensions due to underscore in dns name"
            )
        except ValueError as e:
            sentry.captureException()
            current_app.logger.warning("Unable to parse")
            current_app.logger.exception(e)
        return return_extensions

    def __repr__(self):
        return "Certificate(name={name})".format(name=self.name)
@event.listens_for(Certificate.destinations, "append")
def update_destinations(target, value, initiator):
    """
    Attempt to upload certificate to the new destination

    :param target: certificate the destination is being attached to
    :param value: destination being appended
    :param initiator: SQLAlchemy event initiator (unused)
    :return:
    """
    destination_plugin = plugins.get(value.plugin_name)
    status = FAILURE_METRIC_STATUS

    # Never upload expired certificates; no metric is emitted for this case.
    if target.expired:
        return
    try:
        if target.private_key or not destination_plugin.requires_key:
            destination_plugin.upload(
                target.name,
                target.body,
                target.private_key,
                target.chain,
                value.options,
            )
            status = SUCCESS_METRIC_STATUS
    except Exception:
        sentry.captureException()
        raise
    finally:
        # Fixed: previously the metric was only sent on the success path,
        # so failed uploads were never counted. ``finally`` records the
        # outcome before the exception (if any) propagates.
        metrics.send(
            "destination_upload",
            "counter",
            1,
            metric_tags={
                "status": status,
                "certificate": target.name,
                "destination": value.label,
            },
        )
@event.listens_for(Certificate.replaces, "append")
def update_replacement(target, value, initiator):
    """
    When a certificate is marked as 'replaced' we should not notify.
    :param target: certificate gaining a replacement entry
    :param value: the certificate being replaced; its notifications are disabled
    :param initiator: SQLAlchemy event initiator (unused)
    :return:
    """
    value.notify = False
| 31.982283 | 104 | 0.636794 | from datetime import timedelta
import arrow
from cryptography import x509
from flask import current_app
from idna.core import InvalidCodepoint
from sqlalchemy import (
event,
Integer,
ForeignKey,
String,
PassiveDefault,
func,
Column,
Text,
Boolean,
Index,
)
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import relationship
from sqlalchemy.sql.expression import case, extract
from sqlalchemy_utils.types.arrow import ArrowType
from werkzeug.utils import cached_property
from lemur.common import defaults, utils, validators
from lemur.constants import SUCCESS_METRIC_STATUS, FAILURE_METRIC_STATUS
from lemur.database import db
from lemur.domains.models import Domain
from lemur.extensions import metrics
from lemur.extensions import sentry
from lemur.models import (
certificate_associations,
certificate_source_associations,
certificate_destination_associations,
certificate_notification_associations,
certificate_replacement_associations,
roles_certificates,
pending_cert_replacement_associations,
)
from lemur.plugins.base import plugins
from lemur.policies.models import RotationPolicy
from lemur.utils import Vault
def get_sequence(name):
if "-" not in name:
return name, None
parts = name.split("-")
try:
seq = int(parts[-1])
except ValueError:
return name, None
if len(parts[-1]) == 8:
return name, None
root = "-".join(parts[:-1])
return root, seq
def get_or_increase_name(name, serial):
certificates = Certificate.query.filter(Certificate.name == name).all()
if not certificates:
return name
serial_name = "{0}-{1}".format(name, hex(int(serial))[2:].upper())
certificates = Certificate.query.filter(Certificate.name == serial_name).all()
if not certificates:
return serial_name
certificates = Certificate.query.filter(
Certificate.name.ilike("{0}%".format(serial_name))
).all()
ends = [0]
root, end = get_sequence(serial_name)
for cert in certificates:
root, end = get_sequence(cert.name)
if end:
ends.append(end)
return "{0}-{1}".format(root, max(ends) + 1)
class Certificate(db.Model):
__tablename__ = "certificates"
__table_args__ = (
Index(
"ix_certificates_cn",
"cn",
postgresql_ops={"cn": "gin_trgm_ops"},
postgresql_using="gin",
),
Index(
"ix_certificates_name",
"name",
postgresql_ops={"name": "gin_trgm_ops"},
postgresql_using="gin",
),
)
id = Column(Integer, primary_key=True)
ix = Index(
"ix_certificates_id_desc", id.desc(), postgresql_using="btree", unique=True
)
external_id = Column(String(128))
owner = Column(String(128), nullable=False)
name = Column(String(256), unique=True)
description = Column(String(1024))
notify = Column(Boolean, default=True)
body = Column(Text(), nullable=False)
chain = Column(Text())
csr = Column(Text())
private_key = Column(Vault)
issuer = Column(String(128))
serial = Column(String(128))
cn = Column(String(128))
deleted = Column(Boolean, index=True, default=False)
dns_provider_id = Column(
Integer(), ForeignKey("dns_providers.id", ondelete="CASCADE"), nullable=True
)
not_before = Column(ArrowType)
not_after = Column(ArrowType)
not_after_ix = Index("ix_certificates_not_after", not_after.desc())
date_created = Column(ArrowType, PassiveDefault(func.now()), nullable=False)
signing_algorithm = Column(String(128))
status = Column(String(128))
bits = Column(Integer())
san = Column(String(1024))
rotation = Column(Boolean, default=False)
user_id = Column(Integer, ForeignKey("users.id"))
authority_id = Column(Integer, ForeignKey("authorities.id", ondelete="CASCADE"))
root_authority_id = Column(
Integer, ForeignKey("authorities.id", ondelete="CASCADE")
)
rotation_policy_id = Column(Integer, ForeignKey("rotation_policies.id"))
key_type = Column(String(128))
notifications = relationship(
"Notification",
secondary=certificate_notification_associations,
backref="certificate",
)
destinations = relationship(
"Destination",
secondary=certificate_destination_associations,
backref="certificate",
)
sources = relationship(
"Source", secondary=certificate_source_associations, backref="certificate"
)
domains = relationship(
"Domain", secondary=certificate_associations, backref="certificate"
)
roles = relationship("Role", secondary=roles_certificates, backref="certificate")
replaces = relationship(
"Certificate",
secondary=certificate_replacement_associations,
primaryjoin=id == certificate_replacement_associations.c.certificate_id,
secondaryjoin=id
== certificate_replacement_associations.c.replaced_certificate_id,
backref="replaced",
)
replaced_by_pending = relationship(
"PendingCertificate",
secondary=pending_cert_replacement_associations,
backref="pending_replace",
viewonly=True,
)
logs = relationship("Log", backref="certificate")
endpoints = relationship("Endpoint", backref="certificate")
rotation_policy = relationship("RotationPolicy")
sensitive_fields = ("private_key",)
def __init__(self, **kwargs):
self.body = kwargs["body"].strip()
cert = self.parsed_cert
self.issuer = defaults.issuer(cert)
self.cn = defaults.common_name(cert)
self.san = defaults.san(cert)
self.not_before = defaults.not_before(cert)
self.not_after = defaults.not_after(cert)
self.serial = defaults.serial(cert)
if kwargs.get("name"):
self.name = get_or_increase_name(
defaults.text_to_slug(kwargs["name"]), self.serial
)
else:
self.name = get_or_increase_name(
defaults.certificate_name(
self.cn, self.issuer, self.not_before, self.not_after, self.san
),
self.serial,
)
self.owner = kwargs["owner"]
if kwargs.get("private_key"):
self.private_key = kwargs["private_key"].strip()
if kwargs.get("chain"):
self.chain = kwargs["chain"].strip()
if kwargs.get("csr"):
self.csr = kwargs["csr"].strip()
self.notify = kwargs.get("notify", True)
self.destinations = kwargs.get("destinations", [])
self.notifications = kwargs.get("notifications", [])
self.description = kwargs.get("description")
self.roles = list(set(kwargs.get("roles", [])))
self.replaces = kwargs.get("replaces", [])
self.rotation = kwargs.get("rotation")
self.rotation_policy = kwargs.get("rotation_policy")
self.signing_algorithm = defaults.signing_algorithm(cert)
self.bits = defaults.bitstrength(cert)
self.external_id = kwargs.get("external_id")
self.authority_id = kwargs.get("authority_id")
self.dns_provider_id = kwargs.get("dns_provider_id")
for domain in defaults.domains(cert):
self.domains.append(Domain(name=domain))
self.check_integrity()
def check_integrity(self):
if self.private_key:
validators.verify_private_key_match(
utils.parse_private_key(self.private_key),
self.parsed_cert,
error_class=AssertionError,
)
if self.chain:
chain = [self.parsed_cert] + utils.parse_cert_chain(self.chain)
validators.verify_cert_chain(chain, error_class=AssertionError)
@cached_property
def parsed_cert(self):
assert self.body, "Certificate body not set"
return utils.parse_certificate(self.body)
@property
def active(self):
return self.notify
@property
def organization(self):
return defaults.organization(self.parsed_cert)
@property
def organizational_unit(self):
return defaults.organizational_unit(self.parsed_cert)
@property
def country(self):
return defaults.country(self.parsed_cert)
@property
def state(self):
return defaults.state(self.parsed_cert)
@property
def location(self):
return defaults.location(self.parsed_cert)
@property
def distinguished_name(self):
return self.parsed_cert.subject.rfc4514_string()
@property
def validity_remaining(self):
return abs(self.not_after - arrow.utcnow())
@property
def validity_range(self):
return self.not_after - self.not_before
@property
def max_issuance_days(self):
public_CA = current_app.config.get("PUBLIC_CA_AUTHORITY_NAMES", [])
if self.name.lower() in [ca.lower() for ca in public_CA]:
return current_app.config.get("PUBLIC_CA_MAX_VALIDITY_DAYS", 397)
@property
def default_validity_days(self):
public_CA = current_app.config.get("PUBLIC_CA_AUTHORITY_NAMES", [])
if self.name.lower() in [ca.lower() for ca in public_CA]:
return current_app.config.get("PUBLIC_CA_MAX_VALIDITY_DAYS", 397)
return current_app.config.get("DEFAULT_VALIDITY_DAYS", 365)
@property
def subject(self):
return self.parsed_cert.subject
@property
def public_key(self):
return self.parsed_cert.public_key()
@hybrid_property
def expired(self):
if arrow.Arrow.fromdatetime(self.not_after) <= arrow.utcnow():
return True
@expired.expression
def expired(cls):
return case([(cls.not_after <= arrow.utcnow(), True)], else_=False)
@hybrid_property
def revoked(self):
if "revoked" == self.status:
return True
@revoked.expression
def revoked(cls):
return case([(cls.status == "revoked", True)], else_=False)
@hybrid_property
def has_private_key(self):
return self.private_key is not None
@has_private_key.expression
def has_private_key(cls):
return case([(cls.private_key.is_(None), True)], else_=False)
@hybrid_property
def in_rotation_window(self):
now = arrow.utcnow()
end = now + timedelta(days=self.rotation_policy.days)
if self.not_after <= end:
return True
@in_rotation_window.expression
def in_rotation_window(cls):
return case(
[(extract("day", cls.not_after - func.now()) <= RotationPolicy.days, True)],
else_=False,
)
@property
def extensions(self):
# setup default values
return_extensions = {"sub_alt_names": {"names": []}}
try:
for extension in self.parsed_cert.extensions:
value = extension.value
if isinstance(value, x509.BasicConstraints):
return_extensions["basic_constraints"] = value
elif isinstance(value, x509.SubjectAlternativeName):
return_extensions["sub_alt_names"]["names"] = value
elif isinstance(value, x509.ExtendedKeyUsage):
return_extensions["extended_key_usage"] = value
elif isinstance(value, x509.KeyUsage):
return_extensions["key_usage"] = value
elif isinstance(value, x509.SubjectKeyIdentifier):
return_extensions["subject_key_identifier"] = {"include_ski": True}
elif isinstance(value, x509.AuthorityInformationAccess):
return_extensions["certificate_info_access"] = {"include_aia": True}
elif isinstance(value, x509.AuthorityKeyIdentifier):
aki = {"use_key_identifier": False, "use_authority_cert": False}
if value.key_identifier:
aki["use_key_identifier"] = True
if value.authority_cert_issuer:
aki["use_authority_cert"] = True
return_extensions["authority_key_identifier"] = aki
elif isinstance(value, x509.CRLDistributionPoints):
return_extensions["crl_distribution_points"] = {
"include_crl_dp": value
}
# TODO: Not supporting custom OIDs yet. https://github.com/Netflix/lemur/issues/665
else:
current_app.logger.warning(
"Custom OIDs not yet supported for clone operation."
)
except InvalidCodepoint as e:
sentry.captureException()
current_app.logger.warning(
"Unable to parse extensions due to underscore in dns name"
)
except ValueError as e:
sentry.captureException()
current_app.logger.warning("Unable to parse")
current_app.logger.exception(e)
return return_extensions
def __repr__(self):
return "Certificate(name={name})".format(name=self.name)
@event.listens_for(Certificate.destinations, "append")
def update_destinations(target, value, initiator):
destination_plugin = plugins.get(value.plugin_name)
status = FAILURE_METRIC_STATUS
if target.expired:
return
try:
if target.private_key or not destination_plugin.requires_key:
destination_plugin.upload(
target.name,
target.body,
target.private_key,
target.chain,
value.options,
)
status = SUCCESS_METRIC_STATUS
except Exception as e:
sentry.captureException()
raise
metrics.send(
"destination_upload",
"counter",
1,
metric_tags={
"status": status,
"certificate": target.name,
"destination": value.label,
},
)
@event.listens_for(Certificate.replaces, "append")
def update_replacement(target, value, initiator):
value.notify = False
| true | true |
f71d21a276159adb2c085859eba9608d1e5a5fba | 9,792 | py | Python | intersight/model/niatelemetry_bootflash_details_all_of.py | CiscoDevNet/intersight-python | 04b721f37c3044646a91c185c7259edfb991557a | [
"Apache-2.0"
] | 5 | 2021-12-16T15:13:32.000Z | 2022-03-29T16:09:54.000Z | intersight/model/niatelemetry_bootflash_details_all_of.py | CiscoDevNet/intersight-python | 04b721f37c3044646a91c185c7259edfb991557a | [
"Apache-2.0"
] | 4 | 2022-01-25T19:05:51.000Z | 2022-03-29T20:18:37.000Z | intersight/model/niatelemetry_bootflash_details_all_of.py | CiscoDevNet/intersight-python | 04b721f37c3044646a91c185c7259edfb991557a | [
"Apache-2.0"
] | 2 | 2020-07-07T15:01:08.000Z | 2022-01-31T04:27:35.000Z | """
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. The Intersight OpenAPI document defines the complete set of properties that are returned in the HTTP response. From that perspective, a client can expect that no additional properties are returned, unless these properties are explicitly defined in the OpenAPI document. However, when a client uses an older version of the Intersight OpenAPI document, the server may send additional properties because the software is more recent than the client. In that case, the client may receive properties that it does not know about. Some generated SDKs perform a strict validation of the HTTP response body against the OpenAPI document. # noqa: E501
The version of the OpenAPI document: 1.0.9-4950
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from intersight.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class NiatelemetryBootflashDetailsAllOf(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('class_id',): {
'NIATELEMETRY.BOOTFLASHDETAILS': "niatelemetry.BootflashDetails",
},
('object_type',): {
'NIATELEMETRY.BOOTFLASHDETAILS': "niatelemetry.BootflashDetails",
},
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'class_id': (str,), # noqa: E501
'object_type': (str,), # noqa: E501
'fw_rev': (str,), # noqa: E501
'model_type': (str,), # noqa: E501
'serial': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'class_id': 'ClassId', # noqa: E501
'object_type': 'ObjectType', # noqa: E501
'fw_rev': 'FwRev', # noqa: E501
'model_type': 'ModelType', # noqa: E501
'serial': 'Serial', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """NiatelemetryBootflashDetailsAllOf - a model defined in OpenAPI
        Args:
        Keyword Args:
            class_id (str): The fully-qualified name of the instantiated, concrete type. This property is used as a discriminator to identify the type of the payload when marshaling and unmarshaling data.. defaults to "niatelemetry.BootflashDetails", must be one of ["niatelemetry.BootflashDetails", ]  # noqa: E501
            object_type (str): The fully-qualified name of the instantiated, concrete type. The value should be the same as the 'ClassId' property.. defaults to "niatelemetry.BootflashDetails", must be one of ["niatelemetry.BootflashDetails", ]  # noqa: E501
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            fw_rev (str): Return firmware revision in boot flash details.. [optional]  # noqa: E501
            model_type (str): Return model type in boot flash details.. [optional]  # noqa: E501
            serial (str): Return serial id in boot flash details.. [optional]  # noqa: E501
        """
        # Pull the two constant discriminator fields (with their defaults)
        # and the framework-level control kwargs out of the user kwargs.
        class_id = kwargs.get('class_id', "niatelemetry.BootflashDetails")
        object_type = kwargs.get('object_type', "niatelemetry.BootflashDetails")
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        # Positional arguments are never valid for generated models.
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.class_id = class_id
        self.object_type = object_type
        # Assign remaining kwargs as model properties, optionally discarding
        # keys the schema does not declare (when the configuration says so).
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
| 51 | 1,678 | 0.635621 |
import re
import sys
from intersight.model_utils import (
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class NiatelemetryBootflashDetailsAllOf(ModelNormal):
allowed_values = {
('class_id',): {
'NIATELEMETRY.BOOTFLASHDETAILS': "niatelemetry.BootflashDetails",
},
('object_type',): {
'NIATELEMETRY.BOOTFLASHDETAILS': "niatelemetry.BootflashDetails",
},
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
return {
'class_id': (str,),
'object_type': (str,),
'fw_rev': (str,),
'model_type': (str,),
'serial': (str,),
}
@cached_property
def discriminator():
return None
attribute_map = {
'class_id': 'ClassId',
'object_type': 'ObjectType',
'fw_rev': 'FwRev',
'model_type': 'ModelType',
'serial': 'Serial',
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs):
class_id = kwargs.get('class_id', "niatelemetry.BootflashDetails")
object_type = kwargs.get('object_type', "niatelemetry.BootflashDetails")
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.class_id = class_id
self.object_type = object_type
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
continue
setattr(self, var_name, var_value)
| true | true |
f71d21c4b3c202177b850cb894bc74b88806a333 | 987 | py | Python | kubernetes/test/test_v1_secret_key_selector.py | dix000p/kubernetes-client-python | 22e473e02883aca1058606092c86311f02f42be2 | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v1_secret_key_selector.py | dix000p/kubernetes-client-python | 22e473e02883aca1058606092c86311f02f42be2 | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v1_secret_key_selector.py | dix000p/kubernetes-client-python | 22e473e02883aca1058606092c86311f02f42be2 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_secret_key_selector import V1SecretKeySelector
class TestV1SecretKeySelector(unittest.TestCase):
    """Unit test stubs for the V1SecretKeySelector model."""

    def setUp(self):
        """No fixtures are needed for these stubs."""

    def tearDown(self):
        """Nothing to clean up."""

    def testV1SecretKeySelector(self):
        """Smoke-test construction of V1SecretKeySelector.

        FIXME: construct the object with mandatory attributes and example
        values, e.g.
        ``kubernetes.client.models.v1_secret_key_selector.V1SecretKeySelector()``.
        """
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| 21.933333 | 105 | 0.716312 |
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_secret_key_selector import V1SecretKeySelector
class TestV1SecretKeySelector(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testV1SecretKeySelector(self):
pass
if __name__ == '__main__':
unittest.main()
| true | true |
f71d21d543fb1b6678cb67ac7df8c90a77d990dc | 27,520 | py | Python | tests/test_game.py | NWalker4483/MSUreconchess | a95659b7fbf71ac3ea01d19c7ec9f3ed63922c86 | [
"BSD-3-Clause"
] | null | null | null | tests/test_game.py | NWalker4483/MSUreconchess | a95659b7fbf71ac3ea01d19c7ec9f3ed63922c86 | [
"BSD-3-Clause"
] | null | null | null | tests/test_game.py | NWalker4483/MSUreconchess | a95659b7fbf71ac3ea01d19c7ec9f3ed63922c86 | [
"BSD-3-Clause"
] | null | null | null | import unittest
from reconchess import LocalGame, WinReason
from chess import *
import time
import random
# Expected sense window for every square: the 3x3 block centred on the square,
# clipped to the board, listed rank by rank from the top (higher rank first)
# and file A->H within each rank.  Keys are generated rank 8 down to rank 1,
# file A to H, matching the order of the original hand-written table.
SENSE_BY_SQUARE = {
    square(f, r): [
        square(df, dr)
        for dr in (r + 1, r, r - 1)
        for df in (f - 1, f, f + 1)
        if 0 <= df <= 7 and 0 <= dr <= 7
    ]
    for r in range(7, -1, -1)
    for f in range(8)
}
class LocalGameSenseTest(unittest.TestCase):
    """Exercises LocalGame.sense_actions() and LocalGame.sense()."""

    def setUp(self):
        self.game = LocalGame()

    def test_senses_actions_content(self):
        """Every board square must be offered as a sense action."""
        actions = self.game.sense_actions()
        for sq in SQUARES:
            self.assertIn(sq, actions)

    def test_sense_invalid(self):
        """Out-of-range square indices are rejected with ValueError."""
        for bad_square in [-1, 65, 66, 1023730, -2]:
            with self.assertRaises(ValueError):
                self.game.sense(bad_square)

    def test_sense_squares(self):
        """sense() reports exactly the clipped 3x3 window, in window order."""
        for sq in SQUARES:
            reported = [s for s, _ in self.game.sense(sq)]
            self.assertEqual(reported, SENSE_BY_SQUARE[sq])

    def test_sense_pieces(self):
        """Each sensed square carries the piece actually on the board there."""
        for center in SQUARES:
            for sq, piece in self.game.sense(center):
                self.assertEqual(piece, self.game.board.piece_at(sq))
class LocalGameTimeTest(unittest.TestCase):
    # Timing-sensitive: verifies that the per-player chess clock only ticks
    # for the player to move, pauses on end_turn(), and freezes on end().
    def test_time(self, seconds=1, turns=20, phases=3):
        # Sleep quantum small enough to fit `turns * phases` sleeps in budget.
        delta = seconds / (turns * phases)
        game = LocalGame(seconds_per_player=seconds)
        turn = True
        time_by_color = game.seconds_left_by_color.copy()
        game.start()
        for i in range(turns):
            for _ in range(phases):
                start = game.get_seconds_left()
                time.sleep(delta)
                end = game.get_seconds_left()
                # The active player's clock must have ticked down by ~delta.
                self.assertAlmostEqual(start - end, delta, places=2)
            time_by_color[turn] = game.get_seconds_left()
            turn = not turn
            game.end_turn()
            # After the turn flip we should see the other player's clock.
            self.assertAlmostEqual(game.get_seconds_left(), time_by_color[turn], places=2)
        game.end()
        self.assertAlmostEqual(game.get_seconds_left(), time_by_color[turn], places=2)
        time.sleep(delta)
        # Clocks must be frozen once the game has ended.
        self.assertAlmostEqual(game.get_seconds_left(), time_by_color[turn], places=2)
class LocalGameMoveActionsTest(unittest.TestCase):
    """Checks the move set that LocalGame offers via move_actions()."""

    # Diagonal pawn "captures" that must be offered from the start position
    # even though no enemy piece is there — under hidden information the
    # player cannot know that, so the action must still be listed.
    STARTING_WHITE_PAWN_CAPTURES = [
        Move(A2, B3),
        Move(B2, A3), Move(B2, C3),
        Move(C2, B3), Move(C2, D3),
        Move(D2, C3), Move(D2, E3),
        Move(E2, D3), Move(E2, F3),
        Move(F2, E3), Move(F2, G3),
        Move(G2, F3), Move(G2, H3),
        Move(H2, G3),
    ]
    BLACK_STARTING_PAWN_CAPTURES = [
        Move(A7, B6),
        Move(B7, A6), Move(B7, C6),
        Move(C7, B6), Move(C7, D6),
        Move(D7, C6), Move(D7, E6),
        Move(E7, D6), Move(E7, F6),
        Move(F7, E6), Move(F7, G6),
        Move(G7, F6), Move(G7, H6),
        Move(H7, G6),
    ]

    def setUp(self):
        self.game = LocalGame()

    def test_starting_pawn_capture_moves(self):
        """Both colors' phantom pawn captures are offered at game start."""
        move_actions = self.game.move_actions()
        for move in self.STARTING_WHITE_PAWN_CAPTURES:
            self.assertIn(move, move_actions)
        self.game.board.turn = BLACK
        move_actions = self.game.move_actions()
        for move in self.BLACK_STARTING_PAWN_CAPTURES:
            self.assertIn(move, move_actions)

    def test_pass(self):
        """Passing is requested with None and never listed as an action."""
        self.assertNotIn(None, self.game.move_actions())
        self.assertNotIn(Move.null(), self.game.move_actions())

    def test_superset_fuzz(self, max_turns=500):
        """move_actions() must always contain every true pseudo-legal move."""
        turn = 1
        while not self.game.board.is_game_over() and turn < max_turns:
            truth_moves = set(self.game.board.generate_pseudo_legal_moves())
            recon_moves = set(self.game.move_actions())
            self.assertTrue(recon_moves.issuperset(truth_moves))
            # BUGFIX: random.sample() on a set is deprecated since Python 3.9
            # and raises TypeError on 3.11+; pick from a concrete sequence.
            self.game.board.push(random.choice(list(truth_moves)))
            turn += 1
class LocalGameMoveTest(unittest.TestCase):
    """Behavior of LocalGame.move(): castling, en passant, sliding-piece
    truncation on hidden captures, auto-promotion, passing, and a fuzz run.

    The ASCII diagrams in the method docstrings show the board position set
    by the FEN in the method body (rank 8 at the top, files a-h left to
    right).
    """
    def setUp(self):
        self.game = LocalGame()
    def test_legal_kingside_castle(self):
        """
        . . . . . . . .
        . . . . . . . .
        . . . . . . . .
        . . . . . . . .
        . . . . . . . .
        . . . . . . . .
        . . . . . . . .
        R . . . K . . R
        """
        self.game.board.set_board_fen('8/8/8/8/8/8/8/R3K2R')
        self.game.board.set_castling_fen('KQkq')
        req, taken, opt_capture = self.game.move(Move(E1, G1))
        self.assertEqual(req, taken)
        self.assertIsNone(opt_capture)
        self.assertEqual(self.game.board.board_fen(), '8/8/8/8/8/8/8/R4RK1')
    def test_legal_queenside_castle(self):
        """
        . . . . . . . .
        . . . . . . . .
        . . . . . . . .
        . . . . . . . .
        . . . . . . . .
        . . . . . . . .
        . . . . . . . .
        R . . . K . . R
        """
        self.game.board.set_board_fen('8/8/8/8/8/8/8/R3K2R')
        self.game.board.set_castling_fen('KQkq')
        req, taken, opt_capture = self.game.move(Move(E1, C1))
        self.assertEqual(req, taken)
        self.assertIsNone(opt_capture)
        self.assertEqual(self.game.board.board_fen(), '8/8/8/8/8/8/8/2KR3R')
    def test_queenside_castle_piece_between(self):
        """
        r . P . k b n r    r . . P k b n r    r P . . k b n r
        p p . p p p p p    p p . p p p p p    p p . p p p p p
        . . . . . . . .    . . . . . . . .    . . . . . . . .
        . . . . . . . .    . . . . . . . .    . . . . . . . .
        . . . . . . . .    . . . . . . . .    . . . . . . . .
        . . . . . . . .    . . . . . . . .    . . . . . . . .
        P P . P P P P P    P P . P P P P P    P P . P P P P P
        R . p . K B N R    R . . p K B N R    R p . . K B N R
        """
        # An (unknown) piece anywhere between rook and king voids the castle.
        for fen in ['r1P1kbnr/pp1ppppp/8/8/8/8/PP1PPPPP/R1p1KBNR',
                    'r2Pkbnr/pp1ppppp/8/8/8/8/PP1PPPPP/R2pKBNR',
                    'rP2kbnr/pp1ppppp/8/8/8/8/PP1PPPPP/Rp2KBNR']:
            self.game.board.set_board_fen(fen)
            self.game.board.turn = WHITE
            self.game.turn = WHITE
            req, tak, opt_capture = self.game.move(Move(E1, C1))
            self.assertEqual(tak, None)
            self.game.board.turn = BLACK
            self.game.turn = BLACK
            req, tak, opt_capture = self.game.move(Move(E8, C8))
            self.assertEqual(tak, None)
    def test_kingside_castle_piece_between(self):
        """
        r n b q k P . r    r n b q k . P r
        p p p p p . p p    p p p p p . p p
        . . . . . . . .    . . . . . . . .
        . . . . . . . .    . . . . . . . .
        . . . . . . . .    . . . . . . . .
        . . . . . . . .    . . . . . . . .
        P P P P P . P P    P P P P P . P P
        R N B Q K p . R    R N B Q K . p R
        :return:
        """
        for fen in ['rnbqkP1r/ppppp1pp/8/8/8/8/PPPPP1PP/RNBQKp1R',
                    'rnbqk1Pr/ppppp1pp/8/8/8/8/PPPPP1PP/RNBQK1pR']:
            self.game.board.set_board_fen(fen)
            self.game.board.turn = WHITE
            self.game.turn = WHITE
            req, tak, opt_capture = self.game.move(Move(E1, G1))
            self.assertEqual(tak, None)
            self.game.board.turn = BLACK
            self.game.turn = BLACK
            req, tak, opt_capture = self.game.move(Move(E8, G8))
            self.assertEqual(tak, None)
    def test_queenside_castle_no_rights(self):
        """
        r . . . k . . r
        p p p p p p p p
        . . . . . . . .
        . . . . . . . .
        . . . . . . . .
        . . . . . . . .
        P P P P P P P P
        R . . . K . . R
        """
        self.game.board.set_board_fen('r3k2r/pppppppp/8/8/8/8/PPPPPPPP/R3K2R')
        # Every castling-rights string that lacks white's queenside right.
        self.game.board.turn = WHITE
        self.game.turn = WHITE
        for castling_fen in ['-', 'k', 'q', 'kq', 'Kk', 'Kq', 'Kkq']:
            self.game.board.set_castling_fen(castling_fen)
            with self.assertRaises(ValueError):
                self.game.move(Move(E1, C1))
        self.game.board.turn = BLACK
        self.game.turn = BLACK
        for castling_fen in ['-', 'K', 'Q', 'KQ', 'Kk', 'Qk', 'KQk']:
            self.game.board.set_castling_fen(castling_fen)
            with self.assertRaises(ValueError):
                self.game.move(Move(E8, C8))
    def test_kingside_castle_no_rights(self):
        """
        r . . . k . . r
        p p p p p p p p
        . . . . . . . .
        . . . . . . . .
        . . . . . . . .
        . . . . . . . .
        P P P P P P P P
        R . . . K . . R
        """
        self.game.board.set_board_fen('r3k2r/pppppppp/8/8/8/8/PPPPPPPP/R3K2R')
        self.game.board.turn = WHITE
        self.game.turn = WHITE
        for castling_fen in ['-', 'k', 'q', 'kq', 'Qk', 'Qq', 'Qkq']:
            self.game.board.set_castling_fen(castling_fen)
            with self.assertRaises(ValueError):
                self.game.move(Move(E1, G1))
        self.game.board.turn = BLACK
        self.game.turn = BLACK
        for castling_fen in ['-', 'K', 'Q', 'KQ', 'Kq', 'Qq', 'KQq']:
            self.game.board.set_castling_fen(castling_fen)
            with self.assertRaises(ValueError):
                self.game.move(Move(E8, G8))
    def test_castling_into_check(self):
        """
        . . . . . . . .
        . . . . . . . .
        . . . . . . . .
        . . . . . . . .
        . . . . . . q .
        . . . . . . . .
        . . . . . . . .
        . . . . K . . R
        """
        # Under hidden information, castling into check is allowed.
        self.game.board.set_board_fen('8/8/8/8/6q1/8/8/4K2R')
        self.assertFalse(self.game.board.is_check())
        move = Move(E1, G1)
        req, taken, opt_capture = self.game.move(move)
        self.assertEqual(req, taken)
        self.assertIsNone(opt_capture)
        self.game.board.turn = WHITE
        self.assertTrue(self.game.board.is_check())
    def test_castling_out_of_check(self):
        """
        . . . . . . . .
        . . . . . . . .
        . . . . . . . .
        . . . . . . . .
        . . . . . . . .
        . . . . . . . .
        . . . . . . . .
        q . . . K . . R
        """
        self.game.board.set_board_fen('8/8/8/8/8/8/8/q3K2R')
        self.assertTrue(self.game.board.is_check())
        move = Move(E1, G1)
        req, taken, opt_capture = self.game.move(move)
        self.assertEqual(req, taken)
        self.assertIsNone(opt_capture)
        self.game.board.turn = WHITE
        self.assertFalse(self.game.board.is_check())
    def test_castling_stay_in_check(self):
        """
        . . . . . . . .
        . . . . . . . .
        . . . . . . . .
        . . . . . . . .
        . . . . . . . .
        . . . . . . q .
        . . . . . . . .
        . . . . K . . R
        """
        self.game.board.set_board_fen('8/8/8/8/8/6q1/8/4K2R')
        self.assertTrue(self.game.board.is_check())
        move = Move(E1, G1)
        req, taken, opt_capture = self.game.move(move)
        self.assertEqual(req, taken)
        self.assertIsNone(opt_capture)
        self.game.board.turn = WHITE
        self.assertTrue(self.game.board.is_check())
    def test_en_passant_white(self):
        """
        r n b q k b n r
        p . p p p p p p
        . . . . . . . .
        . . . . . . . .
        P p . . . . . .
        . . . . . . . .
        . P P P P P P P
        R N B Q K B N R
        """
        # test that en passant captures result in the correct capture square
        self.game.board.set_board_fen('rnbqkbnr/p1pppppp/8/8/1p6/8/PPPPPPPP/RNBQKBNR')
        req, taken, opt_capture = self.game.move(Move(A2, A4))
        self.assertEqual(req, taken)
        self.assertIsNone(opt_capture)
        req, taken, opt_capture = self.game.move(Move(B4, A3))
        self.assertEqual(req, taken)
        self.assertIsNotNone(opt_capture)
        # The captured pawn sits on A4, not on the destination square A3.
        self.assertEqual(opt_capture, A4)
    def test_en_passant_black(self):
        """
        r n b q k b n r
        p p p p p . p p
        . . . . . . . .
        . . . . . p P .
        . . . . . . . .
        . . . . . . . .
        P P P P P P . P
        R N B Q K B N R
        """
        # test that en passant captures result in the correct capture square
        self.game.board.set_board_fen('rnbqkbnr/pppppppp/8/6P1/8/8/PPPPPP1P/RNBQKBNR')
        self.game.turn = BLACK
        self.game.board.turn = BLACK
        req, taken, opt_capture = self.game.move(Move(F7, F5))
        self.assertEqual(req, taken)
        self.assertIsNone(opt_capture)
        req, taken, opt_capture = self.game.move(Move(G5, F6))
        self.assertEqual(req, taken)
        self.assertIsNotNone(opt_capture)
        self.assertEqual(opt_capture, F5)
    def test_move_opponent_piece(self):
        # test moving opponent pieces
        b = Board()
        b.turn = BLACK
        for move in b.generate_pseudo_legal_moves():
            with self.assertRaises(ValueError):
                self.game.move(move)
    def test_move_no_piece(self):
        # test a move from a square with no piece
        for from_square in SquareSet(BB_RANK_3 | BB_RANK_4 | BB_RANK_5 | BB_RANK_6):
            for to_square in SQUARES:
                with self.assertRaises(ValueError):
                    m = Move(from_square, to_square)
                    self.game.move(m)
    def test_move_illegal(self):
        # Any move not offered by move_actions() must be rejected.
        for from_square in SquareSet(BB_RANK_1 | BB_RANK_2):
            for to_square in SQUARES:
                move = Move(from_square, to_square)
                if move not in self.game.move_actions():
                    with self.assertRaises(ValueError):
                        self.game.move(move)
    def test_sliding_straight_capture(self):
        """
        . . . . . . . .
        . . . p . . . .
        . . . . . . . .
        . p . R . p . .
        . . . . . . . .
        . . . . . . . .
        . . . . . . . .
        . . . . . . . .
        """
        # requested move -> (move actually taken, capture square or None):
        # a slide past a hidden enemy piece is truncated at the capture.
        result_by_move = {
            Move(D5, C5): (Move(D5, C5), None),
            Move(D5, B5): (Move(D5, B5), B5),
            Move(D5, A5): (Move(D5, B5), B5),
            Move(D5, D6): (Move(D5, D6), None),
            Move(D5, D7): (Move(D5, D7), D7),
            Move(D5, D8): (Move(D5, D7), D7),
            Move(D5, E5): (Move(D5, E5), None),
            Move(D5, F5): (Move(D5, F5), F5),
            Move(D5, G5): (Move(D5, F5), F5),
            Move(D5, H5): (Move(D5, F5), F5),
            Move(D5, D4): (Move(D5, D4), None),
            Move(D5, D3): (Move(D5, D3), None),
            Move(D5, D2): (Move(D5, D2), None),
            Move(D5, D1): (Move(D5, D1), None),
        }
        for expected_req, (expected_taken, expected_capture) in result_by_move.items():
            self.game.board.set_board_fen('8/3p4/8/1p1R1p2/8/8/8/8')
            self.game.board.turn = WHITE
            self.game.turn = WHITE
            req, taken, opt_capture = self.game.move(expected_req)
            self.assertEqual(req, expected_req)
            self.assertEqual(taken, expected_taken)
            self.assertEqual(opt_capture, expected_capture)
    def test_sliding_straight_into_ally(self):
        """
        . . . . . . . .
        . . . p . . . .
        . . . . . . . .
        . p . R . p . .
        . . . . . . . .
        . . . . . . . .
        . . . P . . . .
        . . . . . . . .
        """
        # A slide whose path is blocked by one's own piece is illegal.
        for move in [Move(D5, D2), Move(D5, D1)]:
            self.game.board.set_board_fen('8/3p4/8/1p1R1p2/8/8/3P4/8')
            self.game.board.turn = WHITE
            self.game.turn = WHITE
            with self.assertRaises(ValueError):
                req, taken, opt_capture = self.game.move(move)
    def test_sliding_diagonal_capture(self):
        """
        p . . . . . p .
        . . . . . . . .
        . . . . . . . .
        . . . X . . . .
        . . . . . . . .
        . . . . . . . .
        p . . . . . p .
        . . . . . . . .
        """
        result_by_move = {
            Move(D5, C6): (Move(D5, C6), None),
            Move(D5, B7): (Move(D5, B7), None),
            Move(D5, A8): (Move(D5, A8), A8),
            Move(D5, E6): (Move(D5, E6), None),
            Move(D5, F7): (Move(D5, F7), None),
            Move(D5, G8): (Move(D5, G8), G8),
            Move(D5, E4): (Move(D5, E4), None),
            Move(D5, F3): (Move(D5, F3), None),
            Move(D5, G2): (Move(D5, G2), G2),
            Move(D5, H1): (Move(D5, G2), G2),
            Move(D5, C4): (Move(D5, C4), None),
            Move(D5, B3): (Move(D5, B3), None),
            Move(D5, A2): (Move(D5, A2), A2),
        }
        for expected_req, (expected_taken, expected_capture) in result_by_move.items():
            self.game.board.set_board_fen('p5p1/8/8/3B4/8/8/p5p1/8')
            self.game.board.turn = WHITE
            self.game.turn = WHITE
            req, taken, opt_capture = self.game.move(expected_req)
            self.assertEqual(req, expected_req)
            self.assertEqual(taken, expected_taken)
            self.assertEqual(opt_capture, expected_capture)
    def test_sliding_diagonal_into_ally(self):
        """
        p . . . . . p .
        . . . . . . . .
        . . . . . . . .
        . . . X . . . .
        . . . . . . . .
        . . . . . . . .
        p . . . . . P .
        . . . . . . . .
        """
        for move in [Move(D5, G2), Move(D5, H1)]:
            self.game.board.set_board_fen('p5p1/8/8/3B4/8/8/p5P1/8')
            self.game.board.turn = WHITE
            self.game.turn = WHITE
            with self.assertRaises(ValueError):
                req, taken, opt_capture = self.game.move(move)
    def test_pawn_auto_promotion(self):
        """
        . . . . . . . .
        . . . P . . . .
        . . . . . . . .
        . . . . . . . .
        . . . . . . . .
        . . . . . . . .
        . . . . . . . .
        . . . . . . . .
        """
        # A promotion move without an explicit piece defaults to a queen.
        self.game.board.set_board_fen('8/3P4/8/8/8/8/8/8')
        req, taken, opt_capture = self.game.move(Move(D7, D8))
        self.assertEqual(Move(D7, D8), req)
        self.assertNotEqual(req, taken)
        self.assertEqual(req.to_square, taken.to_square)
        self.assertEqual(req.from_square, taken.from_square)
        self.assertIsNone(req.promotion)
        self.assertEqual(taken.promotion, QUEEN)
    def test_pass(self):
        # Passing (None) is always accepted and produces no capture.
        req, taken, opt_capture = self.game.move(None)
        self.assertEqual(req, None)
        self.assertEqual(taken, None)
        self.assertIsNone(opt_capture)
        self.game.board.turn = BLACK
        req, taken, opt_capture = self.game.move(None)
        self.assertEqual(req, None)
        self.assertEqual(taken, None)
        self.assertIsNone(opt_capture)
        self.game.board.turn = WHITE
        self.game.board.remove_piece_at(0)
        req, taken, opt_capture = self.game.move(None)
        self.assertEqual(req, None)
        self.assertEqual(taken, None)
        self.assertIsNone(opt_capture)
    def test_legal_fuzz(self, max_turns=500):
        # Random legal play: the game board must track a reference Board.
        board = Board()
        turn = 1
        while not board.is_game_over() and turn < max_turns:
            move = random.choice(list(board.generate_pseudo_legal_moves()) + [None])
            req, taken, opt_square = self.game.move(move)
            self.assertEqual(req, taken)
            if move is not None and board.is_capture(move):
                self.assertIsNotNone(opt_square)
            board.push(move if move is not None else Move.null())
            self.assertEqual(self.game.board, board)
            turn += 1
class OpponentMoveResultsTestCase(unittest.TestCase):
    """opponent_move_results() must mirror the capture square of the last move."""

    def test_no_capture(self):
        """A quiet move yields None, and the value survives end_turn and sensing."""
        g = LocalGame()
        g.start()
        _, _, white_result = g.move(Move(A2, A4))
        self.assertIsNone(white_result)
        self.assertEqual(g.opponent_move_results(), white_result)
        g.end_turn()
        self.assertEqual(g.opponent_move_results(), white_result)
        g.sense(E5)
        self.assertEqual(g.opponent_move_results(), white_result)
        _, _, black_result = g.move(Move(F7, F5))
        self.assertIsNone(black_result)
        self.assertEqual(g.opponent_move_results(), black_result)

    def test_capture(self):
        """After dxe5 then dxe4 the capture squares E5 and E4 are reported."""
        g = LocalGame()
        # Locked central pawns: white D4 can take E5, black D5 can take E4.
        g.board.set_board_fen('rnbqkbnr/ppp2ppp/8/3pp3/3PP3/8/PPP2PPP/RNBQKBNR')
        g.start()
        _, _, white_result = g.move(Move(D4, E5))
        self.assertEqual(white_result, E5)
        self.assertEqual(g.opponent_move_results(), white_result)
        g.end_turn()
        self.assertEqual(g.opponent_move_results(), white_result)
        g.sense(E5)
        self.assertEqual(g.opponent_move_results(), white_result)
        _, _, black_result = g.move(Move(D5, E4))
        self.assertEqual(black_result, E4)
        self.assertEqual(g.opponent_move_results(), black_result)
class IsOverTest(unittest.TestCase):
    """Conditions under which LocalGame.is_over() reports a finished game."""

    def test_not_over(self):
        """A freshly started game is still in progress."""
        g = LocalGame()
        g.start()
        self.assertFalse(g.is_over())

    def test_forced_over(self):
        """Calling end() forces the game over."""
        g = LocalGame()
        g.start()
        self.assertFalse(g.is_over())
        g.end()
        self.assertTrue(g.is_over())

    def test_no_time_both(self):
        """A zero clock for both players ends the game immediately."""
        g = LocalGame(seconds_per_player=0)
        g.start()
        self.assertTrue(g.is_over())

    def test_no_time_white(self):
        """White running out of time ends the game."""
        g = LocalGame()
        g.seconds_left_by_color[WHITE] = 0
        g.start()
        self.assertTrue(g.is_over())

    def test_no_time_black(self):
        """Black running out of time ends the game."""
        g = LocalGame()
        g.seconds_left_by_color[BLACK] = 0
        g.start()
        self.assertTrue(g.is_over())

    def test_white_king_captured(self):
        """A position with no white king counts as game over."""
        g = LocalGame()
        g.board.set_board_fen('rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQ1BNR')
        g.start()
        self.assertTrue(g.is_over())

    def test_black_king_captured(self):
        """A position with no black king counts as game over."""
        g = LocalGame()
        g.board.set_board_fen('rnbq1bnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR')
        g.start()
        self.assertTrue(g.is_over())
class WinnerInfoTestCase(unittest.TestCase):
    """get_winner_color() and get_win_reason() for each way a game can end."""

    def test_not_over(self):
        """An in-progress game has neither a winner nor a reason."""
        g = LocalGame()
        g.start()
        self.assertIsNone(g.get_winner_color())
        self.assertIsNone(g.get_win_reason())

    def test_forced_over(self):
        """A force-ended game has no winner and no reason."""
        g = LocalGame()
        g.start()
        g.end()
        self.assertIsNone(g.get_winner_color())
        self.assertIsNone(g.get_win_reason())

    def test_no_time_white(self):
        """White flagging gives black a TIMEOUT win."""
        g = LocalGame()
        g.seconds_left_by_color[WHITE] = 0
        g.start()
        self.assertEqual(g.get_winner_color(), BLACK)
        self.assertEqual(g.get_win_reason(), WinReason.TIMEOUT)

    def test_no_time_black(self):
        """Black flagging gives white a TIMEOUT win."""
        g = LocalGame()
        g.seconds_left_by_color[BLACK] = 0
        g.start()
        self.assertEqual(g.get_winner_color(), WHITE)
        self.assertEqual(g.get_win_reason(), WinReason.TIMEOUT)

    def test_white_king_captured(self):
        """No white king on the board means black won by KING_CAPTURE."""
        g = LocalGame()
        g.board.set_board_fen('rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQ1BNR')
        g.start()
        self.assertEqual(g.get_winner_color(), BLACK)
        self.assertEqual(g.get_win_reason(), WinReason.KING_CAPTURE)

    def test_black_king_captured(self):
        """No black king on the board means white won by KING_CAPTURE."""
        g = LocalGame()
        g.board.set_board_fen('rnbq1bnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR')
        g.start()
        self.assertEqual(g.get_winner_color(), WHITE)
        self.assertEqual(g.get_win_reason(), WinReason.KING_CAPTURE)
class GetGameHistoryTestCase(unittest.TestCase):
    """Game history is withheld until the game has actually finished."""

    def test_no_history_until_game_over(self):
        game = LocalGame()
        # Mid-game queries must all come back empty.
        game.sense(E2)
        self.assertIsNone(game.get_game_history())
        game.move(Move(E2, E4))
        self.assertIsNone(game.get_game_history())
        game.sense(A8)
        game.move(Move(E7, E5))
        self.assertIsNone(game.get_game_history())
        # Play out a quick king capture: Bb5 ... d5, then Bxe8.
        game.sense(E2)
        game.move(Move(F1, B5))
        game.sense(A8)
        game.move(Move(D7, D5))
        game.sense(E8)
        game.move(Move(B5, E8))
        # Once the king falls the game is over and the history appears.
        self.assertTrue(game.is_over())
        self.assertIsNotNone(game.get_game_history())
| 33.643032 | 90 | 0.496475 | import unittest
from reconchess import LocalGame, WinReason
from chess import *
import time
import random
SENSE_BY_SQUARE = {
A8: [A8, B8, A7, B7],
B8: [A8, B8, C8, A7, B7, C7],
C8: [B8, C8, D8, B7, C7, D7],
D8: [C8, D8, E8, C7, D7, E7],
E8: [D8, E8, F8, D7, E7, F7],
F8: [E8, F8, G8, E7, F7, G7],
G8: [F8, G8, H8, F7, G7, H7],
H8: [G8, H8, G7, H7],
A7: [A8, B8, A7, B7, A6, B6],
B7: [A8, B8, C8, A7, B7, C7, A6, B6, C6],
C7: [B8, C8, D8, B7, C7, D7, B6, C6, D6],
D7: [C8, D8, E8, C7, D7, E7, C6, D6, E6],
E7: [D8, E8, F8, D7, E7, F7, D6, E6, F6],
F7: [E8, F8, G8, E7, F7, G7, E6, F6, G6],
G7: [F8, G8, H8, F7, G7, H7, F6, G6, H6],
H7: [G8, H8, G7, H7, G6, H6],
A6: [A7, B7, A6, B6, A5, B5],
B6: [A7, B7, C7, A6, B6, C6, A5, B5, C5],
C6: [B7, C7, D7, B6, C6, D6, B5, C5, D5],
D6: [C7, D7, E7, C6, D6, E6, C5, D5, E5],
E6: [D7, E7, F7, D6, E6, F6, D5, E5, F5],
F6: [E7, F7, G7, E6, F6, G6, E5, F5, G5],
G6: [F7, G7, H7, F6, G6, H6, F5, G5, H5],
H6: [G7, H7, G6, H6, G5, H5],
A5: [A6, B6, A5, B5, A4, B4],
B5: [A6, B6, C6, A5, B5, C5, A4, B4, C4],
C5: [B6, C6, D6, B5, C5, D5, B4, C4, D4],
D5: [C6, D6, E6, C5, D5, E5, C4, D4, E4],
E5: [D6, E6, F6, D5, E5, F5, D4, E4, F4],
F5: [E6, F6, G6, E5, F5, G5, E4, F4, G4],
G5: [F6, G6, H6, F5, G5, H5, F4, G4, H4],
H5: [G6, H6, G5, H5, G4, H4],
A4: [A5, B5, A4, B4, A3, B3],
B4: [A5, B5, C5, A4, B4, C4, A3, B3, C3],
C4: [B5, C5, D5, B4, C4, D4, B3, C3, D3],
D4: [C5, D5, E5, C4, D4, E4, C3, D3, E3],
E4: [D5, E5, F5, D4, E4, F4, D3, E3, F3],
F4: [E5, F5, G5, E4, F4, G4, E3, F3, G3],
G4: [F5, G5, H5, F4, G4, H4, F3, G3, H3],
H4: [G5, H5, G4, H4, G3, H3],
A3: [A4, B4, A3, B3, A2, B2],
B3: [A4, B4, C4, A3, B3, C3, A2, B2, C2],
C3: [B4, C4, D4, B3, C3, D3, B2, C2, D2],
D3: [C4, D4, E4, C3, D3, E3, C2, D2, E2],
E3: [D4, E4, F4, D3, E3, F3, D2, E2, F2],
F3: [E4, F4, G4, E3, F3, G3, E2, F2, G2],
G3: [F4, G4, H4, F3, G3, H3, F2, G2, H2],
H3: [G4, H4, G3, H3, G2, H2],
A2: [A3, B3, A2, B2, A1, B1],
B2: [A3, B3, C3, A2, B2, C2, A1, B1, C1],
C2: [B3, C3, D3, B2, C2, D2, B1, C1, D1],
D2: [C3, D3, E3, C2, D2, E2, C1, D1, E1],
E2: [D3, E3, F3, D2, E2, F2, D1, E1, F1],
F2: [E3, F3, G3, E2, F2, G2, E1, F1, G1],
G2: [F3, G3, H3, F2, G2, H2, F1, G1, H1],
H2: [G3, H3, G2, H2, G1, H1],
A1: [A2, B2, A1, B1],
B1: [A2, B2, C2, A1, B1, C1],
C1: [B2, C2, D2, B1, C1, D1],
D1: [C2, D2, E2, C1, D1, E1],
E1: [D2, E2, F2, D1, E1, F1],
F1: [E2, F2, G2, E1, F1, G1],
G1: [F2, G2, H2, F1, G1, H1],
H1: [G2, H2, G1, H1],
}
class LocalGameSenseTest(unittest.TestCase):
    """Tests for LocalGame.sense() and sense_actions().

    Relies on the module-level SQUARES list and the SENSE_BY_SQUARE table,
    which maps each square to the window of squares (clipped at the board
    edge) that a sense on that square is expected to return.
    """
    def setUp(self):
        self.game = LocalGame()
    def test_senses_actions_content(self):
        """Every board square must be offered as a sense action."""
        sense_actions = self.game.sense_actions()
        for square in SQUARES:
            self.assertIn(square, sense_actions)
    def test_sense_invalid(self):
        """Out-of-range square indices are rejected with ValueError."""
        for square in [-1, 65, 66, 1023730, -2]:
            with self.assertRaises(ValueError):
                self.game.sense(square)
    def test_sense_squares(self):
        """A sense returns exactly the squares listed in SENSE_BY_SQUARE."""
        for square in SQUARES:
            sense_result = self.game.sense(square)
            # sense_result is a list of (square, piece) pairs
            squares = [s for s, p in sense_result]
            self.assertEqual(squares, SENSE_BY_SQUARE[square])
    def test_sense_pieces(self):
        """Sensed pieces must match the true board contents."""
        for sense_square in SQUARES:
            sense_result = self.game.sense(sense_square)
            for square, piece in sense_result:
                self.assertEqual(piece, self.game.board.piece_at(square))
class LocalGameTimeTest(unittest.TestCase):
    def test_time(self, seconds=1, turns=20, phases=3):
        """Check per-player clock bookkeeping.

        Simulates `turns` turns of `phases` phases each, sleeping one fixed
        slice (`delta`) of the clock in every phase, and verifies that:

        - get_seconds_left() ticks down while a turn is in progress,
        - end_turn() switches whose clock is charged,
        - end() freezes the clock (no further decrease after the game ends).

        All timing assertions use assertAlmostEqual(places=2), so the test
        is inherently sensitive to scheduler jitter on a loaded machine.
        """
        delta = seconds / (turns * phases)
        game = LocalGame(seconds_per_player=seconds)
        # colors are booleans in python-chess; True corresponds to WHITE,
        # the side that moves first
        turn = True
        time_by_color = game.seconds_left_by_color.copy()
        game.start()
        for i in range(turns):
            for _ in range(phases):
                start = game.get_seconds_left()
                time.sleep(delta)
                end = game.get_seconds_left()
                # the active player's clock decreased by (about) the sleep
                self.assertAlmostEqual(start - end, delta, places=2)
            # remember this player's remaining time, then hand over the turn
            time_by_color[turn] = game.get_seconds_left()
            turn = not turn
            game.end_turn()
            # after end_turn() the *other* player's clock is reported
            self.assertAlmostEqual(game.get_seconds_left(), time_by_color[turn], places=2)
        game.end()
        self.assertAlmostEqual(game.get_seconds_left(), time_by_color[turn], places=2)
        # clock must not keep running once the game has ended
        time.sleep(delta)
        self.assertAlmostEqual(game.get_seconds_left(), time_by_color[turn], places=2)
class LocalGameMoveActionsTest(unittest.TestCase):
    """Tests for LocalGame.move_actions().

    In reconchess the mover cannot see the opponent, so diagonal pawn
    captures must always be offered even when no enemy piece is actually
    there, passing (None) is not a listed action, and the action set must
    always be a superset of the truly (pseudo-)legal moves.
    """

    # Diagonal pawn "captures" from white's starting rank; offered blindly.
    STARTING_WHITE_PAWN_CAPTURES = [
        Move(A2, B3),
        Move(B2, A3), Move(B2, C3),
        Move(C2, B3), Move(C2, D3),
        Move(D2, C3), Move(D2, E3),
        Move(E2, D3), Move(E2, F3),
        Move(F2, E3), Move(F2, G3),
        Move(G2, F3), Move(G2, H3),
        Move(H2, G3),
    ]
    # Mirror image for black's starting rank.
    BLACK_STARTING_PAWN_CAPTURES = [
        Move(A7, B6),
        Move(B7, A6), Move(B7, C6),
        Move(C7, B6), Move(C7, D6),
        Move(D7, C6), Move(D7, E6),
        Move(E7, D6), Move(E7, F6),
        Move(F7, E6), Move(F7, G6),
        Move(G7, F6), Move(G7, H6),
        Move(H7, G6),
    ]

    def setUp(self):
        self.game = LocalGame()

    def test_starting_pawn_capture_moves(self):
        """All blind pawn captures are offered for both colors at the start."""
        move_actions = self.game.move_actions()
        for move in self.STARTING_WHITE_PAWN_CAPTURES:
            self.assertIn(move, move_actions)
        self.game.board.turn = BLACK
        move_actions = self.game.move_actions()
        for move in self.BLACK_STARTING_PAWN_CAPTURES:
            self.assertIn(move, move_actions)

    def test_pass(self):
        """Passing is done by submitting None/null to move(), not listed as an action."""
        self.assertNotIn(None, self.game.move_actions())
        self.assertNotIn(Move.null(), self.game.move_actions())

    def test_superset_fuzz(self, max_turns=500):
        """Random playout: move_actions() always contains every pseudo-legal move."""
        turn = 1
        while not self.game.board.is_game_over() and turn < max_turns:
            truth_moves = set(self.game.board.generate_pseudo_legal_moves())
            recon_moves = set(self.game.move_actions())
            self.assertTrue(recon_moves.issuperset(truth_moves))
            # random.sample() on a set is deprecated since Python 3.9 and
            # raises TypeError since 3.11, so pick from an explicit list.
            self.game.board.push(random.choice(list(truth_moves)))
            turn += 1
class LocalGameMoveTest(unittest.TestCase):
    """Tests for LocalGame.move().

    move() returns a (requested_move, taken_move, capture_square) triple:
    taken_move may differ from the request (a blocked slide stops early, a
    promoting pawn is auto-queened) and capture_square is the square of a
    captured piece, or None when nothing was captured.
    """
    def setUp(self):
        self.game = LocalGame()
    def test_legal_kingside_castle(self):
        """O-O is taken exactly as requested when legal."""
        self.game.board.set_board_fen('8/8/8/8/8/8/8/R3K2R')
        self.game.board.set_castling_fen('KQkq')
        req, taken, opt_capture = self.game.move(Move(E1, G1))
        self.assertEqual(req, taken)
        self.assertIsNone(opt_capture)
        self.assertEqual(self.game.board.board_fen(), '8/8/8/8/8/8/8/R4RK1')
    def test_legal_queenside_castle(self):
        """O-O-O is taken exactly as requested when legal."""
        self.game.board.set_board_fen('8/8/8/8/8/8/8/R3K2R')
        self.game.board.set_castling_fen('KQkq')
        req, taken, opt_capture = self.game.move(Move(E1, C1))
        self.assertEqual(req, taken)
        self.assertIsNone(opt_capture)
        self.assertEqual(self.game.board.board_fen(), '8/8/8/8/8/8/8/2KR3R')
    def test_queenside_castle_piece_between(self):
        """Castling with a piece between king and rook yields taken=None."""
        # each FEN places a blocker on one of the queenside squares, for
        # both colors at once
        for fen in ['r1P1kbnr/pp1ppppp/8/8/8/8/PP1PPPPP/R1p1KBNR',
                    'r2Pkbnr/pp1ppppp/8/8/8/8/PP1PPPPP/R2pKBNR',
                    'rP2kbnr/pp1ppppp/8/8/8/8/PP1PPPPP/Rp2KBNR']:
            self.game.board.set_board_fen(fen)
            # white attempts O-O-O
            self.game.board.turn = WHITE
            self.game.turn = WHITE
            req, tak, opt_capture = self.game.move(Move(E1, C1))
            self.assertEqual(tak, None)
            # black attempts O-O-O
            self.game.board.turn = BLACK
            self.game.turn = BLACK
            req, tak, opt_capture = self.game.move(Move(E8, C8))
            self.assertEqual(tak, None)
    def test_kingside_castle_piece_between(self):
        """Kingside castling with a blocker on f/g yields taken=None."""
        for fen in ['rnbqkP1r/ppppp1pp/8/8/8/8/PPPPP1PP/RNBQKp1R',
                    'rnbqk1Pr/ppppp1pp/8/8/8/8/PPPPP1PP/RNBQK1pR']:
            self.game.board.set_board_fen(fen)
            self.game.board.turn = WHITE
            self.game.turn = WHITE
            req, tak, opt_capture = self.game.move(Move(E1, G1))
            self.assertEqual(tak, None)
            self.game.board.turn = BLACK
            self.game.turn = BLACK
            req, tak, opt_capture = self.game.move(Move(E8, G8))
            self.assertEqual(tak, None)
    def test_queenside_castle_no_rights(self):
        """O-O-O without the queenside castling right raises ValueError."""
        self.game.board.set_board_fen('r3k2r/pppppppp/8/8/8/8/PPPPPPPP/R3K2R')
        self.game.board.turn = WHITE
        self.game.turn = WHITE
        # every castling-rights combination that lacks white's 'Q'
        for castling_fen in ['-', 'k', 'q', 'kq', 'Kk', 'Kq', 'Kkq']:
            self.game.board.set_castling_fen(castling_fen)
            with self.assertRaises(ValueError):
                self.game.move(Move(E1, C1))
        self.game.board.turn = BLACK
        self.game.turn = BLACK
        # every castling-rights combination that lacks black's 'q'
        for castling_fen in ['-', 'K', 'Q', 'KQ', 'Kk', 'Qk', 'KQk']:
            self.game.board.set_castling_fen(castling_fen)
            with self.assertRaises(ValueError):
                self.game.move(Move(E8, C8))
    def test_kingside_castle_no_rights(self):
        """O-O without the kingside castling right raises ValueError."""
        self.game.board.set_board_fen('r3k2r/pppppppp/8/8/8/8/PPPPPPPP/R3K2R')
        self.game.board.turn = WHITE
        self.game.turn = WHITE
        # every castling-rights combination that lacks white's 'K'
        for castling_fen in ['-', 'k', 'q', 'kq', 'Qk', 'Qq', 'Qkq']:
            self.game.board.set_castling_fen(castling_fen)
            with self.assertRaises(ValueError):
                self.game.move(Move(E1, G1))
        self.game.board.turn = BLACK
        self.game.turn = BLACK
        # every castling-rights combination that lacks black's 'k'
        for castling_fen in ['-', 'K', 'Q', 'KQ', 'Kq', 'Qq', 'KQq']:
            self.game.board.set_castling_fen(castling_fen)
            with self.assertRaises(ValueError):
                self.game.move(Move(E8, G8))
    def test_castling_into_check(self):
        """Castling into check is allowed (check is invisible in reconchess)."""
        self.game.board.set_board_fen('8/8/8/8/6q1/8/8/4K2R')
        self.assertFalse(self.game.board.is_check())
        move = Move(E1, G1)
        req, taken, opt_capture = self.game.move(move)
        self.assertEqual(req, taken)
        self.assertIsNone(opt_capture)
        self.game.board.turn = WHITE
        self.assertTrue(self.game.board.is_check())
    def test_castling_out_of_check(self):
        """Castling out of check is allowed."""
        self.game.board.set_board_fen('8/8/8/8/8/8/8/q3K2R')
        self.assertTrue(self.game.board.is_check())
        move = Move(E1, G1)
        req, taken, opt_capture = self.game.move(move)
        self.assertEqual(req, taken)
        self.assertIsNone(opt_capture)
        self.game.board.turn = WHITE
        self.assertFalse(self.game.board.is_check())
    def test_castling_stay_in_check(self):
        """Castling while remaining in check is allowed."""
        self.game.board.set_board_fen('8/8/8/8/8/6q1/8/4K2R')
        self.assertTrue(self.game.board.is_check())
        move = Move(E1, G1)
        req, taken, opt_capture = self.game.move(move)
        self.assertEqual(req, taken)
        self.assertIsNone(opt_capture)
        self.game.board.turn = WHITE
        self.assertTrue(self.game.board.is_check())
    def test_en_passant_white(self):
        """White en passant: capture square is the captured pawn's square."""
        self.game.board.set_board_fen('rnbqkbnr/p1pppppp/8/8/1p6/8/PPPPPPPP/RNBQKBNR')
        req, taken, opt_capture = self.game.move(Move(A2, A4))
        self.assertEqual(req, taken)
        self.assertIsNone(opt_capture)
        # black pawn takes a4 en passant; the victim sits on a4, not a3
        req, taken, opt_capture = self.game.move(Move(B4, A3))
        self.assertEqual(req, taken)
        self.assertIsNotNone(opt_capture)
        self.assertEqual(opt_capture, A4)
    def test_en_passant_black(self):
        """Black en passant mirror of test_en_passant_white."""
        self.game.board.set_board_fen('rnbqkbnr/pppppppp/8/6P1/8/8/PPPPPP1P/RNBQKBNR')
        self.game.turn = BLACK
        self.game.board.turn = BLACK
        req, taken, opt_capture = self.game.move(Move(F7, F5))
        self.assertEqual(req, taken)
        self.assertIsNone(opt_capture)
        req, taken, opt_capture = self.game.move(Move(G5, F6))
        self.assertEqual(req, taken)
        self.assertIsNotNone(opt_capture)
        self.assertEqual(opt_capture, F5)
    def test_move_opponent_piece(self):
        """Moving the opponent's pieces raises ValueError."""
        # it's white's turn, but generate black's moves and try each one
        b = Board()
        b.turn = BLACK
        for move in b.generate_pseudo_legal_moves():
            with self.assertRaises(ValueError):
                self.game.move(move)
    def test_move_no_piece(self):
        """Moving from an empty square raises ValueError."""
        # ranks 3-6 are empty in the starting position
        for from_square in SquareSet(BB_RANK_3 | BB_RANK_4 | BB_RANK_5 | BB_RANK_6):
            for to_square in SQUARES:
                with self.assertRaises(ValueError):
                    m = Move(from_square, to_square)
                    self.game.move(m)
    def test_move_illegal(self):
        """Any move not offered by move_actions() raises ValueError."""
        for from_square in SquareSet(BB_RANK_1 | BB_RANK_2):
            for to_square in SQUARES:
                move = Move(from_square, to_square)
                if move not in self.game.move_actions():
                    with self.assertRaises(ValueError):
                        self.game.move(move)
    def test_sliding_straight_capture(self):
        """A rook slide past a blocker is truncated at the first capture."""
        # requested move -> (move actually taken, capture square or None);
        # blockers sit on b5, d7 and f5 around the rook on d5
        result_by_move = {
            Move(D5, C5): (Move(D5, C5), None),
            Move(D5, B5): (Move(D5, B5), B5),
            Move(D5, A5): (Move(D5, B5), B5),
            Move(D5, D6): (Move(D5, D6), None),
            Move(D5, D7): (Move(D5, D7), D7),
            Move(D5, D8): (Move(D5, D7), D7),
            Move(D5, E5): (Move(D5, E5), None),
            Move(D5, F5): (Move(D5, F5), F5),
            Move(D5, G5): (Move(D5, F5), F5),
            Move(D5, H5): (Move(D5, F5), F5),
            Move(D5, D4): (Move(D5, D4), None),
            Move(D5, D3): (Move(D5, D3), None),
            Move(D5, D2): (Move(D5, D2), None),
            Move(D5, D1): (Move(D5, D1), None),
        }
        for expected_req, (expected_taken, expected_capture) in result_by_move.items():
            self.game.board.set_board_fen('8/3p4/8/1p1R1p2/8/8/8/8')
            self.game.board.turn = WHITE
            self.game.turn = WHITE
            req, taken, opt_capture = self.game.move(expected_req)
            self.assertEqual(req, expected_req)
            self.assertEqual(taken, expected_taken)
            self.assertEqual(opt_capture, expected_capture)
    def test_sliding_straight_into_ally(self):
        """A straight slide into/through an allied piece raises ValueError."""
        for move in [Move(D5, D2), Move(D5, D1)]:
            self.game.board.set_board_fen('8/3p4/8/1p1R1p2/8/8/3P4/8')
            self.game.board.turn = WHITE
            self.game.turn = WHITE
            with self.assertRaises(ValueError):
                req, taken, opt_capture = self.game.move(move)
    def test_sliding_diagonal_capture(self):
        """A bishop slide past a blocker is truncated at the first capture."""
        result_by_move = {
            Move(D5, C6): (Move(D5, C6), None),
            Move(D5, B7): (Move(D5, B7), None),
            Move(D5, A8): (Move(D5, A8), A8),
            Move(D5, E6): (Move(D5, E6), None),
            Move(D5, F7): (Move(D5, F7), None),
            Move(D5, G8): (Move(D5, G8), G8),
            Move(D5, E4): (Move(D5, E4), None),
            Move(D5, F3): (Move(D5, F3), None),
            Move(D5, G2): (Move(D5, G2), G2),
            Move(D5, H1): (Move(D5, G2), G2),
            Move(D5, C4): (Move(D5, C4), None),
            Move(D5, B3): (Move(D5, B3), None),
            Move(D5, A2): (Move(D5, A2), A2),
        }
        for expected_req, (expected_taken, expected_capture) in result_by_move.items():
            self.game.board.set_board_fen('p5p1/8/8/3B4/8/8/p5p1/8')
            self.game.board.turn = WHITE
            self.game.turn = WHITE
            req, taken, opt_capture = self.game.move(expected_req)
            self.assertEqual(req, expected_req)
            self.assertEqual(taken, expected_taken)
            self.assertEqual(opt_capture, expected_capture)
    def test_sliding_diagonal_into_ally(self):
        """A diagonal slide into/through an allied piece raises ValueError."""
        for move in [Move(D5, G2), Move(D5, H1)]:
            self.game.board.set_board_fen('p5p1/8/8/3B4/8/8/p5P1/8')
            self.game.board.turn = WHITE
            self.game.turn = WHITE
            with self.assertRaises(ValueError):
                req, taken, opt_capture = self.game.move(move)
    def test_pawn_auto_promotion(self):
        """A pawn reaching the last rank is auto-promoted to a queen."""
        self.game.board.set_board_fen('8/3P4/8/8/8/8/8/8')
        req, taken, opt_capture = self.game.move(Move(D7, D8))
        self.assertEqual(Move(D7, D8), req)
        # taken differs from the request only in the promotion field
        self.assertNotEqual(req, taken)
        self.assertEqual(req.to_square, taken.to_square)
        self.assertEqual(req.from_square, taken.from_square)
        self.assertIsNone(req.promotion)
        self.assertEqual(taken.promotion, QUEEN)
    def test_pass(self):
        """A None move is a pass: req, taken and capture are all None."""
        req, taken, opt_capture = self.game.move(None)
        self.assertEqual(req, None)
        self.assertEqual(taken, None)
        self.assertIsNone(opt_capture)
        self.game.board.turn = BLACK
        req, taken, opt_capture = self.game.move(None)
        self.assertEqual(req, None)
        self.assertEqual(taken, None)
        self.assertIsNone(opt_capture)
        # passing still works after the board contents change
        self.game.board.turn = WHITE
        self.game.board.remove_piece_at(0)
        req, taken, opt_capture = self.game.move(None)
        self.assertEqual(req, None)
        self.assertEqual(taken, None)
        self.assertIsNone(opt_capture)
    def test_legal_fuzz(self, max_turns=500):
        """Random playout mirrored on a plain Board: every pseudo-legal
        request is taken verbatim and both boards stay in sync."""
        board = Board()
        turn = 1
        while not board.is_game_over() and turn < max_turns:
            move = random.choice(list(board.generate_pseudo_legal_moves()) + [None])
            req, taken, opt_square = self.game.move(move)
            self.assertEqual(req, taken)
            if move is not None and board.is_capture(move):
                self.assertIsNotNone(opt_square)
            board.push(move if move is not None else Move.null())
            self.assertEqual(self.game.board, board)
            turn += 1
class OpponentMoveResultsTestCase(unittest.TestCase):
    """opponent_move_results() keeps reporting the capture square of the
    most recently completed move, across end_turn() and sense() calls."""

    def _assert_result_is(self, game, expected):
        # the reported result must match the capture square move() returned
        self.assertEqual(expected, game.opponent_move_results())

    def test_no_capture(self):
        game = LocalGame()
        game.start()
        _, _, capture_square = game.move(Move(A2, A4))
        self.assertIsNone(capture_square)
        self._assert_result_is(game, capture_square)
        game.end_turn()
        self._assert_result_is(game, capture_square)
        game.sense(E5)
        self._assert_result_is(game, capture_square)
        _, _, capture_square = game.move(Move(F7, F5))
        self.assertIsNone(capture_square)
        self._assert_result_is(game, capture_square)

    def test_capture(self):
        game = LocalGame()
        game.board.set_board_fen('rnbqkbnr/ppp2ppp/8/3pp3/3PP3/8/PPP2PPP/RNBQKBNR')
        game.start()
        _, _, capture_square = game.move(Move(D4, E5))
        self.assertEqual(E5, capture_square)
        self._assert_result_is(game, capture_square)
        game.end_turn()
        self._assert_result_is(game, capture_square)
        game.sense(E5)
        self._assert_result_is(game, capture_square)
        _, _, capture_square = game.move(Move(D5, E4))
        self.assertEqual(E4, capture_square)
        self._assert_result_is(game, capture_square)
class IsOverTest(unittest.TestCase):
    """is_over() under normal play, forced end, timeouts, and king capture."""

    @staticmethod
    def _started(game):
        # start the game and hand it back for a one-line assertion
        game.start()
        return game

    def test_not_over(self):
        self.assertFalse(self._started(LocalGame()).is_over())

    def test_forced_over(self):
        game = self._started(LocalGame())
        self.assertFalse(game.is_over())
        game.end()
        self.assertTrue(game.is_over())

    def test_no_time_both(self):
        self.assertTrue(self._started(LocalGame(seconds_per_player=0)).is_over())

    def test_no_time_white(self):
        game = LocalGame()
        game.seconds_left_by_color[WHITE] = 0
        self.assertTrue(self._started(game).is_over())

    def test_no_time_black(self):
        game = LocalGame()
        game.seconds_left_by_color[BLACK] = 0
        self.assertTrue(self._started(game).is_over())

    def test_white_king_captured(self):
        game = LocalGame()
        game.board.set_board_fen('rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQ1BNR')
        self.assertTrue(self._started(game).is_over())

    def test_black_king_captured(self):
        game = LocalGame()
        game.board.set_board_fen('rnbq1bnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR')
        self.assertTrue(self._started(game).is_over())
class WinnerInfoTestCase(unittest.TestCase):
    """get_winner_color()/get_win_reason() for each way a game can end."""

    def _check_outcome(self, game, expected_color, expected_reason):
        self.assertEqual(expected_color, game.get_winner_color())
        self.assertEqual(expected_reason, game.get_win_reason())

    def test_not_over(self):
        game = LocalGame()
        game.start()
        # an in-progress game has neither a winner nor a reason
        self._check_outcome(game, None, None)

    def test_forced_over(self):
        game = LocalGame()
        game.start()
        game.end()
        # a forced end produces no winner
        self._check_outcome(game, None, None)

    def test_no_time_white(self):
        game = LocalGame()
        game.seconds_left_by_color[WHITE] = 0
        game.start()
        self._check_outcome(game, BLACK, WinReason.TIMEOUT)

    def test_no_time_black(self):
        game = LocalGame()
        game.seconds_left_by_color[BLACK] = 0
        game.start()
        self._check_outcome(game, WHITE, WinReason.TIMEOUT)

    def test_white_king_captured(self):
        game = LocalGame()
        game.board.set_board_fen('rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQ1BNR')
        game.start()
        self._check_outcome(game, BLACK, WinReason.KING_CAPTURE)

    def test_black_king_captured(self):
        game = LocalGame()
        game.board.set_board_fen('rnbq1bnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR')
        game.start()
        self._check_outcome(game, WHITE, WinReason.KING_CAPTURE)
class GetGameHistoryTestCase(unittest.TestCase):
    """get_game_history() is None while the game is in progress and becomes
    available once the game ends (here: white captures the black king)."""

    def test_no_history_until_game_over(self):
        game = LocalGame()
        game.sense(E2)
        self.assertIsNone(game.get_game_history())
        game.move(Move(E2, E4))
        self.assertIsNone(game.get_game_history())
        game.sense(A8)
        game.move(Move(E7, E5))
        self.assertIsNone(game.get_game_history())
        # march the white bishop out to e8 and take the black king
        game.sense(E2)
        game.move(Move(F1, B5))
        game.sense(A8)
        game.move(Move(D7, D5))
        game.sense(E8)
        game.move(Move(B5, E8))
        self.assertTrue(game.is_over())
        self.assertIsNotNone(game.get_game_history())
| true | true |
f71d23144531baf87db8dd62c8a45eb53a5ae369 | 235 | py | Python | Lection 5. Euler and Hamiltonian cycles/_gens/ansgen.py | gennadiychistyakov/graphs-theory-materials | 191db41634d2bb9a0661963d489a34ba731506f1 | [
"MIT"
] | null | null | null | Lection 5. Euler and Hamiltonian cycles/_gens/ansgen.py | gennadiychistyakov/graphs-theory-materials | 191db41634d2bb9a0661963d489a34ba731506f1 | [
"MIT"
] | null | null | null | Lection 5. Euler and Hamiltonian cycles/_gens/ansgen.py | gennadiychistyakov/graphs-theory-materials | 191db41634d2bb9a0661963d489a34ba731506f1 | [
"MIT"
] | null | null | null | import os
os.system("javac Solver.java")
for root, dirs, files in os.walk(".", topdown = False):
for f in files:
if ".in" in f:
os.system("java Solver < " + f + " > " + f[:-3] + ".out")
os.system("del Solver.class") | 23.5 | 62 | 0.557447 | import os
os.system("javac Solver.java")
for root, dirs, files in os.walk(".", topdown = False):
for f in files:
if ".in" in f:
os.system("java Solver < " + f + " > " + f[:-3] + ".out")
os.system("del Solver.class") | true | true |
f71d24c470dc97f742367ab0a893121056ed787b | 62,191 | py | Python | python/pyspark/sql.py | shlomitub28/spark | d3a3840e077802647aced1ceace1494605dda1db | [
"Apache-2.0"
] | 1 | 2021-05-19T17:59:20.000Z | 2021-05-19T17:59:20.000Z | python/pyspark/sql.py | shlomitub28/spark | d3a3840e077802647aced1ceace1494605dda1db | [
"Apache-2.0"
] | null | null | null | python/pyspark/sql.py | shlomitub28/spark | d3a3840e077802647aced1ceace1494605dda1db | [
"Apache-2.0"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import types
import itertools
import warnings
import decimal
import datetime
import keyword
import warnings
from array import array
from operator import itemgetter
from pyspark.rdd import RDD
from pyspark.serializers import BatchedSerializer, PickleSerializer, CloudPickleSerializer
from pyspark.storagelevel import StorageLevel
from pyspark.traceback_utils import SCCallSiteSync
from itertools import chain, ifilter, imap
from py4j.protocol import Py4JError
from py4j.java_collections import ListConverter, MapConverter
__all__ = [
"StringType", "BinaryType", "BooleanType", "TimestampType", "DecimalType",
"DoubleType", "FloatType", "ByteType", "IntegerType", "LongType",
"ShortType", "ArrayType", "MapType", "StructField", "StructType",
"SQLContext", "HiveContext", "SchemaRDD", "Row"]
class DataType(object):
    """Base class of all Spark SQL data types.

    Equality is structural (same class, same attributes) and hashing is
    derived from the string form, so equal types hash equally.
    """

    def __repr__(self):
        return self.__class__.__name__

    def __hash__(self):
        return hash(str(self))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)
class PrimitiveTypeSingleton(type):
    """Metaclass caching one shared instance per primitive type class."""

    _instances = {}

    def __call__(cls):
        # lazily create and memoize the single instance for this class
        try:
            return cls._instances[cls]
        except KeyError:
            instance = super(PrimitiveTypeSingleton, cls).__call__()
            cls._instances[cls] = instance
            return instance
class PrimitiveType(DataType):
    """Spark SQL PrimitiveType

    Base class of all primitive (non-composite) Spark SQL types.
    """
    # Python 2 metaclass declaration: each concrete subclass is managed by
    # PrimitiveTypeSingleton, so there is one shared instance per type.
    __metaclass__ = PrimitiveTypeSingleton
    def __eq__(self, other):
        # because they should be the same object
        # (the singleton metaclass guarantees one instance per type, so an
        # identity check is sufficient)
        return self is other
class StringType(PrimitiveType):
    """Spark SQL StringType

    The data type representing string values.
    """
class BinaryType(PrimitiveType):
    """Spark SQL BinaryType

    The data type representing bytearray values.
    """
class BooleanType(PrimitiveType):
    """Spark SQL BooleanType

    The data type representing bool values.
    """
class TimestampType(PrimitiveType):
    """Spark SQL TimestampType

    The data type representing datetime.datetime values.
    """
class DecimalType(PrimitiveType):
    """Spark SQL DecimalType

    The data type representing decimal.Decimal values.
    """
class DoubleType(PrimitiveType):
    """Spark SQL DoubleType

    The data type representing float values.
    """
class FloatType(PrimitiveType):
    """Spark SQL FloatType

    The data type representing single precision floating-point values.
    """
class ByteType(PrimitiveType):
    """Spark SQL ByteType

    The data type representing int values with 1 signed byte.
    """
class IntegerType(PrimitiveType):
    """Spark SQL IntegerType

    The data type representing int values.
    """
class LongType(PrimitiveType):
    """Spark SQL LongType

    The data type representing long values. If any value is
    beyond the range of [-9223372036854775808, 9223372036854775807],
    please use DecimalType.
    """
class ShortType(PrimitiveType):
    """Spark SQL ShortType

    The data type representing int values with 2 signed bytes.
    """
class ArrayType(DataType):
    """Spark SQL ArrayType

    The data type representing list values. An ArrayType object
    comprises two fields, elementType (a DataType) and containsNull (a bool).
    The field of elementType is used to specify the type of array elements.
    The field of containsNull is used to specify if the array has None values.
    """

    def __init__(self, elementType, containsNull=True):
        """Creates an ArrayType

        :param elementType: the data type of elements.
        :param containsNull: indicates whether the list contains None values.

        >>> ArrayType(StringType) == ArrayType(StringType, True)
        True
        >>> ArrayType(StringType, False) == ArrayType(StringType)
        False
        """
        self.elementType = elementType
        self.containsNull = containsNull

    def __repr__(self):
        # Defined as __repr__ (not __str__) for consistency with MapType,
        # StructField and StructType, so repr() also shows the full
        # "ArrayType(element,containsNull)" form instead of falling back to
        # DataType.__repr__ (which printed just "ArrayType"). str() output
        # is unchanged: with no __str__ defined, str() uses this __repr__.
        # The boolean is lowercased to match the Scala DataType string format.
        return "ArrayType(%s,%s)" % (self.elementType,
                                     str(self.containsNull).lower())
class MapType(DataType):
    """Spark SQL MapType

    The data type representing dict values. A MapType object comprises
    three fields, keyType (a DataType), valueType (a DataType) and
    valueContainsNull (a bool).

    The field of keyType is used to specify the type of keys in the map.
    The field of valueType is used to specify the type of values in the map.
    The field of valueContainsNull is used to specify if values of this
    map has None values.

    For values of a MapType column, keys are not allowed to have None values.
    """
    def __init__(self, keyType, valueType, valueContainsNull=True):
        """Creates a MapType

        :param keyType: the data type of keys.
        :param valueType: the data type of values.
        :param valueContainsNull: indicates whether values contains
        null values.

        >>> (MapType(StringType, IntegerType)
        ... == MapType(StringType, IntegerType, True))
        True
        >>> (MapType(StringType, IntegerType, False)
        ... == MapType(StringType, FloatType))
        False
        """
        self.keyType = keyType
        self.valueType = valueType
        self.valueContainsNull = valueContainsNull
    def __repr__(self):
        # booleans are rendered lowercase ("true"/"false") to match the
        # Scala-side DataType string format parsed by _parse_datatype_string
        return "MapType(%s,%s,%s)" % (self.keyType, self.valueType,
                                      str(self.valueContainsNull).lower())
class StructField(DataType):
    """Spark SQL StructField

    Represents a field in a StructType.
    A StructField object comprises three fields, name (a string),
    dataType (a DataType) and nullable (a bool). The field of name
    is the name of a StructField. The field of dataType specifies
    the data type of a StructField.

    The field of nullable specifies if values of a StructField can
    contain None values.
    """
    def __init__(self, name, dataType, nullable):
        """Creates a StructField

        :param name: the name of this field.
        :param dataType: the data type of this field.
        :param nullable: indicates whether values of this field
        can be null.

        >>> (StructField("f1", StringType, True)
        ... == StructField("f1", StringType, True))
        True
        >>> (StructField("f1", StringType, True)
        ... == StructField("f2", StringType, True))
        False
        """
        self.name = name
        self.dataType = dataType
        self.nullable = nullable
    def __repr__(self):
        # lowercase boolean matches the Scala DataType string format
        return "StructField(%s,%s,%s)" % (self.name, self.dataType,
                                          str(self.nullable).lower())
class StructType(DataType):
    """Spark SQL StructType

    The data type representing rows.
    A StructType object comprises a list of L{StructField}.
    """
    def __init__(self, fields):
        """Creates a StructType

        :param fields: the list of StructField objects making up the row.

        >>> struct1 = StructType([StructField("f1", StringType, True)])
        >>> struct2 = StructType([StructField("f1", StringType, True)])
        >>> struct1 == struct2
        True
        >>> struct1 = StructType([StructField("f1", StringType, True)])
        >>> struct2 = StructType([StructField("f1", StringType, True),
        ... [StructField("f2", IntegerType, False)]])
        >>> struct1 == struct2
        False
        """
        self.fields = fields
    def __repr__(self):
        # mirrors Scala's "StructType(List(...))" rendering so the string
        # round-trips through _parse_datatype_string
        return ("StructType(List(%s))" %
                ",".join(str(field) for field in self.fields))
def _parse_datatype_list(datatype_list_string):
    """Parse a comma separated list of data type strings.

    Commas nested inside parentheses belong to a nested type and are not
    split on; only depth-0 commas separate list entries.
    """
    parsed = []
    depth = 0
    segment_start = 0
    for pos, char in enumerate(datatype_list_string):
        if char == "(":
            depth += 1
        elif char == ")":
            depth -= 1
        elif char == "," and depth == 0:
            segment = datatype_list_string[segment_start:pos].strip()
            parsed.append(_parse_datatype_string(segment))
            segment_start = pos + 1
    # the final entry has no trailing comma
    tail = datatype_list_string[segment_start:].strip()
    parsed.append(_parse_datatype_string(tail))
    return parsed
# Map from type name (e.g. "StringType") to its class: every class in this
# module's globals that is managed by PrimitiveTypeSingleton and directly
# subclasses PrimitiveType. Used by _parse_datatype_string for lookups.
_all_primitive_types = dict((k, v) for k, v in globals().iteritems()
                            if type(v) is PrimitiveTypeSingleton and v.__base__ == PrimitiveType)
def _parse_datatype_string(datatype_string):
    """Parses the given data type string.

    The input is the Scala-side DataType.toString format, e.g.
    "ArrayType(StringType,true)"; the result is the corresponding Python
    DataType object.

    >>> def check_datatype(datatype):
    ...     scala_datatype = sqlCtx._ssql_ctx.parseDataType(str(datatype))
    ...     python_datatype = _parse_datatype_string(
    ...         scala_datatype.toString())
    ...     return datatype == python_datatype
    >>> all(check_datatype(cls()) for cls in _all_primitive_types.values())
    True
    >>> # Simple ArrayType.
    >>> simple_arraytype = ArrayType(StringType(), True)
    >>> check_datatype(simple_arraytype)
    True
    >>> # Simple MapType.
    >>> simple_maptype = MapType(StringType(), LongType())
    >>> check_datatype(simple_maptype)
    True
    >>> # Simple StructType.
    >>> simple_structtype = StructType([
    ...     StructField("a", DecimalType(), False),
    ...     StructField("b", BooleanType(), True),
    ...     StructField("c", LongType(), True),
    ...     StructField("d", BinaryType(), False)])
    >>> check_datatype(simple_structtype)
    True
    >>> # Complex StructType.
    >>> complex_structtype = StructType([
    ...     StructField("simpleArray", simple_arraytype, True),
    ...     StructField("simpleMap", simple_maptype, True),
    ...     StructField("simpleStruct", simple_structtype, True),
    ...     StructField("boolean", BooleanType(), False)])
    >>> check_datatype(complex_structtype)
    True
    >>> # Complex ArrayType.
    >>> complex_arraytype = ArrayType(complex_structtype, True)
    >>> check_datatype(complex_arraytype)
    True
    >>> # Complex MapType.
    >>> complex_maptype = MapType(complex_structtype,
    ...                           complex_arraytype, False)
    >>> check_datatype(complex_maptype)
    True
    """
    # split "TypeName(args...)" into the name and the argument string
    index = datatype_string.find("(")
    if index == -1:
        # It is a primitive type.
        index = len(datatype_string)
    type_or_field = datatype_string[:index]
    rest_part = datatype_string[index + 1:len(datatype_string) - 1].strip()
    if type_or_field in _all_primitive_types:
        return _all_primitive_types[type_or_field]()
    elif type_or_field == "ArrayType":
        # the text after the *last* top-level comma is the bool flag;
        # everything before it is the (possibly nested) element type
        last_comma_index = rest_part.rfind(",")
        containsNull = True
        if rest_part[last_comma_index + 1:].strip().lower() == "false":
            containsNull = False
        elementType = _parse_datatype_string(
            rest_part[:last_comma_index].strip())
        return ArrayType(elementType, containsNull)
    elif type_or_field == "MapType":
        last_comma_index = rest_part.rfind(",")
        valueContainsNull = True
        if rest_part[last_comma_index + 1:].strip().lower() == "false":
            valueContainsNull = False
        # the remaining "keyType,valueType" pair is split by the list parser
        keyType, valueType = _parse_datatype_list(
            rest_part[:last_comma_index].strip())
        return MapType(keyType, valueType, valueContainsNull)
    elif type_or_field == "StructField":
        # format: StructField(name,dataType,nullable)
        first_comma_index = rest_part.find(",")
        name = rest_part[:first_comma_index].strip()
        last_comma_index = rest_part.rfind(",")
        nullable = True
        if rest_part[last_comma_index + 1:].strip().lower() == "false":
            nullable = False
        dataType = _parse_datatype_string(
            rest_part[first_comma_index + 1:last_comma_index].strip())
        return StructField(name, dataType, nullable)
    elif type_or_field == "StructType":
        # rest_part should be in the format like
        # List(StructField(field1,IntegerType,false)).
        field_list_string = rest_part[rest_part.find("(") + 1:-1]
        fields = _parse_datatype_list(field_list_string)
        return StructType(fields)
# Mapping Python types to Spark SQL DataType.
# Python 2 module: `long` and `unicode` exist here alongside int/str.
# Note that datetime.date and datetime.time (not only datetime.datetime)
# are also mapped to TimestampType.
_type_mappings = {
    bool: BooleanType,
    int: IntegerType,
    long: LongType,
    float: DoubleType,
    str: StringType,
    unicode: StringType,
    bytearray: BinaryType,
    decimal.Decimal: DecimalType,
    datetime.datetime: TimestampType,
    datetime.date: TimestampType,
    datetime.time: TimestampType,
}
def _infer_type(obj):
    """Infer the Spark SQL DataType from a Python value.

    Primitive values are looked up in _type_mappings; dicts become MapType
    and lists/arrays become ArrayType, with element types inferred from a
    single sample element. Anything else is treated as a row and handed to
    _infer_schema. Raises ValueError for None, empty containers, and
    unsupported types.
    """
    if obj is None:
        raise ValueError("Can not infer type for None")
    dataType = _type_mappings.get(type(obj))
    if dataType is not None:
        return dataType()
    if isinstance(obj, dict):
        if not obj:
            raise ValueError("Can not infer type for empty dict")
        # sample a single (key, value) pair; assumes the dict is homogeneous
        key, value = obj.iteritems().next()
        return MapType(_infer_type(key), _infer_type(value), True)
    elif isinstance(obj, (list, array)):
        if not obj:
            raise ValueError("Can not infer type for empty list/array")
        # sample only the first element; assumes a homogeneous sequence
        return ArrayType(_infer_type(obj[0]), True)
    else:
        try:
            return _infer_schema(obj)
        except ValueError:
            raise ValueError("not supported type: %s" % type(obj))
def _infer_schema(row):
    """Infer a StructType schema from a single row.

    Accepts a dict (keys sorted), a namedtuple, a Row (has __FIELDS__), a
    tuple of (name, value) pairs, or any object with a __dict__ (attributes
    sorted). Every inferred field is marked nullable.
    """
    if isinstance(row, dict):
        items = sorted(row.items())
    elif isinstance(row, tuple):
        if hasattr(row, "_fields"):  # namedtuple
            items = zip(row._fields, tuple(row))
        elif hasattr(row, "__FIELDS__"):  # Row
            items = zip(row.__FIELDS__, tuple(row))
        elif all(isinstance(x, tuple) and len(x) == 2 for x in row):
            items = row
        else:
            raise ValueError("Can't infer schema from tuple")
    elif hasattr(row, "__dict__"):  # object
        items = sorted(row.__dict__.items())
    else:
        raise ValueError("Can not infer schema for type: %s" % type(row))
    fields = [StructField(k, _infer_type(v), True) for k, v in items]
    return StructType(fields)
def _create_converter(obj, dataType):
    """Create a converter that drops the names of fields in obj.

    Builds (from a sample object `obj` and its schema) a function that turns
    named records (dicts, namedtuples, Rows, objects) into plain tuples,
    recursively for nested Array/Map/Struct types.
    """
    if isinstance(dataType, ArrayType):
        # converter for elements, based on the first sample element
        conv = _create_converter(obj[0], dataType.elementType)
        return lambda row: map(conv, row)
    elif isinstance(dataType, MapType):
        # converter for values, based on an arbitrary sample value
        value = obj.values()[0]
        conv = _create_converter(value, dataType.valueType)
        return lambda row: dict((k, conv(v)) for k, v in row.iteritems())
    elif not isinstance(dataType, StructType):
        # primitive: nothing to strip
        return lambda x: x
    # dataType must be StructType
    names = [f.name for f in dataType.fields]
    if isinstance(obj, dict):
        conv = lambda o: tuple(o.get(n) for n in names)
    elif isinstance(obj, tuple):
        if hasattr(obj, "_fields"):  # namedtuple
            conv = tuple
        elif hasattr(obj, "__FIELDS__"):
            conv = tuple
        elif all(isinstance(x, tuple) and len(x) == 2 for x in obj):
            conv = lambda o: tuple(v for k, v in o)
        else:
            raise ValueError("unexpected tuple")
    elif hasattr(obj, "__dict__"):  # object
        conv = lambda o: [o.__dict__.get(n, None) for n in names]
    # NOTE(review): if obj is none of the cases above, `conv` is unbound here
    # and the next line raises UnboundLocalError rather than a clear error —
    # presumably _infer_schema has already rejected such objects; verify.
    if all(isinstance(f.dataType, PrimitiveType) for f in dataType.fields):
        return conv
    # some fields are themselves composite: build per-field converters from
    # the converted sample row and chain them after the top-level conversion
    row = conv(obj)
    convs = [_create_converter(v, f.dataType)
             for v, f in zip(row, dataType.fields)]
    def nested_conv(row):
        return tuple(f(v) for f, v in zip(convs, conv(row)))
    return nested_conv
def _drop_schema(rows, schema):
    """Strip field names from every row, yielding plain tuples.

    The converter is built once, from the first row, and reused for all
    remaining rows.
    """
    it = iter(rows)
    first = it.next()
    convert = _create_converter(first, schema)
    yield convert(first)
    for remaining in it:
        yield convert(remaining)
_BRACKETS = {'(': ')', '[': ']', '{': '}'}
def _split_schema_abstract(s):
"""
split the schema abstract into fields
>>> _split_schema_abstract("a b c")
['a', 'b', 'c']
>>> _split_schema_abstract("a(a b)")
['a(a b)']
>>> _split_schema_abstract("a b[] c{a b}")
['a', 'b[]', 'c{a b}']
>>> _split_schema_abstract(" ")
[]
"""
r = []
w = ''
brackets = []
for c in s:
if c == ' ' and not brackets:
if w:
r.append(w)
w = ''
else:
w += c
if c in _BRACKETS:
brackets.append(c)
elif c in _BRACKETS.values():
if not brackets or c != _BRACKETS[brackets.pop()]:
raise ValueError("unexpected " + c)
if brackets:
raise ValueError("brackets not closed: %s" % brackets)
if w:
r.append(w)
return r
def _parse_field_abstract(s):
    """
    Parse a field in schema abstract
    >>> _parse_field_abstract("a")
    StructField(a,None,true)
    >>> _parse_field_abstract("b(c d)")
    StructField(b,StructType(...c,None,true),StructField(d...
    >>> _parse_field_abstract("a[]")
    StructField(a,ArrayType(None,true),true)
    >>> _parse_field_abstract("a{[]}")
    StructField(a,MapType(None,ArrayType(None,true),true),true)
    """
    positions = [s.index(c) for c in _BRACKETS if c in s]
    if not positions:
        # plain name, no nested structure; type is inferred later
        return StructField(s, None, True)
    # name is everything before the first bracket; the rest is a nested type
    split = min(positions)
    return StructField(s[:split], _parse_schema_abstract(s[split:]), True)
def _parse_schema_abstract(s):
    """
    parse abstract into schema
    >>> _parse_schema_abstract("a b c")
    StructType...a...b...c...
    >>> _parse_schema_abstract("a[b c] b{}")
    StructType...a,ArrayType...b...c...b,MapType...
    >>> _parse_schema_abstract("c{} d{a b}")
    StructType...c,MapType...d,MapType...a...b...
    >>> _parse_schema_abstract("a b(t)").fields[1]
    StructField(b,StructType(List(StructField(t,None,true))),true)
    """
    s = s.strip()
    if not s:
        return
    head = s[0]
    if head == '(':
        # parenthesized struct: parse the inner abstract
        return _parse_schema_abstract(s[1:-1])
    if head == '[':
        return ArrayType(_parse_schema_abstract(s[1:-1]), True)
    if head == '{':
        return MapType(None, _parse_schema_abstract(s[1:-1]))
    return StructType([_parse_field_abstract(part)
                       for part in _split_schema_abstract(s)])
def _infer_schema_type(obj, dataType):
    """
    Fill the dataType with types infered from obj
    >>> schema = _parse_schema_abstract("a b c")
    >>> row = (1, 1.0, "str")
    >>> _infer_schema_type(row, schema)
    StructType...IntegerType...DoubleType...StringType...
    >>> row = [[1], {"key": (1, 2.0)}]
    >>> schema = _parse_schema_abstract("a[] b{c d}")
    >>> _infer_schema_type(row, schema)
    StructType...a,ArrayType...b,MapType(StringType,...c,IntegerType...
    """
    if dataType is None:
        # unconstrained slot: infer everything from the value
        return _infer_type(obj)
    if not obj:
        raise ValueError("Can not infer type from empty value")
    if isinstance(dataType, ArrayType):
        # only the first element is inspected for the element type
        return ArrayType(_infer_schema_type(obj[0], dataType.elementType), True)
    if isinstance(dataType, MapType):
        # sample one (key, value) pair to infer key/value types
        k, v = next(obj.iteritems())
        return MapType(_infer_type(k),
                       _infer_schema_type(v, dataType.valueType))
    if isinstance(dataType, StructType):
        fs = dataType.fields
        assert len(fs) == len(obj), \
            "Obj(%s) have different length with fields(%s)" % (obj, fs)
        return StructType([StructField(f.name,
                                       _infer_schema_type(o, f.dataType), True)
                           for o, f in zip(obj, fs)])
    raise ValueError("Unexpected dataType: %s" % dataType)
# Python types accepted for each Spark SQL type when verifying rows
# (see _verify_type). Exact-type matching is used; subclasses are rejected.
_acceptable_types = {
    # boolean / integral (Python 2: both int and long are accepted)
    BooleanType: (bool,),
    ByteType: (int, long),
    ShortType: (int, long),
    IntegerType: (int, long),
    LongType: (int, long),
    # floating point / decimal
    FloatType: (float,),
    DoubleType: (float,),
    DecimalType: (decimal.Decimal,),
    # text, binary and time
    StringType: (str, unicode),
    BinaryType: (bytearray,),
    TimestampType: (datetime.datetime,),
    # containers
    ArrayType: (list, tuple, array),
    MapType: (dict,),
    StructType: (tuple, list),
}
def _verify_type(obj, dataType):
    """
    Verify the type of obj against dataType, raise an exception if
    they do not match.
    >>> _verify_type(None, StructType([]))
    >>> _verify_type("", StringType())
    >>> _verify_type(0, IntegerType())
    >>> _verify_type(range(3), ArrayType(ShortType()))
    >>> _verify_type(set(), ArrayType(StringType())) # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
        ...
    TypeError:...
    >>> _verify_type({}, MapType(StringType(), IntegerType()))
    >>> _verify_type((), StructType([]))
    >>> _verify_type([], StructType([]))
    >>> _verify_type([1], StructType([])) # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
        ...
    ValueError:...
    """
    # all objects are nullable
    if obj is None:
        return

    _type = type(dataType)
    assert _type in _acceptable_types, "unknown datatype: %s" % dataType

    # exact type match required: subclasses of the accepted types can not
    # be deserialized in the JVM
    if type(obj) not in _acceptable_types[_type]:
        raise TypeError("%s can not accept object in type %s"
                        % (dataType, type(obj)))

    if isinstance(dataType, ArrayType):
        for i in obj:
            _verify_type(i, dataType.elementType)

    elif isinstance(dataType, MapType):
        for k, v in obj.iteritems():
            _verify_type(k, dataType.keyType)
            _verify_type(v, dataType.valueType)

    elif isinstance(dataType, StructType):
        if len(obj) != len(dataType.fields):
            # NOTE: trailing space added so the concatenated message does not
            # read "...match withlength of fields..."
            raise ValueError("Length of object (%d) does not match with "
                             "length of fields (%d)" % (len(obj), len(dataType.fields)))
        for v, f in zip(obj, dataType.fields):
            _verify_type(v, f.dataType)
_cached_cls = {}
def _restore_object(dataType, obj):
    """Restore a Row object from its schema during unpickling."""
    # Identity lookup first: with batched pickling, dataType is usually
    # the very same object across calls, so id() is the cheapest key.
    key = id(dataType)
    cls = _cached_cls.get(key)
    if cls is None:
        # Fall back to an equality lookup so equal schemas reuse one class.
        cls = _cached_cls.get(dataType)
        if cls is None:
            cls = _create_cls(dataType)
            _cached_cls[dataType] = cls
        _cached_cls[key] = cls
    return cls(obj)
def _create_object(cls, v):
""" Create an customized object with class `cls`. """
return cls(v) if v is not None else v
def _create_getter(dt, i):
    """Build a getter for row item `i` that wraps values in dt's class."""
    item_cls = _create_cls(dt)
    return lambda self: _create_object(item_cls, self[i])
def _has_struct(dt):
    """Return whether `dt` is a StructType or contains one."""
    if isinstance(dt, StructType):
        return True
    if isinstance(dt, ArrayType):
        return _has_struct(dt.elementType)
    if isinstance(dt, MapType):
        # only the value type of a map is inspected here
        return _has_struct(dt.valueType)
    return False
def _create_properties(fields):
    """Build a name -> property mapping for the given struct fields."""
    props = {}
    for idx, field in enumerate(fields):
        name = field.name
        # dunder names and Python keywords cannot be attribute-accessed
        if (name.startswith("__") and name.endswith("__")
                or keyword.iskeyword(name)):
            warnings.warn("field name %s can not be accessed in Python,"
                          "use position to access it instead" % name)
        if _has_struct(field.dataType):
            # delay creating object until accessing it
            getter = _create_getter(field.dataType, idx)
        else:
            getter = itemgetter(idx)
        props[name] = property(getter)
    return props
def _create_cls(dataType):
    """
    Create an class by dataType
    The created class is similar to namedtuple, but can have nested schema.
    >>> schema = _parse_schema_abstract("a b c")
    >>> row = (1, 1.0, "str")
    >>> schema = _infer_schema_type(row, schema)
    >>> obj = _create_cls(schema)(row)
    >>> import pickle
    >>> pickle.loads(pickle.dumps(obj))
    Row(a=1, b=1.0, c='str')
    >>> row = [[1], {"key": (1, 2.0)}]
    >>> schema = _parse_schema_abstract("a[] b{c d}")
    >>> schema = _infer_schema_type(row, schema)
    >>> obj = _create_cls(schema)(row)
    >>> pickle.loads(pickle.dumps(obj))
    Row(a=[1], b={'key': Row(c=1, d=2.0)})
    >>> pickle.loads(pickle.dumps(obj.a))
    [1]
    >>> pickle.loads(pickle.dumps(obj.b))
    {'key': Row(c=1, d=2.0)}
    """
    if isinstance(dataType, ArrayType):
        # arrays map to a list-builder that wraps every element
        cls = _create_cls(dataType.elementType)
        def List(l):
            if l is None:
                return
            return [_create_object(cls, v) for v in l]
        return List
    elif isinstance(dataType, MapType):
        # maps wrap only their values; keys pass through unchanged
        cls = _create_cls(dataType.valueType)
        def Dict(d):
            if d is None:
                return
            return dict((k, _create_object(cls, v)) for k, v in d.items())
        return Dict
    elif not isinstance(dataType, StructType):
        # anything other than Array/Map/Struct should never reach here
        raise Exception("unexpected data type: %s" % dataType)
    class Row(tuple):
        """ Row in SchemaRDD """
        __DATATYPE__ = dataType
        __FIELDS__ = tuple(f.name for f in dataType.fields)
        __slots__ = ()
        # create property for fast access
        # (injected via locals() so each field becomes a named property)
        locals().update(_create_properties(dataType.fields))
        def __repr__(self):
            # call collect __repr__ for nested objects
            return ("Row(%s)" % ", ".join("%s=%r" % (n, getattr(self, n))
                                          for n in self.__FIELDS__))
        def __reduce__(self):
            # pickle as (schema, values); _restore_object rebuilds the class
            return (_restore_object, (self.__DATATYPE__, tuple(self)))
    return Row
class SQLContext(object):

    """Main entry point for Spark SQL functionality.

    A SQLContext can be used create L{SchemaRDD}, register L{SchemaRDD} as
    tables, execute SQL over tables, cache tables, and read parquet files.
    """

    def __init__(self, sparkContext, sqlContext=None):
        """Create a new SQLContext.

        @param sparkContext: The SparkContext to wrap.
        @param sqlContext: An optional JVM Scala SQLContext. If set, we do not instantiate a new
        SQLContext in the JVM, instead we make all calls to this object.

        >>> srdd = sqlCtx.inferSchema(rdd)
        >>> sqlCtx.inferSchema(srdd) # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
            ...
        TypeError:...
        >>> bad_rdd = sc.parallelize([1,2,3])
        >>> sqlCtx.inferSchema(bad_rdd) # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
            ...
        ValueError:...
        >>> from datetime import datetime
        >>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1L,
        ... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
        ... time=datetime(2014, 8, 1, 14, 1, 5))])
        >>> srdd = sqlCtx.inferSchema(allTypes)
        >>> srdd.registerTempTable("allTypes")
        >>> sqlCtx.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
        ... 'from allTypes where b and i > 0').collect()
        [Row(c0=2, c1=2.0, c2=False, c3=2, c4=0...8, 1, 14, 1, 5), a=1)]
        >>> srdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time,
        ... x.row.a, x.list)).collect()
        [(1, u'string', 1.0, 1, True, ...(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
        """
        self._sc = sparkContext
        self._jsc = self._sc._jsc
        self._jvm = self._sc._jvm
        self._pythonToJava = self._jvm.PythonRDD.pythonToJavaArray
        self._scala_SQLContext = sqlContext

    @property
    def _ssql_ctx(self):
        """Accessor for the JVM Spark SQL context.

        Subclasses can override this property to provide their own
        JVM Contexts.
        """
        # created lazily so subclasses (e.g. HiveContext) can substitute theirs
        if self._scala_SQLContext is None:
            self._scala_SQLContext = self._jvm.SQLContext(self._jsc.sc())
        return self._scala_SQLContext

    def registerFunction(self, name, f, returnType=StringType()):
        """Registers a lambda function as a UDF so it can be used in SQL statements.

        In addition to a name and the function itself, the return type can be optionally specified.
        When the return type is not given it default to a string and conversion will automatically
        be done. For any other return type, the produced object must match the specified type.

        >>> sqlCtx.registerFunction("stringLengthString", lambda x: len(x))
        >>> sqlCtx.sql("SELECT stringLengthString('test')").collect()
        [Row(c0=u'4')]
        >>> sqlCtx.registerFunction("stringLengthInt", lambda x: len(x), IntegerType())
        >>> sqlCtx.sql("SELECT stringLengthInt('test')").collect()
        [Row(c0=4)]
        """
        func = lambda _, it: imap(lambda x: f(*x), it)
        command = (func,
                   BatchedSerializer(PickleSerializer(), 1024),
                   BatchedSerializer(PickleSerializer(), 1024))
        ser = CloudPickleSerializer()
        pickled_command = ser.dumps(command)
        # BUG FIX: compare the pickled size, not the bytes object itself.
        # In Python 2 `str > int` is always True (type-name ordering), so the
        # old check forced a broadcast for every UDF regardless of size.
        if len(pickled_command) > (1 << 20):  # 1M
            broadcast = self._sc.broadcast(pickled_command)
            pickled_command = ser.dumps(broadcast)
        broadcast_vars = ListConverter().convert(
            [x._jbroadcast for x in self._sc._pickled_broadcast_vars],
            self._sc._gateway._gateway_client)
        self._sc._pickled_broadcast_vars.clear()
        env = MapConverter().convert(self._sc.environment,
                                     self._sc._gateway._gateway_client)
        includes = ListConverter().convert(self._sc._python_includes,
                                           self._sc._gateway._gateway_client)
        self._ssql_ctx.registerPython(name,
                                      bytearray(pickled_command),
                                      env,
                                      includes,
                                      self._sc.pythonExec,
                                      broadcast_vars,
                                      self._sc._javaAccumulator,
                                      str(returnType))

    def inferSchema(self, rdd):
        """Infer and apply a schema to an RDD of L{Row}.

        We peek at the first row of the RDD to determine the fields' names
        and types. Nested collections are supported, which include array,
        dict, list, Row, tuple, namedtuple, or object.

        All the rows in `rdd` should have the same type with the first one,
        or it will cause runtime exceptions.

        Each row could be L{pyspark.sql.Row} object or namedtuple or objects,
        using dict is deprecated.

        >>> rdd = sc.parallelize(
        ...     [Row(field1=1, field2="row1"),
        ...      Row(field1=2, field2="row2"),
        ...      Row(field1=3, field2="row3")])
        >>> srdd = sqlCtx.inferSchema(rdd)
        >>> srdd.collect()[0]
        Row(field1=1, field2=u'row1')

        >>> NestedRow = Row("f1", "f2")
        >>> nestedRdd1 = sc.parallelize([
        ...     NestedRow(array('i', [1, 2]), {"row1": 1.0}),
        ...     NestedRow(array('i', [2, 3]), {"row2": 2.0})])
        >>> srdd = sqlCtx.inferSchema(nestedRdd1)
        >>> srdd.collect()
        [Row(f1=[1, 2], f2={u'row1': 1.0}), ..., f2={u'row2': 2.0})]

        >>> nestedRdd2 = sc.parallelize([
        ...     NestedRow([[1, 2], [2, 3]], [1, 2]),
        ...     NestedRow([[2, 3], [3, 4]], [2, 3])])
        >>> srdd = sqlCtx.inferSchema(nestedRdd2)
        >>> srdd.collect()
        [Row(f1=[[1, 2], [2, 3]], f2=[1, 2]), ..., f2=[2, 3])]
        """
        if isinstance(rdd, SchemaRDD):
            raise TypeError("Cannot apply schema to SchemaRDD")

        first = rdd.first()
        if not first:
            raise ValueError("The first row in RDD is empty, "
                             "can not infer schema")
        if type(first) is dict:
            warnings.warn("Using RDD of dict to inferSchema is deprecated,"
                          "please use pyspark.sql.Row instead")

        # the schema is inferred from the first row only; all other rows are
        # assumed to match it
        schema = _infer_schema(first)
        rdd = rdd.mapPartitions(lambda rows: _drop_schema(rows, schema))
        return self.applySchema(rdd, schema)

    def applySchema(self, rdd, schema):
        """
        Applies the given schema to the given RDD of L{tuple} or L{list}.

        These tuples or lists can contain complex nested structures like
        lists, maps or nested rows.

        The schema should be a StructType.

        It is important that the schema matches the types of the objects
        in each row or exceptions could be thrown at runtime.

        >>> rdd2 = sc.parallelize([(1, "row1"), (2, "row2"), (3, "row3")])
        >>> schema = StructType([StructField("field1", IntegerType(), False),
        ...     StructField("field2", StringType(), False)])
        >>> srdd = sqlCtx.applySchema(rdd2, schema)
        >>> sqlCtx.registerRDDAsTable(srdd, "table1")
        >>> srdd2 = sqlCtx.sql("SELECT * from table1")
        >>> srdd2.collect()
        [Row(field1=1, field2=u'row1'),..., Row(field1=3, field2=u'row3')]

        >>> from datetime import datetime
        >>> rdd = sc.parallelize([(127, -128L, -32768, 32767, 2147483647L, 1.0,
        ...     datetime(2010, 1, 1, 1, 1, 1),
        ...     {"a": 1}, (2,), [1, 2, 3], None)])
        >>> schema = StructType([
        ...     StructField("byte1", ByteType(), False),
        ...     StructField("byte2", ByteType(), False),
        ...     StructField("short1", ShortType(), False),
        ...     StructField("short2", ShortType(), False),
        ...     StructField("int", IntegerType(), False),
        ...     StructField("float", FloatType(), False),
        ...     StructField("time", TimestampType(), False),
        ...     StructField("map",
        ...         MapType(StringType(), IntegerType(), False), False),
        ...     StructField("struct",
        ...         StructType([StructField("b", ShortType(), False)]), False),
        ...     StructField("list", ArrayType(ByteType(), False), False),
        ...     StructField("null", DoubleType(), True)])
        >>> srdd = sqlCtx.applySchema(rdd, schema)
        >>> results = srdd.map(
        ...     lambda x: (x.byte1, x.byte2, x.short1, x.short2, x.int, x.float, x.time,
        ...         x.map["a"], x.struct.b, x.list, x.null))
        >>> results.collect()[0]
        (127, -128, -32768, 32767, 2147483647, 1.0, ...(2010, 1, 1, 1, 1, 1), 1, 2, [1, 2, 3], None)

        >>> srdd.registerTempTable("table2")
        >>> sqlCtx.sql(
        ...   "SELECT byte1 - 1 AS byte1, byte2 + 1 AS byte2, " +
        ...     "short1 + 1 AS short1, short2 - 1 AS short2, int - 1 AS int, " +
        ...     "float + 1.5 as float FROM table2").collect()
        [Row(byte1=126, byte2=-127, short1=-32767, short2=32766, int=2147483646, float=2.5)]

        >>> rdd = sc.parallelize([(127, -32768, 1.0,
        ...     datetime(2010, 1, 1, 1, 1, 1),
        ...     {"a": 1}, (2,), [1, 2, 3])])
        >>> abstract = "byte short float time map{} struct(b) list[]"
        >>> schema = _parse_schema_abstract(abstract)
        >>> typedSchema = _infer_schema_type(rdd.first(), schema)
        >>> srdd = sqlCtx.applySchema(rdd, typedSchema)
        >>> srdd.collect()
        [Row(byte=127, short=-32768, float=1.0, time=..., list=[1, 2, 3])]
        """
        if isinstance(rdd, SchemaRDD):
            raise TypeError("Cannot apply schema to SchemaRDD")

        if not isinstance(schema, StructType):
            raise TypeError("schema should be StructType")

        # take the first few rows to verify schema
        rows = rdd.take(10)
        # Row() cannot been deserialized by Pyrolite
        if rows and isinstance(rows[0], tuple) and rows[0].__class__.__name__ == 'Row':
            rdd = rdd.map(tuple)
            rows = rdd.take(10)

        for row in rows:
            _verify_type(row, schema)

        batched = isinstance(rdd._jrdd_deserializer, BatchedSerializer)
        jrdd = self._pythonToJava(rdd._jrdd, batched)
        srdd = self._ssql_ctx.applySchemaToPythonRDD(jrdd.rdd(), str(schema))
        return SchemaRDD(srdd.toJavaSchemaRDD(), self)

    def registerRDDAsTable(self, rdd, tableName):
        """Registers the given RDD as a temporary table in the catalog.

        Temporary tables exist only during the lifetime of this instance of
        SQLContext.

        >>> srdd = sqlCtx.inferSchema(rdd)
        >>> sqlCtx.registerRDDAsTable(srdd, "table1")
        """
        if (rdd.__class__ is SchemaRDD):
            srdd = rdd._jschema_rdd.baseSchemaRDD()
            self._ssql_ctx.registerRDDAsTable(srdd, tableName)
        else:
            raise ValueError("Can only register SchemaRDD as table")

    def parquetFile(self, path):
        """Loads a Parquet file, returning the result as a L{SchemaRDD}.

        >>> import tempfile, shutil
        >>> parquetFile = tempfile.mkdtemp()
        >>> shutil.rmtree(parquetFile)
        >>> srdd = sqlCtx.inferSchema(rdd)
        >>> srdd.saveAsParquetFile(parquetFile)
        >>> srdd2 = sqlCtx.parquetFile(parquetFile)
        >>> sorted(srdd.collect()) == sorted(srdd2.collect())
        True
        """
        jschema_rdd = self._ssql_ctx.parquetFile(path).toJavaSchemaRDD()
        return SchemaRDD(jschema_rdd, self)

    def jsonFile(self, path, schema=None):
        """
        Loads a text file storing one JSON object per line as a
        L{SchemaRDD}.

        If the schema is provided, applies the given schema to this
        JSON dataset.

        Otherwise, it goes through the entire dataset once to determine
        the schema.

        >>> import tempfile, shutil
        >>> jsonFile = tempfile.mkdtemp()
        >>> shutil.rmtree(jsonFile)
        >>> ofn = open(jsonFile, 'w')
        >>> for json in jsonStrings:
        ...   print>>ofn, json
        >>> ofn.close()
        >>> srdd1 = sqlCtx.jsonFile(jsonFile)
        >>> sqlCtx.registerRDDAsTable(srdd1, "table1")
        >>> srdd2 = sqlCtx.sql(
        ...   "SELECT field1 AS f1, field2 as f2, field3 as f3, "
        ...   "field6 as f4 from table1")
        >>> for r in srdd2.collect():
        ...     print r
        Row(f1=1, f2=u'row1', f3=Row(field4=11, field5=None), f4=None)
        Row(f1=2, f2=None, f3=Row(field4=22,..., f4=[Row(field7=u'row2')])
        Row(f1=None, f2=u'row3', f3=Row(field4=33, field5=[]), f4=None)
        >>> srdd3 = sqlCtx.jsonFile(jsonFile, srdd1.schema())
        >>> sqlCtx.registerRDDAsTable(srdd3, "table2")
        >>> srdd4 = sqlCtx.sql(
        ...   "SELECT field1 AS f1, field2 as f2, field3 as f3, "
        ...   "field6 as f4 from table2")
        >>> for r in srdd4.collect():
        ...    print r
        Row(f1=1, f2=u'row1', f3=Row(field4=11, field5=None), f4=None)
        Row(f1=2, f2=None, f3=Row(field4=22,..., f4=[Row(field7=u'row2')])
        Row(f1=None, f2=u'row3', f3=Row(field4=33, field5=[]), f4=None)
        >>> schema = StructType([
        ...     StructField("field2", StringType(), True),
        ...     StructField("field3",
        ...         StructType([
        ...             StructField("field5",
        ...                 ArrayType(IntegerType(), False), True)]), False)])
        >>> srdd5 = sqlCtx.jsonFile(jsonFile, schema)
        >>> sqlCtx.registerRDDAsTable(srdd5, "table3")
        >>> srdd6 = sqlCtx.sql(
        ...   "SELECT field2 AS f1, field3.field5 as f2, "
        ...   "field3.field5[0] as f3 from table3")
        >>> srdd6.collect()
        [Row(f1=u'row1', f2=None, f3=None)...Row(f1=u'row3', f2=[], f3=None)]
        """
        if schema is None:
            srdd = self._ssql_ctx.jsonFile(path)
        else:
            # schemas are exchanged with the JVM in their string rendering
            scala_datatype = self._ssql_ctx.parseDataType(str(schema))
            srdd = self._ssql_ctx.jsonFile(path, scala_datatype)
        return SchemaRDD(srdd.toJavaSchemaRDD(), self)

    def jsonRDD(self, rdd, schema=None):
        """Loads an RDD storing one JSON object per string as a L{SchemaRDD}.

        If the schema is provided, applies the given schema to this
        JSON dataset.

        Otherwise, it goes through the entire dataset once to determine
        the schema.

        >>> srdd1 = sqlCtx.jsonRDD(json)
        >>> sqlCtx.registerRDDAsTable(srdd1, "table1")
        >>> srdd2 = sqlCtx.sql(
        ...   "SELECT field1 AS f1, field2 as f2, field3 as f3, "
        ...   "field6 as f4 from table1")
        >>> for r in srdd2.collect():
        ...     print r
        Row(f1=1, f2=u'row1', f3=Row(field4=11, field5=None), f4=None)
        Row(f1=2, f2=None, f3=Row(field4=22..., f4=[Row(field7=u'row2')])
        Row(f1=None, f2=u'row3', f3=Row(field4=33, field5=[]), f4=None)
        >>> srdd3 = sqlCtx.jsonRDD(json, srdd1.schema())
        >>> sqlCtx.registerRDDAsTable(srdd3, "table2")
        >>> srdd4 = sqlCtx.sql(
        ...   "SELECT field1 AS f1, field2 as f2, field3 as f3, "
        ...   "field6 as f4 from table2")
        >>> for r in srdd4.collect():
        ...    print r
        Row(f1=1, f2=u'row1', f3=Row(field4=11, field5=None), f4=None)
        Row(f1=2, f2=None, f3=Row(field4=22..., f4=[Row(field7=u'row2')])
        Row(f1=None, f2=u'row3', f3=Row(field4=33, field5=[]), f4=None)
        >>> schema = StructType([
        ...     StructField("field2", StringType(), True),
        ...     StructField("field3",
        ...         StructType([
        ...             StructField("field5",
        ...                 ArrayType(IntegerType(), False), True)]), False)])
        >>> srdd5 = sqlCtx.jsonRDD(json, schema)
        >>> sqlCtx.registerRDDAsTable(srdd5, "table3")
        >>> srdd6 = sqlCtx.sql(
        ...   "SELECT field2 AS f1, field3.field5 as f2, "
        ...   "field3.field5[0] as f3 from table3")
        >>> srdd6.collect()
        [Row(f1=u'row1', f2=None,...Row(f1=u'row3', f2=[], f3=None)]

        >>> sqlCtx.jsonRDD(sc.parallelize(['{}',
        ... '{"key0": {"key1": "value1"}}'])).collect()
        [Row(key0=None), Row(key0=Row(key1=u'value1'))]
        >>> sqlCtx.jsonRDD(sc.parallelize(['{"key0": null}',
        ... '{"key0": {"key1": "value1"}}'])).collect()
        [Row(key0=None), Row(key0=Row(key1=u'value1'))]
        """
        def func(iterator):
            # normalize every element to utf-8 encoded bytes for the JVM side
            for x in iterator:
                if not isinstance(x, basestring):
                    x = unicode(x)
                if isinstance(x, unicode):
                    x = x.encode("utf-8")
                yield x
        keyed = rdd.mapPartitions(func)
        keyed._bypass_serializer = True
        jrdd = keyed._jrdd.map(self._jvm.BytesToString())
        if schema is None:
            srdd = self._ssql_ctx.jsonRDD(jrdd.rdd())
        else:
            scala_datatype = self._ssql_ctx.parseDataType(str(schema))
            srdd = self._ssql_ctx.jsonRDD(jrdd.rdd(), scala_datatype)
        return SchemaRDD(srdd.toJavaSchemaRDD(), self)

    def sql(self, sqlQuery):
        """Return a L{SchemaRDD} representing the result of the given query.

        >>> srdd = sqlCtx.inferSchema(rdd)
        >>> sqlCtx.registerRDDAsTable(srdd, "table1")
        >>> srdd2 = sqlCtx.sql("SELECT field1 AS f1, field2 as f2 from table1")
        >>> srdd2.collect()
        [Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
        """
        return SchemaRDD(self._ssql_ctx.sql(sqlQuery).toJavaSchemaRDD(), self)

    def table(self, tableName):
        """Returns the specified table as a L{SchemaRDD}.

        >>> srdd = sqlCtx.inferSchema(rdd)
        >>> sqlCtx.registerRDDAsTable(srdd, "table1")
        >>> srdd2 = sqlCtx.table("table1")
        >>> sorted(srdd.collect()) == sorted(srdd2.collect())
        True
        """
        return SchemaRDD(self._ssql_ctx.table(tableName).toJavaSchemaRDD(), self)

    def cacheTable(self, tableName):
        """Caches the specified table in-memory."""
        self._ssql_ctx.cacheTable(tableName)

    def uncacheTable(self, tableName):
        """Removes the specified table from the in-memory cache."""
        self._ssql_ctx.uncacheTable(tableName)
class HiveContext(SQLContext):

    """A variant of Spark SQL that integrates with data stored in Hive.

    Configuration for Hive is read from hive-site.xml on the classpath.
    It supports running both SQL and HiveQL commands.
    """

    def __init__(self, sparkContext, hiveContext=None):
        """Create a new HiveContext.

        @param sparkContext: The SparkContext to wrap.
        @param hiveContext: An optional JVM Scala HiveContext. If set, we do not instantiate a new
        HiveContext in the JVM, instead we make all calls to this object.
        """
        SQLContext.__init__(self, sparkContext)

        if hiveContext:
            self._scala_HiveContext = hiveContext

    @property
    def _ssql_ctx(self):
        try:
            if not hasattr(self, '_scala_HiveContext'):
                self._scala_HiveContext = self._get_hive_ctx()
            return self._scala_HiveContext
        except Py4JError as e:
            # a Py4J failure here almost always means the Spark assembly
            # was built without Hive support
            raise Exception("You must build Spark with Hive. "
                            "Export 'SPARK_HIVE=true' and run "
                            "sbt/sbt assembly", e)

    def _get_hive_ctx(self):
        return self._jvm.HiveContext(self._jsc.sc())

    def hiveql(self, hqlQuery):
        """
        DEPRECATED: Use sql()
        """
        # BUG FIX: trailing space after "by" — the two literals are
        # concatenated, so the old message read "...HiveQL bydefault...".
        warnings.warn("hiveql() is deprecated as the sql function now parses using HiveQL by " +
                      "default. The SQL dialect for parsing can be set using 'spark.sql.dialect'",
                      DeprecationWarning)
        return SchemaRDD(self._ssql_ctx.hiveql(hqlQuery).toJavaSchemaRDD(), self)

    def hql(self, hqlQuery):
        """
        DEPRECATED: Use sql()
        """
        # same missing-space fix as hiveql() above
        warnings.warn("hql() is deprecated as the sql function now parses using HiveQL by " +
                      "default. The SQL dialect for parsing can be set using 'spark.sql.dialect'",
                      DeprecationWarning)
        return self.hiveql(hqlQuery)
class LocalHiveContext(HiveContext):

    """Starts up an instance of hive where metadata is stored locally.
    An in-process metadata data is created with data stored in ./metadata.
    Warehouse data is stored in in ./warehouse.
    >>> import os
    >>> hiveCtx = LocalHiveContext(sc)
    >>> try:
    ... supress = hiveCtx.sql("DROP TABLE src")
    ... except Exception:
    ... pass
    >>> kv1 = os.path.join(os.environ["SPARK_HOME"],
    ... 'examples/src/main/resources/kv1.txt')
    >>> supress = hiveCtx.sql(
    ... "CREATE TABLE IF NOT EXISTS src (key INT, value STRING)")
    >>> supress = hiveCtx.sql("LOAD DATA LOCAL INPATH '%s' INTO TABLE src"
    ... % kv1)
    >>> results = hiveCtx.sql("FROM src SELECT value"
    ... ).map(lambda r: int(r.value.split('_')[1]))
    >>> num = results.count()
    >>> reduce_sum = results.reduce(lambda x, y: x + y)
    >>> num
    500
    >>> reduce_sum
    130091
    """

    def __init__(self, sparkContext, sqlContext=None):
        HiveContext.__init__(self, sparkContext, sqlContext)
        # this subclass exists only for backwards compatibility
        warnings.warn("LocalHiveContext is deprecated. "
                      "Use HiveContext instead.", DeprecationWarning)

    def _get_hive_ctx(self):
        # in-process (local metastore) variant of the JVM Hive context
        jsc = self._jsc.sc()
        return self._jvm.LocalHiveContext(jsc)
class TestHiveContext(HiveContext):

    def _get_hive_ctx(self):
        # swap in the test-oriented JVM Hive context; everything else inherited
        jsc = self._jsc.sc()
        return self._jvm.TestHiveContext(jsc)
def _create_row(fields, values):
    """Build a Row from positional `values` and attach `fields` as names."""
    r = Row(*values)
    r.__FIELDS__ = fields
    return r
class Row(tuple):

    """
    A row in L{SchemaRDD}. The fields in it can be accessed like attributes.

    Row can be used to create a row object by using named arguments,
    the fields will be sorted by names.

    >>> row = Row(name="Alice", age=11)
    >>> row
    Row(age=11, name='Alice')
    >>> row.name, row.age
    ('Alice', 11)

    Row also can be used to create another Row like class, then it
    could be used to create Row objects, such as

    >>> Person = Row("name", "age")
    >>> Person
    <Row(name, age)>
    >>> Person("Alice", 11)
    Row(name='Alice', age=11)
    """

    def __new__(self, *args, **kwargs):
        if args and kwargs:
            raise ValueError("Can not use both args "
                             "and kwargs to create Row")
        if args:
            # create row class or objects
            return tuple.__new__(self, args)

        elif kwargs:
            # create row objects
            names = sorted(kwargs.keys())
            values = tuple(kwargs[n] for n in names)
            row = tuple.__new__(self, values)
            row.__FIELDS__ = names
            return row

        else:
            raise ValueError("No args or kwargs")

    # let object act like class
    def __call__(self, *args):
        """create new Row object"""
        return _create_row(self, args)

    def __getattr__(self, item):
        if item.startswith("__"):
            raise AttributeError(item)
        try:
            # it will be slow when it has many fields,
            # but this will not be used in normal cases
            idx = self.__FIELDS__.index(item)
            return self[idx]
        except (IndexError, ValueError):
            # BUG FIX: list.index() raises ValueError (not IndexError) for an
            # unknown name; both must surface as AttributeError so that
            # getattr()/hasattr() behave correctly for missing fields.
            raise AttributeError(item)

    def __reduce__(self):
        # named rows pickle as (fields, values); unnamed ones as plain tuples
        if hasattr(self, "__FIELDS__"):
            return (_create_row, (self.__FIELDS__, tuple(self)))
        else:
            return tuple.__reduce__(self)

    def __repr__(self):
        if hasattr(self, "__FIELDS__"):
            return "Row(%s)" % ", ".join("%s=%r" % (k, v)
                                         for k, v in zip(self.__FIELDS__, self))
        else:
            return "<Row(%s)>" % ", ".join(self)
def inherit_doc(cls):
    """Copy missing docstrings on public members from the closest base class."""
    for attr_name, member in vars(cls).items():
        # only inherit docstring for public functions
        if attr_name.startswith("_"):
            continue
        if member.__doc__:
            continue
        for base in cls.__bases__:
            inherited = getattr(base, attr_name, None)
            if inherited and getattr(inherited, "__doc__", None):
                member.__doc__ = inherited.__doc__
                break
    return cls
@inherit_doc
class SchemaRDD(RDD):
"""An RDD of L{Row} objects that has an associated schema.
The underlying JVM object is a SchemaRDD, not a PythonRDD, so we can
utilize the relational query api exposed by Spark SQL.
For normal L{pyspark.rdd.RDD} operations (map, count, etc.) the
L{SchemaRDD} is not operated on directly, as it's underlying
implementation is an RDD composed of Java objects. Instead it is
converted to a PythonRDD in the JVM, on which Python operations can
be done.
This class receives raw tuples from Java but assigns a class to it in
all its data-collection methods (mapPartitionsWithIndex, collect, take,
etc) so that PySpark sees them as Row objects with named fields.
"""
def __init__(self, jschema_rdd, sql_ctx):
self.sql_ctx = sql_ctx
self._sc = sql_ctx._sc
clsName = jschema_rdd.getClass().getName()
assert clsName.endswith("JavaSchemaRDD"), "jschema_rdd must be JavaSchemaRDD"
self._jschema_rdd = jschema_rdd
self._id = None
self.is_cached = False
self.is_checkpointed = False
self.ctx = self.sql_ctx._sc
# the _jrdd is created by javaToPython(), serialized by pickle
self._jrdd_deserializer = BatchedSerializer(PickleSerializer())
@property
def _jrdd(self):
"""Lazy evaluation of PythonRDD object.
Only done when a user calls methods defined by the
L{pyspark.rdd.RDD} super class (map, filter, etc.).
"""
if not hasattr(self, '_lazy_jrdd'):
self._lazy_jrdd = self._jschema_rdd.baseSchemaRDD().javaToPython()
return self._lazy_jrdd
def id(self):
if self._id is None:
self._id = self._jrdd.id()
return self._id
    def limit(self, num):
        """Limit the result count to the number specified.
        >>> srdd = sqlCtx.inferSchema(rdd)
        >>> srdd.limit(2).collect()
        [Row(field1=1, field2=u'row1'), Row(field1=2, field2=u'row2')]
        >>> srdd.limit(0).collect()
        []
        """
        # apply the limit on the JVM side, then re-wrap the result
        rdd = self._jschema_rdd.baseSchemaRDD().limit(num).toJavaSchemaRDD()
        return SchemaRDD(rdd, self.sql_ctx)
    def saveAsParquetFile(self, path):
        """Save the contents as a Parquet file, preserving the schema.
        Files that are written out using this method can be read back in as
        a SchemaRDD using the L{SQLContext.parquetFile} method.
        >>> import tempfile, shutil
        >>> parquetFile = tempfile.mkdtemp()
        >>> shutil.rmtree(parquetFile)
        >>> srdd = sqlCtx.inferSchema(rdd)
        >>> srdd.saveAsParquetFile(parquetFile)
        >>> srdd2 = sqlCtx.parquetFile(parquetFile)
        >>> sorted(srdd2.collect()) == sorted(srdd.collect())
        True
        """
        # delegated entirely to the JVM-side SchemaRDD
        self._jschema_rdd.saveAsParquetFile(path)
    def registerTempTable(self, name):
        """Registers this RDD as a temporary table using the given name.
        The lifetime of this temporary table is tied to the L{SQLContext}
        that was used to create this SchemaRDD.
        >>> srdd = sqlCtx.inferSchema(rdd)
        >>> srdd.registerTempTable("test")
        >>> srdd2 = sqlCtx.sql("select * from test")
        >>> sorted(srdd.collect()) == sorted(srdd2.collect())
        True
        """
        # registration happens in the JVM catalog
        self._jschema_rdd.registerTempTable(name)
def registerAsTable(self, name):
"""DEPRECATED: use registerTempTable() instead"""
warnings.warn("Use registerTempTable instead of registerAsTable.", DeprecationWarning)
self.registerTempTable(name)
def insertInto(self, tableName, overwrite=False):
"""Inserts the contents of this SchemaRDD into the specified table.
Optionally overwriting any existing data.
"""
self._jschema_rdd.insertInto(tableName, overwrite)
def saveAsTable(self, tableName):
"""Creates a new table with the contents of this SchemaRDD."""
self._jschema_rdd.saveAsTable(tableName)
def schema(self):
"""Returns the schema of this SchemaRDD (represented by
a L{StructType})."""
return _parse_datatype_string(self._jschema_rdd.baseSchemaRDD().schema().toString())
def schemaString(self):
"""Returns the output schema in the tree format."""
return self._jschema_rdd.schemaString()
    def printSchema(self):
        """Prints out the schema in the tree format."""
        # Python 2 print statement: writes the tree rendering to stdout
        print self.schemaString()
    def count(self):
        """Return the number of elements in this RDD.
        Unlike the base RDD implementation of count, this implementation
        leverages the query optimizer to compute the count on the SchemaRDD,
        which supports features such as filter pushdown.
        >>> srdd = sqlCtx.inferSchema(rdd)
        >>> srdd.count()
        3L
        >>> srdd.count() == srdd.map(lambda x: x).count()
        True
        """
        # delegate to the JVM SchemaRDD's optimized count
        return self._jschema_rdd.count()
    def collect(self):
        """Return a list that contains all of the rows in this RDD.
        Each object in the list is a Row, the fields can be accessed as
        attributes.
        Unlike the base RDD implementation of collect, this implementation
        leverages the query optimizer to perform a collect on the SchemaRDD,
        which supports features such as filter pushdown.
        >>> srdd = sqlCtx.inferSchema(rdd)
        >>> srdd.collect()
        [Row(field1=1, field2=u'row1'), ..., Row(field1=3, field2=u'row3')]
        """
        # record the call site so JVM-side errors point back to user code
        with SCCallSiteSync(self.context) as css:
            bytesInJava = self._jschema_rdd.baseSchemaRDD().collectToPython().iterator()
        # wrap each raw tuple in the Row class generated from this schema
        cls = _create_cls(self.schema())
        return map(cls, self._collect_iterator_through_file(bytesInJava))
def take(self, num):
    """Take the first num rows of the RDD.

    Each object in the list is a Row, the fields can be accessed as
    attributes.

    Unlike the base RDD implementation of take, this implementation
    leverages the query optimizer to perform a collect on a SchemaRDD,
    which supports features such as filter pushdown.

    >>> srdd = sqlCtx.inferSchema(rdd)
    >>> srdd.take(2)
    [Row(field1=1, field2=u'row1'), Row(field1=2, field2=u'row2')]
    """
    # LIMIT is pushed into the query plan, then the (small) result is
    # collected; avoids scanning more partitions than needed.
    return self.limit(num).collect()
# Convert each object in the RDD to a Row with the right class
# for this SchemaRDD, so that fields can be accessed as attributes.
def mapPartitionsWithIndex(self, f, preservesPartitioning=False):
    """
    Return a new RDD by applying a function to each partition of this RDD,
    while tracking the index of the original partition.

    >>> rdd = sc.parallelize([1, 2, 3, 4], 4)
    >>> def f(splitIndex, iterator): yield splitIndex
    >>> rdd.mapPartitionsWithIndex(f).sum()
    6
    """
    # Rebuild a plain RDD over the same JVM RDD so the user function sees
    # Row objects, not raw pickled tuples.
    rdd = RDD(self._jrdd, self._sc, self._jrdd_deserializer)

    schema = self.schema()

    # First pass converts each raw tuple into a schema-specific Row class;
    # second pass applies the user's function.
    def applySchema(_, it):
        cls = _create_cls(schema)
        return itertools.imap(cls, it)

    objrdd = rdd.mapPartitionsWithIndex(applySchema, preservesPartitioning)
    return objrdd.mapPartitionsWithIndex(f, preservesPartitioning)
# We override the default cache/persist/checkpoint behavior
# as we want to cache the underlying SchemaRDD object in the JVM,
# not the PythonRDD checkpointed by the super class
def cache(self):
    """Cache the underlying JVM SchemaRDD (not the Python-side RDD)."""
    self.is_cached = True
    self._jschema_rdd.cache()
    return self
def persist(self, storageLevel=StorageLevel.MEMORY_ONLY_SER):
    """Persist the underlying JVM SchemaRDD with the given storage level."""
    self.is_cached = True
    # NOTE(review): uses self.ctx here while collect() uses self.context —
    # presumably both name the SparkContext; confirm against __init__.
    javaStorageLevel = self.ctx._getJavaStorageLevel(storageLevel)
    self._jschema_rdd.persist(javaStorageLevel)
    return self
def unpersist(self, blocking=True):
    """Mark the underlying JVM SchemaRDD as non-persistent."""
    self.is_cached = False
    self._jschema_rdd.unpersist(blocking)
    return self
def checkpoint(self):
    """Checkpoint the underlying JVM SchemaRDD."""
    self.is_checkpointed = True
    self._jschema_rdd.checkpoint()
def isCheckpointed(self):
    """Return whether the underlying JVM SchemaRDD has been checkpointed."""
    return self._jschema_rdd.isCheckpointed()
def getCheckpointFile(self):
    """Return the checkpoint file path, or None if not checkpointed."""
    # The JVM returns a Scala Option; an absent value falls through to an
    # implicit None return.
    checkpointFile = self._jschema_rdd.getCheckpointFile()
    if checkpointFile.isPresent():
        return checkpointFile.get()
def coalesce(self, numPartitions, shuffle=False):
    """Return a new SchemaRDD reduced to `numPartitions` partitions."""
    rdd = self._jschema_rdd.coalesce(numPartitions, shuffle)
    return SchemaRDD(rdd, self.sql_ctx)
def distinct(self, numPartitions=None):
    """Return a new SchemaRDD containing only the distinct rows.

    :param numPartitions: optional partition count for the result; when
        omitted, the JVM default is used.
    """
    if numPartitions is None:
        jrdd = self._jschema_rdd.distinct()
    else:
        jrdd = self._jschema_rdd.distinct(numPartitions)
    return SchemaRDD(jrdd, self.sql_ctx)
def intersection(self, other):
    """Return the intersection of this SchemaRDD and `other`.

    Raises ValueError unless `other` is itself a SchemaRDD (exact class,
    not a subclass).
    """
    if other.__class__ is not SchemaRDD:
        raise ValueError("Can only intersect with another SchemaRDD")
    jrdd = self._jschema_rdd.intersection(other._jschema_rdd)
    return SchemaRDD(jrdd, self.sql_ctx)
def repartition(self, numPartitions):
    """Return a new SchemaRDD with exactly `numPartitions` partitions."""
    rdd = self._jschema_rdd.repartition(numPartitions)
    return SchemaRDD(rdd, self.sql_ctx)
def subtract(self, other, numPartitions=None):
    """Return the rows of this SchemaRDD that do not appear in `other`.

    :param other: another SchemaRDD (exact class required).
    :param numPartitions: optional partition count for the result.
    """
    if other.__class__ is not SchemaRDD:
        raise ValueError("Can only subtract another SchemaRDD")
    if numPartitions is None:
        jrdd = self._jschema_rdd.subtract(other._jschema_rdd)
    else:
        jrdd = self._jschema_rdd.subtract(other._jschema_rdd, numPartitions)
    return SchemaRDD(jrdd, self.sql_ctx)
def _test():
    """Run this module's doctests against a local SparkContext."""
    import doctest
    from array import array
    from pyspark.context import SparkContext
    # let doctest run in pyspark.sql, so DataTypes can be picklable
    import pyspark.sql
    from pyspark.sql import Row, SQLContext
    globs = pyspark.sql.__dict__.copy()
    # The small batch size here ensures that we see multiple batches,
    # even in these small test examples:
    sc = SparkContext('local[4]', 'PythonTest', batchSize=2)
    globs['sc'] = sc
    globs['sqlCtx'] = SQLContext(sc)
    globs['rdd'] = sc.parallelize(
        [Row(field1=1, field2="row1"),
         Row(field1=2, field2="row2"),
         Row(field1=3, field2="row3")]
    )
    jsonStrings = [
        '{"field1": 1, "field2": "row1", "field3":{"field4":11}}',
        '{"field1" : 2, "field3":{"field4":22, "field5": [10, 11]},'
        '"field6":[{"field7": "row2"}]}',
        '{"field1" : null, "field2": "row3", '
        '"field3":{"field4":33, "field5": []}}'
    ]
    globs['jsonStrings'] = jsonStrings
    globs['json'] = sc.parallelize(jsonStrings)
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql, globs=globs, optionflags=doctest.ELLIPSIS)
    globs['sc'].stop()
    if failure_count:
        # NOTE(review): relies on the builtin `exit` injected by site.py;
        # sys.exit would be the conventional spelling — confirm sys import.
        exit(-1)
# Run the doctest suite when this module is executed directly.
if __name__ == "__main__":
    _test()
| 34.531371 | 100 | 0.60282 |
import sys
import types
import itertools
import warnings
import decimal
import datetime
import keyword
import warnings
from array import array
from operator import itemgetter
from pyspark.rdd import RDD
from pyspark.serializers import BatchedSerializer, PickleSerializer, CloudPickleSerializer
from pyspark.storagelevel import StorageLevel
from pyspark.traceback_utils import SCCallSiteSync
from itertools import chain, ifilter, imap
from py4j.protocol import Py4JError
from py4j.java_collections import ListConverter, MapConverter
# Public API of this module: the DataType hierarchy plus the three
# entry-point classes (SQLContext, HiveContext, SchemaRDD) and Row.
__all__ = [
    "StringType", "BinaryType", "BooleanType", "TimestampType", "DecimalType",
    "DoubleType", "FloatType", "ByteType", "IntegerType", "LongType",
    "ShortType", "ArrayType", "MapType", "StructField", "StructType",
    "SQLContext", "HiveContext", "SchemaRDD", "Row"]
class DataType(object):
    """Base class for all Spark SQL data types.

    Two instances compare equal when they have the same concrete class
    and identical attribute dictionaries; the hash is derived from the
    string rendering so equal types hash equally.
    """

    def __repr__(self):
        return self.__class__.__name__

    def __hash__(self):
        return hash(str(self))

    def __eq__(self, other):
        same_class = isinstance(other, self.__class__)
        return same_class and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)
class PrimitiveTypeSingleton(type):
    """Metaclass that turns every class using it into a singleton:
    calling the class repeatedly always yields the same instance."""

    # One shared cache, keyed by the class object itself.
    _instances = {}

    def __call__(cls):
        try:
            return cls._instances[cls]
        except KeyError:
            instance = super(PrimitiveTypeSingleton, cls).__call__()
            cls._instances[cls] = instance
            return instance
class PrimitiveType(DataType):
    """Spark SQL PrimitiveType"""

    # Python 2 metaclass declaration: each concrete subclass is a singleton.
    __metaclass__ = PrimitiveTypeSingleton

    def __eq__(self, other):
        # Singletons make identity comparison sufficient (and cheap).
        return self is other
class StringType(PrimitiveType):
    """Spark SQL StringType

    The data type representing string values.
    """
class BinaryType(PrimitiveType):
    """Spark SQL BinaryType

    The data type representing bytearray values.
    """
class BooleanType(PrimitiveType):
    """Spark SQL BooleanType

    The data type representing bool values.
    """
class TimestampType(PrimitiveType):
    """Spark SQL TimestampType

    The data type representing datetime.datetime values.
    """
class DecimalType(PrimitiveType):
    """Spark SQL DecimalType

    The data type representing decimal.Decimal values.
    """
class DoubleType(PrimitiveType):
    """Spark SQL DoubleType

    The data type representing float values.
    """
class FloatType(PrimitiveType):
    """Spark SQL FloatType

    The data type representing single precision floating-point values.
    """
class ByteType(PrimitiveType):
    """Spark SQL ByteType

    The data type representing int values with 1 signed byte.
    """
class IntegerType(PrimitiveType):
    """Spark SQL IntegerType

    The data type representing int values.
    """
class LongType(PrimitiveType):
    """Spark SQL LongType

    The data type representing long values. If the any value is
    beyond the range of [-9223372036854775808, 9223372036854775807],
    please use DecimalType.
    """
class ShortType(PrimitiveType):
    """Spark SQL ShortType

    The data type representing int values with 2 signed bytes.
    """
class ArrayType(DataType):
    """Spark SQL ArrayType

    The data type representing list values. An ArrayType object
    comprises two fields, elementType (a DataType) and containsNull (a bool).
    The field of elementType is used to specify the type of array elements.
    The field of containsNull is used to specify if the array has None values.
    """

    def __init__(self, elementType, containsNull=True):
        """Creates an ArrayType

        :param elementType: the data type of elements.
        :param containsNull: indicates whether the list contains None values.

        >>> ArrayType(StringType) == ArrayType(StringType, True)
        True
        >>> ArrayType(StringType, False) == ArrayType(StringType)
        False
        """
        self.elementType = elementType
        self.containsNull = containsNull

    def __repr__(self):
        # FIX: defined as __repr__ (was __str__) for consistency with
        # MapType/StructField/StructType; str() falls back to __repr__, so
        # every existing str()-based caller sees the identical string.
        return "ArrayType(%s,%s)" % (self.elementType,
                                     str(self.containsNull).lower())
class MapType(DataType):
    """Spark SQL MapType

    The data type representing dict values. A MapType object comprises
    three fields, keyType (a DataType), valueType (a DataType) and
    valueContainsNull (a bool).

    The field of keyType is used to specify the type of keys in the map.
    The field of valueType is used to specify the type of values in the map.
    The field of valueContainsNull is used to specify if values of this
    map has None values.

    For values of a MapType column, keys are not allowed to have None values.
    """

    def __init__(self, keyType, valueType, valueContainsNull=True):
        """Creates a MapType

        :param keyType: the data type of keys.
        :param valueType: the data type of values.
        :param valueContainsNull: indicates whether values contains
        null values.

        >>> (MapType(StringType, IntegerType)
        ...        == MapType(StringType, IntegerType, True))
        True
        >>> (MapType(StringType, IntegerType, False)
        ...        == MapType(StringType, FloatType))
        False
        """
        self.keyType = keyType
        self.valueType = valueType
        self.valueContainsNull = valueContainsNull

    def __repr__(self):
        # Rendering matches the Scala DataType.toString format so schemas
        # can be round-tripped through _parse_datatype_string.
        return "MapType(%s,%s,%s)" % (self.keyType, self.valueType,
                                      str(self.valueContainsNull).lower())
class StructField(DataType):
    """Spark SQL StructField

    Represents a field in a StructType.
    A StructField object comprises three fields, name (a string),
    dataType (a DataType) and nullable (a bool). The field of name
    is the name of a StructField. The field of dataType specifies
    the data type of a StructField.

    The field of nullable specifies if values of a StructField can
    contain None values.
    """

    def __init__(self, name, dataType, nullable):
        """Creates a StructField

        :param name: the name of this field.
        :param dataType: the data type of this field.
        :param nullable: indicates whether values of this field
        can be null.

        >>> (StructField("f1", StringType, True)
        ...      == StructField("f1", StringType, True))
        True
        >>> (StructField("f1", StringType, True)
        ...      == StructField("f2", StringType, True))
        False
        """
        self.name = name
        self.dataType = dataType
        self.nullable = nullable

    def __repr__(self):
        # Matches the Scala rendering so schema strings parse back.
        return "StructField(%s,%s,%s)" % (self.name, self.dataType,
                                          str(self.nullable).lower())
class StructType(DataType):
    """Spark SQL StructType

    The data type representing rows.
    A StructType object comprises a list of L{StructField}.
    """

    def __init__(self, fields):
        """Creates a StructType

        :param fields: ordered list of L{StructField} making up the row.

        >>> struct1 = StructType([StructField("f1", StringType, True)])
        >>> struct2 = StructType([StructField("f1", StringType, True)])
        >>> struct1 == struct2
        True
        >>> struct1 = StructType([StructField("f1", StringType, True)])
        >>> struct2 = StructType([StructField("f1", StringType, True),
        ...   [StructField("f2", IntegerType, False)]])
        >>> struct1 == struct2
        False
        """
        self.fields = fields

    def __repr__(self):
        # Matches the Scala "StructType(List(...))" rendering so the string
        # can be re-parsed by _parse_datatype_string.
        return ("StructType(List(%s))" %
                ",".join(str(field) for field in self.fields))
def _parse_datatype_list(datatype_list_string):
    """Parses a list of comma separated data types.

    Splits only on commas at nesting depth 0 so commas inside nested
    "Type(...)" renderings are not treated as separators.
    """
    index = 0
    datatype_list = []
    start = 0
    depth = 0          # current parenthesis nesting depth
    while index < len(datatype_list_string):
        if depth == 0 and datatype_list_string[index] == ",":
            datatype_string = datatype_list_string[start:index].strip()
            datatype_list.append(_parse_datatype_string(datatype_string))
            start = index + 1
        elif datatype_list_string[index] == "(":
            depth += 1
        elif datatype_list_string[index] == ")":
            depth -= 1
        index += 1
    # The final segment has no trailing comma; parse it explicitly.
    datatype_string = datatype_list_string[start:index].strip()
    datatype_list.append(_parse_datatype_string(datatype_string))
    return datatype_list
# Map of class-name -> class for every direct PrimitiveType subclass defined
# above (detected via the singleton metaclass), used by the schema parser.
_all_primitive_types = dict((k, v) for k, v in globals().iteritems()
                            if type(v) is PrimitiveTypeSingleton and v.__base__ == PrimitiveType)
def _parse_datatype_string(datatype_string):
    """Parses the given data type string (the Scala DataType.toString
    rendering) back into a Python DataType object.

    >>> def check_datatype(datatype):
    ...     scala_datatype = sqlCtx._ssql_ctx.parseDataType(str(datatype))
    ...     python_datatype = _parse_datatype_string(
    ...                           scala_datatype.toString())
    ...     return datatype == python_datatype
    >>> all(check_datatype(cls()) for cls in _all_primitive_types.values())
    True
    >>> # Simple ArrayType.
    >>> simple_arraytype = ArrayType(StringType(), True)
    >>> check_datatype(simple_arraytype)
    True
    >>> # Simple MapType.
    >>> simple_maptype = MapType(StringType(), LongType())
    >>> check_datatype(simple_maptype)
    True
    >>> # Simple StructType.
    >>> simple_structtype = StructType([
    ...     StructField("a", DecimalType(), False),
    ...     StructField("b", BooleanType(), True),
    ...     StructField("c", LongType(), True),
    ...     StructField("d", BinaryType(), False)])
    >>> check_datatype(simple_structtype)
    True
    >>> # Complex StructType.
    >>> complex_structtype = StructType([
    ...     StructField("simpleArray", simple_arraytype, True),
    ...     StructField("simpleMap", simple_maptype, True),
    ...     StructField("simpleStruct", simple_structtype, True),
    ...     StructField("boolean", BooleanType(), False)])
    >>> check_datatype(complex_structtype)
    True
    >>> # Complex ArrayType.
    >>> complex_arraytype = ArrayType(complex_structtype, True)
    >>> check_datatype(complex_arraytype)
    True
    >>> # Complex MapType.
    >>> complex_maptype = MapType(complex_structtype,
    ...                           complex_arraytype, False)
    >>> check_datatype(complex_maptype)
    True
    """
    # Split "TypeName(inner...)" into the type name and the inner text;
    # primitive types have no parenthesized part.
    index = datatype_string.find("(")
    if index == -1:
        # It is a primitive type.
        index = len(datatype_string)
    type_or_field = datatype_string[:index]
    rest_part = datatype_string[index + 1:len(datatype_string) - 1].strip()

    if type_or_field in _all_primitive_types:
        return _all_primitive_types[type_or_field]()

    elif type_or_field == "ArrayType":
        # Last comma separates the element type from the containsNull flag.
        last_comma_index = rest_part.rfind(",")
        containsNull = True
        if rest_part[last_comma_index + 1:].strip().lower() == "false":
            containsNull = False
        elementType = _parse_datatype_string(
            rest_part[:last_comma_index].strip())
        return ArrayType(elementType, containsNull)

    elif type_or_field == "MapType":
        last_comma_index = rest_part.rfind(",")
        valueContainsNull = True
        if rest_part[last_comma_index + 1:].strip().lower() == "false":
            valueContainsNull = False
        keyType, valueType = _parse_datatype_list(
            rest_part[:last_comma_index].strip())
        return MapType(keyType, valueType, valueContainsNull)

    elif type_or_field == "StructField":
        # Layout is "name,<dataType rendering>,nullable".
        first_comma_index = rest_part.find(",")
        name = rest_part[:first_comma_index].strip()
        last_comma_index = rest_part.rfind(",")
        nullable = True
        if rest_part[last_comma_index + 1:].strip().lower() == "false":
            nullable = False
        dataType = _parse_datatype_string(
            rest_part[first_comma_index + 1:last_comma_index].strip())
        return StructField(name, dataType, nullable)

    elif type_or_field == "StructType":
        # rest_part looks like "List(StructField(...),...)" — strip the
        # "List(" wrapper before parsing the fields.
        field_list_string = rest_part[rest_part.find("(") + 1:-1]
        fields = _parse_datatype_list(field_list_string)
        return StructType(fields)
# Python type -> Spark SQL DataType used by _infer_type for scalars.
# Note: date and time both map to TimestampType (no dedicated types here).
_type_mappings = {
    bool: BooleanType,
    int: IntegerType,
    long: LongType,
    float: DoubleType,
    str: StringType,
    unicode: StringType,
    bytearray: BinaryType,
    decimal.Decimal: DecimalType,
    datetime.datetime: TimestampType,
    datetime.date: TimestampType,
    datetime.time: TimestampType,
}
def _infer_type(obj):
    """Infer the DataType from obj.

    Scalars use _type_mappings; dicts become MapType, lists/arrays become
    ArrayType (both inferred from one sample element), and anything else is
    treated as a struct-like object via _infer_schema.

    :raises ValueError: for None, empty containers, or unsupported types.
    """
    if obj is None:
        raise ValueError("Can not infer type for None")

    dataType = _type_mappings.get(type(obj))
    if dataType is not None:
        return dataType()

    if isinstance(obj, dict):
        if not obj:
            raise ValueError("Can not infer type for empty dict")
        # Only the first key/value pair is sampled for the element types.
        key, value = obj.iteritems().next()
        return MapType(_infer_type(key), _infer_type(value), True)
    elif isinstance(obj, (list, array)):
        if not obj:
            raise ValueError("Can not infer type for empty list/array")
        # Only the first element is sampled.
        return ArrayType(_infer_type(obj[0]), True)
    else:
        try:
            return _infer_schema(obj)
        except ValueError:
            raise ValueError("not supported type: %s" % type(obj))
def _infer_schema(row):
    """Infer the schema from dict/namedtuple/object.

    Produces a StructType with one nullable field per attribute/key; dict
    and plain-object fields are emitted in sorted-name order, tuple-based
    rows keep their declared field order.
    """
    if isinstance(row, dict):
        items = sorted(row.items())

    elif isinstance(row, tuple):
        if hasattr(row, "_fields"):  # namedtuple
            items = zip(row._fields, tuple(row))
        elif hasattr(row, "__FIELDS__"):  # Row generated by _create_cls
            items = zip(row.__FIELDS__, tuple(row))
        elif all(isinstance(x, tuple) and len(x) == 2 for x in row):
            # Already a sequence of (name, value) pairs.
            items = row
        else:
            raise ValueError("Can't infer schema from tuple")

    elif hasattr(row, "__dict__"):  # object
        items = sorted(row.__dict__.items())

    else:
        raise ValueError("Can not infer schema for type: %s" % type(row))

    fields = [StructField(k, _infer_type(v), True) for k, v in items]
    return StructType(fields)
def _create_converter(obj, dataType):
    """Create an converter to drop the names of fields in obj.

    Returns a callable that turns a row shaped like `obj` into plain
    (nested) tuples matching `dataType`, discarding field names. `obj` is
    only a *sample* used to decide the access strategy.
    """
    if isinstance(dataType, ArrayType):
        # Sample the first element for the per-element converter.
        conv = _create_converter(obj[0], dataType.elementType)
        return lambda row: map(conv, row)

    elif isinstance(dataType, MapType):
        # Sample one value; keys are kept as-is.
        value = obj.values()[0]
        conv = _create_converter(value, dataType.valueType)
        return lambda row: dict((k, conv(v)) for k, v in row.iteritems())

    elif not isinstance(dataType, StructType):
        # Primitive leaf: identity.
        return lambda x: x

    # dataType must be StructType
    names = [f.name for f in dataType.fields]

    if isinstance(obj, dict):
        conv = lambda o: tuple(o.get(n) for n in names)

    elif isinstance(obj, tuple):
        if hasattr(obj, "_fields"):  # namedtuple
            conv = tuple
        elif hasattr(obj, "__FIELDS__"):  # Row
            conv = tuple
        elif all(isinstance(x, tuple) and len(x) == 2 for x in obj):
            conv = lambda o: tuple(v for k, v in o)
        else:
            raise ValueError("unexpected tuple")

    elif hasattr(obj, "__dict__"):  # object
        conv = lambda o: [o.__dict__.get(n, None) for n in names]
    # NOTE(review): if obj is none of the above, `conv` is unbound and the
    # next line raises NameError — presumably unreachable after _infer_schema,
    # but worth confirming.

    if all(isinstance(f.dataType, PrimitiveType) for f in dataType.fields):
        return conv

    # Some fields are nested; build one converter per field recursively.
    row = conv(obj)
    convs = [_create_converter(v, f.dataType)
             for v, f in zip(row, dataType.fields)]

    def nested_conv(row):
        return tuple(f(v) for f, v in zip(convs, conv(row)))

    return nested_conv
def _drop_schema(rows, schema):
    """Drop all the names of fields, turning each row into a plain tuple.

    The converter is built once from the first row and reused for the rest,
    so all rows must share the first row's shape.
    """
    iterator = iter(rows)
    row = iterator.next()
    converter = _create_converter(row, schema)
    yield converter(row)
    for i in iterator:
        yield converter(i)
# Matching close-bracket for each open-bracket used in schema abstracts.
_BRACKETS = {'(': ')', '[': ']', '{': '}'}
def _split_schema_abstract(s):
    """
    split the schema abstract into fields

    >>> _split_schema_abstract("a b c")
    ['a', 'b', 'c']
    >>> _split_schema_abstract("a(a b)")
    ['a(a b)']
    >>> _split_schema_abstract("a b[] c{a b}")
    ['a', 'b[]', 'c{a b}']
    >>> _split_schema_abstract(" ")
    []
    """
    fields = []
    current = ''
    # Stack of currently-open brackets; spaces inside brackets do not split.
    brackets = []
    for ch in s:
        if ch == ' ' and not brackets:
            if current:
                fields.append(current)
                current = ''
        else:
            current += ch
            if ch in _BRACKETS:
                brackets.append(ch)
            elif ch in _BRACKETS.values():
                if not brackets or ch != _BRACKETS[brackets.pop()]:
                    raise ValueError("unexpected " + ch)

    if brackets:
        raise ValueError("brackets not closed: %s" % brackets)
    if current:
        fields.append(current)
    return fields
def _parse_field_abstract(s):
    """
    Parse a field in schema abstract

    >>> _parse_field_abstract("a")
    StructField(a,None,true)
    >>> _parse_field_abstract("b(c d)")
    StructField(b,StructType(...c,None,true),StructField(d...
    >>> _parse_field_abstract("a[]")
    StructField(a,ArrayType(None,true),true)
    >>> _parse_field_abstract("a{[]}")
    StructField(a,MapType(None,ArrayType(None,true),true),true)
    """
    if set(_BRACKETS.keys()) & set(s):
        # Name runs up to the first bracket; the rest describes the type.
        idx = min((s.index(c) for c in _BRACKETS if c in s))
        name = s[:idx]
        return StructField(name, _parse_schema_abstract(s[idx:]), True)
    else:
        # Bare name: type left as None, to be filled by _infer_schema_type.
        return StructField(s, None, True)
def _parse_schema_abstract(s):
    """
    parse abstract into schema

    >>> _parse_schema_abstract("a b  c")
    StructType...a...b...c...
    >>> _parse_schema_abstract("a[b c] b{}")
    StructType...a,ArrayType...b...c...b,MapType...
    >>> _parse_schema_abstract("c{} d{a b}")
    StructType...c,MapType...d,MapType...a...b...
    >>> _parse_schema_abstract("a b(t)").fields[1]
    StructField(b,StructType(List(StructField(t,None,true))),true)
    """
    s = s.strip()
    if not s:
        return

    elif s.startswith('('):
        # Parenthesized group: same as its contents.
        return _parse_schema_abstract(s[1:-1])

    elif s.startswith('['):
        # "[...]": array of the inner abstract.
        return ArrayType(_parse_schema_abstract(s[1:-1]), True)

    elif s.startswith('{'):
        # "{...}": map whose key type is inferred later (None placeholder).
        return MapType(None, _parse_schema_abstract(s[1:-1]))

    parts = _split_schema_abstract(s)
    fields = [_parse_field_abstract(p) for p in parts]
    return StructType(fields)
def _infer_schema_type(obj, dataType):
    """
    Fill the dataType with types infered from obj

    Walks an abstract schema (with None placeholders) alongside a sample
    row and replaces each placeholder with the type inferred from the
    corresponding value.

    >>> schema = _parse_schema_abstract("a b c")
    >>> row = (1, 1.0, "str")
    >>> _infer_schema_type(row, schema)
    StructType...IntegerType...DoubleType...StringType...
    >>> row = [[1], {"key": (1, 2.0)}]
    >>> schema = _parse_schema_abstract("a[] b{c d}")
    >>> _infer_schema_type(row, schema)
    StructType...a,ArrayType...b,MapType(StringType,...c,IntegerType...
    """
    if dataType is None:
        return _infer_type(obj)

    if not obj:
        raise ValueError("Can not infer type from empty value")

    if isinstance(dataType, ArrayType):
        # Element type is inferred from the first element only.
        eType = _infer_schema_type(obj[0], dataType.elementType)
        return ArrayType(eType, True)

    elif isinstance(dataType, MapType):
        # One key/value pair is sampled; keys always get an inferred type.
        k, v = obj.iteritems().next()
        return MapType(_infer_type(k),
                       _infer_schema_type(v, dataType.valueType))

    elif isinstance(dataType, StructType):
        fs = dataType.fields
        assert len(fs) == len(obj), \
            "Obj(%s) have different length with fields(%s)" % (obj, fs)
        fields = [StructField(f.name, _infer_schema_type(o, f.dataType), True)
                  for o, f in zip(obj, fs)]
        return StructType(fields)

    else:
        raise ValueError("Unexpected dataType: %s" % dataType)
# DataType class -> tuple of Python types accepted for it by _verify_type.
# Exact type membership is checked (no subclasses), since subclasses of
# these types can not be deserialized in the JVM.
_acceptable_types = {
    BooleanType: (bool,),
    ByteType: (int, long),
    ShortType: (int, long),
    IntegerType: (int, long),
    LongType: (int, long),
    FloatType: (float,),
    DoubleType: (float,),
    DecimalType: (decimal.Decimal,),
    StringType: (str, unicode),
    BinaryType: (bytearray,),
    TimestampType: (datetime.datetime,),
    ArrayType: (list, tuple, array),
    MapType: (dict,),
    StructType: (tuple, list),
}
def _verify_type(obj, dataType):
    """
    Verify the type of obj against dataType, raise an exception if
    they do not match.

    :raises TypeError: when obj's Python type is not acceptable for dataType.
    :raises ValueError: when a struct row's length differs from its schema.

    >>> _verify_type(None, StructType([]))
    >>> _verify_type("", StringType())
    >>> _verify_type(0, IntegerType())
    >>> _verify_type(range(3), ArrayType(ShortType()))
    >>> _verify_type(set(), ArrayType(StringType())) # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
        ...
    TypeError:...
    >>> _verify_type({}, MapType(StringType(), IntegerType()))
    >>> _verify_type((), StructType([]))
    >>> _verify_type([], StructType([]))
    >>> _verify_type([1], StructType([])) # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
        ...
    ValueError:...
    """
    # all objects are nullable
    if obj is None:
        return

    _type = type(dataType)
    # FIX: corrected "unkown" -> "unknown" in the assertion message.
    assert _type in _acceptable_types, "unknown datatype: %s" % dataType

    # subclass of them can not be deserialized in JVM
    if type(obj) not in _acceptable_types[_type]:
        # FIX: corrected "abject" -> "object" in the error message.
        raise TypeError("%s can not accept object in type %s"
                        % (dataType, type(obj)))

    if isinstance(dataType, ArrayType):
        for i in obj:
            _verify_type(i, dataType.elementType)

    elif isinstance(dataType, MapType):
        for k, v in obj.iteritems():
            _verify_type(k, dataType.keyType)
            _verify_type(v, dataType.valueType)

    elif isinstance(dataType, StructType):
        if len(obj) != len(dataType.fields):
            # FIX: added the missing space between the two string fragments
            # (previously rendered as "...match withlength of fields...").
            raise ValueError("Length of object (%d) does not match with "
                            "length of fields (%d)" % (len(obj), len(dataType.fields)))
        for v, f in zip(obj, dataType.fields):
            _verify_type(v, f.dataType)
# Cache of generated Row classes, keyed both by DataType and by id(DataType)
# (see _restore_object for the two-level lookup).
_cached_cls = {}
def _restore_object(dataType, obj):
    """ Restore object during unpickling. """
    # use id(dataType) as key to speed up lookup in dict
    # Because of batched pickling, dataType will be the
    # same object in most cases.
    k = id(dataType)
    cls = _cached_cls.get(k)
    if cls is None:
        # use dataType as key to avoid create multiple class
        cls = _cached_cls.get(dataType)
        if cls is None:
            cls = _create_cls(dataType)
            _cached_cls[dataType] = cls
        # Also memoize under id() so the fast path hits next time.
        _cached_cls[k] = cls
    return cls(obj)
def _create_object(cls, v):
""" Create an customized object with class `cls`. """
return cls(v) if v is not None else v
def _create_getter(dt, i):
    """ Create a getter for item `i` with schema.

    The returned getter lazily wraps the i-th tuple element in the Row
    class generated for `dt` on each access.
    """
    cls = _create_cls(dt)

    def getter(self):
        return _create_object(cls, self[i])

    return getter
def _has_struct(dt):
    """Return whether `dt` is or has StructType in it"""
    if isinstance(dt, StructType):
        return True
    if isinstance(dt, ArrayType):
        # Structs may be nested inside array elements.
        return _has_struct(dt.elementType)
    if isinstance(dt, MapType):
        # Only map *values* are checked; keys can not hold structs here.
        return _has_struct(dt.valueType)
    return False
def _create_properties(fields):
    """Create properties according to fields.

    Returns a dict of name -> property mapping each field to its tuple
    position, so generated Row classes expose attribute access.
    """
    ps = {}
    for i, f in enumerate(fields):
        name = f.name
        # Dunder names and Python keywords would collide with the class's
        # own attributes / the grammar; warn that they stay position-only.
        if (name.startswith("__") and name.endswith("__")
                or keyword.iskeyword(name)):
            warnings.warn("field name %s can not be accessed in Python,"
                          "use position to access it instead" % name)
        if _has_struct(f.dataType):
            # delay creating object until accessing it
            getter = _create_getter(f.dataType, i)
        else:
            getter = itemgetter(i)
        ps[name] = property(getter)
    return ps
def _create_cls(dataType):
    """
    Create an class by dataType

    The created class is similar to namedtuple, but can have nested schema.

    For ArrayType/MapType it returns a converter function rather than a
    class; for StructType it returns a tuple subclass with one property
    per field.

    >>> schema = _parse_schema_abstract("a b c")
    >>> row = (1, 1.0, "str")
    >>> schema = _infer_schema_type(row, schema)
    >>> obj = _create_cls(schema)(row)
    >>> import pickle
    >>> pickle.loads(pickle.dumps(obj))
    Row(a=1, b=1.0, c='str')

    >>> row = [[1], {"key": (1, 2.0)}]
    >>> schema = _parse_schema_abstract("a[] b{c d}")
    >>> schema = _infer_schema_type(row, schema)
    >>> obj = _create_cls(schema)(row)
    >>> pickle.loads(pickle.dumps(obj))
    Row(a=[1], b={'key': Row(c=1, d=2.0)})
    >>> pickle.loads(pickle.dumps(obj.a))
    [1]
    >>> pickle.loads(pickle.dumps(obj.b))
    {'key': Row(c=1, d=2.0)}
    """
    if isinstance(dataType, ArrayType):
        cls = _create_cls(dataType.elementType)

        def List(l):
            if l is None:
                return
            return [_create_object(cls, v) for v in l]

        return List

    elif isinstance(dataType, MapType):
        cls = _create_cls(dataType.valueType)

        def Dict(d):
            if d is None:
                return
            return dict((k, _create_object(cls, v)) for k, v in d.items())

        return Dict

    elif not isinstance(dataType, StructType):
        raise Exception("unexpected data type: %s" % dataType)

    class Row(tuple):

        """ Row in SchemaRDD """
        __DATATYPE__ = dataType
        __FIELDS__ = tuple(f.name for f in dataType.fields)
        __slots__ = ()

        # create property for fast access
        locals().update(_create_properties(dataType.fields))

        def __repr__(self):
            # call collect __repr__ for nested objects
            return ("Row(%s)" % ", ".join("%s=%r" % (n, getattr(self, n))
                                          for n in self.__FIELDS__))

        def __reduce__(self):
            # Pickle as (schema, values); _restore_object rebuilds the class.
            return (_restore_object, (self.__DATATYPE__, tuple(self)))

    return Row
class SQLContext(object):
"""Main entry point for Spark SQL functionality.
A SQLContext can be used create L{SchemaRDD}, register L{SchemaRDD} as
tables, execute SQL over tables, cache tables, and read parquet files.
"""
def __init__(self, sparkContext, sqlContext=None):
    """Create a new SQLContext.

    @param sparkContext: The SparkContext to wrap.
    @param sqlContext: An optional JVM Scala SQLContext. If set, we do not instatiate a new
    SQLContext in the JVM, instead we make all calls to this object.

    >>> srdd = sqlCtx.inferSchema(rdd)
    >>> sqlCtx.inferSchema(srdd) # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
        ...
    TypeError:...

    >>> bad_rdd = sc.parallelize([1,2,3])
    >>> sqlCtx.inferSchema(bad_rdd) # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
        ...
    ValueError:...

    >>> from datetime import datetime
    >>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1L,
    ...     b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
    ...     time=datetime(2014, 8, 1, 14, 1, 5))])
    >>> srdd = sqlCtx.inferSchema(allTypes)
    >>> srdd.registerTempTable("allTypes")
    >>> sqlCtx.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
    ...            'from allTypes where b and i > 0').collect()
    [Row(c0=2, c1=2.0, c2=False, c3=2, c4=0...8, 1, 14, 1, 5), a=1)]
    >>> srdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time,
    ...                     x.row.a, x.list)).collect()
    [(1, u'string', 1.0, 1, True, ...(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
    """
    self._sc = sparkContext
    self._jsc = self._sc._jsc
    self._jvm = self._sc._jvm
    self._pythonToJava = self._jvm.PythonRDD.pythonToJavaArray
    # Kept lazily; the JVM SQLContext is only created on first access
    # of the _ssql_ctx property when none was supplied here.
    self._scala_SQLContext = sqlContext
@property
def _ssql_ctx(self):
    """Accessor for the JVM Spark SQL context.

    Subclasses can override this property to provide their own
    JVM Contexts.
    """
    # Lazily instantiate (and memoize) the Scala SQLContext.
    if self._scala_SQLContext is None:
        self._scala_SQLContext = self._jvm.SQLContext(self._jsc.sc())
    return self._scala_SQLContext
def registerFunction(self, name, f, returnType=StringType()):
    """Registers a lambda function as a UDF so it can be used in SQL statements.

    In addition to a name and the function itself, the return type can be optionally specified.
    When the return type is not given it default to a string and conversion will automatically
    be done.  For any other return type, the produced object must match the specified type.

    >>> sqlCtx.registerFunction("stringLengthString", lambda x: len(x))
    >>> sqlCtx.sql("SELECT stringLengthString('test')").collect()
    [Row(c0=u'4')]
    >>> sqlCtx.registerFunction("stringLengthInt", lambda x: len(x), IntegerType())
    >>> sqlCtx.sql("SELECT stringLengthInt('test')").collect()
    [Row(c0=4)]
    """
    # Adapt f to the (split, iterator) worker-function shape and pair it
    # with input/output serializers, mirroring PythonRDD's command layout.
    func = lambda _, it: imap(lambda x: f(*x), it)
    command = (func,
               BatchedSerializer(PickleSerializer(), 1024),
               BatchedSerializer(PickleSerializer(), 1024))
    ser = CloudPickleSerializer()
    pickled_command = ser.dumps(command)
    # BUG FIX: compare the *length* of the pickled command to 1MB — the
    # original compared the bytes object itself to an int, which in
    # Python 2 always evaluated True and broadcast every UDF.
    if len(pickled_command) > (1 << 20):  # 1M
        broadcast = self._sc.broadcast(pickled_command)
        pickled_command = ser.dumps(broadcast)
    broadcast_vars = ListConverter().convert(
        [x._jbroadcast for x in self._sc._pickled_broadcast_vars],
        self._sc._gateway._gateway_client)
    self._sc._pickled_broadcast_vars.clear()
    env = MapConverter().convert(self._sc.environment,
                                 self._sc._gateway._gateway_client)
    includes = ListConverter().convert(self._sc._python_includes,
                                       self._sc._gateway._gateway_client)
    self._ssql_ctx.registerPython(name,
                                  bytearray(pickled_command),
                                  env,
                                  includes,
                                  self._sc.pythonExec,
                                  broadcast_vars,
                                  self._sc._javaAccumulator,
                                  str(returnType))
def inferSchema(self, rdd):
    """Infer and apply a schema to an RDD of L{Row}.

    We peek at the first row of the RDD to determine the fields' names
    and types. Nested collections are supported, which include array,
    dict, list, Row, tuple, namedtuple, or object.

    All the rows in `rdd` should have the same type with the first one,
    or it will cause runtime exceptions.

    Each row could be L{pyspark.sql.Row} object or namedtuple or objects,
    using dict is deprecated.

    >>> rdd = sc.parallelize(
    ...     [Row(field1=1, field2="row1"),
    ...      Row(field1=2, field2="row2"),
    ...      Row(field1=3, field2="row3")])
    >>> srdd = sqlCtx.inferSchema(rdd)
    >>> srdd.collect()[0]
    Row(field1=1, field2=u'row1')

    >>> NestedRow = Row("f1", "f2")
    >>> nestedRdd1 = sc.parallelize([
    ...     NestedRow(array('i', [1, 2]), {"row1": 1.0}),
    ...     NestedRow(array('i', [2, 3]), {"row2": 2.0})])
    >>> srdd = sqlCtx.inferSchema(nestedRdd1)
    >>> srdd.collect()
    [Row(f1=[1, 2], f2={u'row1': 1.0}), ..., f2={u'row2': 2.0})]

    >>> nestedRdd2 = sc.parallelize([
    ...     NestedRow([[1, 2], [2, 3]], [1, 2]),
    ...     NestedRow([[2, 3], [3, 4]], [2, 3])])
    >>> srdd = sqlCtx.inferSchema(nestedRdd2)
    >>> srdd.collect()
    [Row(f1=[[1, 2], [2, 3]], f2=[1, 2]), ..., f2=[2, 3])]
    """
    if isinstance(rdd, SchemaRDD):
        raise TypeError("Cannot apply schema to SchemaRDD")

    # Schema is inferred from the first row only; all other rows must
    # share its structure.
    first = rdd.first()
    if not first:
        raise ValueError("The first row in RDD is empty, "
                         "can not infer schema")
    if type(first) is dict:
        warnings.warn("Using RDD of dict to inferSchema is deprecated,"
                      "please use pyspark.sql.Row instead")

    schema = _infer_schema(first)
    # Strip field names from every row (plain tuples) before applySchema.
    rdd = rdd.mapPartitions(lambda rows: _drop_schema(rows, schema))
    return self.applySchema(rdd, schema)
    def applySchema(self, rdd, schema):
        """
        Applies the given schema to the given RDD of L{tuple} or L{list}.

        These tuples or lists can contain complex nested structures like
        lists, maps or nested rows.

        The schema should be a StructType.

        It is important that the schema matches the types of the objects
        in each row or exceptions could be thrown at runtime.

        >>> rdd2 = sc.parallelize([(1, "row1"), (2, "row2"), (3, "row3")])
        >>> schema = StructType([StructField("field1", IntegerType(), False),
        ...     StructField("field2", StringType(), False)])
        >>> srdd = sqlCtx.applySchema(rdd2, schema)
        >>> sqlCtx.registerRDDAsTable(srdd, "table1")
        >>> srdd2 = sqlCtx.sql("SELECT * from table1")
        >>> srdd2.collect()
        [Row(field1=1, field2=u'row1'),..., Row(field1=3, field2=u'row3')]

        >>> from datetime import datetime
        >>> rdd = sc.parallelize([(127, -128L, -32768, 32767, 2147483647L, 1.0,
        ...     datetime(2010, 1, 1, 1, 1, 1),
        ...     {"a": 1}, (2,), [1, 2, 3], None)])
        >>> schema = StructType([
        ...     StructField("byte1", ByteType(), False),
        ...     StructField("byte2", ByteType(), False),
        ...     StructField("short1", ShortType(), False),
        ...     StructField("short2", ShortType(), False),
        ...     StructField("int", IntegerType(), False),
        ...     StructField("float", FloatType(), False),
        ...     StructField("time", TimestampType(), False),
        ...     StructField("map",
        ...         MapType(StringType(), IntegerType(), False), False),
        ...     StructField("struct",
        ...         StructType([StructField("b", ShortType(), False)]), False),
        ...     StructField("list", ArrayType(ByteType(), False), False),
        ...     StructField("null", DoubleType(), True)])
        >>> srdd = sqlCtx.applySchema(rdd, schema)
        >>> results = srdd.map(
        ...     lambda x: (x.byte1, x.byte2, x.short1, x.short2, x.int, x.float, x.time,
        ...         x.map["a"], x.struct.b, x.list, x.null))
        >>> results.collect()[0]
        (127, -128, -32768, 32767, 2147483647, 1.0, ...(2010, 1, 1, 1, 1, 1), 1, 2, [1, 2, 3], None)

        >>> srdd.registerTempTable("table2")
        >>> sqlCtx.sql(
        ...   "SELECT byte1 - 1 AS byte1, byte2 + 1 AS byte2, " +
        ...     "short1 + 1 AS short1, short2 - 1 AS short2, int - 1 AS int, " +
        ...     "float + 1.5 as float FROM table2").collect()
        [Row(byte1=126, byte2=-127, short1=-32767, short2=32766, int=2147483646, float=2.5)]

        >>> rdd = sc.parallelize([(127, -32768, 1.0,
        ...     datetime(2010, 1, 1, 1, 1, 1),
        ...     {"a": 1}, (2,), [1, 2, 3])])
        >>> abstract = "byte short float time map{} struct(b) list[]"
        >>> schema = _parse_schema_abstract(abstract)
        >>> typedSchema = _infer_schema_type(rdd.first(), schema)
        >>> srdd = sqlCtx.applySchema(rdd, typedSchema)
        >>> srdd.collect()
        [Row(byte=127, short=-32768, float=1.0, time=..., list=[1, 2, 3])]
        """
        # A SchemaRDD already carries a schema; re-applying one is an error.
        if isinstance(rdd, SchemaRDD):
            raise TypeError("Cannot apply schema to SchemaRDD")
        if not isinstance(schema, StructType):
            raise TypeError("schema should be StructType")
        # Sample a handful of rows up front so obvious schema mismatches
        # fail fast here instead of deep inside the JVM job.
        rows = rdd.take(10)
        # Row objects carry field names; convert to plain tuples so that
        # positional order (not name) is what lines up with the schema.
        if rows and isinstance(rows[0], tuple) and rows[0].__class__.__name__ == 'Row':
            rdd = rdd.map(tuple)
            rows = rdd.take(10)
        for row in rows:
            _verify_type(row, schema)
        # The JVM side must know whether the pickled stream is batched in
        # order to deserialize it correctly.
        batched = isinstance(rdd._jrdd_deserializer, BatchedSerializer)
        jrdd = self._pythonToJava(rdd._jrdd, batched)
        # The schema is shipped to Scala as its string representation and
        # re-parsed there.
        srdd = self._ssql_ctx.applySchemaToPythonRDD(jrdd.rdd(), str(schema))
        return SchemaRDD(srdd.toJavaSchemaRDD(), self)
def registerRDDAsTable(self, rdd, tableName):
"""Registers the given RDD as a temporary table in the catalog.
Temporary tables exist only during the lifetime of this instance of
SQLContext.
>>> srdd = sqlCtx.inferSchema(rdd)
>>> sqlCtx.registerRDDAsTable(srdd, "table1")
"""
if (rdd.__class__ is SchemaRDD):
srdd = rdd._jschema_rdd.baseSchemaRDD()
self._ssql_ctx.registerRDDAsTable(srdd, tableName)
else:
raise ValueError("Can only register SchemaRDD as table")
def parquetFile(self, path):
"""Loads a Parquet file, returning the result as a L{SchemaRDD}.
>>> import tempfile, shutil
>>> parquetFile = tempfile.mkdtemp()
>>> shutil.rmtree(parquetFile)
>>> srdd = sqlCtx.inferSchema(rdd)
>>> srdd.saveAsParquetFile(parquetFile)
>>> srdd2 = sqlCtx.parquetFile(parquetFile)
>>> sorted(srdd.collect()) == sorted(srdd2.collect())
True
"""
jschema_rdd = self._ssql_ctx.parquetFile(path).toJavaSchemaRDD()
return SchemaRDD(jschema_rdd, self)
def jsonFile(self, path, schema=None):
"""
Loads a text file storing one JSON object per line as a
L{SchemaRDD}.
If the schema is provided, applies the given schema to this
JSON dataset.
Otherwise, it goes through the entire dataset once to determine
the schema.
>>> import tempfile, shutil
>>> jsonFile = tempfile.mkdtemp()
>>> shutil.rmtree(jsonFile)
>>> ofn = open(jsonFile, 'w')
>>> for json in jsonStrings:
... print>>ofn, json
>>> ofn.close()
>>> srdd1 = sqlCtx.jsonFile(jsonFile)
>>> sqlCtx.registerRDDAsTable(srdd1, "table1")
>>> srdd2 = sqlCtx.sql(
... "SELECT field1 AS f1, field2 as f2, field3 as f3, "
... "field6 as f4 from table1")
>>> for r in srdd2.collect():
... print r
Row(f1=1, f2=u'row1', f3=Row(field4=11, field5=None), f4=None)
Row(f1=2, f2=None, f3=Row(field4=22,..., f4=[Row(field7=u'row2')])
Row(f1=None, f2=u'row3', f3=Row(field4=33, field5=[]), f4=None)
>>> srdd3 = sqlCtx.jsonFile(jsonFile, srdd1.schema())
>>> sqlCtx.registerRDDAsTable(srdd3, "table2")
>>> srdd4 = sqlCtx.sql(
... "SELECT field1 AS f1, field2 as f2, field3 as f3, "
... "field6 as f4 from table2")
>>> for r in srdd4.collect():
... print r
Row(f1=1, f2=u'row1', f3=Row(field4=11, field5=None), f4=None)
Row(f1=2, f2=None, f3=Row(field4=22,..., f4=[Row(field7=u'row2')])
Row(f1=None, f2=u'row3', f3=Row(field4=33, field5=[]), f4=None)
>>> schema = StructType([
... StructField("field2", StringType(), True),
... StructField("field3",
... StructType([
... StructField("field5",
... ArrayType(IntegerType(), False), True)]), False)])
>>> srdd5 = sqlCtx.jsonFile(jsonFile, schema)
>>> sqlCtx.registerRDDAsTable(srdd5, "table3")
>>> srdd6 = sqlCtx.sql(
... "SELECT field2 AS f1, field3.field5 as f2, "
... "field3.field5[0] as f3 from table3")
>>> srdd6.collect()
[Row(f1=u'row1', f2=None, f3=None)...Row(f1=u'row3', f2=[], f3=None)]
"""
if schema is None:
srdd = self._ssql_ctx.jsonFile(path)
else:
scala_datatype = self._ssql_ctx.parseDataType(str(schema))
srdd = self._ssql_ctx.jsonFile(path, scala_datatype)
return SchemaRDD(srdd.toJavaSchemaRDD(), self)
    def jsonRDD(self, rdd, schema=None):
        """Loads an RDD storing one JSON object per string as a L{SchemaRDD}.

        If the schema is provided, applies the given schema to this
        JSON dataset.

        Otherwise, it goes through the entire dataset once to determine
        the schema.

        >>> srdd1 = sqlCtx.jsonRDD(json)
        >>> sqlCtx.registerRDDAsTable(srdd1, "table1")
        >>> srdd2 = sqlCtx.sql(
        ...   "SELECT field1 AS f1, field2 as f2, field3 as f3, "
        ...   "field6 as f4 from table1")
        >>> for r in srdd2.collect():
        ...     print r
        Row(f1=1, f2=u'row1', f3=Row(field4=11, field5=None), f4=None)
        Row(f1=2, f2=None, f3=Row(field4=22..., f4=[Row(field7=u'row2')])
        Row(f1=None, f2=u'row3', f3=Row(field4=33, field5=[]), f4=None)
        >>> srdd3 = sqlCtx.jsonRDD(json, srdd1.schema())
        >>> sqlCtx.registerRDDAsTable(srdd3, "table2")
        >>> srdd4 = sqlCtx.sql(
        ...   "SELECT field1 AS f1, field2 as f2, field3 as f3, "
        ...   "field6 as f4 from table2")
        >>> for r in srdd4.collect():
        ...     print r
        Row(f1=1, f2=u'row1', f3=Row(field4=11, field5=None), f4=None)
        Row(f1=2, f2=None, f3=Row(field4=22..., f4=[Row(field7=u'row2')])
        Row(f1=None, f2=u'row3', f3=Row(field4=33, field5=[]), f4=None)
        >>> schema = StructType([
        ...     StructField("field2", StringType(), True),
        ...     StructField("field3",
        ...         StructType([
        ...             StructField("field5",
        ...                 ArrayType(IntegerType(), False), True)]), False)])
        >>> srdd5 = sqlCtx.jsonRDD(json, schema)
        >>> sqlCtx.registerRDDAsTable(srdd5, "table3")
        >>> srdd6 = sqlCtx.sql(
        ...   "SELECT field2 AS f1, field3.field5 as f2, "
        ...   "field3.field5[0] as f3 from table3")
        >>> srdd6.collect()
        [Row(f1=u'row1', f2=None,...Row(f1=u'row3', f2=[], f3=None)]

        >>> sqlCtx.jsonRDD(sc.parallelize(['{}',
        ...         '{"key0": {"key1": "value1"}}'])).collect()
        [Row(key0=None), Row(key0=Row(key1=u'value1'))]
        >>> sqlCtx.jsonRDD(sc.parallelize(['{"key0": null}',
        ...         '{"key0": {"key1": "value1"}}'])).collect()
        [Row(key0=None), Row(key0=Row(key1=u'value1'))]
        """

        def func(iterator):
            # Normalize every element to a utf-8 encoded byte string, which
            # is what the JVM-side BytesToString converter expects.
            for x in iterator:
                if not isinstance(x, basestring):
                    x = unicode(x)
                if isinstance(x, unicode):
                    x = x.encode("utf-8")
                yield x
        keyed = rdd.mapPartitions(func)
        # Hand raw bytes straight to Java without Python-side pickling.
        keyed._bypass_serializer = True
        jrdd = keyed._jrdd.map(self._jvm.BytesToString())
        if schema is None:
            # No schema supplied: let the JVM infer it from the data.
            srdd = self._ssql_ctx.jsonRDD(jrdd.rdd())
        else:
            # Ship the schema to Scala as a string for re-parsing.
            scala_datatype = self._ssql_ctx.parseDataType(str(schema))
            srdd = self._ssql_ctx.jsonRDD(jrdd.rdd(), scala_datatype)
        return SchemaRDD(srdd.toJavaSchemaRDD(), self)
def sql(self, sqlQuery):
"""Return a L{SchemaRDD} representing the result of the given query.
>>> srdd = sqlCtx.inferSchema(rdd)
>>> sqlCtx.registerRDDAsTable(srdd, "table1")
>>> srdd2 = sqlCtx.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> srdd2.collect()
[Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
"""
return SchemaRDD(self._ssql_ctx.sql(sqlQuery).toJavaSchemaRDD(), self)
def table(self, tableName):
"""Returns the specified table as a L{SchemaRDD}.
>>> srdd = sqlCtx.inferSchema(rdd)
>>> sqlCtx.registerRDDAsTable(srdd, "table1")
>>> srdd2 = sqlCtx.table("table1")
>>> sorted(srdd.collect()) == sorted(srdd2.collect())
True
"""
return SchemaRDD(self._ssql_ctx.table(tableName).toJavaSchemaRDD(), self)
    def cacheTable(self, tableName):
        """Caches the specified table in-memory.

        @param tableName: name of a table already registered in the catalog.
        """
        self._ssql_ctx.cacheTable(tableName)
    def uncacheTable(self, tableName):
        """Removes the specified table from the in-memory cache.

        @param tableName: name of a table previously cached via cacheTable().
        """
        self._ssql_ctx.uncacheTable(tableName)
class HiveContext(SQLContext):

    """A variant of Spark SQL that integrates with data stored in Hive.

    Configuration for Hive is read from hive-site.xml on the classpath.
    It supports running both SQL and HiveQL commands.
    """

    def __init__(self, sparkContext, hiveContext=None):
        """Create a new HiveContext.

        @param sparkContext: The SparkContext to wrap.
        @param hiveContext: An optional JVM Scala HiveContext. If set, we do not instantiate a new
        HiveContext in the JVM, instead we make all calls to this object.
        """
        SQLContext.__init__(self, sparkContext)

        if hiveContext:
            self._scala_HiveContext = hiveContext

    @property
    def _ssql_ctx(self):
        # Lazily create (and cache) the JVM HiveContext; creation can fail
        # with a Py4JError when Spark was built without Hive support.
        try:
            if not hasattr(self, '_scala_HiveContext'):
                self._scala_HiveContext = self._get_hive_ctx()
            return self._scala_HiveContext
        except Py4JError as e:
            raise Exception("You must build Spark with Hive. "
                            "Export 'SPARK_HIVE=true' and run "
                            "sbt/sbt assembly", e)

    def _get_hive_ctx(self):
        # Overridden by subclasses to supply alternate JVM Hive contexts.
        return self._jvm.HiveContext(self._jsc.sc())

    def hiveql(self, hqlQuery):
        """
        DEPRECATED: Use sql()
        """
        # Fixed: the two adjacent literals previously joined into
        # "...HiveQL bydefault..." because a trailing space was missing.
        warnings.warn("hiveql() is deprecated as the sql function now parses using HiveQL by "
                      "default. The SQL dialect for parsing can be set using 'spark.sql.dialect'",
                      DeprecationWarning)
        return SchemaRDD(self._ssql_ctx.hiveql(hqlQuery).toJavaSchemaRDD(), self)

    def hql(self, hqlQuery):
        """
        DEPRECATED: Use sql()
        """
        # Same missing-space fix as in hiveql() above.
        warnings.warn("hql() is deprecated as the sql function now parses using HiveQL by "
                      "default. The SQL dialect for parsing can be set using 'spark.sql.dialect'",
                      DeprecationWarning)
        return self.hiveql(hqlQuery)
class LocalHiveContext(HiveContext):

    """Starts up an instance of hive where metadata is stored locally.

    An in-process metastore is created with data stored in ./metadata.
    Warehouse data is stored in ./warehouse.

    >>> import os
    >>> hiveCtx = LocalHiveContext(sc)
    >>> try:
    ...     supress = hiveCtx.sql("DROP TABLE src")
    ... except Exception:
    ...     pass
    >>> kv1 = os.path.join(os.environ["SPARK_HOME"],
    ...        'examples/src/main/resources/kv1.txt')
    >>> supress = hiveCtx.sql(
    ...     "CREATE TABLE IF NOT EXISTS src (key INT, value STRING)")
    >>> supress = hiveCtx.sql("LOAD DATA LOCAL INPATH '%s' INTO TABLE src"
    ...        % kv1)
    >>> results = hiveCtx.sql("FROM src SELECT value"
    ...      ).map(lambda r: int(r.value.split('_')[1]))
    >>> num = results.count()
    >>> reduce_sum = results.reduce(lambda x, y: x + y)
    >>> num
    500
    >>> reduce_sum
    130091
    """

    def __init__(self, sparkContext, sqlContext=None):
        HiveContext.__init__(self, sparkContext, sqlContext)
        # Kept for backwards compatibility only; prefer HiveContext.
        warnings.warn("LocalHiveContext is deprecated. "
                      "Use HiveContext instead.", DeprecationWarning)

    def _get_hive_ctx(self):
        # Use the JVM LocalHiveContext, which stores its metastore locally.
        return self._jvm.LocalHiveContext(self._jsc.sc())
class TestHiveContext(HiveContext):

    """HiveContext backed by the JVM TestHiveContext, used for testing."""

    def _get_hive_ctx(self):
        return self._jvm.TestHiveContext(self._jsc.sc())
def _create_row(fields, values):
    """Build a Row from parallel sequences of field names and values."""
    new_row = Row(*values)
    new_row.__FIELDS__ = fields
    return new_row
class Row(tuple):

    """
    A row in L{SchemaRDD}. The fields in it can be accessed like attributes.

    Row can be used to create a row object by using named arguments,
    the fields will be sorted by names.

    >>> row = Row(name="Alice", age=11)
    >>> row
    Row(age=11, name='Alice')
    >>> row.name, row.age
    ('Alice', 11)

    Row also can be used to create another Row like class, then it
    could be used to create Row objects, such as

    >>> Person = Row("name", "age")
    >>> Person
    <Row(name, age)>
    >>> Person("Alice", 11)
    Row(name='Alice', age=11)
    """

    def __new__(self, *args, **kwargs):
        if args and kwargs:
            raise ValueError("Can not use both args "
                             "and kwargs to create Row")
        if args:
            # Positional args create a Row-like template of field names.
            return tuple.__new__(self, args)

        elif kwargs:
            # Keyword args create a concrete row; fields are sorted by name
            # so the tuple layout is deterministic.
            names = sorted(kwargs.keys())
            values = tuple(kwargs[n] for n in names)
            row = tuple.__new__(self, values)
            row.__FIELDS__ = names
            return row

        else:
            raise ValueError("No args or kwargs")

    def __call__(self, *args):
        """create new Row object"""
        return _create_row(self, args)

    def __getattr__(self, item):
        # Dunder lookups (e.g. __FIELDS__ when unset) must fail fast.
        if item.startswith("__"):
            raise AttributeError(item)
        try:
            idx = self.__FIELDS__.index(item)
            return self[idx]
        except (IndexError, ValueError):
            # BUG FIX: list.index raises ValueError (not IndexError) when
            # the field name is absent.  Catching both ensures an unknown
            # field surfaces as AttributeError, so hasattr()/getattr()
            # with a default behave correctly.
            raise AttributeError(item)

    def __reduce__(self):
        # Preserve field names across pickling; a plain template row
        # (no __FIELDS__) pickles like an ordinary tuple.
        if hasattr(self, "__FIELDS__"):
            return (_create_row, (self.__FIELDS__, tuple(self)))
        else:
            return tuple.__reduce__(self)

    def __repr__(self):
        if hasattr(self, "__FIELDS__"):
            return "Row(%s)" % ", ".join("%s=%r" % (k, v)
                                         for k, v in zip(self.__FIELDS__, self))
        else:
            # Template row: elements are the field names themselves.
            return "<Row(%s)>" % ", ".join(self)
def inherit_doc(cls):
    """Class decorator: fill in missing docstrings from base classes.

    Every public attribute of *cls* lacking a __doc__ receives the
    docstring of the first base class that provides one for the same
    name.  Returns *cls*, so it can be applied with @inherit_doc.
    """
    for attr_name, attr in vars(cls).items():
        if attr_name.startswith("_") or attr.__doc__:
            continue
        for base in cls.__bases__:
            candidate = getattr(base, attr_name, None)
            if candidate and getattr(candidate, "__doc__", None):
                attr.__doc__ = candidate.__doc__
                break
    return cls
@inherit_doc
class SchemaRDD(RDD):

    """An RDD of L{Row} objects that has an associated schema.

    The underlying JVM object is a SchemaRDD, not a PythonRDD, so we can
    utilize the relational query api exposed by Spark SQL.

    For normal L{pyspark.rdd.RDD} operations (map, count, etc.) the
    L{SchemaRDD} is not operated on directly, as it's underlying
    implementation is an RDD composed of Java objects. Instead it is
    converted to a PythonRDD in the JVM, on which Python operations can
    be done.

    This class receives raw tuples from Java but assigns a class to it in
    all its data-collection methods (mapPartitionsWithIndex, collect, take,
    etc) so that PySpark sees them as Row objects with named fields.
    """

    def __init__(self, jschema_rdd, sql_ctx):
        self.sql_ctx = sql_ctx
        self._sc = sql_ctx._sc
        # Sanity check: only the Java-facing wrapper class is accepted here,
        # not the underlying Scala SchemaRDD.
        clsName = jschema_rdd.getClass().getName()
        assert clsName.endswith("JavaSchemaRDD"), "jschema_rdd must be JavaSchemaRDD"
        self._jschema_rdd = jschema_rdd
        # RDD id is fetched lazily (see id()) since it forces _jrdd creation.
        self._id = None
        self.is_cached = False
        self.is_checkpointed = False
        self.ctx = self.sql_ctx._sc
        # the _jrdd is created by javaToPython(), serialized by pickle
        self._jrdd_deserializer = BatchedSerializer(PickleSerializer())

    @property
    def _jrdd(self):
        """Lazy evaluation of PythonRDD object.

        Only done when a user calls methods defined by the
        L{pyspark.rdd.RDD} super class (map, filter, etc.).
        """
        if not hasattr(self, '_lazy_jrdd'):
            self._lazy_jrdd = self._jschema_rdd.baseSchemaRDD().javaToPython()
        return self._lazy_jrdd

    def id(self):
        # Cache the id; computing it triggers lazy _jrdd creation.
        if self._id is None:
            self._id = self._jrdd.id()
        return self._id

    def limit(self, num):
        """Limit the result count to the number specified.

        >>> srdd = sqlCtx.inferSchema(rdd)
        >>> srdd.limit(2).collect()
        [Row(field1=1, field2=u'row1'), Row(field1=2, field2=u'row2')]
        >>> srdd.limit(0).collect()
        []
        """
        rdd = self._jschema_rdd.baseSchemaRDD().limit(num).toJavaSchemaRDD()
        return SchemaRDD(rdd, self.sql_ctx)

    def saveAsParquetFile(self, path):
        """Save the contents as a Parquet file, preserving the schema.

        Files that are written out using this method can be read back in as
        a SchemaRDD using the L{SQLContext.parquetFile} method.

        >>> import tempfile, shutil
        >>> parquetFile = tempfile.mkdtemp()
        >>> shutil.rmtree(parquetFile)
        >>> srdd = sqlCtx.inferSchema(rdd)
        >>> srdd.saveAsParquetFile(parquetFile)
        >>> srdd2 = sqlCtx.parquetFile(parquetFile)
        >>> sorted(srdd2.collect()) == sorted(srdd.collect())
        True
        """
        self._jschema_rdd.saveAsParquetFile(path)

    def registerTempTable(self, name):
        """Registers this RDD as a temporary table using the given name.

        The lifetime of this temporary table is tied to the L{SQLContext}
        that was used to create this SchemaRDD.

        >>> srdd = sqlCtx.inferSchema(rdd)
        >>> srdd.registerTempTable("test")
        >>> srdd2 = sqlCtx.sql("select * from test")
        >>> sorted(srdd.collect()) == sorted(srdd2.collect())
        True
        """
        self._jschema_rdd.registerTempTable(name)

    def registerAsTable(self, name):
        """DEPRECATED: use registerTempTable() instead"""
        warnings.warn("Use registerTempTable instead of registerAsTable.", DeprecationWarning)
        self.registerTempTable(name)

    def insertInto(self, tableName, overwrite=False):
        """Inserts the contents of this SchemaRDD into the specified table.

        Optionally overwriting any existing data.
        """
        self._jschema_rdd.insertInto(tableName, overwrite)

    def saveAsTable(self, tableName):
        """Creates a new table with the contents of this SchemaRDD."""
        self._jschema_rdd.saveAsTable(tableName)

    def schema(self):
        """Returns the schema of this SchemaRDD (represented by
        a L{StructType})."""
        # The Scala schema is round-tripped through its string form.
        return _parse_datatype_string(self._jschema_rdd.baseSchemaRDD().schema().toString())

    def schemaString(self):
        """Returns the output schema in the tree format."""
        return self._jschema_rdd.schemaString()

    def printSchema(self):
        """Prints out the schema in the tree format."""
        print self.schemaString()

    def count(self):
        """Return the number of elements in this RDD.

        Unlike the base RDD implementation of count, this implementation
        leverages the query optimizer to compute the count on the SchemaRDD,
        which supports features such as filter pushdown.

        >>> srdd = sqlCtx.inferSchema(rdd)
        >>> srdd.count()
        3L
        >>> srdd.count() == srdd.map(lambda x: x).count()
        True
        """
        return self._jschema_rdd.count()

    def collect(self):
        """Return a list that contains all of the rows in this RDD.

        Each object in the list is a Row, the fields can be accessed as
        attributes.

        Unlike the base RDD implementation of collect, this implementation
        leverages the query optimizer to perform a collect on the SchemaRDD,
        which supports features such as filter pushdown.

        >>> srdd = sqlCtx.inferSchema(rdd)
        >>> srdd.collect()
        [Row(field1=1, field2=u'row1'), ..., Row(field1=3, field2=u'row3')]
        """
        # Record the Python call site so the JVM job shows a useful name.
        with SCCallSiteSync(self.context) as css:
            bytesInJava = self._jschema_rdd.baseSchemaRDD().collectToPython().iterator()
        # Wrap each raw tuple in a Row subclass carrying the field names.
        cls = _create_cls(self.schema())
        return map(cls, self._collect_iterator_through_file(bytesInJava))

    def take(self, num):
        """Take the first num rows of the RDD.

        Each object in the list is a Row, the fields can be accessed as
        attributes.

        Unlike the base RDD implementation of take, this implementation
        leverages the query optimizer to perform a collect on a SchemaRDD,
        which supports features such as filter pushdown.

        >>> srdd = sqlCtx.inferSchema(rdd)
        >>> srdd.take(2)
        [Row(field1=1, field2=u'row1'), Row(field1=2, field2=u'row2')]
        """
        # Implemented as limit + collect so the limit is pushed to the JVM.
        return self.limit(num).collect()

    # Convert each object in the RDD to a Row with the right class
    # for this SchemaRDD, so that fields can be accessed as attributes.
    def mapPartitionsWithIndex(self, f, preservesPartitioning=False):
        """
        Return a new RDD by applying a function to each partition of this RDD,
        while tracking the index of the original partition.

        >>> rdd = sc.parallelize([1, 2, 3, 4], 4)
        >>> def f(splitIndex, iterator): yield splitIndex
        >>> rdd.mapPartitionsWithIndex(f).sum()
        6
        """
        rdd = RDD(self._jrdd, self._sc, self._jrdd_deserializer)

        schema = self.schema()

        # First pass turns raw tuples into Row objects; second pass runs the
        # user's function over those rows.
        def applySchema(_, it):
            cls = _create_cls(schema)
            return itertools.imap(cls, it)

        objrdd = rdd.mapPartitionsWithIndex(applySchema, preservesPartitioning)
        return objrdd.mapPartitionsWithIndex(f, preservesPartitioning)

    # We override the default cache/persist/checkpoint behavior
    # as we want to cache the underlying SchemaRDD object in the JVM,
    # not the PythonRDD checkpointed by the super class
    def cache(self):
        self.is_cached = True
        self._jschema_rdd.cache()
        return self

    def persist(self, storageLevel=StorageLevel.MEMORY_ONLY_SER):
        self.is_cached = True
        # Translate the Python StorageLevel into its JVM counterpart.
        javaStorageLevel = self.ctx._getJavaStorageLevel(storageLevel)
        self._jschema_rdd.persist(javaStorageLevel)
        return self

    def unpersist(self, blocking=True):
        self.is_cached = False
        self._jschema_rdd.unpersist(blocking)
        return self

    def checkpoint(self):
        self.is_checkpointed = True
        self._jschema_rdd.checkpoint()

    def isCheckpointed(self):
        return self._jschema_rdd.isCheckpointed()

    def getCheckpointFile(self):
        # The JVM returns a Guava/Scala Optional; unwrap it, falling through
        # to an implicit None when no checkpoint file exists.
        checkpointFile = self._jschema_rdd.getCheckpointFile()
        if checkpointFile.isPresent():
            return checkpointFile.get()

    def coalesce(self, numPartitions, shuffle=False):
        rdd = self._jschema_rdd.coalesce(numPartitions, shuffle)
        return SchemaRDD(rdd, self.sql_ctx)

    def distinct(self, numPartitions=None):
        if numPartitions is None:
            rdd = self._jschema_rdd.distinct()
        else:
            rdd = self._jschema_rdd.distinct(numPartitions)
        return SchemaRDD(rdd, self.sql_ctx)

    def intersection(self, other):
        # Set operations are only defined between SchemaRDDs (exact class).
        if (other.__class__ is SchemaRDD):
            rdd = self._jschema_rdd.intersection(other._jschema_rdd)
            return SchemaRDD(rdd, self.sql_ctx)
        else:
            raise ValueError("Can only intersect with another SchemaRDD")

    def repartition(self, numPartitions):
        rdd = self._jschema_rdd.repartition(numPartitions)
        return SchemaRDD(rdd, self.sql_ctx)

    def subtract(self, other, numPartitions=None):
        if (other.__class__ is SchemaRDD):
            if numPartitions is None:
                rdd = self._jschema_rdd.subtract(other._jschema_rdd)
            else:
                rdd = self._jschema_rdd.subtract(other._jschema_rdd,
                                                 numPartitions)
            return SchemaRDD(rdd, self.sql_ctx)
        else:
            raise ValueError("Can only subtract another SchemaRDD")
def _test():
    """Run the module doctests against a local 4-core SparkContext."""
    import doctest
    from array import array
    from pyspark.context import SparkContext
    # let doctest run in pyspark.sql, so DataTypes can be picklable
    import pyspark.sql
    from pyspark.sql import Row, SQLContext
    globs = pyspark.sql.__dict__.copy()
    # The small batch size here ensures that we see multiple batches,
    # even in these small test examples:
    sc = SparkContext('local[4]', 'PythonTest', batchSize=2)
    # These names are referenced directly by the doctests above; keep them
    # stable.
    globs['sc'] = sc
    globs['sqlCtx'] = SQLContext(sc)
    globs['rdd'] = sc.parallelize(
        [Row(field1=1, field2="row1"),
         Row(field1=2, field2="row2"),
         Row(field1=3, field2="row3")]
    )
    jsonStrings = [
        '{"field1": 1, "field2": "row1", "field3":{"field4":11}}',
        '{"field1" : 2, "field3":{"field4":22, "field5": [10, 11]},'
        '"field6":[{"field7": "row2"}]}',
        '{"field1" : null, "field2": "row3", '
        '"field3":{"field4":33, "field5": []}}'
    ]
    globs['jsonStrings'] = jsonStrings
    globs['json'] = sc.parallelize(jsonStrings)
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql, globs=globs, optionflags=doctest.ELLIPSIS)
    # Always stop the context, then signal failure via the exit status.
    globs['sc'].stop()
    if failure_count:
        exit(-1)
if __name__ == "__main__":
    # Run the module doctests when executed directly.
    _test()
| false | true |
f71d24e917d59197e54b40a06a24d91d6d9bc161 | 5,788 | py | Python | sdk/netapp/azure-mgmt-netapp/azure/mgmt/netapp/_net_app_management_client.py | aiven/azure-sdk-for-python | 8764dc07423beca46ed0b51212d81289d9e52c60 | [
"MIT"
] | 1 | 2021-09-07T18:43:20.000Z | 2021-09-07T18:43:20.000Z | sdk/netapp/azure-mgmt-netapp/azure/mgmt/netapp/_net_app_management_client.py | aiven/azure-sdk-for-python | 8764dc07423beca46ed0b51212d81289d9e52c60 | [
"MIT"
] | 2 | 2021-11-03T06:10:36.000Z | 2021-12-01T06:29:39.000Z | sdk/netapp/azure-mgmt-netapp/azure/mgmt/netapp/_net_app_management_client.py | msyyc/azure-sdk-for-python | e2dba75181f8b4336ae57e75aa391322c12c3123 | [
"MIT"
] | 1 | 2021-05-19T02:55:10.000Z | 2021-05-19T02:55:10.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.mgmt.core import ARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Optional
from azure.core.credentials import TokenCredential
from ._configuration import NetAppManagementClientConfiguration
from .operations import Operations
from .operations import NetAppResourceOperations
from .operations import AccountsOperations
from .operations import PoolsOperations
from .operations import VolumesOperations
from .operations import SnapshotsOperations
from .operations import SnapshotPoliciesOperations
from .operations import AccountBackupsOperations
from .operations import BackupsOperations
from .operations import BackupPoliciesOperations
from .operations import VaultsOperations
from . import models
class NetAppManagementClient(object):
    """Microsoft NetApp Azure Resource Provider specification.

    :ivar operations: Operations operations
    :vartype operations: azure.mgmt.netapp.operations.Operations
    :ivar net_app_resource: NetAppResourceOperations operations
    :vartype net_app_resource: azure.mgmt.netapp.operations.NetAppResourceOperations
    :ivar accounts: AccountsOperations operations
    :vartype accounts: azure.mgmt.netapp.operations.AccountsOperations
    :ivar pools: PoolsOperations operations
    :vartype pools: azure.mgmt.netapp.operations.PoolsOperations
    :ivar volumes: VolumesOperations operations
    :vartype volumes: azure.mgmt.netapp.operations.VolumesOperations
    :ivar snapshots: SnapshotsOperations operations
    :vartype snapshots: azure.mgmt.netapp.operations.SnapshotsOperations
    :ivar snapshot_policies: SnapshotPoliciesOperations operations
    :vartype snapshot_policies: azure.mgmt.netapp.operations.SnapshotPoliciesOperations
    :ivar account_backups: AccountBackupsOperations operations
    :vartype account_backups: azure.mgmt.netapp.operations.AccountBackupsOperations
    :ivar backups: BackupsOperations operations
    :vartype backups: azure.mgmt.netapp.operations.BackupsOperations
    :ivar backup_policies: BackupPoliciesOperations operations
    :vartype backup_policies: azure.mgmt.netapp.operations.BackupPoliciesOperations
    :ivar vaults: VaultsOperations operations
    :vartype vaults: azure.mgmt.netapp.operations.VaultsOperations
    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials.TokenCredential
    :param subscription_id: Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms part of the URI for every service call.
    :type subscription_id: str
    :param str base_url: Service URL
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    """

    def __init__(
        self,
        credential,  # type: "TokenCredential"
        subscription_id,  # type: str
        base_url=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        # Default to the public Azure Resource Manager endpoint.
        if not base_url:
            base_url = 'https://management.azure.com'
        self._config = NetAppManagementClientConfiguration(credential, subscription_id, **kwargs)
        self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)

        # Register every generated model class with the (de)serializer.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        # The service performs validation; skip msrest's client-side checks.
        self._serialize.client_side_validation = False
        self._deserialize = Deserializer(client_models)

        # One operation-group client per service area, all sharing the same
        # pipeline, configuration, and (de)serializers.
        self.operations = Operations(
            self._client, self._config, self._serialize, self._deserialize)
        self.net_app_resource = NetAppResourceOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.accounts = AccountsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.pools = PoolsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.volumes = VolumesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.snapshots = SnapshotsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.snapshot_policies = SnapshotPoliciesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.account_backups = AccountBackupsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.backups = BackupsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.backup_policies = BackupPoliciesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.vaults = VaultsOperations(
            self._client, self._config, self._serialize, self._deserialize)

    def close(self):
        # type: () -> None
        """Close the underlying HTTP pipeline."""
        self._client.close()

    def __enter__(self):
        # type: () -> NetAppManagementClient
        self._client.__enter__()
        return self

    def __exit__(self, *exc_details):
        # type: (Any) -> None
        self._client.__exit__(*exc_details)
| 47.834711 | 172 | 0.72944 |
from typing import TYPE_CHECKING
from azure.mgmt.core import ARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
from typing import Any, Optional
from azure.core.credentials import TokenCredential
from ._configuration import NetAppManagementClientConfiguration
from .operations import Operations
from .operations import NetAppResourceOperations
from .operations import AccountsOperations
from .operations import PoolsOperations
from .operations import VolumesOperations
from .operations import SnapshotsOperations
from .operations import SnapshotPoliciesOperations
from .operations import AccountBackupsOperations
from .operations import BackupsOperations
from .operations import BackupPoliciesOperations
from .operations import VaultsOperations
from . import models
class NetAppManagementClient(object):
    """Client for the Microsoft NetApp Azure Resource Provider.

    NOTE(review): this appears to be a comment-stripped duplicate of the
    documented client class earlier in this file -- confirm whether both
    copies are intentional.
    """
    def __init__(
        self,
        credential,
        subscription_id,
        base_url=None,
        **kwargs
    ):
        """Build the ARM pipeline, (de)serializers, and operation groups."""
        # Default to the public Azure Resource Manager endpoint.
        if not base_url:
            base_url = 'https://management.azure.com'
        self._config = NetAppManagementClientConfiguration(credential, subscription_id, **kwargs)
        self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)

        # Register all generated model classes with the (de)serializer.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        # The service validates requests; skip client-side validation.
        self._serialize.client_side_validation = False
        self._deserialize = Deserializer(client_models)

        # One operation-group client per service area, sharing the pipeline.
        self.operations = Operations(
            self._client, self._config, self._serialize, self._deserialize)
        self.net_app_resource = NetAppResourceOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.accounts = AccountsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.pools = PoolsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.volumes = VolumesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.snapshots = SnapshotsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.snapshot_policies = SnapshotPoliciesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.account_backups = AccountBackupsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.backups = BackupsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.backup_policies = BackupPoliciesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.vaults = VaultsOperations(
            self._client, self._config, self._serialize, self._deserialize)

    def close(self):
        """Close the underlying HTTP pipeline."""
        self._client.close()

    def __enter__(self):
        self._client.__enter__()
        return self

    def __exit__(self, *exc_details):
        self._client.__exit__(*exc_details)
| true | true |
f71d255737c2f1063170be076348fe7c0e4e203c | 456 | py | Python | fairseq/fairseq/__init__.py | skeshaw/LoReNMT | 32ffd83f38258dfffd324f811695a44ad33954f5 | [
"Apache-2.0"
] | null | null | null | fairseq/fairseq/__init__.py | skeshaw/LoReNMT | 32ffd83f38258dfffd324f811695a44ad33954f5 | [
"Apache-2.0"
] | null | null | null | fairseq/fairseq/__init__.py | skeshaw/LoReNMT | 32ffd83f38258dfffd324f811695a44ad33954f5 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Public re-exports and package version for fairseq.
__all__ = ['pdb']
__version__ = '0.8.0'
import fairseq.criterions # noqa
import fairseq.models # noqa
import fairseq.modules # noqa
import fairseq.optim # noqa
import fairseq.optim.lr_scheduler # noqa
import fairseq.pdb # noqa
import fairseq.tasks # noqa
| 28.5 | 66 | 0.723684 |
__all__ = ['pdb']
__version__ = '0.8.0'
import fairseq.criterions
import fairseq.models
import fairseq.modules
import fairseq.optim
import fairseq.optim.lr_scheduler
import fairseq.pdb
import fairseq.tasks
| true | true |
f71d2594ab3dd2aaf502d1ae2653ba26eaa14a87 | 2,011 | py | Python | tests/pytests/materials/obsolete/TestGenMaxwellPlaneStrain.py | Grant-Block/pylith | f6338261b17551eba879da998a5aaf2d91f5f658 | [
"MIT"
] | 93 | 2015-01-08T16:41:22.000Z | 2022-02-25T13:40:02.000Z | tests/pytests/materials/obsolete/TestGenMaxwellPlaneStrain.py | Grant-Block/pylith | f6338261b17551eba879da998a5aaf2d91f5f658 | [
"MIT"
] | 277 | 2015-02-20T16:27:35.000Z | 2022-03-30T21:13:09.000Z | tests/pytests/materials/obsolete/TestGenMaxwellPlaneStrain.py | Grant-Block/pylith | f6338261b17551eba879da998a5aaf2d91f5f658 | [
"MIT"
] | 71 | 2015-03-24T12:11:08.000Z | 2022-03-03T04:26:02.000Z | #!/usr/bin/env python
#
# ======================================================================
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University of Chicago
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2017 University of California, Davis
#
# See COPYING for license information.
#
# ======================================================================
#
## @file tests/pytests/materials/TestGenMaxwellPlaneStrain.py
## @brief Unit testing of GenMaxwellPlaneStrain object.
import unittest
from pylith.materials.GenMaxwellPlaneStrain import GenMaxwellPlaneStrain
# ----------------------------------------------------------------------
class TestGenMaxwellPlaneStrain(unittest.TestCase):
    """Unit tests for the GenMaxwellPlaneStrain material component."""

    def setUp(self):
        """Create a fresh material for every test."""
        self.material = GenMaxwellPlaneStrain()

    def test_constructor(self):
        """A plane-strain material is two-dimensional."""
        self.assertEqual(2, self.material.dimension())

    def test_useElasticBehavior(self):
        """useElasticBehavior() accepts a flag without raising."""
        self.material.useElasticBehavior(False)

    def testHasStateVars(self):
        """A generalized Maxwell material carries state variables."""
        # failUnless() is a deprecated alias removed in Python 3.12.
        self.assertTrue(self.material.hasStateVars())

    def testTensorSize(self):
        """Plane-strain stress/strain tensors have three components."""
        self.assertEqual(3, self.material.tensorSize())

    def testNeedNewJacobian(self):
        """The Jacobian must be rebuilt only after the time step changes."""
        # Default should be False.
        self.assertFalse(self.material.needNewJacobian())
        # Changing time step should require a new Jacobian.
        self.material.timeStep(1.0)
        self.material.timeStep(2.0)
        self.assertTrue(self.material.needNewJacobian())

    def test_factory(self):
        """The module-level factory returns a material instance."""
        from pylith.materials.GenMaxwellPlaneStrain import material
        self.assertIsNotNone(material())


# End of file
# End of file
| 22.098901 | 72 | 0.624068 |
llPlaneStrain(unittest.TestCase):
def setUp(self):
self.material = GenMaxwellPlaneStrain()
return
def test_constructor(self):
self.assertEqual(2, self.material.dimension())
return
def test_useElasticBehavior(self):
self.material.useElasticBehavior(False)
return
def testHasStateVars(self):
self.failUnless(self.material.hasStateVars())
return
def testTensorSize(self):
self.assertEqual(3, self.material.tensorSize())
return
def testNeedNewJacobian(self):
self.failIf(self.material.needNewJacobian())
self.material.timeStep(1.0)
self.material.timeStep(2.0)
self.failUnless(self.material.needNewJacobian())
return
def test_factory(self):
from pylith.materials.GenMaxwellPlaneStrain import material
m = material()
return
| true | true |
f71d25c8f6cc9cafcc01ba2df3d5a02e16aa646a | 8,510 | py | Python | models/wideresnet.py | creaiter/Classification-PyTorch | 2feabf4b3d0d561420399bdf65840a58af76069d | [
"MIT"
] | 1 | 2021-04-02T05:13:58.000Z | 2021-04-02T05:13:58.000Z | models/wideresnet.py | creaiter/Classification-PyTorch | 2feabf4b3d0d561420399bdf65840a58af76069d | [
"MIT"
] | null | null | null | models/wideresnet.py | creaiter/Classification-PyTorch | 2feabf4b3d0d561420399bdf65840a58af76069d | [
"MIT"
] | null | null | null | import math
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from torch.nn.parameter import Parameter
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """Bias-free 3x3 convolution; padding tracks dilation so the spatial
    size is preserved at stride 1."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        dilation=dilation,
        groups=groups,
        bias=False,
    )
def conv1x1(in_planes, out_planes, stride=1):
    """Bias-free pointwise (1x1) convolution."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        bias=False,
    )
def relu(inplace=False):
    """Factory for a ReLU activation module; *inplace* mirrors nn.ReLU."""
    layer = nn.ReLU(inplace=inplace)
    return layer
def bn(num_features):
    """Factory for a 2-D batch-normalization layer."""
    layer = nn.BatchNorm2d(num_features)
    return layer
class BasicBlock(nn.Module):
    """Two-layer residual block (3x3 conv -> BN -> ReLU, twice) with an
    identity (or downsampled) shortcut."""

    expansion = 1
    __constants__ = ['downsample']

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1):
        super(BasicBlock, self).__init__()
        # The basic block only supports the vanilla ResNet configuration.
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # conv1 (and the optional downsample branch) carry the striding.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = bn(planes)
        self.relu1 = relu(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = bn(planes)
        self.relu2 = relu(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut branch: raw input, or its projection when shapes differ.
        shortcut = x if self.downsample is None else self.downsample(x)
        # Main branch: conv-bn-relu, conv-bn.
        y = self.relu1(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        # Residual addition, then the final activation.
        y += shortcut
        return self.relu2(y)
class Bottleneck(nn.Module):
    """Three-layer bottleneck residual block (1x1 reduce, 3x3, 1x1 expand)
    with an identity (or downsampled) shortcut."""

    expansion = 4
    __constants__ = ['downsample']

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1):
        super(Bottleneck, self).__init__()
        # Width of the middle 3x3 stage, scaled by base_width and groups.
        width = int(planes * (base_width / 64.)) * groups
        # conv2 (and the optional downsample branch) carry the striding.
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = bn(width)
        self.relu1 = relu(inplace=True)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = bn(width)
        self.relu2 = relu(inplace=True)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = bn(planes * self.expansion)
        self.relu3 = relu(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut branch: raw input, or its projection when shapes differ.
        shortcut = x if self.downsample is None else self.downsample(x)
        # Main branch: reduce, transform, expand.
        y = self.relu1(self.bn1(self.conv1(x)))
        y = self.relu2(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        # Residual addition, then the final activation.
        y += shortcut
        return self.relu3(y)
class WideResNet_Cifar(nn.Module):
    """Wide ResNet for 32x32 (CIFAR-style) inputs.

    Three residual stages of 16/32/64 channels (times ``width_mult``),
    a 3x3 stem, global average pooling, and a linear classifier.
    """

    def __init__(self, block, layers, width_mult=1, num_classes=10, zero_init_residual=False,
                 groups=1, width_per_group=64, replace_stride_with_dilation=None):
        # block: residual block class (BasicBlock or Bottleneck).
        # layers: number of blocks in each of the three stages.
        # width_mult: channel multiplier applied to every stage.
        super(WideResNet_Cifar, self).__init__()
        self.block_name = str(block.__name__)
        self.inplanes = 16
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        # Stem: 3x3 conv, stride 1 (no initial downsampling for CIFAR).
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1,
                               bias=False)
        self.bn1 = bn(self.inplanes)
        self.relu1 = relu(inplace=False)
        # Stages 2 and 3 halve the spatial size (unless dilated instead).
        self.layer1 = self._make_layer(block, 16 * width_mult, layers[0])
        self.layer2 = self._make_layer(block, 32 * width_mult, layers[1], stride=2,
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 64 * width_mult, layers[2], stride=2,
                                       dilate=replace_stride_with_dilation[1])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(64 * block.expansion * width_mult, num_classes)

        # He-style initialization for convolutions; unit gain/zero shift for BN.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                #nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        """Build one residual stage of ``blocks`` blocks.

        The first block may stride (or dilate) and receives a 1x1 projection
        shortcut when the channel count or resolution changes.
        """
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            # Trade the stride for dilation to keep the spatial resolution.
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                bn(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
                            self.base_width, previous_dilation))
        self.inplanes = planes * block.expansion
        # Remaining blocks keep the resolution and channel count.
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups,
                                base_width=self.base_width, dilation=self.dilation))

        return nn.Sequential(*layers)

    def _forward_impl(self, x):
        # See note [TorchScript super()]
        # Stem -> three stages -> global pool -> classifier logits.
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu1(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)

        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)

        return x

    def forward(self, x):
        """Return class logits for a batch of 3-channel images."""
        return self._forward_impl(x)
# Model configurations
'''
model_cfgs = {
18: (BasicBlock, [2, 2, 2, 2]),
34: (BasicBlock, [3, 4, 6, 3]),
50: (Bottleneck, [3, 4, 6, 3]),
101: (Bottleneck, [3, 4, 23, 3]),
152: (Bottleneck, [3, 8, 36, 3]),
}
'''
# Per-depth stage configuration: total depth d = 6n + 4 with n blocks per
# stage, e.g. depth 28 -> 4 BasicBlocks in each of the three stages.
model_cfgs_cifar = {
    16: (BasicBlock, [2, 2, 2]),
    22: (BasicBlock, [3, 3, 3]),
    28: (BasicBlock, [4, 4, 4]),
    40: (BasicBlock, [6, 6, 6]),
    52: (BasicBlock, [8, 8, 8]),
}
def set_model(cfg):
    r"""Build a WideResNet model for the dataset named in the configuration.

    Args:
        cfg: configuration object with attributes ``dataset`` (e.g.
            ``'cifar10'`` or ``'cifar100'``), ``layers`` (total depth,
            must be 6n + 4), and ``width_mult`` (integer width multiplier).

    Returns:
        Tuple ``(model, image_size)``.

    Raises:
        Exception: if ``cfg.dataset`` is not a supported dataset.
    """
    # Bug fix: the original referenced an undefined name `data`
    # (NameError at runtime); the dataset name lives in cfg.dataset.
    if cfg.dataset in ('cifar10', 'cifar100'):
        assert (cfg.layers - 4) % 6 == 0, "The number of layers should be 16, 22, 28, 40, 52, etc."
        assert cfg.width_mult == int(cfg.width_mult), "The width multiplier should be an integer value."
        # Depth d = 6n + 4 -> n blocks per stage.
        n = (cfg.layers - 4) // 6
        layers = [n, n, n]
        image_size = 32
        # 'cifar10' -> 10 classes, 'cifar100' -> 100 classes.
        num_classes = int(cfg.dataset[5:])
        model = WideResNet_Cifar(BasicBlock, layers, cfg.width_mult, num_classes)
    elif cfg.dataset == 'imagenet':
        # ImageNet variant is not implemented.
        raise Exception('Undefined dataset for WideResNet architecture.')
    else:
        raise Exception('Undefined dataset for WideResNet architecture.')

    return model, image_size
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from torch.nn.parameter import Parameter
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
def relu(inplace=False):
return nn.ReLU(inplace=inplace)
def bn(num_features):
return nn.BatchNorm2d(num_features)
class BasicBlock(nn.Module):
expansion = 1
__constants__ = ['downsample']
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1):
super(BasicBlock, self).__init__()
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = bn(planes)
self.relu1 = relu(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = bn(planes)
self.relu2 = relu(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu1(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu2(out)
return out
class Bottleneck(nn.Module):
expansion = 4
__constants__ = ['downsample']
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1):
super(Bottleneck, self).__init__()
width = int(planes * (base_width / 64.)) * groups
self.conv1 = conv1x1(inplanes, width)
self.bn1 = bn(width)
self.relu1 = relu(inplace=True)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = bn(width)
self.relu2 = relu(inplace=True)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = bn(planes * self.expansion)
self.relu3 = relu(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu1(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu2(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu3(out)
return out
class WideResNet_Cifar(nn.Module):
def __init__(self, block, layers, width_mult=1, num_classes=10, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None):
super(WideResNet_Cifar, self).__init__()
self.block_name = str(block.__name__)
self.inplanes = 16
self.dilation = 1
if replace_stride_with_dilation is None:
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1,
bias=False)
self.bn1 = bn(self.inplanes)
self.relu1 = relu(inplace=False)
self.layer1 = self._make_layer(block, 16 * width_mult, layers[0])
self.layer2 = self._make_layer(block, 32 * width_mult, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 64 * width_mult, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(64 * block.expansion * width_mult, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
bn(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation))
return nn.Sequential(*layers)
def _forward_impl(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu1(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def forward(self, x):
return self._forward_impl(x)
model_cfgs_cifar = {
16: (BasicBlock, [2, 2, 2]),
22: (BasicBlock, [3, 3, 3]),
28: (BasicBlock, [4, 4, 4]),
40: (BasicBlock, [6, 6, 6]),
52: (BasicBlock, [8, 8, 8]),
}
def set_model(cfg):
    r"""Build a WideResNet model for the dataset named in the configuration.

    Args:
        cfg: configuration object with attributes ``dataset``, ``layers``
            (total depth, must be 6n + 4), and ``width_mult``.

    Returns:
        Tuple ``(model, image_size)``.

    Raises:
        Exception: if ``cfg.dataset`` is not a supported dataset.
    """
    # Bug fix: the original referenced an undefined name `data`
    # (NameError at runtime); the dataset name lives in cfg.dataset.
    if cfg.dataset in ('cifar10', 'cifar100'):
        assert (cfg.layers - 4) % 6 == 0, "The number of layers should be 16, 22, 28, 40, 52, etc."
        assert cfg.width_mult == int(cfg.width_mult), "The width multiplier should be an integer value."
        n = (cfg.layers - 4) // 6
        layers = [n, n, n]
        image_size = 32
        num_classes = int(cfg.dataset[5:])
        model = WideResNet_Cifar(BasicBlock, layers, cfg.width_mult, num_classes)
    elif cfg.dataset == 'imagenet':
        raise Exception('Undefined dataset for WideResNet architecture.')
    else:
        raise Exception('Undefined dataset for WideResNet architecture.')
    return model, image_size
f71d262f290551102b4dc1338a796fd3573855c9 | 1,069 | py | Python | scripts/hf_per_train_shard_tokenize.py | myutman/contracode | f2a589e1efd2788874fd0468d1ecc30d6a14c396 | [
"Apache-2.0"
] | 115 | 2020-07-10T02:01:34.000Z | 2022-03-27T06:21:43.000Z | scripts/hf_per_train_shard_tokenize.py | myutman/contracode | f2a589e1efd2788874fd0468d1ecc30d6a14c396 | [
"Apache-2.0"
] | 7 | 2020-08-05T00:25:17.000Z | 2021-12-26T17:06:31.000Z | scripts/hf_per_train_shard_tokenize.py | myutman/contracode | f2a589e1efd2788874fd0468d1ecc30d6a14c396 | [
"Apache-2.0"
] | 21 | 2020-07-14T11:31:47.000Z | 2022-03-25T06:10:18.000Z | import sys
import numpy as np
import pandas as pd
import multiprocessing as mp
from transformers import BertTokenizerFast
from tqdm import tqdm
if __name__ == "__main__":
    # Usage: python hf_per_train_shard_tokenize.py <shard_index>
    assert len(sys.argv) == 2
    shard_idx = int(sys.argv[1])

    # Input/output shard locations (zero-padded 4-digit index).
    shard_in = f"/data/ajay/contracode/data/hf_data/train_chunks/augmented_pretrain_df.{shard_idx:04d}.train.pickle.gz"
    shard_out = (
        f"/data/ajay/contracode/data/hf_data/train_chunks_tokenized/augmented_pretrain_tokenized_df.{shard_idx:04d}.train.pickle.gz"
    )

    def load_tokenizer(path="data/vocab/8k_bpe/8k_bpe-vocab.txt"):
        """Build the 8k-BPE BERT tokenizer used for pretraining."""
        return BertTokenizerFast(path, clean_text=True, lowercase=False, strip_accents=True, unk_token="<unk>")

    tokenizer = load_tokenizer()
    frame = pd.read_pickle(shard_in)

    # Tokenize every source text; progress_apply shows a tqdm progress bar.
    tqdm.pandas()
    frame["toks"] = frame["text"].progress_apply(lambda src: np.asarray(tokenizer.encode(src)))
    # Persist only the id/token columns for the tokenized shard.
    frame[["data_idx", "toks"]].to_pickle(shard_out)
| 36.862069 | 137 | 0.74275 | import sys
import numpy as np
import pandas as pd
import multiprocessing as mp
from transformers import BertTokenizerFast
from tqdm import tqdm
if __name__ == "__main__":
assert len(sys.argv) == 2
data_shard_idx = int(sys.argv[1])
data_shard_path = f"/data/ajay/contracode/data/hf_data/train_chunks/augmented_pretrain_df.{data_shard_idx:04d}.train.pickle.gz"
data_shard_path_out = (
f"/data/ajay/contracode/data/hf_data/train_chunks_tokenized/augmented_pretrain_tokenized_df.{data_shard_idx:04d}.train.pickle.gz"
)
def load_tokenizer(path="data/vocab/8k_bpe/8k_bpe-vocab.txt"):
return BertTokenizerFast(path, clean_text=True, lowercase=False, strip_accents=True, unk_token="<unk>")
def load_data(path):
return pd.read_pickle(path)
tokenizer = load_tokenizer()
df_shard = load_data(data_shard_path)
tqdm.pandas()
df_shard["toks"] = df_shard["text"].progress_apply(lambda x: np.asarray(tokenizer.encode(x)))
df_shard = df_shard[["data_idx", "toks"]]
df_shard.to_pickle(data_shard_path_out)
| true | true |
f71d2652af9afa17cad4beb33592ed002af1f665 | 4,040 | py | Python | alipay/aop/api/request/AlipayCommerceEducateCampusBiztaskFinishRequest.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/request/AlipayCommerceEducateCampusBiztaskFinishRequest.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/request/AlipayCommerceEducateCampusBiztaskFinishRequest.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayCommerceEducateCampusBiztaskFinishModel import AlipayCommerceEducateCampusBiztaskFinishModel
class AlipayCommerceEducateCampusBiztaskFinishRequest(object):
    """Request wrapper for the ``alipay.commerce.educate.campus.biztask.finish``
    OpenAPI method.

    Generated-SDK style: plain properties holding request fields, plus
    ``get_params()`` which flattens them into the wire parameter dict.
    """

    def __init__(self, biz_model=None):
        # biz_model: optional typed business model; serialized in get_params().
        self._biz_model = biz_model
        self._biz_content = None
        self._version = "1.0"
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        self._notify_url = None
        self._return_url = None
        self._udf_params = None
        self._need_encrypt = False

    @property
    def biz_model(self):
        return self._biz_model

    @biz_model.setter
    def biz_model(self, value):
        self._biz_model = value

    @property
    def biz_content(self):
        return self._biz_content

    @biz_content.setter
    def biz_content(self, value):
        # Accept either the typed model or a plain dict convertible to it.
        if isinstance(value, AlipayCommerceEducateCampusBiztaskFinishModel):
            self._biz_content = value
        else:
            self._biz_content = AlipayCommerceEducateCampusBiztaskFinishModel.from_alipay_dict(value)

    @property
    def version(self):
        return self._version

    @version.setter
    def version(self, value):
        self._version = value

    @property
    def terminal_type(self):
        return self._terminal_type

    @terminal_type.setter
    def terminal_type(self, value):
        self._terminal_type = value

    @property
    def terminal_info(self):
        return self._terminal_info

    @terminal_info.setter
    def terminal_info(self, value):
        self._terminal_info = value

    @property
    def prod_code(self):
        return self._prod_code

    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value

    @property
    def notify_url(self):
        return self._notify_url

    @notify_url.setter
    def notify_url(self, value):
        self._notify_url = value

    @property
    def return_url(self):
        return self._return_url

    @return_url.setter
    def return_url(self, value):
        self._return_url = value

    @property
    def udf_params(self):
        return self._udf_params

    @udf_params.setter
    def udf_params(self, value):
        # Silently ignore non-dict values (generated-SDK convention).
        if not isinstance(value, dict):
            return
        self._udf_params = value

    @property
    def need_encrypt(self):
        return self._need_encrypt

    @need_encrypt.setter
    def need_encrypt(self, value):
        self._need_encrypt = value

    def add_other_text_param(self, key, value):
        """Attach an extra free-form text parameter to the request."""
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value

    def get_params(self):
        """Flatten all set fields into the wire parameter dict.

        biz_model/biz_content are JSON-serialized with sorted keys and
        compact separators, as required for request signing.
        """
        params = dict()
        params[P_METHOD] = 'alipay.commerce.educate.campus.biztask.finish'
        params[P_VERSION] = self.version
        if self.biz_model:
            params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        if self.biz_content:
            if hasattr(self.biz_content, 'to_alipay_dict'):
                params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
            else:
                params['biz_content'] = self.biz_content
        if self.terminal_type:
            params['terminal_type'] = self.terminal_type
        if self.terminal_info:
            params['terminal_info'] = self.terminal_info
        if self.prod_code:
            params['prod_code'] = self.prod_code
        if self.notify_url:
            params['notify_url'] = self.notify_url
        if self.return_url:
            params['return_url'] = self.return_url
        if self.udf_params:
            params.update(self.udf_params)
        return params

    def get_multipart_params(self):
        """This request carries no file uploads; always an empty dict."""
        multipart_params = dict()
        return multipart_params
| 27.862069 | 148 | 0.650495 |
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayCommerceEducateCampusBiztaskFinishModel import AlipayCommerceEducateCampusBiztaskFinishModel
class AlipayCommerceEducateCampusBiztaskFinishRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, AlipayCommerceEducateCampusBiztaskFinishModel):
self._biz_content = value
else:
self._biz_content = AlipayCommerceEducateCampusBiztaskFinishModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.commerce.educate.campus.biztask.finish'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
| true | true |
f71d265a9ef6949d78624bff82593373435b72c8 | 135 | py | Python | lang/python/test_sound.py | df3n5/cog | da76d2350f96d77aad7d431837302f004603c35f | [
"MIT"
] | null | null | null | lang/python/test_sound.py | df3n5/cog | da76d2350f96d77aad7d431837302f004603c35f | [
"MIT"
] | null | null | null | lang/python/test_sound.py | df3n5/cog | da76d2350f96d77aad7d431837302f004603c35f | [
"MIT"
] | null | null | null | import cog
# Smoke test for cog's sound API: play a sample and run the main loop.
cog.init()
# Load and immediately start the test sound.
snd = cog.snd_add("media/testsnd.wav")
cog.snd_play(snd)
# Step the engine until the user quits, then shut down.
while not cog.hasquit():
    cog.loopstep()
cog.quit()
| 15 | 38 | 0.696296 | import cog
cog.init()
snd = cog.snd_add("media/testsnd.wav")
cog.snd_play(snd)
while not cog.hasquit():
cog.loopstep()
cog.quit()
| true | true |
f71d26f35e9c16f18e4f8f8585d88d4ae52e4b6f | 114 | py | Python | runworker.py | CodeForAfrica/geomancer | cfddbb451d2fdea75a15ceca09e7f5ec70fc853f | [
"MIT"
] | 45 | 2015-01-14T01:14:10.000Z | 2021-05-20T00:20:44.000Z | runworker.py | CodeForAfrica/geomancer | cfddbb451d2fdea75a15ceca09e7f5ec70fc853f | [
"MIT"
] | 38 | 2015-01-09T16:58:46.000Z | 2016-07-15T18:50:09.000Z | runworker.py | CodeForAfrica/geomancer | cfddbb451d2fdea75a15ceca09e7f5ec70fc853f | [
"MIT"
] | 6 | 2015-01-28T05:39:30.000Z | 2018-04-05T16:44:27.000Z | from geomancer.worker import queue_daemon
from geomancer import create_app
# Build the application via the geomancer app factory and hand it to the
# worker daemon (presumably processes queued geocoding jobs until stopped —
# confirm in geomancer.worker).
app = create_app()
queue_daemon(app)
| 16.285714 | 41 | 0.824561 | from geomancer.worker import queue_daemon
from geomancer import create_app
app = create_app()
queue_daemon(app)
| true | true |
f71d27fd646465b648a230e84292740cfbceac9f | 2,987 | py | Python | craid/eddb/system/System.py | HausReport/ClubRaiders | 88bd64d2512302ca2b391b48979b6e88b092eb92 | [
"BSD-3-Clause"
] | null | null | null | craid/eddb/system/System.py | HausReport/ClubRaiders | 88bd64d2512302ca2b391b48979b6e88b092eb92 | [
"BSD-3-Clause"
] | 2 | 2020-05-28T13:30:08.000Z | 2020-06-02T14:12:04.000Z | craid/eddb/system/System.py | HausReport/ClubRaiders | 88bd64d2512302ca2b391b48979b6e88b092eb92 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2020 Club Raiders Project
# https://github.com/HausReport/ClubRaiders
#
# SPDX-License-Identifier: BSD-3-Clause
#
# SPDX-License-Identifier: BSD-3-Clause
import math
from datetime import datetime, timedelta
from craid.eddb.base.Aware import Aware
class System(Aware):
    """A star system parsed from one EDDB json record.

    Holds the system's galactic coordinates, permit flag, and the time the
    record was last updated ("scouted").
    """

    # Boundary coordinates (the Etionses system) used by getOctant() to
    # split the galaxy into eight octants.  Previously inline magic numbers.
    OCTANT_X: float = 49.5
    OCTANT_Y: float = -104.0
    OCTANT_Z: float = 6.3

    def __init__(self, jsonLine):
        super().__init__(jsonLine['name'], jsonLine['id'])
        self.x: float = float(jsonLine['x'])
        self.y: float = float(jsonLine['y'])
        self.z: float = float(jsonLine['z'])
        self.needs_permit: bool = jsonLine.get('needs_permit')
        # 'updated_at' is seconds since the epoch; keep a naive-UTC datetime
        # to match the naive datetime.utcnow() used in getDaysSinceScouted().
        self.updated_at: datetime = datetime.utcfromtimestamp(jsonLine['updated_at'])

    def getX(self) -> float:
        """Galactic x coordinate."""
        return self.x

    def getY(self) -> float:
        """Galactic y coordinate."""
        return self.y

    def getZ(self) -> float:
        """Galactic z coordinate."""
        return self.z

    def getOctant(self) -> int:
        """Octant (0-7) of the galaxy measured from Etionses, one bit per axis."""
        tmp: int = 0
        if self.getX() > self.OCTANT_X:
            tmp += 1
        if self.getY() > self.OCTANT_Y:
            tmp += 2
        if self.getZ() > self.OCTANT_Z:
            tmp += 4
        return tmp

    def needsPermit(self) -> bool:
        """True if a permit is required to enter the system."""
        return self.needs_permit

    def getUpdatedDateTime(self) -> datetime:
        """Naive-UTC datetime of the last EDDB update for this system."""
        return self.updated_at

    def getUpdatedString(self) -> str:
        """Human-readable (markdown-flavored) staleness description."""
        days: int = self.getDaysSinceScouted()
        if days <= 1:
            return "Scouted within the last day."
        if days <= 6:
            return "Scouted within the last " + str(days) + " days."
        weeks = math.ceil(days / 7)
        if weeks <= 6:
            return "*Scouted " + str(weeks) + " weeks ago.*"
        return "**Really, really needs to be scouted.**"

    def getDaysSinceScouted(self) -> int:
        """Whole days elapsed since the record was last updated."""
        upd = self.getUpdatedDateTime()
        now = datetime.utcnow()  # naive UTC, consistent with updated_at
        time_elapsed: timedelta = now - upd
        return time_elapsed.days

    def getInaraNearestShipyardUrl(self):
        """Inara 'nearest shipyard' page for this system."""
        return "https://inara.cz/galaxy-nearest/14/" + str(self.get_id())

    def getInaraSystemUrl(self):
        """Inara system page."""
        return "https://inara.cz/galaxy-starsystem/" + str(self.get_id()) + "/"

    def getEddbSystemUrl(self):
        """EDDB system page."""
        return "https://eddb.io/system/" + str(self.get_id())

    def getRoadToRichesUrl(self):
        """Base URL of the Road-to-Riches exploration planner.

        NOTE(review): appending the url-quoted system name was disabled
        upstream; confirm before re-enabling.
        """
        return "http://edtools.ddns.net/expl.php?s="

    # RegionFactory is imported locally in the methods below, presumably to
    # avoid a circular import with the regions package — confirm.

    def getRegionColor(self):
        """Display color of the region containing this system."""
        from craid.club.regions.RegionFactory import RegionFactory
        return RegionFactory.getRegionColor(self)

    def getRegionName(self):
        """Name of the region containing this system."""
        from craid.club.regions.RegionFactory import RegionFactory
        return RegionFactory.getRegionName(self)

    def getNearestRegionMessage(self):
        """Message describing the nearest region to this system."""
        from craid.club.regions.RegionFactory import RegionFactory
        return RegionFactory.getNearestRegionMessage(self)

    def getRegionNumber(self):
        """Numeric id of the region containing this system."""
        from craid.club.regions.RegionFactory import RegionFactory
        return RegionFactory.getRegionNumber(self)
| 29.87 | 93 | 0.627051 |
import math
from datetime import datetime, timedelta
from craid.eddb.base.Aware import Aware
class System(Aware):
    """A star system with galactic coordinates and scouting metadata.

    Built from one JSON record (presumably EDDB data — the EDDB URL
    helper below suggests so); name/id handling comes from Aware.
    """
    def __init__(self, jsonLine):
        # jsonLine: parsed JSON dict with name, id, x, y, z,
        # needs_permit and updated_at (epoch seconds) keys.
        super().__init__(jsonLine['name'], jsonLine['id'])
        self.x: float = float(jsonLine['x'])
        self.y: float = float(jsonLine['y'])
        self.z: float = float(jsonLine['z'])
        self.needs_permit: bool = jsonLine.get('needs_permit')
        self.updated_at: datetime = datetime.utcfromtimestamp(jsonLine['updated_at'])
    def getX(self) -> float:
        """Galactic X coordinate."""
        return self.x
    def getY(self) -> float:
        """Galactic Y coordinate."""
        return self.y
    def getZ(self) -> float:
        """Galactic Z coordinate."""
        return self.z
    def getOctant(self) -> int:
        """Octant of the galaxy: 3-bit code from x/y/z axis thresholds."""
        tmp: int = 0
        if self.getX() > 49.5:
            tmp += 1
        if self.getY() > -104:
            tmp += 2
        if self.getZ() > 6.3:
            tmp += 4
        return tmp
    def needsPermit(self) -> bool:
        """True if a permit is required to enter this system."""
        return self.needs_permit
    def getUpdatedDateTime(self) -> datetime:
        """Naive UTC timestamp of the last data update."""
        return self.updated_at
    def getUpdatedString(self) -> str:
        """Human-readable (markdown) freshness message; urgency grows with age."""
        days: int = self.getDaysSinceScouted()
        if days <= 1:
            return "Scouted within the last day."
        if days <= 6:
            return "Scouted within the last " + str(days) + " days."
        weeks = math.ceil(days / 7)
        if weeks <= 6:
            return "*Scouted " + str(weeks) + " weeks ago.*"
        return "**Really, really needs to be scouted.**"
    def getDaysSinceScouted(self) -> int:
        """Whole days elapsed since the last data update (UTC)."""
        upd = self.getUpdatedDateTime()
        # Naive UTC now, matching the naive utcfromtimestamp value above.
        now = datetime.utcnow()
        time_elapsed: timedelta = now - upd
        days = time_elapsed.days
        return days
    def getInaraNearestShipyardUrl(self):
        """URL of the Inara 'nearest shipyard' page for this system."""
        return "https://inara.cz/galaxy-nearest/14/" + str(self.get_id())
    def getInaraSystemUrl(self):
        """URL of this system's page on Inara."""
        return "https://inara.cz/galaxy-starsystem/" + str(self.get_id()) + "/"
    def getEddbSystemUrl(self):
        """URL of this system's page on EDDB."""
        return "https://eddb.io/system/" + str(self.get_id())
    def getRoadToRichesUrl(self):
        """Base URL of the 'road to riches' planner (no query value appended)."""
        return "http://edtools.ddns.net/expl.php?s="
    def getRegionColor(self):
        """Color for this system's region (delegates to RegionFactory)."""
        # Local imports below, presumably to avoid circular imports — verify.
        from craid.club.regions.RegionFactory import RegionFactory
        return RegionFactory.getRegionColor(self)
    def getRegionName(self):
        """Name of this system's region (delegates to RegionFactory)."""
        from craid.club.regions.RegionFactory import RegionFactory
        return RegionFactory.getRegionName(self)
    def getNearestRegionMessage(self):
        """Message describing the nearest region (delegates to RegionFactory)."""
        from craid.club.regions.RegionFactory import RegionFactory
        return RegionFactory.getNearestRegionMessage(self)
    def getRegionNumber(self):
        """Numeric id of this system's region (delegates to RegionFactory)."""
        from craid.club.regions.RegionFactory import RegionFactory
        return RegionFactory.getRegionNumber(self)
| true | true |
f71d2a4a74c4053610f3decc8970971a3192cc9f | 11,567 | py | Python | neutron/db/dns_db.py | MultipleCrashes/neutron | fb268d7e91b22192a6e42f78b0057b4ebd3033ef | [
"Apache-2.0"
] | 1 | 2019-06-02T06:15:39.000Z | 2019-06-02T06:15:39.000Z | neutron/db/dns_db.py | MultipleCrashes/neutron | fb268d7e91b22192a6e42f78b0057b4ebd3033ef | [
"Apache-2.0"
] | null | null | null | neutron/db/dns_db.py | MultipleCrashes/neutron | fb268d7e91b22192a6e42f78b0057b4ebd3033ef | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2016 IBM
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api import validators
from neutron_lib import exceptions as n_exc
from oslo_config import cfg
from oslo_log import log as logging
from neutron._i18n import _, _LE
from neutron.common import utils
from neutron.db import _resource_extend as resource_extend
from neutron.extensions import dns
from neutron.extensions import l3
from neutron.objects import floatingip as fip_obj
from neutron.objects import network
from neutron.objects import ports as port_obj
from neutron.services.externaldns import driver
LOG = logging.getLogger(__name__)
class DNSActionsData(object):
    """DNS name/domain state before and after a floating IP change.

    The ``current_*`` fields describe records to publish; the
    ``previous_*`` fields describe stale records to remove.
    """
    def __init__(self, current_dns_name=None, current_dns_domain=None,
                 previous_dns_name=None, previous_dns_domain=None):
        for attr, value in (('current_dns_name', current_dns_name),
                            ('current_dns_domain', current_dns_domain),
                            ('previous_dns_name', previous_dns_name),
                            ('previous_dns_domain', previous_dns_domain)):
            setattr(self, attr, value)
class DNSDbMixin(object):
    """Mixin class to add DNS methods to db_base_plugin_v2."""
    # Lazily-loaded external DNS service driver, cached at class level.
    _dns_driver = None
    @property
    def dns_driver(self):
        # Load and cache the driver named by CONF.external_dns_driver;
        # returns None when no external DNS integration is configured.
        if self._dns_driver:
            return self._dns_driver
        if not cfg.CONF.external_dns_driver:
            return
        try:
            self._dns_driver = driver.ExternalDNSService.get_instance()
            LOG.debug("External DNS driver loaded: %s",
                      cfg.CONF.external_dns_driver)
            return self._dns_driver
        except ImportError:
            LOG.exception(_LE("ImportError exception occurred while loading "
                              "the external DNS service driver"))
            raise dns.ExternalDNSDriverNotFound(
                driver=cfg.CONF.external_dns_driver)
    def _extend_floatingip_dict_dns(self, floatingip_res, floatingip_db):
        # Copy dns_name/dns_domain from the DB model into the API dict,
        # defaulting both to '' when the floating IP has no DNS record.
        floatingip_res['dns_domain'] = ''
        floatingip_res['dns_name'] = ''
        if floatingip_db.dns:
            floatingip_res['dns_domain'] = floatingip_db.dns['dns_domain']
            floatingip_res['dns_name'] = floatingip_db.dns['dns_name']
        return floatingip_res
    # Register the hook so floating IP dicts always carry the DNS fields.
    resource_extend.register_funcs(
        l3.FLOATINGIPS, ['_extend_floatingip_dict_dns'])
    def _process_dns_floatingip_create_precommit(self, context,
                                                 floatingip_data, req_data):
        # Record requested DNS state in the DB for a new floating IP and
        # return the DNSActionsData for the postcommit publish step.
        # expects to be called within a plugin's session
        dns_domain = req_data.get(dns.DNSDOMAIN)
        if not validators.is_attr_set(dns_domain):
            return
        if not self.dns_driver:
            return
        dns_name = req_data[dns.DNSNAME]
        self._validate_floatingip_dns(dns_name, dns_domain)
        current_dns_name, current_dns_domain = (
            self._get_requested_state_for_external_dns_service_create(
                context, floatingip_data, req_data))
        dns_actions_data = None
        if current_dns_name and current_dns_domain:
            fip_obj.FloatingIPDNS(context,
                                  floatingip_id=floatingip_data['id'],
                                  dns_name=req_data[dns.DNSNAME],
                                  dns_domain=req_data[dns.DNSDOMAIN],
                                  published_dns_name=current_dns_name,
                                  published_dns_domain=current_dns_domain).create()
            dns_actions_data = DNSActionsData(
                current_dns_name=current_dns_name,
                current_dns_domain=current_dns_domain)
        floatingip_data['dns_name'] = dns_name
        floatingip_data['dns_domain'] = dns_domain
        return dns_actions_data
    def _process_dns_floatingip_create_postcommit(self, context,
                                                  floatingip_data,
                                                  dns_actions_data):
        # Publish the new floating IP's record in the external DNS service.
        if not dns_actions_data:
            return
        self._add_ips_to_external_dns_service(
            context, dns_actions_data.current_dns_domain,
            dns_actions_data.current_dns_name,
            [floatingip_data['floating_ip_address']])
    def _process_dns_floatingip_update_precommit(self, context,
                                                 floatingip_data):
        # Reconcile stored DNS state after a floating IP association change
        # and return the DNSActionsData for the postcommit step.
        # expects to be called within a plugin's session
        if not utils.is_extension_supported(self._core_plugin,
                                            dns.Dns.get_alias()):
            return
        if not self.dns_driver:
            return
        dns_data_db = fip_obj.FloatingIPDNS.get_object(
            context, floatingip_id=floatingip_data['id'])
        if dns_data_db and dns_data_db['dns_name']:
            # dns_name and dns_domain assigned for floating ip. It doesn't
            # matter whether they are defined for internal port
            return
        current_dns_name, current_dns_domain = (
            self._get_requested_state_for_external_dns_service_update(
                context, floatingip_data))
        if dns_data_db:
            if (dns_data_db['published_dns_name'] != current_dns_name or
                    dns_data_db['published_dns_domain'] != current_dns_domain):
                dns_actions_data = DNSActionsData(
                    previous_dns_name=dns_data_db['published_dns_name'],
                    previous_dns_domain=dns_data_db['published_dns_domain'])
                if current_dns_name and current_dns_domain:
                    dns_data_db['published_dns_name'] = current_dns_name
                    dns_data_db['published_dns_domain'] = current_dns_domain
                    dns_actions_data.current_dns_name = current_dns_name
                    dns_actions_data.current_dns_domain = current_dns_domain
                else:
                    dns_data_db.delete()
                return dns_actions_data
            else:
                return
        if current_dns_name and current_dns_domain:
            fip_obj.FloatingIPDNS(context,
                                  floatingip_id=floatingip_data['id'],
                                  dns_name='',
                                  dns_domain='',
                                  published_dns_name=current_dns_name,
                                  published_dns_domain=current_dns_domain).create()
            return DNSActionsData(current_dns_name=current_dns_name,
                                  current_dns_domain=current_dns_domain)
    def _process_dns_floatingip_update_postcommit(self, context,
                                                  floatingip_data,
                                                  dns_actions_data):
        # Remove stale records first, then publish the current ones.
        if not dns_actions_data:
            return
        if dns_actions_data.previous_dns_name:
            self._delete_floatingip_from_external_dns_service(
                context, dns_actions_data.previous_dns_domain,
                dns_actions_data.previous_dns_name,
                [floatingip_data['floating_ip_address']])
        if dns_actions_data.current_dns_name:
            self._add_ips_to_external_dns_service(
                context, dns_actions_data.current_dns_domain,
                dns_actions_data.current_dns_name,
                [floatingip_data['floating_ip_address']])
    def _process_dns_floatingip_delete(self, context, floatingip_data):
        # Remove a deleted floating IP's published records, if any.
        if not utils.is_extension_supported(self._core_plugin,
                                            dns.Dns.get_alias()):
            return
        dns_data_db = fip_obj.FloatingIPDNS.get_object(context,
            floatingip_id=floatingip_data['id'])
        if dns_data_db:
            self._delete_floatingip_from_external_dns_service(
                context, dns_data_db['published_dns_domain'],
                dns_data_db['published_dns_name'],
                [floatingip_data['floating_ip_address']])
    def _validate_floatingip_dns(self, dns_name, dns_domain):
        # dns_name and dns_domain must be supplied together or not at all.
        if dns_domain and not dns_name:
            msg = _("dns_domain cannot be specified without a dns_name")
            raise n_exc.BadRequest(resource='floatingip', msg=msg)
        if dns_name and not dns_domain:
            msg = _("dns_name cannot be specified without a dns_domain")
            raise n_exc.BadRequest(resource='floatingip', msg=msg)
    def _get_internal_port_dns_data(self, context, floatingip_data):
        # Return (dns_name, dns_domain) derived from the associated internal
        # port and its network; either element may be None when unset.
        port_dns = port_obj.PortDNS.get_object(
            context, port_id=floatingip_data['port_id'])
        if not (port_dns and port_dns['dns_name']):
            return None, None
        net_dns = network.NetworkDNSDomain.get_net_dns_from_port(
            context=context, port_id=floatingip_data['port_id'])
        if not net_dns:
            return port_dns['dns_name'], None
        return port_dns['dns_name'], net_dns['dns_domain']
    def _delete_floatingip_from_external_dns_service(self, context, dns_domain,
                                                     dns_name, records):
        # Best-effort delete: driver errors are logged, not re-raised.
        try:
            self.dns_driver.delete_record_set(context, dns_domain, dns_name,
                                              records)
        except (dns.DNSDomainNotFound, dns.DuplicateRecordSet) as e:
            LOG.exception(_LE("Error deleting Floating IP data from external "
                              "DNS service. Name: '%(name)s'. Domain: "
                              "'%(domain)s'. IP addresses '%(ips)s'. DNS "
                              "service driver message '%(message)s'"),
                          {"name": dns_name,
                           "domain": dns_domain,
                           "message": e.msg,
                           "ips": ', '.join(records)})
    def _get_requested_state_for_external_dns_service_create(self, context,
                                                             floatingip_data,
                                                             req_data):
        # Explicit request data wins; otherwise fall back to the internal
        # port's DNS data when the floating IP is associated.
        fip_dns_name = req_data[dns.DNSNAME]
        if fip_dns_name:
            return fip_dns_name, req_data[dns.DNSDOMAIN]
        if floatingip_data['port_id']:
            return self._get_internal_port_dns_data(context, floatingip_data)
        return None, None
    def _get_requested_state_for_external_dns_service_update(self, context,
                                                             floatingip_data):
        # On update, DNS state only comes from the associated internal port.
        if floatingip_data['port_id']:
            return self._get_internal_port_dns_data(context, floatingip_data)
        return None, None
    def _add_ips_to_external_dns_service(self, context, dns_domain, dns_name,
                                         records):
        # Best-effort publish: driver errors are logged, not re-raised.
        try:
            self.dns_driver.create_record_set(context, dns_domain, dns_name,
                                              records)
        except (dns.DNSDomainNotFound, dns.DuplicateRecordSet) as e:
            LOG.exception(_LE("Error publishing floating IP data in external "
                              "DNS service. Name: '%(name)s'. Domain: "
                              "'%(domain)s'. DNS service driver message "
                              "'%(message)s'"),
                          {"name": dns_name,
                           "domain": dns_domain,
                           "message": e.msg})
| 46.083665 | 79 | 0.610876 |
from neutron_lib.api import validators
from neutron_lib import exceptions as n_exc
from oslo_config import cfg
from oslo_log import log as logging
from neutron._i18n import _, _LE
from neutron.common import utils
from neutron.db import _resource_extend as resource_extend
from neutron.extensions import dns
from neutron.extensions import l3
from neutron.objects import floatingip as fip_obj
from neutron.objects import network
from neutron.objects import ports as port_obj
from neutron.services.externaldns import driver
LOG = logging.getLogger(__name__)
class DNSActionsData(object):
    """DNS name/domain state before and after a floating IP change."""
    def __init__(self, current_dns_name=None, current_dns_domain=None,
                 previous_dns_name=None, previous_dns_domain=None):
        # State to publish after the change (None when absent).
        self.current_dns_domain = current_dns_domain
        self.current_dns_name = current_dns_name
        # State that was published before the change, for cleanup.
        self.previous_dns_domain = previous_dns_domain
        self.previous_dns_name = previous_dns_name
class DNSDbMixin(object):
    """Mixin class adding floating-IP DNS handling to the L3 DB plugin."""
    # Lazily-loaded external DNS service driver, cached at class level.
    _dns_driver = None
    @property
    def dns_driver(self):
        # Load and cache the driver named by CONF.external_dns_driver;
        # returns None when no external DNS integration is configured.
        if self._dns_driver:
            return self._dns_driver
        if not cfg.CONF.external_dns_driver:
            return
        try:
            self._dns_driver = driver.ExternalDNSService.get_instance()
            LOG.debug("External DNS driver loaded: %s",
                      cfg.CONF.external_dns_driver)
            return self._dns_driver
        except ImportError:
            LOG.exception(_LE("ImportError exception occurred while loading "
                              "the external DNS service driver"))
            raise dns.ExternalDNSDriverNotFound(
                driver=cfg.CONF.external_dns_driver)
    def _extend_floatingip_dict_dns(self, floatingip_res, floatingip_db):
        # Copy dns_name/dns_domain from the DB model into the API dict,
        # defaulting both to '' when the floating IP has no DNS record.
        floatingip_res['dns_domain'] = ''
        floatingip_res['dns_name'] = ''
        if floatingip_db.dns:
            floatingip_res['dns_domain'] = floatingip_db.dns['dns_domain']
            floatingip_res['dns_name'] = floatingip_db.dns['dns_name']
        return floatingip_res
    # Register the hook so floating IP dicts always carry the DNS fields.
    resource_extend.register_funcs(
        l3.FLOATINGIPS, ['_extend_floatingip_dict_dns'])
    def _process_dns_floatingip_create_precommit(self, context,
                                                 floatingip_data, req_data):
        # Record requested DNS state in the DB for a new floating IP and
        # return the DNSActionsData for the postcommit publish step.
        # Expects to be called within a plugin's session.
        dns_domain = req_data.get(dns.DNSDOMAIN)
        if not validators.is_attr_set(dns_domain):
            return
        if not self.dns_driver:
            return
        dns_name = req_data[dns.DNSNAME]
        self._validate_floatingip_dns(dns_name, dns_domain)
        current_dns_name, current_dns_domain = (
            self._get_requested_state_for_external_dns_service_create(
                context, floatingip_data, req_data))
        dns_actions_data = None
        if current_dns_name and current_dns_domain:
            fip_obj.FloatingIPDNS(context,
                                  floatingip_id=floatingip_data['id'],
                                  dns_name=req_data[dns.DNSNAME],
                                  dns_domain=req_data[dns.DNSDOMAIN],
                                  published_dns_name=current_dns_name,
                                  published_dns_domain=current_dns_domain).create()
            dns_actions_data = DNSActionsData(
                current_dns_name=current_dns_name,
                current_dns_domain=current_dns_domain)
        floatingip_data['dns_name'] = dns_name
        floatingip_data['dns_domain'] = dns_domain
        return dns_actions_data
    def _process_dns_floatingip_create_postcommit(self, context,
                                                  floatingip_data,
                                                  dns_actions_data):
        # Publish the new floating IP's record in the external DNS service.
        if not dns_actions_data:
            return
        self._add_ips_to_external_dns_service(
            context, dns_actions_data.current_dns_domain,
            dns_actions_data.current_dns_name,
            [floatingip_data['floating_ip_address']])
    def _process_dns_floatingip_update_precommit(self, context,
                                                 floatingip_data):
        # Reconcile stored DNS state after a floating IP association change
        # and return the DNSActionsData for the postcommit step.
        # expects to be called within a plugin's session
        if not utils.is_extension_supported(self._core_plugin,
                                            dns.Dns.get_alias()):
            return
        if not self.dns_driver:
            return
        dns_data_db = fip_obj.FloatingIPDNS.get_object(
            context, floatingip_id=floatingip_data['id'])
        if dns_data_db and dns_data_db['dns_name']:
            # dns_name and dns_domain assigned for floating ip. It doesn't
            # matter whether they are defined for internal port
            return
        current_dns_name, current_dns_domain = (
            self._get_requested_state_for_external_dns_service_update(
                context, floatingip_data))
        if dns_data_db:
            if (dns_data_db['published_dns_name'] != current_dns_name or
                    dns_data_db['published_dns_domain'] != current_dns_domain):
                dns_actions_data = DNSActionsData(
                    previous_dns_name=dns_data_db['published_dns_name'],
                    previous_dns_domain=dns_data_db['published_dns_domain'])
                if current_dns_name and current_dns_domain:
                    dns_data_db['published_dns_name'] = current_dns_name
                    dns_data_db['published_dns_domain'] = current_dns_domain
                    dns_actions_data.current_dns_name = current_dns_name
                    dns_actions_data.current_dns_domain = current_dns_domain
                else:
                    dns_data_db.delete()
                return dns_actions_data
            else:
                return
        if current_dns_name and current_dns_domain:
            fip_obj.FloatingIPDNS(context,
                                  floatingip_id=floatingip_data['id'],
                                  dns_name='',
                                  dns_domain='',
                                  published_dns_name=current_dns_name,
                                  published_dns_domain=current_dns_domain).create()
            return DNSActionsData(current_dns_name=current_dns_name,
                                  current_dns_domain=current_dns_domain)
    def _process_dns_floatingip_update_postcommit(self, context,
                                                  floatingip_data,
                                                  dns_actions_data):
        # Remove stale records first, then publish the current ones.
        if not dns_actions_data:
            return
        if dns_actions_data.previous_dns_name:
            self._delete_floatingip_from_external_dns_service(
                context, dns_actions_data.previous_dns_domain,
                dns_actions_data.previous_dns_name,
                [floatingip_data['floating_ip_address']])
        if dns_actions_data.current_dns_name:
            self._add_ips_to_external_dns_service(
                context, dns_actions_data.current_dns_domain,
                dns_actions_data.current_dns_name,
                [floatingip_data['floating_ip_address']])
    def _process_dns_floatingip_delete(self, context, floatingip_data):
        # Remove a deleted floating IP's published records, if any.
        if not utils.is_extension_supported(self._core_plugin,
                                            dns.Dns.get_alias()):
            return
        dns_data_db = fip_obj.FloatingIPDNS.get_object(context,
            floatingip_id=floatingip_data['id'])
        if dns_data_db:
            self._delete_floatingip_from_external_dns_service(
                context, dns_data_db['published_dns_domain'],
                dns_data_db['published_dns_name'],
                [floatingip_data['floating_ip_address']])
    def _validate_floatingip_dns(self, dns_name, dns_domain):
        # dns_name and dns_domain must be supplied together or not at all.
        if dns_domain and not dns_name:
            msg = _("dns_domain cannot be specified without a dns_name")
            raise n_exc.BadRequest(resource='floatingip', msg=msg)
        if dns_name and not dns_domain:
            msg = _("dns_name cannot be specified without a dns_domain")
            raise n_exc.BadRequest(resource='floatingip', msg=msg)
    def _get_internal_port_dns_data(self, context, floatingip_data):
        # Return (dns_name, dns_domain) derived from the associated internal
        # port and its network; either element may be None when unset.
        port_dns = port_obj.PortDNS.get_object(
            context, port_id=floatingip_data['port_id'])
        if not (port_dns and port_dns['dns_name']):
            return None, None
        net_dns = network.NetworkDNSDomain.get_net_dns_from_port(
            context=context, port_id=floatingip_data['port_id'])
        if not net_dns:
            return port_dns['dns_name'], None
        return port_dns['dns_name'], net_dns['dns_domain']
    def _delete_floatingip_from_external_dns_service(self, context, dns_domain,
                                                     dns_name, records):
        # Best-effort delete: driver errors are logged, not re-raised.
        try:
            self.dns_driver.delete_record_set(context, dns_domain, dns_name,
                                              records)
        except (dns.DNSDomainNotFound, dns.DuplicateRecordSet) as e:
            LOG.exception(_LE("Error deleting Floating IP data from external "
                              "DNS service. Name: '%(name)s'. Domain: "
                              "'%(domain)s'. IP addresses '%(ips)s'. DNS "
                              "service driver message '%(message)s'"),
                          {"name": dns_name,
                           "domain": dns_domain,
                           "message": e.msg,
                           "ips": ', '.join(records)})
    def _get_requested_state_for_external_dns_service_create(self, context,
                                                             floatingip_data,
                                                             req_data):
        # Explicit request data wins; otherwise fall back to the internal
        # port's DNS data when the floating IP is associated.
        fip_dns_name = req_data[dns.DNSNAME]
        if fip_dns_name:
            return fip_dns_name, req_data[dns.DNSDOMAIN]
        if floatingip_data['port_id']:
            return self._get_internal_port_dns_data(context, floatingip_data)
        return None, None
    def _get_requested_state_for_external_dns_service_update(self, context,
                                                             floatingip_data):
        # On update, DNS state only comes from the associated internal port.
        if floatingip_data['port_id']:
            return self._get_internal_port_dns_data(context, floatingip_data)
        return None, None
    def _add_ips_to_external_dns_service(self, context, dns_domain, dns_name,
                                         records):
        # Best-effort publish: driver errors are logged, not re-raised.
        try:
            self.dns_driver.create_record_set(context, dns_domain, dns_name,
                                              records)
        except (dns.DNSDomainNotFound, dns.DuplicateRecordSet) as e:
            LOG.exception(_LE("Error publishing floating IP data in external "
                              "DNS service. Name: '%(name)s'. Domain: "
                              "'%(domain)s'. DNS service driver message "
                              "'%(message)s'"),
                          {"name": dns_name,
                           "domain": dns_domain,
                           "message": e.msg})
| true | true |
f71d2b0293d434fafa7ef64fc32c22429de5476f | 4,759 | py | Python | mars/tensor/fft/irfft.py | wjsi/mars | a69fb19edfe748d4393b90ff2c4941a76c084596 | [
"Apache-2.0"
] | 2,413 | 2018-12-06T09:37:11.000Z | 2022-03-30T15:47:39.000Z | mars/tensor/fft/irfft.py | wjsi/mars | a69fb19edfe748d4393b90ff2c4941a76c084596 | [
"Apache-2.0"
] | 1,335 | 2018-12-07T03:06:18.000Z | 2022-03-31T11:45:57.000Z | mars/tensor/fft/irfft.py | wjsi/mars | a69fb19edfe748d4393b90ff2c4941a76c084596 | [
"Apache-2.0"
] | 329 | 2018-12-07T03:12:41.000Z | 2022-03-29T21:49:57.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from ... import opcodes as OperandDef
from ..datasource import tensor as astensor
from .core import TensorFFTMixin, validate_fft, TensorRealFFT
class TensorIRFFT(TensorRealFFT, TensorFFTMixin):
    """Tensor operand implementing the inverse FFT of real input."""
    _op_type_ = OperandDef.IRFFT

    def __init__(self, n=None, axis=-1, norm=None, **kw):
        super().__init__(_n=n, _axis=axis, _norm=norm, **kw)

    @classmethod
    def _get_shape(cls, op, shape):
        # Output length along the transformed axis: the explicit n, or
        # 2*(m-1) for Hermitian input of length m along that axis.
        dims = list(shape)
        dims[op.axis] = op.n if op.n is not None else 2 * (dims[op.axis] - 1)
        return tuple(dims)
def irfft(a, n=None, axis=-1, norm=None):
    """
    Compute the inverse of the n-point DFT for real input.

    This function computes the inverse of the one-dimensional *n*-point
    discrete Fourier Transform of real input computed by `rfft`.
    In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical
    accuracy. (See Notes below for why ``len(a)`` is necessary here.)

    The input is expected to be in the form returned by `rfft`, i.e. the
    real zero-frequency term followed by the complex positive frequency terms
    in order of increasing frequency.  Since the discrete Fourier Transform of
    real input is Hermitian-symmetric, the negative frequency terms are taken
    to be the complex conjugates of the corresponding positive frequency terms.

    Parameters
    ----------
    a : array_like
        The input tensor.
    n : int, optional
        Length of the transformed axis of the output.
        For `n` output points, ``n//2+1`` input points are necessary.  If the
        input is longer than this, it is cropped.  If it is shorter than this,
        it is padded with zeros.  If `n` is not given, it is determined from
        the length of the input along the axis specified by `axis`.
    axis : int, optional
        Axis over which to compute the inverse FFT. If not given, the last
        axis is used.
    norm : {None, "ortho"}, optional
        Normalization mode (see `mt.fft`). Default is None.

    Returns
    -------
    out : Tensor
        The truncated or zero-padded input, transformed along the axis
        indicated by `axis`, or the last one if `axis` is not specified.
        The length of the transformed axis is `n`, or, if `n` is not given,
        ``2*(m-1)`` where ``m`` is the length of the transformed axis of the
        input. To get an odd number of output points, `n` must be specified.

    Raises
    ------
    IndexError
        If `axis` is larger than the last axis of `a`.

    See Also
    --------
    mt.fft : For definition of the DFT and conventions used.
    rfft : The one-dimensional FFT of real input, of which `irfft` is inverse.
    fft : The one-dimensional FFT.
    irfft2 : The inverse of the two-dimensional FFT of real input.
    irfftn : The inverse of the *n*-dimensional FFT of real input.

    Notes
    -----
    Returns the real valued `n`-point inverse discrete Fourier transform
    of `a`, where `a` contains the non-negative frequency terms of a
    Hermitian-symmetric sequence. `n` is the length of the result, not the
    input.

    If you specify an `n` such that `a` must be zero-padded or truncated, the
    extra/removed values will be added/removed at high frequencies. One can
    thus resample a series to `m` points via Fourier interpolation by:
    ``a_resamp = irfft(rfft(a), m)``.

    Examples
    --------
    >>> import mars.tensor as mt

    >>> mt.fft.ifft([1, -1j, -1, 1j]).execute()
    array([ 0.+0.j,  1.+0.j,  0.+0.j,  0.+0.j])
    >>> mt.fft.irfft([1, -1j, -1]).execute()
    array([ 0.,  1.,  0.,  0.])

    Notice how the last term in the input to the ordinary `ifft` is the
    complex conjugate of the second term, and the output has zero imaginary
    part everywhere.  When calling `irfft`, the negative frequencies are not
    specified, and the output array is purely real.
    """
    a = astensor(a)
    validate_fft(a, axis=axis, norm=norm)
    # np.float_ was removed in NumPy 2.0; np.float64 is the same dtype and
    # works on all NumPy versions the project supports.
    op = TensorIRFFT(n=n, axis=axis, norm=norm, dtype=np.dtype(np.float64))
    return op(a)
| 38.379032 | 79 | 0.664425 |
import numpy as np
from ... import opcodes as OperandDef
from ..datasource import tensor as astensor
from .core import TensorFFTMixin, validate_fft, TensorRealFFT
class TensorIRFFT(TensorRealFFT, TensorFFTMixin):
    """Tensor operand for the inverse real-input FFT (irfft)."""
    _op_type_ = OperandDef.IRFFT
    def __init__(self, n=None, axis=-1, norm=None, **kw):
        super().__init__(_n=n, _axis=axis, _norm=norm, **kw)
    @classmethod
    def _get_shape(cls, op, shape):
        # Output length along the transformed axis is n when given,
        # otherwise 2*(m-1) for input length m (Hermitian symmetry).
        new_shape = list(shape)
        if op.n is not None:
            new_shape[op.axis] = op.n
        else:
            new_shape[op.axis] = 2 * (new_shape[op.axis] - 1)
        return tuple(new_shape)
def irfft(a, n=None, axis=-1, norm=None):
    """Compute the inverse of the n-point DFT for real input.

    :param a: input tensor (as accepted by astensor)
    :param n: output length along `axis`; defaults to 2*(m-1)
    :param axis: axis over which to compute the inverse FFT
    :param norm: normalization mode, None or "ortho"
    :return: real-valued Tensor
    """
    a = astensor(a)
    validate_fft(a, axis=axis, norm=norm)
    # np.float_ was removed in NumPy 2.0; np.float64 is the same dtype.
    op = TensorIRFFT(n=n, axis=axis, norm=norm, dtype=np.dtype(np.float64))
    return op(a)
| true | true |
f71d2bdb30bfd7496e7f2d476ade9dc68afbafb0 | 15,382 | py | Python | heat/engine/rsrc_defn.py | hongbin/heat | 1a8495eaa728d36ccff4b0285befb5a02e31b3de | [
"Apache-2.0"
] | null | null | null | heat/engine/rsrc_defn.py | hongbin/heat | 1a8495eaa728d36ccff4b0285befb5a02e31b3de | [
"Apache-2.0"
] | null | null | null | heat/engine/rsrc_defn.py | hongbin/heat | 1a8495eaa728d36ccff4b0285befb5a02e31b3de | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import itertools
import operator
import six
from heat.common import exception
from heat.common.i18n import repr_wrapper
from heat.engine import function
from heat.engine import properties
__all__ = ['ResourceDefinition']
@repr_wrapper
class ResourceDefinition(object):
"""A definition of a resource, independent of any template format."""
class Diff(object):
"""A diff between two versions of the same resource definition."""
def __init__(self, old_defn, new_defn):
if not (isinstance(old_defn, ResourceDefinition) and
isinstance(new_defn, ResourceDefinition)):
raise TypeError
self.old_defn = old_defn
self.new_defn = new_defn
def properties_changed(self):
"""Return True if the resource properties have changed."""
return self.old_defn._properties != self.new_defn._properties
def metadata_changed(self):
"""Return True if the resource metadata has changed."""
return self.old_defn._metadata != self.new_defn._metadata
def update_policy_changed(self):
"""Return True if the resource update policy has changed."""
return self.old_defn._update_policy != self.new_defn._update_policy
def __bool__(self):
"""Return True if anything has changed."""
return (self.properties_changed() or
self.metadata_changed() or
self.update_policy_changed())
__nonzero__ = __bool__
DELETION_POLICIES = (
DELETE, RETAIN, SNAPSHOT,
) = (
'Delete', 'Retain', 'Snapshot',
)
    def __init__(self, name, resource_type, properties=None, metadata=None,
                 depends=None, deletion_policy=None, update_policy=None,
                 description=None, external_id=None, condition=None):
        """Initialise with the parsed definition of a resource.
        Any intrinsic functions present in any of the sections should have been
        parsed into Function objects before constructing the definition.
        :param name: The name of the resource (for use in error messages)
        :param resource_type: The resource type
        :param properties: A dictionary of supplied property values
        :param metadata: The supplied metadata
        :param depends: A list of resource names on which this resource depends
        :param deletion_policy: The deletion policy for the resource
        :param update_policy: A dictionary of supplied update policies
        :param description: A string describing the resource
        :param external_id: A uuid of an external resource
        :param condition: A condition name associated with the resource
        """
        self.name = name
        self.resource_type = resource_type
        self.description = description or ''
        self._properties = properties
        self._metadata = metadata
        self._depends = depends
        self._deletion_policy = deletion_policy
        self._update_policy = update_policy
        self._external_id = external_id
        self._condition = condition
        # Each supplied section is folded into the hash below via XOR, so
        # definitions differing in any section hash differently.
        self._hash = hash(self.resource_type)
        self._rendering = None
        assert isinstance(self.description, six.string_types)
        if properties is not None:
            assert isinstance(properties, (collections.Mapping,
                                           function.Function))
            self._hash ^= _hash_data(properties)
        if metadata is not None:
            assert isinstance(metadata, (collections.Mapping,
                                         function.Function))
            self._hash ^= _hash_data(metadata)
        if depends is not None:
            assert isinstance(depends, (collections.Sequence,
                                        function.Function))
            # A bare string would iterate as characters, not resource names.
            assert not isinstance(depends, six.string_types)
            self._hash ^= _hash_data(depends)
        if deletion_policy is not None:
            assert deletion_policy in self.DELETION_POLICIES
            self._hash ^= _hash_data(deletion_policy)
        if update_policy is not None:
            assert isinstance(update_policy, (collections.Mapping,
                                              function.Function))
            self._hash ^= _hash_data(update_policy)
        if external_id is not None:
            assert isinstance(external_id, (six.string_types,
                                            function.Function))
            self._hash ^= _hash_data(external_id)
            # An externally-managed resource is always retained; this
            # overrides any deletion_policy that was passed in.
            self._deletion_policy = self.RETAIN
        if condition is not None:
            assert isinstance(condition, (six.string_types, bool,
                                          function.Function))
            self._hash ^= _hash_data(condition)
        # Start with no property translation rules; callers may override
        # via set_translation_rules().
        self.set_translation_rules()
    def freeze(self, **overrides):
        """Return a frozen resource definition, with all functions resolved.
        This return a new resource definition with fixed data (containing no
        intrinsic functions). Named arguments passed to this method override
        the values passed as arguments to the constructor.
        """
        # Already-frozen definitions are returned as-is unless overridden.
        if getattr(self, '_frozen', False) and not overrides:
            return self
        def arg_item(attr_name):
            # Constructor kwarg name: attribute name minus its underscore.
            name = attr_name.lstrip('_')
            if name in overrides:
                value = overrides[name]
                # A falsy override of an attribute that was never set is
                # normalised back to None (keeps "absent" sections absent).
                if not value and getattr(self, attr_name) is None:
                    value = None
            else:
                value = function.resolve(getattr(self, attr_name))
            return name, value
        args = ('name', 'resource_type', '_properties', '_metadata',
                '_depends', '_deletion_policy', '_update_policy',
                'description', '_external_id', '_condition')
        defn = type(self)(**dict(arg_item(a) for a in args))
        defn._frozen = True
        return defn
    def reparse(self, stack, template):
        """Reinterpret the resource definition in the context of a new stack.
        This returns a new resource definition, with all of the functions
        parsed in the context of the specified stack and template.
        """
        assert not getattr(self, '_frozen', False
                           ), "Cannot re-parse a frozen definition"
        def reparse_snippet(snippet):
            # Deep-copy first so this definition's snippets stay untouched.
            return template.parse(stack, copy.deepcopy(snippet))
        # Note: the condition is carried over unparsed.
        return type(self)(
            self.name, self.resource_type,
            properties=reparse_snippet(self._properties),
            metadata=reparse_snippet(self._metadata),
            depends=reparse_snippet(self._depends),
            deletion_policy=reparse_snippet(self._deletion_policy),
            update_policy=reparse_snippet(self._update_policy),
            external_id=reparse_snippet(self._external_id),
            condition=self._condition)
def dep_attrs(self, resource_name):
"""Iterate over attributes of a given resource that this references.
Return an iterator over dependent attributes for specified
resource_name in resources' properties and metadata fields.
"""
return itertools.chain(function.dep_attrs(self._properties,
resource_name),
function.dep_attrs(self._metadata,
resource_name))
    def dependencies(self, stack):
        """Return the Resource objects in given stack on which this depends."""
        def path(section):
            # Dotted path used for error reporting in function.dependencies.
            return '.'.join([self.name, section])
        def get_resource(res_name):
            if res_name not in stack:
                if res_name in stack.t.get(stack.t.RESOURCES):
                    # The resource is conditionally defined, allow dependencies
                    # on it
                    return
                raise exception.InvalidTemplateReference(resource=res_name,
                                                         key=self.name)
            return stack[res_name]
        def strict_func_deps(data, datapath):
            # Keep only functions that impose a strict ordering dependency.
            return six.moves.filter(
                lambda r: getattr(r, 'strict_dependency', True),
                function.dependencies(data, datapath))
        explicit_depends = [] if self._depends is None else self._depends
        prop_deps = strict_func_deps(self._properties, path('Properties'))
        metadata_deps = strict_func_deps(self._metadata, path('Metadata'))
        # (ricolin) External resource should not depend on any other resources.
        # This operation is not allowed for now.
        if self.external_id():
            if explicit_depends:
                raise exception.InvalidExternalResourceDependency(
                    external_id=self.external_id(),
                    resource_type=self.resource_type
                )
            # External resources contribute no dependencies at all.
            return itertools.chain()
        # filter(None, ...) drops conditionally-defined resources that
        # get_resource resolved to None above.
        return itertools.chain(
            filter(None, (get_resource(dep) for dep in explicit_depends)),
            prop_deps, metadata_deps)
def set_translation_rules(self, rules=None, client_resolve=True):
"""Helper method to update properties with translation rules."""
self._rules = rules or []
self._client_resolve = client_resolve
def properties(self, schema, context=None):
"""Return a Properties object representing the resource properties.
The Properties object is constructed from the given schema, and may
require a context to validate constraints.
"""
props = properties.Properties(schema, self._properties or {},
function.resolve, context=context,
section='Properties')
props.update_translation(self._rules, self._client_resolve)
return props
def deletion_policy(self):
"""Return the deletion policy for the resource.
The policy will be one of those listed in DELETION_POLICIES.
"""
return function.resolve(self._deletion_policy) or self.DELETE
def update_policy(self, schema, context=None):
"""Return a Properties object representing the resource update policy.
The Properties object is constructed from the given schema, and may
require a context to validate constraints.
"""
props = properties.Properties(schema, self._update_policy or {},
function.resolve, context=context,
section='UpdatePolicy')
props.update_translation(self._rules, self._client_resolve)
return props
def metadata(self):
"""Return the resource metadata."""
return function.resolve(self._metadata) or {}
def external_id(self):
"""Return the external resource id."""
return function.resolve(self._external_id)
def condition(self):
"""Return the name of the conditional inclusion rule, if any.
Returns None if the resource is included unconditionally.
"""
return function.resolve(self._condition)
def render_hot(self):
"""Return a HOT snippet for the resource definition."""
if self._rendering is None:
attrs = {
'type': 'resource_type',
'properties': '_properties',
'metadata': '_metadata',
'deletion_policy': '_deletion_policy',
'update_policy': '_update_policy',
'depends_on': '_depends',
'external_id': '_external_id',
'condition': '_condition'
}
def rawattrs():
"""Get an attribute with function objects stripped out."""
for key, attr in attrs.items():
value = getattr(self, attr)
if value is not None:
yield key, copy.deepcopy(value)
self._rendering = dict(rawattrs())
return self._rendering
def __sub__(self, previous):
"""Calculate the difference between this definition and a previous one.
Return a Diff object that can be used to establish differences between
this definition and a previous definition of the same resource.
"""
if not isinstance(previous, ResourceDefinition):
return NotImplemented
return self.Diff(previous, self)
def __eq__(self, other):
"""Compare this resource definition for equality with another.
Two resource definitions are considered to be equal if they can be
generated from the same template snippet. The name of the resource is
ignored, as are the actual values that any included functions resolve
to.
"""
if not isinstance(other, ResourceDefinition):
return NotImplemented
return self.render_hot() == other.render_hot()
def __ne__(self, other):
"""Compare this resource definition for inequality with another.
See __eq__() for the definition of equality.
"""
equal = self.__eq__(other)
if equal is NotImplemented:
return NotImplemented
return not equal
def __hash__(self):
"""Return a hash value for this resource definition.
Resource definitions that compare equal will have the same hash. (In
particular, the resource name is *not* taken into account.) See
the __eq__() method for the definition of equality.
"""
return self._hash
def __repr__(self):
"""Return a string representation of the resource definition."""
def arg_repr(arg_name):
return '='.join([arg_name, repr(getattr(self, '_%s' % arg_name))])
args = ('properties', 'metadata', 'depends',
'deletion_policy', 'update_policy', 'condition')
data = {
'classname': type(self).__name__,
'name': repr(self.name),
'type': repr(self.resource_type),
'args': ', '.join(arg_repr(n) for n in args)
}
return '%(classname)s(%(name)s, %(type)s, %(args)s)' % data
def _hash_data(data):
"""Return a stable hash value for an arbitrary parsed-JSON data snippet."""
if isinstance(data, function.Function):
data = copy.deepcopy(data)
if not isinstance(data, six.string_types):
if isinstance(data, collections.Sequence):
return hash(tuple(_hash_data(d) for d in data))
if isinstance(data, collections.Mapping):
item_hashes = (hash(k) ^ _hash_data(v) for k, v in data.items())
return six.moves.reduce(operator.xor, item_hashes, 0)
return hash(data)
| 39.040609 | 79 | 0.614484 |
import collections
import copy
import itertools
import operator
import six
from heat.common import exception
from heat.common.i18n import repr_wrapper
from heat.engine import function
from heat.engine import properties
__all__ = ['ResourceDefinition']
@repr_wrapper
class ResourceDefinition(object):
class Diff(object):
def __init__(self, old_defn, new_defn):
if not (isinstance(old_defn, ResourceDefinition) and
isinstance(new_defn, ResourceDefinition)):
raise TypeError
self.old_defn = old_defn
self.new_defn = new_defn
def properties_changed(self):
return self.old_defn._properties != self.new_defn._properties
def metadata_changed(self):
return self.old_defn._metadata != self.new_defn._metadata
def update_policy_changed(self):
return self.old_defn._update_policy != self.new_defn._update_policy
def __bool__(self):
return (self.properties_changed() or
self.metadata_changed() or
self.update_policy_changed())
__nonzero__ = __bool__
DELETION_POLICIES = (
DELETE, RETAIN, SNAPSHOT,
) = (
'Delete', 'Retain', 'Snapshot',
)
def __init__(self, name, resource_type, properties=None, metadata=None,
depends=None, deletion_policy=None, update_policy=None,
description=None, external_id=None, condition=None):
self.name = name
self.resource_type = resource_type
self.description = description or ''
self._properties = properties
self._metadata = metadata
self._depends = depends
self._deletion_policy = deletion_policy
self._update_policy = update_policy
self._external_id = external_id
self._condition = condition
self._hash = hash(self.resource_type)
self._rendering = None
assert isinstance(self.description, six.string_types)
if properties is not None:
assert isinstance(properties, (collections.Mapping,
function.Function))
self._hash ^= _hash_data(properties)
if metadata is not None:
assert isinstance(metadata, (collections.Mapping,
function.Function))
self._hash ^= _hash_data(metadata)
if depends is not None:
assert isinstance(depends, (collections.Sequence,
function.Function))
assert not isinstance(depends, six.string_types)
self._hash ^= _hash_data(depends)
if deletion_policy is not None:
assert deletion_policy in self.DELETION_POLICIES
self._hash ^= _hash_data(deletion_policy)
if update_policy is not None:
assert isinstance(update_policy, (collections.Mapping,
function.Function))
self._hash ^= _hash_data(update_policy)
if external_id is not None:
assert isinstance(external_id, (six.string_types,
function.Function))
self._hash ^= _hash_data(external_id)
self._deletion_policy = self.RETAIN
if condition is not None:
assert isinstance(condition, (six.string_types, bool,
function.Function))
self._hash ^= _hash_data(condition)
self.set_translation_rules()
def freeze(self, **overrides):
if getattr(self, '_frozen', False) and not overrides:
return self
def arg_item(attr_name):
name = attr_name.lstrip('_')
if name in overrides:
value = overrides[name]
if not value and getattr(self, attr_name) is None:
value = None
else:
value = function.resolve(getattr(self, attr_name))
return name, value
args = ('name', 'resource_type', '_properties', '_metadata',
'_depends', '_deletion_policy', '_update_policy',
'description', '_external_id', '_condition')
defn = type(self)(**dict(arg_item(a) for a in args))
defn._frozen = True
return defn
def reparse(self, stack, template):
assert not getattr(self, '_frozen', False
), "Cannot re-parse a frozen definition"
def reparse_snippet(snippet):
return template.parse(stack, copy.deepcopy(snippet))
return type(self)(
self.name, self.resource_type,
properties=reparse_snippet(self._properties),
metadata=reparse_snippet(self._metadata),
depends=reparse_snippet(self._depends),
deletion_policy=reparse_snippet(self._deletion_policy),
update_policy=reparse_snippet(self._update_policy),
external_id=reparse_snippet(self._external_id),
condition=self._condition)
def dep_attrs(self, resource_name):
return itertools.chain(function.dep_attrs(self._properties,
resource_name),
function.dep_attrs(self._metadata,
resource_name))
def dependencies(self, stack):
def path(section):
return '.'.join([self.name, section])
def get_resource(res_name):
if res_name not in stack:
if res_name in stack.t.get(stack.t.RESOURCES):
return
raise exception.InvalidTemplateReference(resource=res_name,
key=self.name)
return stack[res_name]
def strict_func_deps(data, datapath):
return six.moves.filter(
lambda r: getattr(r, 'strict_dependency', True),
function.dependencies(data, datapath))
explicit_depends = [] if self._depends is None else self._depends
prop_deps = strict_func_deps(self._properties, path('Properties'))
metadata_deps = strict_func_deps(self._metadata, path('Metadata'))
if self.external_id():
if explicit_depends:
raise exception.InvalidExternalResourceDependency(
external_id=self.external_id(),
resource_type=self.resource_type
)
return itertools.chain()
return itertools.chain(
filter(None, (get_resource(dep) for dep in explicit_depends)),
prop_deps, metadata_deps)
def set_translation_rules(self, rules=None, client_resolve=True):
self._rules = rules or []
self._client_resolve = client_resolve
def properties(self, schema, context=None):
props = properties.Properties(schema, self._properties or {},
function.resolve, context=context,
section='Properties')
props.update_translation(self._rules, self._client_resolve)
return props
def deletion_policy(self):
return function.resolve(self._deletion_policy) or self.DELETE
def update_policy(self, schema, context=None):
props = properties.Properties(schema, self._update_policy or {},
function.resolve, context=context,
section='UpdatePolicy')
props.update_translation(self._rules, self._client_resolve)
return props
def metadata(self):
return function.resolve(self._metadata) or {}
def external_id(self):
return function.resolve(self._external_id)
def condition(self):
return function.resolve(self._condition)
def render_hot(self):
if self._rendering is None:
attrs = {
'type': 'resource_type',
'properties': '_properties',
'metadata': '_metadata',
'deletion_policy': '_deletion_policy',
'update_policy': '_update_policy',
'depends_on': '_depends',
'external_id': '_external_id',
'condition': '_condition'
}
def rawattrs():
for key, attr in attrs.items():
value = getattr(self, attr)
if value is not None:
yield key, copy.deepcopy(value)
self._rendering = dict(rawattrs())
return self._rendering
def __sub__(self, previous):
if not isinstance(previous, ResourceDefinition):
return NotImplemented
return self.Diff(previous, self)
def __eq__(self, other):
if not isinstance(other, ResourceDefinition):
return NotImplemented
return self.render_hot() == other.render_hot()
def __ne__(self, other):
equal = self.__eq__(other)
if equal is NotImplemented:
return NotImplemented
return not equal
def __hash__(self):
return self._hash
def __repr__(self):
def arg_repr(arg_name):
return '='.join([arg_name, repr(getattr(self, '_%s' % arg_name))])
args = ('properties', 'metadata', 'depends',
'deletion_policy', 'update_policy', 'condition')
data = {
'classname': type(self).__name__,
'name': repr(self.name),
'type': repr(self.resource_type),
'args': ', '.join(arg_repr(n) for n in args)
}
return '%(classname)s(%(name)s, %(type)s, %(args)s)' % data
def _hash_data(data):
if isinstance(data, function.Function):
data = copy.deepcopy(data)
if not isinstance(data, six.string_types):
if isinstance(data, collections.Sequence):
return hash(tuple(_hash_data(d) for d in data))
if isinstance(data, collections.Mapping):
item_hashes = (hash(k) ^ _hash_data(v) for k, v in data.items())
return six.moves.reduce(operator.xor, item_hashes, 0)
return hash(data)
| true | true |
f71d2e5f7da8bfc1c7ac7dd7bd610f39b702b7fb | 717 | py | Python | examples/resize.py | penguinflys/imgviz | 3deadced1fcce8ca51716c705d07a058b1839514 | [
"MIT"
] | 171 | 2018-12-28T23:40:01.000Z | 2022-03-29T14:55:27.000Z | examples/resize.py | penguinflys/imgviz | 3deadced1fcce8ca51716c705d07a058b1839514 | [
"MIT"
] | 16 | 2018-12-29T16:21:15.000Z | 2022-03-09T15:36:06.000Z | examples/resize.py | penguinflys/imgviz | 3deadced1fcce8ca51716c705d07a058b1839514 | [
"MIT"
] | 23 | 2018-12-29T13:11:18.000Z | 2022-02-06T15:18:42.000Z | #!/usr/bin/env python
import matplotlib.pyplot as plt
import imgviz
def resize():
data = imgviz.data.arc2017()
rgb = data["rgb"]
H, W = rgb.shape[:2]
rgb_resized = imgviz.resize(rgb, height=0.1)
# -------------------------------------------------------------------------
plt.figure(dpi=200)
plt.subplot(121)
plt.title("rgb:\n{}".format(rgb.shape))
plt.imshow(rgb)
plt.axis("off")
plt.subplot(122)
plt.title("rgb_resized:\n{}".format(rgb_resized.shape))
plt.imshow(rgb_resized)
plt.axis("off")
img = imgviz.io.pyplot_to_numpy()
plt.close()
return img
if __name__ == "__main__":
from base import run_example
run_example(resize)
| 17.925 | 79 | 0.562064 |
import matplotlib.pyplot as plt
import imgviz
def resize():
data = imgviz.data.arc2017()
rgb = data["rgb"]
H, W = rgb.shape[:2]
rgb_resized = imgviz.resize(rgb, height=0.1)
plt.figure(dpi=200)
plt.subplot(121)
plt.title("rgb:\n{}".format(rgb.shape))
plt.imshow(rgb)
plt.axis("off")
plt.subplot(122)
plt.title("rgb_resized:\n{}".format(rgb_resized.shape))
plt.imshow(rgb_resized)
plt.axis("off")
img = imgviz.io.pyplot_to_numpy()
plt.close()
return img
if __name__ == "__main__":
from base import run_example
run_example(resize)
| true | true |
f71d2ecf99d8f24a2d9f3cb1384127d27bf34e53 | 9,072 | py | Python | src/neuralnetsim/simulation.py | Nathaniel-Rodriguez/neuralnetsim | c353af92fb3f44539370220963b07bdfd9822149 | [
"MIT"
] | null | null | null | src/neuralnetsim/simulation.py | Nathaniel-Rodriguez/neuralnetsim | c353af92fb3f44539370220963b07bdfd9822149 | [
"MIT"
] | null | null | null | src/neuralnetsim/simulation.py | Nathaniel-Rodriguez/neuralnetsim | c353af92fb3f44539370220963b07bdfd9822149 | [
"MIT"
] | null | null | null | __all__ = ["simulate_model",
"simulate_grid",
"simulate_orig"]
import neuralnetsim
import networkx as nx
import numpy as np
from distributed import Client
from pathlib import Path
from typing import Type
from typing import Dict
from typing import Any
from typing import List
from typing import Union
def simulation_worker(
graph: nx.DiGraph,
rng: np.random.RandomState,
x0: np.ndarray,
parameter_path: Path,
circuit_type: Union[Type[neuralnetsim.DistributionCircuit],
Type[neuralnetsim.NeuralCircuit]],
duration: float,
kernel_parameters: Dict
) -> Dict[int, np.ndarray]:
"""
:param x0:
:param parameter_path:
:param circuit_type:
:param graph:
:param rng:
:param duration:
:param kernel_parameters:
:return:
"""
circuit_parameters = neuralnetsim.load(parameter_path)
circuit_parameters.network = graph
circuit_parameters.from_optimizer(x0)
with neuralnetsim.CircuitManager(circuit_type, kernel_parameters,
circuit_parameters, rng) as circuit:
circuit.run(duration)
return circuit.get_spike_trains()
def simulate_model(
x0,
parameter_path: Path,
fitted_graph_path: Path,
name: str,
client: Client,
duration: float,
seed: int,
circuit_type: Type,
save_path: Path,
kernel_parameters: Dict[str, Any] = None,
):
"""
Data in list is matched to the order of the graphs in the fitted graph
result file.
:param x0:
:param parameter_path:
:param fitted_graph_path:
:param name:
:param client:
:param duration:
:param seed:
:param circuit_type:
:param save_path:
:param kernel_parameters:
:return:
"""
if kernel_parameters is None:
kernel_parameters = {}
fitted_graph_results = neuralnetsim.load(fitted_graph_path)
rng = np.random.RandomState(seed)
sims = client.map(
simulation_worker,
[graph for graph in fitted_graph_results['graphs']],
[np.random.RandomState(rng.randint(1, 2**31))
for _ in range(len(fitted_graph_results['graphs']))],
pure=False,
x0=x0,
parameter_path=parameter_path,
circuit_type=circuit_type,
duration=duration,
kernel_parameters=kernel_parameters
)
data = client.gather(sims)
neuralnetsim.save(
{
'spike_data': data,
'seed': seed,
'name': name,
'duration': duration,
'kernel_parameters': kernel_parameters
},
save_path
)
def grid_worker(
graph: nx.DiGraph,
rng: np.random.RandomState,
par: float,
x0: np.ndarray,
par_key: str,
parameter_path: Path,
circuit_type: Union[Type[neuralnetsim.DistributionCircuit],
Type[neuralnetsim.NeuralCircuit]],
duration: float,
kernel_parameters: Dict
) -> Dict[int, np.ndarray]:
"""
:param graph:
:param rng:
:param par:
:param x0:
:param par_key:
:param parameter_path:
:param circuit_type:
:param duration:
:param kernel_parameters:
:return:
"""
kernel_parameters.update({'grng_seed': rng.randint(1, 2e5),
**kernel_parameters})
circuit_parameters = neuralnetsim.load(parameter_path)
circuit_parameters.network = graph
circuit_parameters.from_optimizer(x0)
circuit_parameters.set_par(par_key, par)
with neuralnetsim.CircuitManager(circuit_type, kernel_parameters,
circuit_parameters, rng) as circuit:
circuit.run(duration)
# if not circuit.run(duration,
# memory_guard={
# 'duration': 1000.0,
# 'max_spikes': 8000 # ~10 spikes/ms
# }):
# return {node: np.ndarray([])
# for node in circuit_parameters.network.nodes()}
return circuit.get_spike_trains()
def simulate_grid(
x0,
par_range: Union[List[float], np.ndarray],
par_key: str,
parameter_path: Path,
fitted_graph_path: Path,
name: str,
client: Client,
duration: float,
seed: int,
circuit_type: Type,
save_path: Path,
kernel_parameters: Dict[str, Any] = None,
):
"""
:param x0:
:param par_range:
:param par_key:
:param parameter_path:
:param fitted_graph_path:
:param name:
:param client:
:param duration:
:param seed:
:param circuit_type:
:param save_path:
:param kernel_parameters:
:return:
"""
if kernel_parameters is None:
kernel_parameters = {}
fitted_graph_results = neuralnetsim.load(fitted_graph_path)
rng = np.random.RandomState(seed)
num_graphs = range(len(fitted_graph_results['graphs']))
sims = client.map(
grid_worker,
[graph
for _ in par_range
for graph in fitted_graph_results['graphs']],
[np.random.RandomState(rng.randint(1, 2**31))
for _ in par_range
for _ in num_graphs],
[par for par in par_range
for _ in num_graphs],
pure=False,
x0=x0,
par_key=par_key,
parameter_path=parameter_path,
circuit_type=circuit_type,
duration=duration,
kernel_parameters=kernel_parameters
)
data = client.gather(sims)
neuralnetsim.save(
{
'spike_data': data,
'original_graph': fitted_graph_results['original'],
'graphs': [graph for _ in par_range
for graph in fitted_graph_results['graphs']],
'target_modularities':
[mu for _ in par_range
for mu in fitted_graph_results['target_modularities']],
'grid_par': [par for par in par_range for _ in num_graphs],
'par_key': par_key,
'seed': seed,
'name': name,
'duration': duration,
'kernel_parameters': kernel_parameters
},
save_path
)
def orig_worker(
rng: np.random.RandomState,
par: float,
graph: nx.DiGraph,
x0: np.ndarray,
par_key: str,
parameter_path: Path,
circuit_type: Union[Type[neuralnetsim.DistributionCircuit],
Type[neuralnetsim.NeuralCircuit]],
duration: float,
kernel_parameters: Dict
):
kernel_parameters.update({'grng_seed': rng.randint(1, 2e5),
**kernel_parameters})
circuit_parameters = neuralnetsim.load(parameter_path)
circuit_parameters.network = graph
circuit_parameters.from_optimizer(x0)
circuit_parameters.set_par(par_key, par)
with neuralnetsim.CircuitManager(circuit_type, kernel_parameters,
circuit_parameters, rng) as circuit:
if not circuit.run(duration,
memory_guard={
'duration': 1000.0,
'max_spikes': 8000 # ~10 spikes/ms
}):
return {node: np.ndarray([])
for node in circuit_parameters.network.nodes()}
return circuit.get_spike_trains()
def simulate_orig(
x0,
par_range: Union[List[float], np.ndarray],
par_key: str,
parameter_path: Path,
orig_graph_path: Path,
n_trials: int,
client: Client,
duration: float,
seed: int,
circuit_type: Type,
save_path: Path,
kernel_parameters: Dict[str, Any] = None,
):
"""
:param x0:
:param par_range:
:param par_key:
:param parameter_path:
:param fitted_graph_path:
:param name:
:param client:
:param duration:
:param seed:
:param circuit_type:
:param save_path:
:param kernel_parameters:
:return:
"""
if kernel_parameters is None:
kernel_parameters = {}
graph = neuralnetsim.load(orig_graph_path)
rng = np.random.RandomState(seed)
sims = client.map(
orig_worker,
[np.random.RandomState(rng.randint(1, 2**31))
for _ in par_range
for _ in range(n_trials)],
[par for par in par_range
for _ in range(n_trials)],
pure=False,
x0=x0,
graph=graph,
par_key=par_key,
parameter_path=parameter_path,
circuit_type=circuit_type,
duration=duration,
kernel_parameters=kernel_parameters
)
data = client.gather(sims)
neuralnetsim.save(
{
'spike_data': data,
'control_var': [par for par in par_range for _ in range(n_trials)],
'control_key': par_key,
'seed': seed,
'duration': duration
},
save_path
)
| 28.8 | 79 | 0.584215 | __all__ = ["simulate_model",
"simulate_grid",
"simulate_orig"]
import neuralnetsim
import networkx as nx
import numpy as np
from distributed import Client
from pathlib import Path
from typing import Type
from typing import Dict
from typing import Any
from typing import List
from typing import Union
def simulation_worker(
graph: nx.DiGraph,
rng: np.random.RandomState,
x0: np.ndarray,
parameter_path: Path,
circuit_type: Union[Type[neuralnetsim.DistributionCircuit],
Type[neuralnetsim.NeuralCircuit]],
duration: float,
kernel_parameters: Dict
) -> Dict[int, np.ndarray]:
circuit_parameters = neuralnetsim.load(parameter_path)
circuit_parameters.network = graph
circuit_parameters.from_optimizer(x0)
with neuralnetsim.CircuitManager(circuit_type, kernel_parameters,
circuit_parameters, rng) as circuit:
circuit.run(duration)
return circuit.get_spike_trains()
def simulate_model(
x0,
parameter_path: Path,
fitted_graph_path: Path,
name: str,
client: Client,
duration: float,
seed: int,
circuit_type: Type,
save_path: Path,
kernel_parameters: Dict[str, Any] = None,
):
if kernel_parameters is None:
kernel_parameters = {}
fitted_graph_results = neuralnetsim.load(fitted_graph_path)
rng = np.random.RandomState(seed)
sims = client.map(
simulation_worker,
[graph for graph in fitted_graph_results['graphs']],
[np.random.RandomState(rng.randint(1, 2**31))
for _ in range(len(fitted_graph_results['graphs']))],
pure=False,
x0=x0,
parameter_path=parameter_path,
circuit_type=circuit_type,
duration=duration,
kernel_parameters=kernel_parameters
)
data = client.gather(sims)
neuralnetsim.save(
{
'spike_data': data,
'seed': seed,
'name': name,
'duration': duration,
'kernel_parameters': kernel_parameters
},
save_path
)
def grid_worker(
graph: nx.DiGraph,
rng: np.random.RandomState,
par: float,
x0: np.ndarray,
par_key: str,
parameter_path: Path,
circuit_type: Union[Type[neuralnetsim.DistributionCircuit],
Type[neuralnetsim.NeuralCircuit]],
duration: float,
kernel_parameters: Dict
) -> Dict[int, np.ndarray]:
kernel_parameters.update({'grng_seed': rng.randint(1, 2e5),
**kernel_parameters})
circuit_parameters = neuralnetsim.load(parameter_path)
circuit_parameters.network = graph
circuit_parameters.from_optimizer(x0)
circuit_parameters.set_par(par_key, par)
with neuralnetsim.CircuitManager(circuit_type, kernel_parameters,
circuit_parameters, rng) as circuit:
circuit.run(duration)
return circuit.get_spike_trains()
def simulate_grid(
x0,
par_range: Union[List[float], np.ndarray],
par_key: str,
parameter_path: Path,
fitted_graph_path: Path,
name: str,
client: Client,
duration: float,
seed: int,
circuit_type: Type,
save_path: Path,
kernel_parameters: Dict[str, Any] = None,
):
if kernel_parameters is None:
kernel_parameters = {}
fitted_graph_results = neuralnetsim.load(fitted_graph_path)
rng = np.random.RandomState(seed)
num_graphs = range(len(fitted_graph_results['graphs']))
sims = client.map(
grid_worker,
[graph
for _ in par_range
for graph in fitted_graph_results['graphs']],
[np.random.RandomState(rng.randint(1, 2**31))
for _ in par_range
for _ in num_graphs],
[par for par in par_range
for _ in num_graphs],
pure=False,
x0=x0,
par_key=par_key,
parameter_path=parameter_path,
circuit_type=circuit_type,
duration=duration,
kernel_parameters=kernel_parameters
)
data = client.gather(sims)
neuralnetsim.save(
{
'spike_data': data,
'original_graph': fitted_graph_results['original'],
'graphs': [graph for _ in par_range
for graph in fitted_graph_results['graphs']],
'target_modularities':
[mu for _ in par_range
for mu in fitted_graph_results['target_modularities']],
'grid_par': [par for par in par_range for _ in num_graphs],
'par_key': par_key,
'seed': seed,
'name': name,
'duration': duration,
'kernel_parameters': kernel_parameters
},
save_path
)
def orig_worker(
rng: np.random.RandomState,
par: float,
graph: nx.DiGraph,
x0: np.ndarray,
par_key: str,
parameter_path: Path,
circuit_type: Union[Type[neuralnetsim.DistributionCircuit],
Type[neuralnetsim.NeuralCircuit]],
duration: float,
kernel_parameters: Dict
):
kernel_parameters.update({'grng_seed': rng.randint(1, 2e5),
**kernel_parameters})
circuit_parameters = neuralnetsim.load(parameter_path)
circuit_parameters.network = graph
circuit_parameters.from_optimizer(x0)
circuit_parameters.set_par(par_key, par)
with neuralnetsim.CircuitManager(circuit_type, kernel_parameters,
circuit_parameters, rng) as circuit:
if not circuit.run(duration,
memory_guard={
'duration': 1000.0,
'max_spikes': 8000
}):
return {node: np.ndarray([])
for node in circuit_parameters.network.nodes()}
return circuit.get_spike_trains()
def simulate_orig(
x0,
par_range: Union[List[float], np.ndarray],
par_key: str,
parameter_path: Path,
orig_graph_path: Path,
n_trials: int,
client: Client,
duration: float,
seed: int,
circuit_type: Type,
save_path: Path,
kernel_parameters: Dict[str, Any] = None,
):
if kernel_parameters is None:
kernel_parameters = {}
graph = neuralnetsim.load(orig_graph_path)
rng = np.random.RandomState(seed)
sims = client.map(
orig_worker,
[np.random.RandomState(rng.randint(1, 2**31))
for _ in par_range
for _ in range(n_trials)],
[par for par in par_range
for _ in range(n_trials)],
pure=False,
x0=x0,
graph=graph,
par_key=par_key,
parameter_path=parameter_path,
circuit_type=circuit_type,
duration=duration,
kernel_parameters=kernel_parameters
)
data = client.gather(sims)
neuralnetsim.save(
{
'spike_data': data,
'control_var': [par for par in par_range for _ in range(n_trials)],
'control_key': par_key,
'seed': seed,
'duration': duration
},
save_path
)
| true | true |
f71d2f0b2b074ee07a851d169f53038091cd1476 | 874 | py | Python | npapibench/pyimpl.py | phetdam/numpy-api-bench | f80b104c464111a5678d7a657128cbff0497830c | [
"MIT"
] | 1 | 2020-09-01T07:35:29.000Z | 2020-09-01T07:35:29.000Z | npapibench/pyimpl.py | phetdam/numpy-api-bench | f80b104c464111a5678d7a657128cbff0497830c | [
"MIT"
] | null | null | null | npapibench/pyimpl.py | phetdam/numpy-api-bench | f80b104c464111a5678d7a657128cbff0497830c | [
"MIT"
] | null | null | null | """Python implementation of zero mean unit variance scaling function.
.. codeauthor:: Derek Huang <djh458@stern.nyu.edu>
"""
def stdscale(ar, ddof=0):
"""Center and scale numpy.ndarray to zero mean, unit variance.
Treats the array like a single flattened array and computes the mean and
standard deviation over all the elements.
Parameters
----------
ar : numpy.ndarray
Arbitrary numpy.ndarray that can be converted to NPY_DOUBLE type
ddof : int, default=0
Delta degrees of freedom, i.e. so that the divisor used in standard
deviation computation is ``n_obs - ddof``.
Returns
-------
numpy.ndarray
A new numpy.ndarray centered and scaled with zero mean, unit variance,
with type NPY_DOUBLE, flags NPY_ARRAY_CARRAY, same shape as ar.
"""
return (ar - ar.mean()) / ar.std(ddof=ddof) | 32.37037 | 78 | 0.672769 |
def stdscale(ar, ddof=0):
return (ar - ar.mean()) / ar.std(ddof=ddof) | true | true |
f71d2fed645a3de12724dfe4fb7acd0036387d89 | 1,599 | py | Python | Engine/Render/rpplugins/skin_shading/plugin.py | kergalym/Korlan | cc3141969d21898842a008b49f8b42492d2cf6e4 | [
"MIT"
] | 3 | 2019-09-17T15:26:42.000Z | 2021-12-09T00:42:32.000Z | Engine/Render/rpplugins/skin_shading/plugin.py | kergalym/Korlan | cc3141969d21898842a008b49f8b42492d2cf6e4 | [
"MIT"
] | null | null | null | Engine/Render/rpplugins/skin_shading/plugin.py | kergalym/Korlan | cc3141969d21898842a008b49f8b42492d2cf6e4 | [
"MIT"
] | 1 | 2019-09-17T13:21:31.000Z | 2019-09-17T13:21:31.000Z | """
RenderPipeline
Copyright (c) 2014-2016 tobspr <tobias.springer1@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from Engine.Render.rpcore.pluginbase.base_plugin import BasePlugin
from .skin_shading_stage import SkinShadingStage
class Plugin(BasePlugin):
name = "Skin Shading"
author = "tobspr <tobias.springer1@gmail.com>"
description = ("This plugin adds support for Seperable Screen Space Subsurface "
"Scattering (SSSSS). This improves skin rendering.")
version = "1.0"
def on_stage_setup(self):
self.stage = self.create_stage(SkinShadingStage)
| 38.071429 | 84 | 0.773609 |
from Engine.Render.rpcore.pluginbase.base_plugin import BasePlugin
from .skin_shading_stage import SkinShadingStage
class Plugin(BasePlugin):
name = "Skin Shading"
author = "tobspr <tobias.springer1@gmail.com>"
description = ("This plugin adds support for Seperable Screen Space Subsurface "
"Scattering (SSSSS). This improves skin rendering.")
version = "1.0"
def on_stage_setup(self):
self.stage = self.create_stage(SkinShadingStage)
| true | true |
f71d3001f6f3b426e0db1fb36733beaceff3b849 | 9,171 | py | Python | python/paddle/nn/functional/input.py | ZibinGuo/Paddle | 6e0892312de5e4ba76d980ff0e4322ac55ca0d07 | [
"Apache-2.0"
] | 1 | 2022-02-22T01:08:00.000Z | 2022-02-22T01:08:00.000Z | python/paddle/nn/functional/input.py | ZibinGuo/Paddle | 6e0892312de5e4ba76d980ff0e4322ac55ca0d07 | [
"Apache-2.0"
] | null | null | null | python/paddle/nn/functional/input.py | ZibinGuo/Paddle | 6e0892312de5e4ba76d980ff0e4322ac55ca0d07 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import warnings
from ...fluid.framework import in_dygraph_mode
from ...static import Variable
from ...fluid.layer_helper import LayerHelper
from ...fluid.layers import core
from ...fluid.data_feeder import check_variable_and_dtype, check_dtype
from paddle import _C_ops
__all__ = []
def one_hot(x, num_classes, name=None):
    """
    Convert each integer id in ``x`` to a one-hot vector of length
    ``num_classes``.

    A dimension of size ``num_classes`` is appended behind the last
    dimension of ``x``; the entry at the id's position is 1. and every
    other entry is 0. Every id must satisfy ``0 <= id < num_classes``,
    otherwise the operator raises at execution time.

    Args:
        x(Tensor): Tensor of ids with shape :math:`[N_1, N_2, ..., N_k]`
            and dtype int32 or int64.
        num_classes(int|Tensor): Length of the one-hot dimension, e.g. the
            dictionary size when the ids are word ids.
        name(str, optional): See :ref:`api_guide_Name`. Default: None.

    Returns:
        Tensor: The one-hot representation of ``x`` with dtype float32.

    Examples:
        .. code-block:: python

            import paddle

            label = paddle.to_tensor([1, 1, 3, 0], dtype='int64')
            one_hot_label = paddle.nn.functional.one_hot(label, num_classes=4)
            # [[0., 1., 0., 0.],
            #  [0., 1., 0., 0.],
            #  [0., 0., 0., 1.],
            #  [1., 0., 0., 0.]]
    """
    # Dynamic graph: dispatch straight to the C++ op and return.
    if in_dygraph_mode():
        return _C_ops.one_hot_v2(x, 'depth', num_classes, 'allow_out_of_range',
                                 False)

    # Static graph: validate the input, then append a one_hot_v2 op.
    check_variable_and_dtype(x, 'input', ['int32', 'int64'], 'one_hot_v2')
    # NOTE: **locals() must capture exactly (x, num_classes, name) here, so
    # no additional locals are bound before this call.
    helper = LayerHelper("one_hot_v2", **locals())
    out = helper.create_variable_for_type_inference(dtype='float32')
    if isinstance(num_classes, Variable):
        # Depth supplied as a tensor: feed it as an op input, not an attr.
        num_classes.stop_gradient = True
        op_inputs = {'X': x, 'depth_tensor': num_classes}
        op_attrs = {'allow_out_of_range': False}
    else:
        # Depth supplied as a plain int: pass it via the 'depth' attribute.
        op_inputs = {'X': x}
        op_attrs = {'depth': num_classes, 'allow_out_of_range': False}
    helper.append_op(
        type="one_hot_v2",
        inputs=op_inputs,
        attrs=op_attrs,
        outputs={'Out': out},
        stop_gradient=True)
    return out
def embedding(x, weight, padding_idx=None, sparse=False, name=None):
    """
    Look up rows of the table ``weight`` using the ids in ``x``.

    The output shape is the shape of ``x`` with the embedding size
    (``weight.shape[1]``) appended as a trailing dimension. Every id in
    ``x`` must satisfy ``0 <= id < weight.shape[0]``, otherwise the
    program raises an exception and exits.

    Args:
        x(Tensor): Tensor of ids with dtype int32/int64.
        weight(Tensor): 2-D lookup table; first dimension is the dictionary
            size, second is the embedding size.
        padding_idx(int|long|None): Index (in ``[-weight.shape[0],
            weight.shape[0])``) whose lookups produce all-zero vectors that
            are never updated during training. Negative values count back
            from the end of the table. ``None`` disables padding. Default:
            None.
        sparse(bool): Use sparse gradient updates (faster, but unsupported
            by some optimizers such as Adadelta, Adamax and Lamb).
            Default: False.
        name(str|None): See :ref:`api_guide_Name`. Default: None.

    Returns:
        Tensor: Embeddings for ``x`` with the same dtype as ``weight``.

    Examples:
        .. code-block:: python

            import numpy as np
            import paddle
            import paddle.nn as nn

            x = paddle.to_tensor(
                np.arange(3, 6).reshape((3, 1)).astype(np.int64),
                stop_gradient=False)
            w = paddle.to_tensor(
                np.full(shape=(10, 3), fill_value=2).astype(np.float32),
                stop_gradient=False)
            # emb.shape = [3, 1, 3], every vector is [2., 2., 2.]
            emb = nn.functional.embedding(
                x=x, weight=w, sparse=True, name="embedding")
    """
    # Normalize padding_idx: None disables padding (sentinel -1); negative
    # values are counted back from the end of the table.
    if padding_idx is None:
        padding_idx = -1
    elif padding_idx < 0:
        padding_idx = weight.shape[0] + padding_idx
    if not -weight.shape[0] <= padding_idx < weight.shape[0]:
        raise ValueError("padding_idx must be within [-{}, {})".format(
            weight.shape[0], weight.shape[0]))

    # Dynamic graph: call the C++ op directly and return.
    if in_dygraph_mode():
        return _C_ops.lookup_table_v2(
            weight, x, 'is_sparse', sparse, 'is_distributed', False,
            'remote_prefetch', False, 'padding_idx', padding_idx)

    # Static graph: validate the ids, then append a lookup_table_v2 op.
    # NOTE: **locals() forwards exactly the (normalized) signature names.
    helper = LayerHelper('embedding', **locals())
    dtype = helper.input_dtype(input_param_name='weight')
    check_variable_and_dtype(x, 'input',
                             ['uint8', 'int8', 'int16', 'int32', 'int64'],
                             'embedding')
    is_distributed = False
    # Remote prefetch only applies to sparse, non-distributed lookups.
    remote_prefetch = sparse and (not is_distributed)
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type='lookup_table_v2',
        inputs={'Ids': x,
                'W': weight},
        outputs={'Out': out},
        attrs={
            'is_sparse': sparse,
            'is_distributed': is_distributed,
            'remote_prefetch': remote_prefetch,
            'padding_idx': padding_idx
        })
    return out
| 40.223684 | 150 | 0.574856 |
from __future__ import print_function
import warnings
from ...fluid.framework import in_dygraph_mode
from ...static import Variable
from ...fluid.layer_helper import LayerHelper
from ...fluid.layers import core
from ...fluid.data_feeder import check_variable_and_dtype, check_dtype
from paddle import _C_ops
__all__ = []
def one_hot(x, num_classes, name=None):
    """Convert each integer id in ``x`` to a one-hot float32 vector of
    length ``num_classes``, appended as a new trailing dimension.

    Args:
        x(Tensor): int32/int64 Tensor of ids.
        num_classes(int|Tensor): length of the one-hot dimension; ids must
            satisfy ``0 <= id < num_classes``.
        name(str, optional): standard Paddle name argument. Default: None.

    Returns:
        Tensor: float32 one-hot representation of ``x``.
    """
    # Dynamic graph: dispatch straight to the C++ op.
    if in_dygraph_mode():
        return _C_ops.one_hot_v2(x, 'depth', num_classes, 'allow_out_of_range',
                                 False)
    else:
        # Static graph: validate input dtype, then append a one_hot_v2 op.
        check_variable_and_dtype(x, 'input', ['int32', 'int64'], 'one_hot_v2')
        helper = LayerHelper("one_hot_v2", **locals())
        one_hot_out = helper.create_variable_for_type_inference(dtype='float32')
        if not isinstance(num_classes, Variable):
            # Depth given as a plain int: pass it as the 'depth' attribute.
            inputs = {'X': x}
            attrs = {'depth': num_classes, 'allow_out_of_range': False}
        else:
            # Depth given as a tensor: feed it as an op input instead.
            num_classes.stop_gradient = True
            inputs = {'X': x, 'depth_tensor': num_classes}
            attrs = {'allow_out_of_range': False}
        helper.append_op(
            type="one_hot_v2",
            inputs=inputs,
            attrs=attrs,
            outputs={'Out': one_hot_out},
            stop_gradient=True)
        return one_hot_out
def embedding(x, weight, padding_idx=None, sparse=False, name=None):
    """Look up rows of the table ``weight`` using the ids in ``x``.

    Output shape is ``x``'s shape with the embedding size appended. Ids
    must satisfy ``0 <= id < weight.shape[0]``.

    Args:
        x(Tensor): int-typed Tensor of ids.
        weight(Tensor): 2-D lookup table [dict_size, embedding_size].
        padding_idx(int|long|None): index whose lookups yield all-zero,
            never-updated vectors; negative values count from the end of
            the table; None disables padding. Default: None.
        sparse(bool): use sparse gradient updates. Default: False.
        name(str|None): standard Paddle name argument. Default: None.

    Returns:
        Tensor: embeddings for ``x`` with the same dtype as ``weight``.

    Raises:
        ValueError: if the normalized ``padding_idx`` falls outside
            ``[-weight.shape[0], weight.shape[0])``.
    """
    # Normalize padding_idx: None -> -1 sentinel (no padding); negative
    # values count back from the end of the table.
    padding_idx = -1 if padding_idx is None else padding_idx if padding_idx >= 0 else (
        weight.shape[0] + padding_idx)
    if padding_idx >= weight.shape[0] or padding_idx < -weight.shape[0]:
        raise ValueError("padding_idx must be within [-{}, {})".format(
            weight.shape[0], weight.shape[0]))
    # Dynamic graph: call the C++ op directly.
    if in_dygraph_mode():
        return _C_ops.lookup_table_v2(
            weight, x, 'is_sparse', sparse, 'is_distributed', False,
            'remote_prefetch', False, 'padding_idx', padding_idx)
    else:
        # Static graph: validate the ids, then append a lookup_table_v2 op.
        helper = LayerHelper('embedding', **locals())
        dtype = helper.input_dtype(input_param_name='weight')
        check_variable_and_dtype(x, 'input',
                                 ['uint8', 'int8', 'int16', 'int32', 'int64'],
                                 'embedding')
        is_distributed = False
        # Remote prefetch only applies to sparse, non-distributed lookups.
        remote_prefetch = sparse and (not is_distributed)
        tmp = helper.create_variable_for_type_inference(dtype)
        helper.append_op(
            type='lookup_table_v2',
            inputs={'Ids': x,
                    'W': weight},
            outputs={'Out': tmp},
            attrs={
                'is_sparse': sparse,
                'is_distributed': is_distributed,
                'remote_prefetch': remote_prefetch,
                'padding_idx': padding_idx
            })
        return tmp
f71d3032e42d801860be4840ffd19a2487705594 | 17,260 | py | Python | test/functional/vuicash_condensing_txs.py | vuicash/qtum | fbc207749ed96e64073b96bd8768356e6b16ecf6 | [
"MIT"
] | null | null | null | test/functional/vuicash_condensing_txs.py | vuicash/qtum | fbc207749ed96e64073b96bd8768356e6b16ecf6 | [
"MIT"
] | null | null | null | test/functional/vuicash_condensing_txs.py | vuicash/qtum | fbc207749ed96e64073b96bd8768356e6b16ecf6 | [
"MIT"
] | 1 | 2022-01-10T00:08:47.000Z | 2022-01-10T00:08:47.000Z | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
from test_framework.vuicash import *
from test_framework.vuicashconfig import *
import sys
class CondensingTxsTest(BitcoinTestFramework):
    """Functional test for contract "condensing" transactions (COND txs).

    Deploys three mutually-calling Sender contracts, then checks after
    each block that the condensing transactions generated by the node
    carry the expected OP_SPEND inputs and value-splitting outputs, and
    that contract balances end up where they should.
    """

    def set_test_params(self):
        # Single fresh regtest node. -txindex is required so arbitrary
        # transactions can be fetched with getrawtransaction.
        self.setup_clean_chain = True
        self.num_nodes = 1
        self.extra_args = [['-txindex=1', '-rpcmaxgasprice=10000000']]

    def skip_test_if_missing_module(self):
        # The test drives wallet RPCs (sendtoaddress, sendtocontract).
        self.skip_if_no_wallet()

    def setup_contracts(self):
        """Deploy Sender1/Sender2/Sender3 and wire them to each other.

        The raw bytecode below was compiled from this Solidity source:

        pragma solidity ^0.4.0;
        contract Sender1 {
            // Sender2 sender2;
            // Sender3 sender3;
            address public sender2;
            address public sender3;
            function Sender1() {
            }
            function setSenders(address senderx, address sendery) public{
                // sender2=Sender2(senderx);
                // sender3=Sender3(sendery);
                sender2 = senderx;
                sender3 = sendery;
            }
            function share() public payable{
                if(msg.sender != address(sender3)){
                    // sender2.share.value(msg.value/2);
                    sender2.call.value(msg.value/2)(bytes4(sha3("share()")));
                }
            }
            function sendAll() public payable{
                // sender2.keep.value(msg.value + this.balance);
                // sender2.call.value(msg.value + this.balance)(bytes4(sha3("keep()")));
                sender2.call.value(this.balance)(bytes4(sha3("keep()")));
            }
            function keep() public payable{
            }
            function() payable { } //always payable
        }
        contract Sender2{
            // Sender1 sender1;
            // Sender3 sender3;
            address public sender1;
            address public sender3;
            function Sender2() {
            }
            function setSenders(address senderx, address sendery) public{
                // sender1=Sender1(senderx);
                // sender3=Sender3(sendery);
                sender1 = senderx;
                sender3 = sendery;
            }
            function share() public payable{
                // sender3.share.value(msg.value/2);
                sender3.call.value(msg.value/2)(bytes4(sha3("share()")));
            }
            function keep() public payable{
            }
            function withdrawAll() public{
                // sender3.withdraw();
                sender3.call(bytes4(sha3("withdraw()")));
                msg.sender.send(this.balance);
            }
            function() payable { } //always payable
        }
        contract Sender3 {
            // Sender1 sender1;
            // Sender2 sender2;
            address public sender1;
            address public sender2;
            function Sender3() {
            }
            function setSenders(address senderx, address sendery) public{
                // sender1=Sender1(senderx);
                // sender2=Sender2(sendery);
                sender1 = senderx;
                sender2 = sendery;
            }
            function share() public payable{
                // sender1.share.value(msg.value/2);
                // sender2.keep.value(msg.value/4);
                sender1.call.value(msg.value/2)(bytes4(sha3("share()")));
                sender2.call.value(msg.value/4)(bytes4(sha3("keep()")));
            }
            function withdraw() public{
                msg.sender.send(this.balance);
            }
            function() payable { } //always payable
        }
        """
        # Deploy each contract with a 1,000,000 gas limit and remember its
        # address.
        sender1_bytecode = "6060604052341561000c57fe5b5b5b5b6104cb8061001e6000396000f30060606040523615610076576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff1680635579818d1461007f578063622836a3146100d45780639b0079d414610126578063a8d5fd6514610178578063e14f680f14610182578063e4d06d821461018c575b61007d5b5b565b005b341561008757fe5b6100d2600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610196565b005b34156100dc57fe5b6100e461021d565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561012e57fe5b610136610243565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b610180610269565b005b61018a6103a9565b005b61019461049c565b005b81600060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555080600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055505b5050565b600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161415156103a657600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1660023481151561030557fe5b0460405180807f7368617265282900000000000000000000000000000000000000000000000000815250600701905060405180910390207c01000000000000000000000000000000000000000000000000000000009004906040518263ffffffff167c010000000000000000000000000000000000000000000000000000000002815260040180905060006040518083038185886187965a03f19350505050505b5b565b600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163073ffffffffffffffffffffffffffffffffffffffff163160405180807f6b65657028290000000000000000000000000000000000000000000000000000815250600601905060405180910390207c01000000000000000000000000000000000000000000000000000000009004906040518263ffffffff167c010000000000000000000000000000000000000000000000000000000002815260040180905060006040518083038185886187965a03f19350505050505b565b5b5600a165627a7a72305820b491c90fc7b4f09ab3f6262b83707908d390a97f9730429d1ff5fa8e44a63b190029"
        self.sender1 = self.node.createcontract(sender1_bytecode, 1000000)['address']
        sender2_bytecode = "6060604052341561000c57fe5b5b5b5b6104b28061001e6000396000f30060606040523615610076576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff1680635579818d1461007f578063853828b6146100d45780639b0079d4146100e6578063a8d5fd6514610138578063e4d06d8214610142578063f34e0e7b1461014c575b61007d5b5b565b005b341561008757fe5b6100d2600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803573ffffffffffffffffffffffffffffffffffffffff1690602001909190505061019e565b005b34156100dc57fe5b6100e4610225565b005b34156100ee57fe5b6100f661034f565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b610140610375565b005b61014a61045d565b005b341561015457fe5b61015c610460565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b81600060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555080600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055505b5050565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1660405180807f7769746864726177282900000000000000000000000000000000000000000000815250600a01905060405180910390207c010000000000000000000000000000000000000000000000000000000090046040518163ffffffff167c010000000000000000000000000000000000000000000000000000000002815260040180905060006040518083038160008761646e5a03f192505050503373ffffffffffffffffffffffffffffffffffffffff166108fc3073ffffffffffffffffffffffffffffffffffffffff16319081150290604051809050600060405180830381858888f19350505050505b565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166002348115156103ba57fe5b0460405180807f7368617265282900000000000000000000000000000000000000000000000000815250600701905060405180910390207c01000000000000000000000000000000000000000000000000000000009004906040518263ffffffff167c010000000000000000000000000000000000000000000000000000000002815260040180905060006040518083038185886187965a03f19350505050505b565b5b565b600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16815600a165627a7a723058201842d5027fea2d624a38de6731e71832836efe8c51e5815b8ad85b7f3639e72a0029"
        self.sender2 = self.node.createcontract(sender2_bytecode, 1000000)['address']
        sender3_bytecode = "6060604052341561000c57fe5b5b5b5b6104a88061001e6000396000f3006060604052361561006b576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff1680633ccfd60b146100745780635579818d14610086578063622836a3146100db578063a8d5fd651461012d578063f34e0e7b14610137575b6100725b5b565b005b341561007c57fe5b610084610189565b005b341561008e57fe5b6100d9600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803573ffffffffffffffffffffffffffffffffffffffff169060200190919050506101dc565b005b34156100e357fe5b6100eb610263565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b610135610289565b005b341561013f57fe5b610147610456565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b3373ffffffffffffffffffffffffffffffffffffffff166108fc3073ffffffffffffffffffffffffffffffffffffffff16319081150290604051809050600060405180830381858888f19350505050505b565b81600060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555080600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055505b5050565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166002348115156102ce57fe5b0460405180807f7368617265282900000000000000000000000000000000000000000000000000815250600701905060405180910390207c01000000000000000000000000000000000000000000000000000000009004906040518263ffffffff167c010000000000000000000000000000000000000000000000000000000002815260040180905060006040518083038185886187965a03f1935050505050600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166004348115156103b357fe5b0460405180807f6b65657028290000000000000000000000000000000000000000000000000000815250600601905060405180910390207c01000000000000000000000000000000000000000000000000000000009004906040518263ffffffff167c010000000000000000000000000000000000000000000000000000000002815260040180905060006040518083038185886187965a03f19350505050505b565b600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16815600a165627a7a72305820cb1b06b481990e1e218f7d0b51a3ffdf5b7439cfdd9bb2dccc1476cb84dfc95b0029"
        self.sender3 = self.node.createcontract(sender3_bytecode, 1000000)['address']
        self.node.generate(1)
        # The three senders plus the default DGP contracts must now exist.
        assert(len(self.node.listcontracts()) == 3+NUM_DEFAULT_DGP_CONTRACTS)
        # 4-byte function selectors (first 4 bytes of sha3 of the
        # signature) for the contract methods exercised below.
        self.keep_abi = "e4d06d82"
        self.sendAll_abi = "e14f680f"
        self.setSenders_abi = "5579818d"
        self.share_abi = "a8d5fd65"
        self.withdrawAll_abi = "853828b6"
        self.withdraw_abi = "3ccfd60b"
        self.sender1_abi = "f34e0e7b"
        self.sender2_abi = "622836a3"
        self.sender3_abi = "9b0079d4"
        # ABI-encode each address as a 32-byte (64 hex char) word.
        padded_sender1 = self.sender1.zfill(64)
        padded_sender2 = self.sender2.zfill(64)
        padded_sender3 = self.sender3.zfill(64)
        # Point every contract at its two peers via setSenders(address,address).
        self.node.sendtocontract(self.sender1, self.setSenders_abi + padded_sender2 + padded_sender3)
        self.node.sendtocontract(self.sender2, self.setSenders_abi + padded_sender1 + padded_sender3)
        self.node.sendtocontract(self.sender3, self.setSenders_abi + padded_sender1 + padded_sender2)
        self.node.generate(1)
        # Verify that the senders have been set correctly (the public
        # getters return the address right-aligned in a 32-byte word, so
        # strip the 24 leading hex chars before comparing).
        assert_equal(self.node.callcontract(self.sender1, self.sender2_abi)['executionResult']['output'][24:], self.sender2)
        assert_equal(self.node.callcontract(self.sender1, self.sender3_abi)['executionResult']['output'][24:], self.sender3)
        assert_equal(self.node.callcontract(self.sender2, self.sender1_abi)['executionResult']['output'][24:], self.sender1)
        assert_equal(self.node.callcontract(self.sender2, self.sender3_abi)['executionResult']['output'][24:], self.sender3)
        assert_equal(self.node.callcontract(self.sender3, self.sender1_abi)['executionResult']['output'][24:], self.sender1)
        assert_equal(self.node.callcontract(self.sender3, self.sender2_abi)['executionResult']['output'][24:], self.sender2)

    def run_test(self):
        """Drive the contract calls and check every condensing tx."""
        self.node = self.nodes[0]
        # Mature some coinbases so the wallet has spendable funds.
        self.node.generate(COINBASE_MATURITY+50)
        print("Setting up contracts and calling setSenders")
        self.setup_contracts()
        A1 = self.node.getnewaddress()
        self.node.sendtoaddress(A1, 1)
        self.node.generate(1)
        # No contract holds funds yet, so no contract has a vin.
        assert("vin" not in self.node.getaccountinfo(self.sender1))
        assert("vin" not in self.node.getaccountinfo(self.sender2))
        assert("vin" not in self.node.getaccountinfo(self.sender3))
        # T1: send 8 coins into Sender1.share(), which cascades value
        # through Sender2 and Sender3.
        T1_id = self.node.sendtocontract(self.sender1, self.share_abi, 8)['txid']
        B2_id = self.node.generate(1)[0]
        B2 = self.node.getblock(B2_id)
        # Since this is a PoW block we only require 3 txs atm (coinbase, T1 and COND tx)
        assert_equal(B2['tx'][1], T1_id)
        assert_equal(len(B2['tx']), 3)
        # C1 is the condensing tx splitting the 8 coins across the three
        # contracts (5 / 2.5 / 0.5) from a single OP_SPEND input.
        C1_id = B2['tx'][2]
        C1 = self.node.getrawtransaction(C1_id, True)
        assert_vin(C1, [('OP_SPEND', )])
        assert_vout(C1, [(5, 'call'), (2.5, 'call'), (0.5, 'call')])
        # All three contracts now hold funds, so each has a vin.
        assert("vin" in self.node.getaccountinfo(self.sender1))
        assert("vin" in self.node.getaccountinfo(self.sender2))
        assert("vin" in self.node.getaccountinfo(self.sender3))
        # We set the tx fee of T2 to a higher value such that it will be prioritized (be at index 1 in the block)
        T2_id = self.node.sendtocontract(self.sender1, self.keep_abi, 2, 50000, 0.0001)['txid']
        # T3: sendAll() forwards Sender1's whole balance to Sender2.keep().
        T3_id = self.node.sendtocontract(self.sender1, self.sendAll_abi, 2)['txid']
        B3_id = self.node.generate(1)[0]
        B3 = self.node.getblock(B3_id)
        # coinbase, T2, C2, T3, C3
        assert_equal(len(B3['tx']), 5)
        assert_equal(B3['tx'][1], T2_id)
        C2_id = B3['tx'][2]
        C3_id = B3['tx'][4]
        C2 = self.node.getrawtransaction(C2_id, True)
        C3 = self.node.getrawtransaction(C3_id, True)
        # C2 merges Sender1's old balance with T2's value (5 + 2 = 7).
        assert_vin(C2, [('OP_SPEND', ), ('OP_SPEND', )])
        assert_vout(C2, [(7, 'call')])
        # C3 drains Sender1 into Sender2 (7 + 2 + 2.5 = 11.5).
        assert_vin(C3, [('OP_SPEND', ), ('OP_SPEND', ), ('OP_SPEND', )])
        assert_vout(C3, [(11.5, 'call')])
        # Sender1 is now empty; Sender2 and Sender3 still hold funds.
        assert("vin" not in self.node.getaccountinfo(self.sender1))
        assert("vin" in self.node.getaccountinfo(self.sender2))
        assert("vin" in self.node.getaccountinfo(self.sender3))
        # We need the txfee to be higher than T5 so that T4 tx is prioritized over T5.
        # We set the gas such that the tx will run but not immediately throw an out-of-gas exception
        T4_raw = make_transaction(self.node, [make_vin(self.node, 3*COIN)], [make_op_call_output(2*COIN, b"\x04", 22000, CScriptNum(VUI_MIN_GAS_PRICE), hex_str_to_bytes(self.share_abi), hex_str_to_bytes(self.sender2))])
        T4_id = self.node.sendrawtransaction(T4_raw, 0)
        # T5: withdrawAll() pulls Sender3's funds into Sender2, then sends
        # Sender2's whole balance to the pubkeyhash address A1.
        T5_id = self.node.sendtocontract(self.sender2, self.withdrawAll_abi, 0, 1000000, VUI_MIN_GAS_PRICE_STR, A1)['txid']
        B4_id = self.node.generate(1)[0]
        B4 = self.node.getblock(B4_id)
        # Coinbase, T4, R1, T5, C4
        assert_equal(len(B4['tx']), 5)
        assert_equal(B4['tx'][1], T4_id)
        assert_equal(B4['tx'][3], T5_id)
        # R1 refunds T4's 2 coins (the underfunded-gas call reverted).
        R1_id = B4['tx'][2]
        R1 = self.node.getrawtransaction(R1_id, True)
        C4_id = B4['tx'][4]
        C4 = self.node.getrawtransaction(C4_id, True)
        assert_vout(R1, [(2, 'pubkeyhash')])
        # C4 condenses the remaining 12 coins into a payout to A1.
        assert_vin(C4, [('OP_SPEND', ), ('OP_SPEND', )])
        assert_vout(C4, [(12, 'pubkeyhash')])
        # Every contract balance has been drained.
        assert_equal(sum(self.node.listcontracts().values()), 0)
        assert("vin" not in self.node.getaccountinfo(self.sender1))
        assert("vin" not in self.node.getaccountinfo(self.sender2))
        assert("vin" not in self.node.getaccountinfo(self.sender3))
# Run the test only when executed as a script, not when imported.
if __name__ == '__main__':
    CondensingTxsTest().main()
| 76.711111 | 2,543 | 0.773638 |
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
from test_framework.vuicash import *
from test_framework.vuicashconfig import *
import sys
class CondensingTxsTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [['-txindex=1', '-rpcmaxgasprice=10000000']]
    def skip_test_if_missing_module(self):
        """Skip the test when the node was built without wallet support."""
        self.skip_if_no_wallet()
def setup_contracts(self):
sender1_bytecode = "6060604052341561000c57fe5b5b5b5b6104cb8061001e6000396000f30060606040523615610076576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff1680635579818d1461007f578063622836a3146100d45780639b0079d414610126578063a8d5fd6514610178578063e14f680f14610182578063e4d06d821461018c575b61007d5b5b565b005b341561008757fe5b6100d2600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610196565b005b34156100dc57fe5b6100e461021d565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561012e57fe5b610136610243565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b610180610269565b005b61018a6103a9565b005b61019461049c565b005b81600060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555080600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055505b5050565b600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161415156103a657600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1660023481151561030557fe5b0460405180807f7368617265282900000000000000000000000000000000000000000000000000815250600701905060405180910390207c01000000000000000000000000000000000000000000000000000000009004906040518263ffffffff167c010000000000000000000000000000000000000000000000000000000002815260040180905060006040518083038185886187965a03f19350505050505b5b565b600060009054906101000a900473ffffffffffffffff
ffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163073ffffffffffffffffffffffffffffffffffffffff163160405180807f6b65657028290000000000000000000000000000000000000000000000000000815250600601905060405180910390207c01000000000000000000000000000000000000000000000000000000009004906040518263ffffffff167c010000000000000000000000000000000000000000000000000000000002815260040180905060006040518083038185886187965a03f19350505050505b565b5b5600a165627a7a72305820b491c90fc7b4f09ab3f6262b83707908d390a97f9730429d1ff5fa8e44a63b190029"
self.sender1 = self.node.createcontract(sender1_bytecode, 1000000)['address']
sender2_bytecode = "6060604052341561000c57fe5b5b5b5b6104b28061001e6000396000f30060606040523615610076576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff1680635579818d1461007f578063853828b6146100d45780639b0079d4146100e6578063a8d5fd6514610138578063e4d06d8214610142578063f34e0e7b1461014c575b61007d5b5b565b005b341561008757fe5b6100d2600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803573ffffffffffffffffffffffffffffffffffffffff1690602001909190505061019e565b005b34156100dc57fe5b6100e4610225565b005b34156100ee57fe5b6100f661034f565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b610140610375565b005b61014a61045d565b005b341561015457fe5b61015c610460565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b81600060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555080600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055505b5050565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1660405180807f7769746864726177282900000000000000000000000000000000000000000000815250600a01905060405180910390207c010000000000000000000000000000000000000000000000000000000090046040518163ffffffff167c010000000000000000000000000000000000000000000000000000000002815260040180905060006040518083038160008761646e5a03f192505050503373ffffffffffffffffffffffffffffffffffffffff166108fc3073ffffffffffffffffffffffffffffffffffffffff16319081150290604051809050600060405180830381858888f19350505050505b565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166002348115156103ba57fe5b0460405180
807f7368617265282900000000000000000000000000000000000000000000000000815250600701905060405180910390207c01000000000000000000000000000000000000000000000000000000009004906040518263ffffffff167c010000000000000000000000000000000000000000000000000000000002815260040180905060006040518083038185886187965a03f19350505050505b565b5b565b600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16815600a165627a7a723058201842d5027fea2d624a38de6731e71832836efe8c51e5815b8ad85b7f3639e72a0029"
self.sender2 = self.node.createcontract(sender2_bytecode, 1000000)['address']
sender3_bytecode = "6060604052341561000c57fe5b5b5b5b6104a88061001e6000396000f3006060604052361561006b576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff1680633ccfd60b146100745780635579818d14610086578063622836a3146100db578063a8d5fd651461012d578063f34e0e7b14610137575b6100725b5b565b005b341561007c57fe5b610084610189565b005b341561008e57fe5b6100d9600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803573ffffffffffffffffffffffffffffffffffffffff169060200190919050506101dc565b005b34156100e357fe5b6100eb610263565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b610135610289565b005b341561013f57fe5b610147610456565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b3373ffffffffffffffffffffffffffffffffffffffff166108fc3073ffffffffffffffffffffffffffffffffffffffff16319081150290604051809050600060405180830381858888f19350505050505b565b81600060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555080600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055505b5050565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166002348115156102ce57fe5b0460405180807f7368617265282900000000000000000000000000000000000000000000000000815250600701905060405180910390207c01000000000000000000000000000000000000000000000000000000009004906040518263ffffffff167c010000000000000000000000000000000000000000000000000000000002815260040180905060006040518083038185886187965a03f1935050505050600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166004348115156103b357fe5b0460405180807f6b65657028
290000000000000000000000000000000000000000000000000000815250600601905060405180910390207c01000000000000000000000000000000000000000000000000000000009004906040518263ffffffff167c010000000000000000000000000000000000000000000000000000000002815260040180905060006040518083038185886187965a03f19350505050505b565b600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16815600a165627a7a72305820cb1b06b481990e1e218f7d0b51a3ffdf5b7439cfdd9bb2dccc1476cb84dfc95b0029"
self.sender3 = self.node.createcontract(sender3_bytecode, 1000000)['address']
self.node.generate(1)
assert(len(self.node.listcontracts()) == 3+NUM_DEFAULT_DGP_CONTRACTS)
self.keep_abi = "e4d06d82"
self.sendAll_abi = "e14f680f"
self.setSenders_abi = "5579818d"
self.share_abi = "a8d5fd65"
self.withdrawAll_abi = "853828b6"
self.withdraw_abi = "3ccfd60b"
self.sender1_abi = "f34e0e7b"
self.sender2_abi = "622836a3"
self.sender3_abi = "9b0079d4"
padded_sender1 = self.sender1.zfill(64)
padded_sender2 = self.sender2.zfill(64)
padded_sender3 = self.sender3.zfill(64)
self.node.sendtocontract(self.sender1, self.setSenders_abi + padded_sender2 + padded_sender3)
self.node.sendtocontract(self.sender2, self.setSenders_abi + padded_sender1 + padded_sender3)
self.node.sendtocontract(self.sender3, self.setSenders_abi + padded_sender1 + padded_sender2)
self.node.generate(1)
assert_equal(self.node.callcontract(self.sender1, self.sender2_abi)['executionResult']['output'][24:], self.sender2)
assert_equal(self.node.callcontract(self.sender1, self.sender3_abi)['executionResult']['output'][24:], self.sender3)
assert_equal(self.node.callcontract(self.sender2, self.sender1_abi)['executionResult']['output'][24:], self.sender1)
assert_equal(self.node.callcontract(self.sender2, self.sender3_abi)['executionResult']['output'][24:], self.sender3)
assert_equal(self.node.callcontract(self.sender3, self.sender1_abi)['executionResult']['output'][24:], self.sender1)
assert_equal(self.node.callcontract(self.sender3, self.sender2_abi)['executionResult']['output'][24:], self.sender2)
    def run_test(self):
        """Exercise condensing-transaction (OP_SPEND) creation over four blocks.

        Naming convention: Tn = sent transaction, Bn = mined block,
        Cn = condensing tx created by the miner, Rn = refund tx.
        """
        self.node = self.nodes[0]
        # Mature enough coinbases so the wallet has spendable funds.
        self.node.generate(COINBASE_MATURITY+50)
        print("Setting up contracts and calling setSenders")
        self.setup_contracts()
        A1 = self.node.getnewaddress()
        self.node.sendtoaddress(A1, 1)
        self.node.generate(1)
        # No contract holds a balance yet, so none has a cached vin.
        assert("vin" not in self.node.getaccountinfo(self.sender1))
        assert("vin" not in self.node.getaccountinfo(self.sender2))
        assert("vin" not in self.node.getaccountinfo(self.sender3))
        # T1: send 8 coins into sender1's share(); the condensing tx C1 must
        # split the value across all three contracts.
        T1_id = self.node.sendtocontract(self.sender1, self.share_abi, 8)['txid']
        B2_id = self.node.generate(1)[0]
        B2 = self.node.getblock(B2_id)
        assert_equal(B2['tx'][1], T1_id)
        # Coinbase, T1, C1
        assert_equal(len(B2['tx']), 3)
        C1_id = B2['tx'][2]
        C1 = self.node.getrawtransaction(C1_id, True)
        assert_vin(C1, [('OP_SPEND', )])
        # The 8 coins end up split 5 / 2.5 / 0.5 across the contracts.
        assert_vout(C1, [(5, 'call'), (2.5, 'call'), (0.5, 'call')])
        # Now every contract holds a balance, so each has a cached vin.
        assert("vin" in self.node.getaccountinfo(self.sender1))
        assert("vin" in self.node.getaccountinfo(self.sender2))
        assert("vin" in self.node.getaccountinfo(self.sender3))
        # T2 (keep) and T3 (sendAll) both touch sender1 in the same block, so
        # two condensing txs are created, each spending the prior vin(s).
        T2_id = self.node.sendtocontract(self.sender1, self.keep_abi, 2, 50000, 0.0001)['txid']
        T3_id = self.node.sendtocontract(self.sender1, self.sendAll_abi, 2)['txid']
        B3_id = self.node.generate(1)[0]
        B3 = self.node.getblock(B3_id)
        # Coinbase, T2, C2, T3, C3
        assert_equal(len(B3['tx']), 5)
        assert_equal(B3['tx'][1], T2_id)
        C2_id = B3['tx'][2]
        C3_id = B3['tx'][4]
        C2 = self.node.getrawtransaction(C2_id, True)
        C3 = self.node.getrawtransaction(C3_id, True)
        assert_vin(C2, [('OP_SPEND', ), ('OP_SPEND', )])
        assert_vout(C2, [(7, 'call')])
        assert_vin(C3, [('OP_SPEND', ), ('OP_SPEND', ), ('OP_SPEND', )])
        assert_vout(C3, [(11.5, 'call')])
        # sendAll drained sender1; sender2/sender3 still hold balances.
        assert("vin" not in self.node.getaccountinfo(self.sender1))
        assert("vin" in self.node.getaccountinfo(self.sender2))
        assert("vin" in self.node.getaccountinfo(self.sender3))
        # We need the txfee to be higher than T5 so that T4 tx is prioritized over T5.
        # We set the gas such that the tx will run but not immediately throw an
        # out-of-gas exception, producing refund tx R1.
        T4_raw = make_transaction(self.node, [make_vin(self.node, 3*COIN)], [make_op_call_output(2*COIN, b"\x04", 22000, CScriptNum(VUI_MIN_GAS_PRICE), hex_str_to_bytes(self.share_abi), hex_str_to_bytes(self.sender2))])
        T4_id = self.node.sendrawtransaction(T4_raw, 0)
        T5_id = self.node.sendtocontract(self.sender2, self.withdrawAll_abi, 0, 1000000, VUI_MIN_GAS_PRICE_STR, A1)['txid']
        B4_id = self.node.generate(1)[0]
        B4 = self.node.getblock(B4_id)
        # Coinbase, T4, R1, T5, C4
        assert_equal(len(B4['tx']), 5)
        assert_equal(B4['tx'][1], T4_id)
        assert_equal(B4['tx'][3], T5_id)
        R1_id = B4['tx'][2]
        R1 = self.node.getrawtransaction(R1_id, True)
        C4_id = B4['tx'][4]
        C4 = self.node.getrawtransaction(C4_id, True)
        assert_vout(R1, [(2, 'pubkeyhash')])
        assert_vin(C4, [('OP_SPEND', ), ('OP_SPEND', )])
        # withdrawAll sends everything to a plain pubkeyhash output ...
        assert_vout(C4, [(12, 'pubkeyhash')])
        # ... so after B4 no contract holds any balance or cached vin.
        assert_equal(sum(self.node.listcontracts().values()), 0)
        assert("vin" not in self.node.getaccountinfo(self.sender1))
        assert("vin" not in self.node.getaccountinfo(self.sender2))
        assert("vin" not in self.node.getaccountinfo(self.sender3))
# Entry point: run the functional test through the framework's main().
if __name__ == '__main__':
    CondensingTxsTest().main()
| true | true |
f71d31625c79c1f1f9b8be2745043bae002d419e | 5,644 | py | Python | meraki/controllers/api_usage_controller.py | bossypants22/python-sdk-test | 37701d62dc18c2abb910eb790ab978913adcaf7b | [
"MIT"
] | null | null | null | meraki/controllers/api_usage_controller.py | bossypants22/python-sdk-test | 37701d62dc18c2abb910eb790ab978913adcaf7b | [
"MIT"
] | null | null | null | meraki/controllers/api_usage_controller.py | bossypants22/python-sdk-test | 37701d62dc18c2abb910eb790ab978913adcaf7b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
meraki
This file was automatically generated for meraki by APIMATIC v2.0 ( https://apimatic.io ).
"""
from meraki.api_helper import APIHelper
from meraki.configuration import Configuration
from meraki.controllers.base_controller import BaseController
from meraki.http.auth.custom_header_auth import CustomHeaderAuth
class APIUsageController(BaseController):
    """A Controller to access Endpoints in the meraki API."""

    def get_organization_api_requests(self,
                                      options=None):
        """Does a GET request to /organizations/{organizationId}/apiRequests.

        List the API requests made by an organization

        Args:
            options (dict, optional): Key-value pairs for any of the
                parameters to this API Endpoint. All parameters to the
                endpoint are supplied through the dictionary with their names
                being the key and their desired values being the value. A list
                of parameters that can be used are::

                    organization_id -- string -- TODO: type description here.
                        Example:
                    t_0 -- string -- The beginning of the timespan for the
                        data. The maximum lookback period is 31 days from
                        today.
                    t_1 -- string -- The end of the timespan for the data. t1
                        can be a maximum of 31 days after t0.
                    timespan -- float -- The timespan for which the
                        information will be fetched. If specifying timespan,
                        do not specify parameters t0 and t1. The value must be
                        in seconds and be less than or equal to 31 days. The
                        default is 31 days.
                    per_page -- int -- The number of entries per page
                        returned. Acceptable range is 3 - 1000. Default is
                        50.
                    starting_after -- string -- A token used by the server to
                        indicate the start of the page. Often this is a
                        timestamp or an ID but it is not limited to those.
                        This parameter should not be defined by client
                        applications. The link for the first, last, prev, or
                        next page in the HTTP Link header should define it.
                    ending_before -- string -- A token used by the server to
                        indicate the end of the page. Often this is a
                        timestamp or an ID but it is not limited to those.
                        This parameter should not be defined by client
                        applications. The link for the first, last, prev, or
                        next page in the HTTP Link header should define it.
                    admin_id -- string -- Filter the results by the ID of the
                        admin who made the API requests
                    path -- string -- Filter the results by the path of the
                        API requests
                    method -- string -- Filter the results by the method of
                        the API requests (must be 'GET', 'PUT', 'POST' or
                        'DELETE')
                    response_code -- int -- Filter the results by the response
                        code of the API requests

        Returns:
            mixed: Response from the API. Successful operation

        Raises:
            APIException: When an error occurs while fetching the data from
                the remote API. This exception includes the HTTP Response
                code, an error message, and the HTTP body that was received in
                the request.

        """
        # Bug fix: the generated code used a mutable default (options=dict()),
        # a single dict shared across every call of this method.  Defaulting
        # to None and creating a fresh dict per call is behaviorally identical
        # for all existing callers.
        if options is None:
            options = {}

        # Validate required parameters
        self.validate_parameters(organization_id=options.get("organization_id"))

        # Prepare query URL
        _url_path = '/organizations/{organizationId}/apiRequests'
        _url_path = APIHelper.append_url_with_template_parameters(_url_path, {
            'organizationId': options.get('organization_id', None)
        })
        _query_builder = Configuration.base_uri
        _query_builder += _url_path
        # Map the snake_case option names onto the camelCase wire parameters.
        _query_parameters = {
            't0': options.get('t_0', None),
            't1': options.get('t_1', None),
            'timespan': options.get('timespan', None),
            'perPage': options.get('per_page', None),
            'startingAfter': options.get('starting_after', None),
            'endingBefore': options.get('ending_before', None),
            'adminId': options.get('admin_id', None),
            'path': options.get('path', None),
            'method': options.get('method', None),
            'responseCode': options.get('response_code', None)
        }
        _query_builder = APIHelper.append_url_with_query_parameters(_query_builder,
            _query_parameters, Configuration.array_serialization)
        _query_url = APIHelper.clean_url(_query_builder)

        # Prepare headers
        _headers = {
            'accept': 'application/json'
        }

        # Prepare and execute request
        _request = self.http_client.get(_query_url, headers=_headers)
        CustomHeaderAuth.apply(_request)
        _context = self.execute_request(_request)
        self.validate_response(_context)

        # Return appropriate type
        return APIHelper.json_deserialize(_context.response.raw_body)
| 47.428571 | 95 | 0.567151 |
from meraki.api_helper import APIHelper
from meraki.configuration import Configuration
from meraki.controllers.base_controller import BaseController
from meraki.http.auth.custom_header_auth import CustomHeaderAuth
class APIUsageController(BaseController):
    """A Controller to access the API-usage endpoints of the Meraki API."""
    def get_organization_api_requests(self,
                                      options=dict()):
        """GET /organizations/{organizationId}/apiRequests.

        List the API requests made by an organization.

        Args:
            options (dict, optional): parameters keyed by name; supported
                keys include organization_id (required), t_0, t_1, timespan,
                per_page, starting_after, ending_before, admin_id, path,
                method and response_code.

        Returns:
            The JSON-deserialized API response.

        Raises:
            APIException: on a non-successful HTTP response.
        """
        # NOTE(review): options=dict() is a shared mutable default argument;
        # it is never mutated here, but options=None would be the safer idiom.
        # Validate required parameters.
        self.validate_parameters(organization_id=options.get("organization_id"))
        # Build the request URL from the template plus query parameters
        # (snake_case option names map onto camelCase wire parameters).
        _url_path = '/organizations/{organizationId}/apiRequests'
        _url_path = APIHelper.append_url_with_template_parameters(_url_path, {
            'organizationId': options.get('organization_id', None)
        })
        _query_builder = Configuration.base_uri
        _query_builder += _url_path
        _query_parameters = {
            't0': options.get('t_0', None),
            't1': options.get('t_1', None),
            'timespan': options.get('timespan', None),
            'perPage': options.get('per_page', None),
            'startingAfter': options.get('starting_after', None),
            'endingBefore': options.get('ending_before', None),
            'adminId': options.get('admin_id', None),
            'path': options.get('path', None),
            'method': options.get('method', None),
            'responseCode': options.get('response_code', None)
        }
        _query_builder = APIHelper.append_url_with_query_parameters(_query_builder,
            _query_parameters, Configuration.array_serialization)
        _query_url = APIHelper.clean_url(_query_builder)
        # Execute the authenticated GET and validate/deserialize the response.
        _headers = {
            'accept': 'application/json'
        }
        _request = self.http_client.get(_query_url, headers=_headers)
        CustomHeaderAuth.apply(_request)
        _context = self.execute_request(_request)
        self.validate_response(_context)
        return APIHelper.json_deserialize(_context.response.raw_body)
| true | true |
f71d335d4ca28e93dc90aa99a20721087bcac69b | 3,087 | py | Python | koans/about_strings.py | revarcline/python_koans | e52f58d9e0e568f4e8f7ebc79a3046cb60424eca | [
"MIT"
] | null | null | null | koans/about_strings.py | revarcline/python_koans | e52f58d9e0e568f4e8f7ebc79a3046cb60424eca | [
"MIT"
] | null | null | null | koans/about_strings.py | revarcline/python_koans | e52f58d9e0e568f4e8f7ebc79a3046cb60424eca | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import Koan
class AboutStrings(Koan):
    """Koans about Python string literals: quoting, escaping, concatenation."""

    def test_double_quoted_strings_are_strings(self):
        string = "Hello, world."
        self.assertEqual(True, isinstance(string, str))

    def test_single_quoted_strings_are_also_strings(self):
        string = 'Goodbye, world.'
        self.assertEqual(True, isinstance(string, str))

    def test_triple_quote_strings_are_also_strings(self):
        string = """Howdy, world!"""
        self.assertEqual(True, isinstance(string, str))

    def test_triple_single_quotes_work_too(self):
        string = '''Bonjour tout le monde!'''
        self.assertEqual(True, isinstance(string, str))

    def test_raw_strings_are_also_strings(self):
        string = r"Konnichi wa, world!"
        self.assertEqual(True, isinstance(string, str))

    def test_use_single_quotes_to_create_string_with_double_quotes(self):
        string = 'He said, "Go Away."'
        # Fix: the solved koan asserted string == string, a tautology that
        # can never fail.  Pin the expected literal instead.
        self.assertEqual('He said, "Go Away."', string)

    def test_use_double_quotes_to_create_strings_with_single_quotes(self):
        string = "Don't"
        # Fix: same tautology as above; assert the expected literal.
        self.assertEqual("Don't", string)

    def test_use_backslash_for_escaping_quotes_in_strings(self):
        a = "He said, \"Don't\""
        b = 'He said, "Don\'t"'
        self.assertEqual(True, (a == b))

    def test_use_backslash_at_the_end_of_a_line_to_continue_onto_the_next_line(self):
        # The trailing backslash continues the literal; the second physical
        # line starts at column 0 so no extra spaces enter the string.
        string = "It was the best of times,\n\
It was the worst of times."
        self.assertEqual(52, len(string))

    def test_triple_quoted_strings_can_span_lines(self):
        string = """
Howdy,
world!
"""
        self.assertEqual(15, len(string))

    def test_triple_quoted_strings_need_less_escaping(self):
        a = "Hello \"world\"."
        b = """Hello "world"."""
        self.assertEqual(True, (a == b))

    def test_escaping_quotes_at_the_end_of_triple_quoted_string(self):
        string = """Hello "world\""""
        self.assertEqual('Hello "world"', string)

    def test_plus_concatenates_strings(self):
        string = "Hello, " + "world"
        self.assertEqual("Hello, world", string)

    def test_adjacent_strings_are_concatenated_automatically(self):
        string = "Hello" ", " "world"
        self.assertEqual("Hello, world", string)

    def test_plus_will_not_modify_original_strings(self):
        hi = "Hello, "
        there = "world"
        string = hi + there
        self.assertEqual("Hello, ", hi)
        self.assertEqual("world", there)

    def test_plus_equals_will_append_to_end_of_string(self):
        hi = "Hello, "
        there = "world"
        hi += there
        self.assertEqual("Hello, world", hi)

    def test_plus_equals_also_leaves_original_string_unmodified(self):
        original = "Hello, "
        hi = original
        there = "world"
        hi += there
        self.assertEqual("Hello, ", original)

    def test_most_strings_interpret_escape_characters(self):
        string = "\n"
        self.assertEqual('\n', string)
        self.assertEqual("""\n""", string)
        self.assertEqual(1, len(string))
from runner.koan import Koan
class AboutStrings(Koan):
    """Koans about Python string literals: quoting, escaping, concatenation."""
    def test_double_quoted_strings_are_strings(self):
        string = "Hello, world."
        self.assertEqual(True, isinstance(string, str))
    def test_single_quoted_strings_are_also_strings(self):
        string = 'Goodbye, world.'
        self.assertEqual(True, isinstance(string, str))
    def test_triple_quote_strings_are_also_strings(self):
        string = """Howdy, world!"""
        self.assertEqual(True, isinstance(string, str))
    def test_triple_single_quotes_work_too(self):
        string = '''Bonjour tout le monde!'''
        self.assertEqual(True, isinstance(string, str))
    def test_raw_strings_are_also_strings(self):
        string = r"Konnichi wa, world!"
        self.assertEqual(True, isinstance(string, str))
    def test_use_single_quotes_to_create_string_with_double_quotes(self):
        string = 'He said, "Go Away."'
        # NOTE(review): compares the string with itself -- a tautology that
        # can never fail; consider asserting the expected literal.
        self.assertEqual(string, string)
    def test_use_double_quotes_to_create_strings_with_single_quotes(self):
        string = "Don't"
        # NOTE(review): same tautology as above.
        self.assertEqual(string, string)
    def test_use_backslash_for_escaping_quotes_in_strings(self):
        a = "He said, \"Don't\""
        b = 'He said, "Don\'t"'
        self.assertEqual(True, (a == b))
    def test_use_backslash_at_the_end_of_a_line_to_continue_onto_the_next_line(self):
        # Trailing backslash continues the literal; the second physical line
        # starts at column 0, so no extra spaces enter the string.
        string = "It was the best of times,\n\
It was the worst of times."
        self.assertEqual(52, len(string))
    def test_triple_quoted_strings_can_span_lines(self):
        string = """
Howdy,
world!
"""
        self.assertEqual(15, len(string))
    def test_triple_quoted_strings_need_less_escaping(self):
        a = "Hello \"world\"."
        b = """Hello "world"."""
        self.assertEqual(True, (a == b))
    def test_escaping_quotes_at_the_end_of_triple_quoted_string(self):
        string = """Hello "world\""""
        self.assertEqual('Hello "world"', string)
    def test_plus_concatenates_strings(self):
        string = "Hello, " + "world"
        self.assertEqual("Hello, world", string)
    def test_adjacent_strings_are_concatenated_automatically(self):
        string = "Hello" ", " "world"
        self.assertEqual("Hello, world", string)
    def test_plus_will_not_modify_original_strings(self):
        hi = "Hello, "
        there = "world"
        string = hi + there
        self.assertEqual("Hello, ", hi)
        self.assertEqual("world", there)
    def test_plus_equals_will_append_to_end_of_string(self):
        hi = "Hello, "
        there = "world"
        hi += there
        self.assertEqual("Hello, world", hi)
    def test_plus_equals_also_leaves_original_string_unmodified(self):
        original = "Hello, "
        hi = original
        there = "world"
        hi += there
        self.assertEqual("Hello, ", original)
    def test_most_strings_interpret_escape_characters(self):
        string = "\n"
        self.assertEqual('\n', string)
        self.assertEqual("""\n""", string)
        self.assertEqual(1, len(string))
| true | true |
f71d341f596966bbfcf7f5bdd48c9933e254bf52 | 455 | py | Python | data/scripts/templates/object/static/structure/naboo/shared_arbor_corner_90_s01.py | obi-two/GameServer | 7d37024e2291a97d49522610cd8f1dbe5666afc2 | [
"MIT"
] | 20 | 2015-02-23T15:11:56.000Z | 2022-03-18T20:56:48.000Z | data/scripts/templates/object/static/structure/naboo/shared_arbor_corner_90_s01.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | null | null | null | data/scripts/templates/object/static/structure/naboo/shared_arbor_corner_90_s01.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | 20 | 2015-04-04T16:35:59.000Z | 2022-03-24T14:54:37.000Z | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
	"""Build the Static template for the Naboo arbor corner (90 deg) structure."""
	result = Static()
	result.template = "object/static/structure/naboo/shared_arbor_corner_90_s01.iff"
	result.attribute_template_id = -1
	result.stfName("obj_n","unknown_object")
	#### BEGIN MODIFICATIONS ####
	# No hand-written customizations for this autogenerated template.
	#### END MODIFICATIONS ####
	return result
f71d35595a558e94e5fbc76d649418de62050aaf | 1,221 | py | Python | pattern_recognition/code/DataSplit.py | geneti/courseworkproj | 5843cc14c2ce01172420befca5d2683f1123096a | [
"MIT"
] | null | null | null | pattern_recognition/code/DataSplit.py | geneti/courseworkproj | 5843cc14c2ce01172420befca5d2683f1123096a | [
"MIT"
] | null | null | null | pattern_recognition/code/DataSplit.py | geneti/courseworkproj | 5843cc14c2ce01172420befca5d2683f1123096a | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import math
import matplotlib.pyplot as plt
import copy
raw_data = pd.read_csv('./raw_data.csv', header = 0, index_col = 0)
sample_num = raw_data.shape[0]
# sort features by nominal or non-nominal
dtypes = {}
for j in range(raw_data.shape[1]):
if isinstance(raw_data.iloc[0,j], str) or pd.isna(raw_data.iloc[0,j]):
dtypes[raw_data.columns[j]] = str
else:
dtypes[raw_data.columns[j]] = np.float64
data = pd.read_csv('./raw_data.csv',sep = ',', header = 0, index_col = 0, dtype = dtypes)
# separate the housing prices into several zones
data['PriceLevel'] = 'level'
for i in range(sample_num):
if data.iloc[i,79] <= 135000:
data.iloc[i,80] = 'level_1'
elif data.iloc[i,79] <= 165000:
data.iloc[i,80] = 'level_2'
elif data.iloc[i,79] <= 200000:
data.iloc[i,80] = 'level_3'
else:
data.iloc[i,80] = 'level_4'
data = data.drop(columns = 'SalePrice')
#shuffle the data
data = data.sample(frac=1).reset_index(drop=True)
print('data: ',data)
tmp = sample_num*9/10
print(data.shape)
train = data.iloc[0:int(tmp),:]
test = data.iloc[int(tmp)+1:sample_num,:]
train.to_csv('./train.csv')
test.to_csv('./test.csv') | 27.133333 | 89 | 0.658477 | import pandas as pd
import numpy as np
import math
import matplotlib.pyplot as plt
import copy
raw_data = pd.read_csv('./raw_data.csv', header = 0, index_col = 0)
sample_num = raw_data.shape[0]
dtypes = {}
for j in range(raw_data.shape[1]):
if isinstance(raw_data.iloc[0,j], str) or pd.isna(raw_data.iloc[0,j]):
dtypes[raw_data.columns[j]] = str
else:
dtypes[raw_data.columns[j]] = np.float64
data = pd.read_csv('./raw_data.csv',sep = ',', header = 0, index_col = 0, dtype = dtypes)
data['PriceLevel'] = 'level'
for i in range(sample_num):
if data.iloc[i,79] <= 135000:
data.iloc[i,80] = 'level_1'
elif data.iloc[i,79] <= 165000:
data.iloc[i,80] = 'level_2'
elif data.iloc[i,79] <= 200000:
data.iloc[i,80] = 'level_3'
else:
data.iloc[i,80] = 'level_4'
data = data.drop(columns = 'SalePrice')
data = data.sample(frac=1).reset_index(drop=True)
print('data: ',data)
tmp = sample_num*9/10
print(data.shape)
train = data.iloc[0:int(tmp),:]
test = data.iloc[int(tmp)+1:sample_num,:]
train.to_csv('./train.csv')
test.to_csv('./test.csv') | true | true |
f71d36032e8429f6c06ef136c6f50460f7f5346e | 5,787 | py | Python | neutron/agent/rpc.py | sajuptpm/notification_neutron | 45933f63c9eff0d2931a7209b040ff2dc69835c5 | [
"Apache-2.0"
] | 5 | 2015-10-20T07:56:53.000Z | 2017-12-31T22:39:15.000Z | neutron/agent/rpc.py | sajuptpm/notification_neutron | 45933f63c9eff0d2931a7209b040ff2dc69835c5 | [
"Apache-2.0"
] | null | null | null | neutron/agent/rpc.py | sajuptpm/notification_neutron | 45933f63c9eff0d2931a7209b040ff2dc69835c5 | [
"Apache-2.0"
] | 3 | 2015-05-08T22:36:28.000Z | 2015-10-24T21:25:35.000Z | # Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
from oslo_log import log as logging
import oslo_messaging
from oslo_utils import timeutils
from neutron.common import constants
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.i18n import _LW
LOG = logging.getLogger(__name__)
def create_consumers(endpoints, prefix, topic_details, start_listening=True):
    """Build the RPC connection and subscribe the agent consumers.

    :param endpoints: endpoint objects that process the incoming messages.
    :param prefix: common prefix for the plugin/agent message queues.
    :param topic_details: iterable of (topic, operation[, node_name]) tuples;
                          when node_name is present an extra host-scoped
                          consumer is created on '<topic_name>.<node_name>'.
    :param start_listening: when True, start the processing loop.
    :returns: a common Connection.
    """
    conn = n_rpc.create_connection(new=True)
    for entry in topic_details:
        # Pad short tuples so a missing node_name comes through as None.
        topic, operation, node_name = (list(entry) + [None])[:3]
        fanout_topic = topics.get_topic_name(prefix, topic, operation)
        conn.create_consumer(fanout_topic, endpoints, fanout=True)
        if node_name:
            conn.create_consumer('%s.%s' % (fanout_topic, node_name),
                                 endpoints, fanout=False)
    if start_listening:
        conn.consume_in_threads()
    return conn
class PluginReportStateAPI(object):
    """RPC client used to report state back to plugin.
    This class implements the client side of an rpc interface. The server side
    can be found in neutron.db.agents_db.AgentExtRpcCallback. For more
    information on changing rpc interfaces, see doc/source/devref/rpc_api.rst.
    """
    def __init__(self, topic):
        # All state reports go over the '1.0' interface in the dedicated
        # state-report RPC namespace.
        target = oslo_messaging.Target(topic=topic, version='1.0',
                                       namespace=constants.RPC_NAMESPACE_STATE)
        self.client = n_rpc.get_client(target)
    def report_state(self, context, agent_state, use_call=False):
        """Send an agent state report to the plugin.

        :param context: RPC request context.
        :param agent_state: dict describing the agent's current state.
        :param use_call: when True use a blocking call (caller waits for the
                         server's reply); otherwise a fire-and-forget cast.
        """
        cctxt = self.client.prepare()
        kwargs = {
            'agent_state': {'agent_state': agent_state},
            # NOTE(review): timeutils.strtime() is deprecated in newer
            # oslo.utils releases -- confirm the expected wire format before
            # replacing it.
            'time': timeutils.strtime(),
        }
        method = cctxt.call if use_call else cctxt.cast
        return method(context, 'report_state', **kwargs)
class PluginApi(object):
    '''Agent side of the rpc API.
    API version history:
        1.0 - Initial version.
        1.3 - get_device_details rpc signature upgrade to obtain 'host' and
              return value to include fixed_ips and device_owner for
              the device port
        1.4 - tunnel_sync rpc signature upgrade to obtain 'host'
    '''
    def __init__(self, topic):
        target = oslo_messaging.Target(topic=topic, version='1.0')
        self.client = n_rpc.get_client(target)
    def get_device_details(self, context, device, agent_id, host=None):
        """Fetch details for a single device from the plugin."""
        cctxt = self.client.prepare()
        return cctxt.call(context, 'get_device_details', device=device,
                          agent_id=agent_id, host=host)
    def get_devices_details_list(self, context, devices, agent_id, host=None):
        """Fetch details for several devices in one RPC round trip.

        Falls back to one get_device_details call per device (without the
        batched 1.3 interface) when the server is not yet upgraded.
        """
        try:
            cctxt = self.client.prepare(version='1.3')
            res = cctxt.call(context, 'get_devices_details_list',
                             devices=devices, agent_id=agent_id, host=host)
        except oslo_messaging.UnsupportedVersion:
            # If the server has not been upgraded yet, a DVR-enabled agent
            # may not work correctly, however it can function in 'degraded'
            # mode, in that DVR routers may not be in the system yet, and
            # it might be not necessary to retrieve info about the host.
            LOG.warn(_LW('DVR functionality requires a server upgrade.'))
            res = [
                self.get_device_details(context, device, agent_id, host)
                for device in devices
            ]
        return res
    def update_device_down(self, context, device, agent_id, host=None):
        """Notify the plugin that a device went down."""
        cctxt = self.client.prepare()
        return cctxt.call(context, 'update_device_down', device=device,
                          agent_id=agent_id, host=host)
    def update_device_up(self, context, device, agent_id, host=None):
        """Notify the plugin that a device came up."""
        cctxt = self.client.prepare()
        return cctxt.call(context, 'update_device_up', device=device,
                          agent_id=agent_id, host=host)
    def tunnel_sync(self, context, tunnel_ip, tunnel_type=None, host=None):
        """Synchronize tunnel endpoint information with the plugin.

        Tries the 1.4 interface (which carries 'host'); on an older server
        it retries with the pre-1.4 signature without 'host'.
        """
        try:
            cctxt = self.client.prepare(version='1.4')
            res = cctxt.call(context, 'tunnel_sync', tunnel_ip=tunnel_ip,
                             tunnel_type=tunnel_type, host=host)
        except oslo_messaging.UnsupportedVersion:
            LOG.warn(_LW('Tunnel synchronization requires a server upgrade.'))
            cctxt = self.client.prepare()
            res = cctxt.call(context, 'tunnel_sync', tunnel_ip=tunnel_ip,
                             tunnel_type=tunnel_type)
        return res
| 41.042553 | 79 | 0.650423 |
import itertools
from oslo_log import log as logging
import oslo_messaging
from oslo_utils import timeutils
from neutron.common import constants
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.i18n import _LW
LOG = logging.getLogger(__name__)
def create_consumers(endpoints, prefix, topic_details, start_listening=True):
    """Create agent RPC consumers on a shared connection.

    :param endpoints: rpc endpoints that receive the dispatched messages
    :param prefix: topic name prefix used to build each full topic name
    :param topic_details: iterable of (topic, operation) or
        (topic, operation, node_name) tuples; when a node_name is given an
        additional non-fanout, host-scoped consumer is created
    :param start_listening: if True, it starts the processing loop
    :returns: A common Connection.
    """
    connection = n_rpc.create_connection(new=True)
    for details in topic_details:
        # Pad 2-tuples with None so node_name stays optional.
        topic, operation, node_name = itertools.islice(
            itertools.chain(details, [None]), 3)
        topic_name = topics.get_topic_name(prefix, topic, operation)
        connection.create_consumer(topic_name, endpoints, fanout=True)
        if node_name:
            node_topic_name = '%s.%s' % (topic_name, node_name)
            connection.create_consumer(node_topic_name,
                                       endpoints,
                                       fanout=False)
    if start_listening:
        connection.consume_in_threads()
    return connection
class PluginReportStateAPI(object):
def __init__(self, topic):
target = oslo_messaging.Target(topic=topic, version='1.0',
namespace=constants.RPC_NAMESPACE_STATE)
self.client = n_rpc.get_client(target)
def report_state(self, context, agent_state, use_call=False):
cctxt = self.client.prepare()
kwargs = {
'agent_state': {'agent_state': agent_state},
'time': timeutils.strtime(),
}
method = cctxt.call if use_call else cctxt.cast
return method(context, 'report_state', **kwargs)
class PluginApi(object):
def __init__(self, topic):
target = oslo_messaging.Target(topic=topic, version='1.0')
self.client = n_rpc.get_client(target)
def get_device_details(self, context, device, agent_id, host=None):
cctxt = self.client.prepare()
return cctxt.call(context, 'get_device_details', device=device,
agent_id=agent_id, host=host)
def get_devices_details_list(self, context, devices, agent_id, host=None):
try:
cctxt = self.client.prepare(version='1.3')
res = cctxt.call(context, 'get_devices_details_list',
devices=devices, agent_id=agent_id, host=host)
except oslo_messaging.UnsupportedVersion:
LOG.warn(_LW('DVR functionality requires a server upgrade.'))
res = [
self.get_device_details(context, device, agent_id, host)
for device in devices
]
return res
def update_device_down(self, context, device, agent_id, host=None):
cctxt = self.client.prepare()
return cctxt.call(context, 'update_device_down', device=device,
agent_id=agent_id, host=host)
def update_device_up(self, context, device, agent_id, host=None):
cctxt = self.client.prepare()
return cctxt.call(context, 'update_device_up', device=device,
agent_id=agent_id, host=host)
def tunnel_sync(self, context, tunnel_ip, tunnel_type=None, host=None):
try:
cctxt = self.client.prepare(version='1.4')
res = cctxt.call(context, 'tunnel_sync', tunnel_ip=tunnel_ip,
tunnel_type=tunnel_type, host=host)
except oslo_messaging.UnsupportedVersion:
LOG.warn(_LW('Tunnel synchronization requires a server upgrade.'))
cctxt = self.client.prepare()
res = cctxt.call(context, 'tunnel_sync', tunnel_ip=tunnel_ip,
tunnel_type=tunnel_type)
return res
| true | true |
f71d36615b51bcdf98a5e783025c508c8e09cf93 | 2,236 | py | Python | tests/vsc_pf.py | pydgrid/pydgrid | c56073c385f42883c79333533f7cfb8383a173aa | [
"MIT"
] | 15 | 2019-01-29T08:22:39.000Z | 2022-01-13T20:41:32.000Z | tests/vsc_pf.py | pydgrid/pydgrid | c56073c385f42883c79333533f7cfb8383a173aa | [
"MIT"
] | 1 | 2017-11-28T21:34:52.000Z | 2017-11-28T21:34:52.000Z | tests/vsc_pf.py | pydgrid/pydgrid | c56073c385f42883c79333533f7cfb8383a173aa | [
"MIT"
] | 4 | 2018-02-15T02:12:47.000Z | 2020-02-16T17:52:15.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 9 23:22:02 2017
@author: jmmauricio
"""
from pydgrid import grid
from pydgrid.pydgrid import phasor2time, pq
from pydgrid.pf import pf_eval,time_serie
from pydgrid.electric import bess_vsc, bess_vsc_eval
from pydgrid.simu import simu, f_eval, ini_eval, run_eval
import matplotlib.pyplot as plt
import numpy as np
import time
#def test_dyn_bess():
# Three-bus 0.4 kV test network: Bus_1 is the grid former, Bus_2 a passive
# feeder (all zeros) and Bus_3 a PQ-controlled VSC injecting 600 kW / 200 kvar.
# NOTE(review): the lines use code "UG1w3", which is not defined in the
# "line_codes" section below — presumably a pydgrid built-in code; confirm.
data = {
        "lines":[
                {"bus_j": "Bus_1", "bus_k": "Bus_2", "code": "UG1w3", "m": 200},
                {"bus_j": "Bus_1", "bus_k": "Bus_3", "code": "UG1w3", "m": 200}
                ],
        "buses":[
                {"bus": "Bus_1", "pos_x": 0.0, "pos_y": 0, "units": "m", "U_kV":0.4},
                {"bus": "Bus_2", "pos_x": 110.0, "pos_y": 20, "units": "m", "U_kV":0.4},
                {"bus": "Bus_3", "pos_x": 110.0, "pos_y":-20, "units": "m", "U_kV":0.4}
                ],
        "grid_formers":[
                {"bus": "Bus_1",
                "bus_nodes": [1, 2, 3], "deg": [ 18.43101738, -103.41207707,  144.94884128],
                "kV": [ 0.24982762,  0.21600212,  0.22831829]}
                ],
        "grid_feeders":[{"bus": "Bus_2","bus_nodes": [1, 2, 3],
                         "kW": [0,0,0], "kvar": [0,0,0],
                         "kA": [0,0,0], "phi_deg":[30, 30, 30]},
                        {"bus": "Bus_3","bus_nodes": [1, 2, 3],
                         "type":"vsc","control_type":"pq_leon",
                         "kW": 600.0, "kvar": 200.0,
                         "L":400e-6, "R":0.01,"V_dc":800.0}
                       ],
        "line_codes":
            {"pry_al_50":  {"R1":0.8,    "X1": 0.148, "R0":0.8,    "X0": 0.148},
             "pry_al_95":  {"R1":0.403,  "X1": 0.129, "R0":0.403,  "X0": 0.129},
             "pry_al_120": {"R1":0.321,  "X1": 0.123, "R0":0.321,  "X0": 0.321},
             "pry_al_185": {"R1":0.209,  "X1": 0.113, "R0":0.209,  "X0": 0.209},
             "pry_al_300": {"R1":0.128,  "X1": 0.105, "R0":0.128,  "X0": 0.128}
            },
       }
sys1 = grid()
sys1.read(data)  # Load data
sys1.pf()  # solve power flow
sys1.get_v()      # post process voltages
sys1.get_i()      # post process currents
# Convert Bus_3 phasors to time-domain waveforms and compute powers.
v_2_a,v_2_b,v_2_c = phasor2time(sys1.v_abc('Bus_3'))
i_2_a,i_2_b,i_2_c = phasor2time(sys1.i_abc('Bus_3'))
p,q,q_lipo = pq(sys1.v_abc('Bus_3'),sys1.i_abc('Bus_3'))
if __name__ == "__main__":
    pass
#    test_Dyn11()
#    test_Dyg11_3w()
| 33.373134 | 84 | 0.529964 |
from pydgrid import grid
from pydgrid.pydgrid import phasor2time, pq
from pydgrid.pf import pf_eval,time_serie
from pydgrid.electric import bess_vsc, bess_vsc_eval
from pydgrid.simu import simu, f_eval, ini_eval, run_eval
import matplotlib.pyplot as plt
import numpy as np
import time
data = {
"lines":[
{"bus_j": "Bus_1", "bus_k": "Bus_2", "code": "UG1w3", "m": 200},
{"bus_j": "Bus_1", "bus_k": "Bus_3", "code": "UG1w3", "m": 200}
],
"buses":[
{"bus": "Bus_1", "pos_x": 0.0, "pos_y": 0, "units": "m", "U_kV":0.4},
{"bus": "Bus_2", "pos_x": 110.0, "pos_y": 20, "units": "m", "U_kV":0.4},
{"bus": "Bus_3", "pos_x": 110.0, "pos_y":-20, "units": "m", "U_kV":0.4}
],
"grid_formers":[
{"bus": "Bus_1",
"bus_nodes": [1, 2, 3], "deg": [ 18.43101738, -103.41207707, 144.94884128],
"kV": [ 0.24982762, 0.21600212, 0.22831829]}
],
"grid_feeders":[{"bus": "Bus_2","bus_nodes": [1, 2, 3],
"kW": [0,0,0], "kvar": [0,0,0],
"kA": [0,0,0], "phi_deg":[30, 30, 30]},
{"bus": "Bus_3","bus_nodes": [1, 2, 3],
"type":"vsc","control_type":"pq_leon",
"kW": 600.0, "kvar": 200.0,
"L":400e-6, "R":0.01,"V_dc":800.0}
],
"line_codes":
{"pry_al_50": {"R1":0.8, "X1": 0.148, "R0":0.8, "X0": 0.148},
"pry_al_95": {"R1":0.403, "X1": 0.129, "R0":0.403, "X0": 0.129},
"pry_al_120": {"R1":0.321, "X1": 0.123, "R0":0.321, "X0": 0.321},
"pry_al_185": {"R1":0.209, "X1": 0.113, "R0":0.209, "X0": 0.209},
"pry_al_300": {"R1":0.128, "X1": 0.105, "R0":0.128, "X0": 0.128}
},
}
sys1 = grid()
sys1.read(data)
sys1.pf()
sys1.get_v()
sys1.get_i()
v_2_a,v_2_b,v_2_c = phasor2time(sys1.v_abc('Bus_3'))
i_2_a,i_2_b,i_2_c = phasor2time(sys1.i_abc('Bus_3'))
p,q,q_lipo = pq(sys1.v_abc('Bus_3'),sys1.i_abc('Bus_3'))
if __name__ == "__main__":
pass
| true | true |
f71d381fdb5477319a7c92dead591d224115b4b8 | 1,121 | py | Python | sms/forms.py | Ahmed-Dauda/codethinkers-test | 0cf6edbaf6156e975cd1cda3bd2c97aae3afc69f | [
"MIT"
] | null | null | null | sms/forms.py | Ahmed-Dauda/codethinkers-test | 0cf6edbaf6156e975cd1cda3bd2c97aae3afc69f | [
"MIT"
] | null | null | null | sms/forms.py | Ahmed-Dauda/codethinkers-test | 0cf6edbaf6156e975cd1cda3bd2c97aae3afc69f | [
"MIT"
] | null | null | null | from django.contrib.auth.forms import UserCreationForm
# from django.contrib.auth.models import User
from django import forms
from django.db import models
from django.forms import ModelForm
from django import forms
from django.db import models
from sms.models import Comment
from users.models import NewUser, BaseUserManager
# class signupform(UserCreationForm):
# """docstring for signupform"""
# # TODO: write code...
# first_name = models.CharField(max_length = 225)
# last_name = models.CharField(max_length = 225)
# # email = models.EmailField(max_length = 225)
# class Meta:
# model = NewUser
# fields = [
# 'first_name',
# 'last_name',
# 'country',
# 'email',
# 'password1',
# 'password2'
# ]
# from sms.models import smsform
class smspostform(ModelForm):
    class Meta:
        # NOTE(review): this Meta declares `fields` but no `model` (the
        # `model = smsform` line is commented out, as is its import above);
        # Django raises ImproperlyConfigured when a ModelForm without a
        # model is instantiated — confirm the intended model and restore it.
        # model = smsform
        fields= '__all__'
class feedbackform(ModelForm):
    """ModelForm exposing every field of the Comment model."""
    class Meta:
        model = Comment
        fields= '__all__'
from django import forms
from django.db import models
from django.forms import ModelForm
from django import forms
from django.db import models
from sms.models import Comment
from users.models import NewUser, BaseUserManager
class smspostform(ModelForm):
class Meta:
fields= '__all__'
class feedbackform(ModelForm):
class Meta:
model = Comment
fields= '__all__' | true | true |
f71d389398f75d2633d84bc4386f3422cfcf5dd6 | 3,515 | py | Python | rl/policy.py | GyroscopeHQ/keras-rl | 35f9b50c3b35f52722d740e8ee42e33c1750e44a | [
"MIT"
] | 26 | 2018-12-30T20:32:45.000Z | 2022-03-15T06:11:40.000Z | rl/policy.py | GyroscopeHQ/keras-rl | 35f9b50c3b35f52722d740e8ee42e33c1750e44a | [
"MIT"
] | 20 | 2018-08-29T10:34:48.000Z | 2022-03-11T23:16:24.000Z | rl/policy.py | GyroscopeHQ/keras-rl | 35f9b50c3b35f52722d740e8ee42e33c1750e44a | [
"MIT"
] | 13 | 2019-05-11T01:59:58.000Z | 2022-03-15T14:12:40.000Z | from __future__ import division
import numpy as np
from rl.util import *
class Policy(object):
    """Abstract base class for action-selection policies.

    Subclasses must implement select_action(); the remaining hooks
    provide safe defaults.
    """
    def _set_agent(self, agent):
        # Stored so policies can read agent state while acting
        # (e.g. agent.training / agent.step, used by LinearAnnealedPolicy).
        self.agent = agent
    @property
    def metrics_names(self):
        # Names matching, position by position, the values in `metrics`.
        return []
    @property
    def metrics(self):
        return []
    def select_action(self, **kwargs):
        """Return an action; must be overridden by subclasses."""
        raise NotImplementedError()
    def get_config(self):
        """Return a serializable configuration dict for this policy."""
        return {}
class LinearAnnealedPolicy(Policy):
    """Wrap another policy and linearly anneal one of its attributes.

    While the agent is training, the attribute named ``attr`` on
    ``inner_policy`` is swept linearly from ``value_max`` down to
    ``value_min`` over ``nb_steps`` agent steps, then held at
    ``value_min``.  Outside of training, the constant ``value_test``
    is used instead.
    """
    def __init__(self, inner_policy, attr, value_max, value_min, value_test, nb_steps):
        if not hasattr(inner_policy, attr):
            # Bug fix: the message has two replacement fields, so both the
            # policy and the attribute must be passed to format(); previously
            # only `attr` was supplied, which made the raise itself fail with
            # IndexError instead of the intended ValueError.
            raise ValueError('Policy "{}" does not have attribute "{}".'.format(
                inner_policy, attr))
        super(LinearAnnealedPolicy, self).__init__()
        self.inner_policy = inner_policy
        self.attr = attr
        self.value_max = value_max
        self.value_min = value_min
        self.value_test = value_test
        self.nb_steps = nb_steps
    def get_current_value(self):
        """Return the annealed value for the agent's current step."""
        if self.agent.training:
            # Linear annealed: f(x) = ax + b, clamped below at value_min.
            a = -float(self.value_max - self.value_min) / float(self.nb_steps)
            b = float(self.value_max)
            value = max(self.value_min, a * float(self.agent.step) + b)
        else:
            value = self.value_test
        return value
    def select_action(self, **kwargs):
        """Update the wrapped policy's attribute, then delegate selection."""
        setattr(self.inner_policy, self.attr, self.get_current_value())
        return self.inner_policy.select_action(**kwargs)
    @property
    def metrics_names(self):
        return ['mean_{}'.format(self.attr)]
    @property
    def metrics(self):
        return [getattr(self.inner_policy, self.attr)]
    def get_config(self):
        """Serialize this policy, including the wrapped policy's config."""
        config = super(LinearAnnealedPolicy, self).get_config()
        config['attr'] = self.attr
        config['value_max'] = self.value_max
        config['value_min'] = self.value_min
        config['value_test'] = self.value_test
        config['nb_steps'] = self.nb_steps
        config['inner_policy'] = get_object_config(self.inner_policy)
        return config
class EpsGreedyQPolicy(Policy):
    """Epsilon-greedy: act uniformly at random with probability eps,
    greedily (argmax of the Q-values) otherwise."""
    def __init__(self, eps=.1):
        super(EpsGreedyQPolicy, self).__init__()
        self.eps = eps
    def select_action(self, q_values):
        """Return an action index for a 1-D array of Q-values."""
        assert q_values.ndim == 1
        nb_actions = q_values.shape[0]
        if np.random.uniform() < self.eps:
            # np.random.random_integers(0, nb_actions-1) is deprecated and
            # removed in modern NumPy; randint's half-open range
            # [0, nb_actions) draws the same distribution.
            action = np.random.randint(0, nb_actions)
        else:
            action = np.argmax(q_values)
        return action
    def get_config(self):
        """Serialize, including the exploration rate."""
        config = super(EpsGreedyQPolicy, self).get_config()
        config['eps'] = self.eps
        return config
class GreedyQPolicy(Policy):
    """Deterministic policy: always exploit the best-valued action."""
    def select_action(self, q_values):
        """Return the index of the largest entry in a 1-D Q-value array."""
        assert q_values.ndim == 1
        return q_values.argmax()
class BoltzmannQPolicy(Policy):
    """Sample actions from a Boltzmann (softmax) distribution over Q-values.

    ``tau`` is the temperature (higher -> closer to uniform); ``clip``
    bounds the scaled Q-values before exponentiation so np.exp cannot
    overflow.
    """
    def __init__(self, tau=1., clip=(-500., 500.)):
        super(BoltzmannQPolicy, self).__init__()
        self.tau = tau
        self.clip = clip
    def select_action(self, q_values):
        """Pick an action index with probability softmax(q_values / tau)."""
        assert q_values.ndim == 1
        # float64 keeps the exp/normalization numerically stable.
        q_values = q_values.astype('float64')
        nb_actions = q_values.shape[0]
        exp_values = np.exp(np.clip(q_values / self.tau, self.clip[0], self.clip[1]))
        probs = exp_values / np.sum(exp_values)
        action = np.random.choice(range(nb_actions), p=probs)
        return action
    def get_config(self):
        """Serialize, including temperature and clipping bounds."""
        config = super(BoltzmannQPolicy, self).get_config()
        config['tau'] = self.tau
        config['clip'] = self.clip
        return config
| 28.811475 | 87 | 0.621906 | from __future__ import division
import numpy as np
from rl.util import *
class Policy(object):
def _set_agent(self, agent):
self.agent = agent
@property
def metrics_names(self):
return []
@property
def metrics(self):
return []
def select_action(self, **kwargs):
raise NotImplementedError()
def get_config(self):
return {}
class LinearAnnealedPolicy(Policy):
def __init__(self, inner_policy, attr, value_max, value_min, value_test, nb_steps):
if not hasattr(inner_policy, attr):
raise ValueError('Policy "{}" does not have attribute "{}".'.format(attr))
super(LinearAnnealedPolicy, self).__init__()
self.inner_policy = inner_policy
self.attr = attr
self.value_max = value_max
self.value_min = value_min
self.value_test = value_test
self.nb_steps = nb_steps
def get_current_value(self):
if self.agent.training:
a = -float(self.value_max - self.value_min) / float(self.nb_steps)
b = float(self.value_max)
value = max(self.value_min, a * float(self.agent.step) + b)
else:
value = self.value_test
return value
def select_action(self, **kwargs):
setattr(self.inner_policy, self.attr, self.get_current_value())
return self.inner_policy.select_action(**kwargs)
@property
def metrics_names(self):
return ['mean_{}'.format(self.attr)]
@property
def metrics(self):
return [getattr(self.inner_policy, self.attr)]
def get_config(self):
config = super(LinearAnnealedPolicy, self).get_config()
config['attr'] = self.attr
config['value_max'] = self.value_max
config['value_min'] = self.value_min
config['value_test'] = self.value_test
config['nb_steps'] = self.nb_steps
config['inner_policy'] = get_object_config(self.inner_policy)
return config
class EpsGreedyQPolicy(Policy):
def __init__(self, eps=.1):
super(EpsGreedyQPolicy, self).__init__()
self.eps = eps
def select_action(self, q_values):
assert q_values.ndim == 1
nb_actions = q_values.shape[0]
if np.random.uniform() < self.eps:
action = np.random.random_integers(0, nb_actions-1)
else:
action = np.argmax(q_values)
return action
def get_config(self):
config = super(EpsGreedyQPolicy, self).get_config()
config['eps'] = self.eps
return config
class GreedyQPolicy(Policy):
def select_action(self, q_values):
assert q_values.ndim == 1
action = np.argmax(q_values)
return action
class BoltzmannQPolicy(Policy):
def __init__(self, tau=1., clip=(-500., 500.)):
super(BoltzmannQPolicy, self).__init__()
self.tau = tau
self.clip = clip
def select_action(self, q_values):
assert q_values.ndim == 1
q_values = q_values.astype('float64')
nb_actions = q_values.shape[0]
exp_values = np.exp(np.clip(q_values / self.tau, self.clip[0], self.clip[1]))
probs = exp_values / np.sum(exp_values)
action = np.random.choice(range(nb_actions), p=probs)
return action
def get_config(self):
config = super(BoltzmannQPolicy, self).get_config()
config['tau'] = self.tau
config['clip'] = self.clip
return config
| true | true |
f71d39022e614e248d8c9266081b47d24579b813 | 9,709 | py | Python | tests/unit/bokeh/model/test_model.py | teresafds/bokeh | 95b2a74ff463cfabdf9e3390951fa380166e6691 | [
"BSD-3-Clause"
] | null | null | null | tests/unit/bokeh/model/test_model.py | teresafds/bokeh | 95b2a74ff463cfabdf9e3390951fa380166e6691 | [
"BSD-3-Clause"
] | null | null | null | tests/unit/bokeh/model/test_model.py | teresafds/bokeh | 95b2a74ff463cfabdf9e3390951fa380166e6691 | [
"BSD-3-Clause"
] | null | null | null | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations # isort:skip
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from bokeh.core.properties import Int, List, String
from bokeh.models import * # NOQA
from bokeh.models import CustomJS
from bokeh.plotting import * # NOQA
from bokeh.document import document # isort:skip
# Module under test
from bokeh.model import Model # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class SomeModel(Model):
    """Minimal Model subclass used as a fixture throughout these tests."""
    a = Int(12)
    b = String("hello")
    c = List(Int, [1, 2, 3])
class Test_js_on_change:
    """Unit tests for Model.js_on_change."""

    def test_exception_for_no_callbacks(self) -> None:
        model = SomeModel()
        with pytest.raises(ValueError):
            model.js_on_change('foo')

    def test_exception_for_bad_callbacks(self) -> None:
        model = SomeModel()
        # None of these are CustomJS instances, so each must be rejected.
        for bad in [10, "bar", None, [1], {}, 10.2]:
            with pytest.raises(ValueError):
                model.js_on_change('foo', bad)

    def test_with_propname(self) -> None:
        callback = CustomJS(code="")
        # Every declared property name should be normalized to "change:<name>".
        for prop in SomeModel().properties():
            model = SomeModel()
            model.js_on_change(prop, callback)
            assert model.js_property_callbacks == {f"change:{prop}": [callback]}

    def test_with_non_propname(self) -> None:
        callback = CustomJS(code="")
        # Unknown names are kept verbatim...
        m_plain = SomeModel()
        m_plain.js_on_change('foo', callback)
        assert m_plain.js_property_callbacks == {"foo": [callback]}
        # ...and explicit "change:" prefixes pass through unchanged.
        m_prefixed = SomeModel()
        m_prefixed.js_on_change('change:b', callback)
        assert m_prefixed.js_property_callbacks == {"change:b": [callback]}

    def test_with_multple_callbacks(self) -> None:
        first = CustomJS(code="")
        second = CustomJS(code="")
        model = SomeModel()
        model.js_on_change('foo', first, second)
        assert model.js_property_callbacks == {"foo": [first, second]}

    def test_with_multple_callbacks_separately(self) -> None:
        first = CustomJS(code="")
        second = CustomJS(code="")
        model = SomeModel()
        model.js_on_change('foo', first)
        assert model.js_property_callbacks == {"foo": [first]}
        model.js_on_change('foo', second)
        assert model.js_property_callbacks == {"foo": [first, second]}

    def test_ignores_dupe_callbacks(self) -> None:
        callback = CustomJS(code="")
        model = SomeModel()
        model.js_on_change('foo', callback, callback)
        assert model.js_property_callbacks == {"foo": [callback]}
class Test_js_on_event:
    """Unit tests for Model.js_on_event."""

    def test_with_multple_callbacks(self) -> None:
        first = CustomJS(code="foo")
        second = CustomJS(code="bar")
        model = SomeModel()
        model.js_on_event("some", first, second)
        assert model.js_event_callbacks == {"some": [first, second]}

    def test_with_multple_callbacks_separately(self) -> None:
        first = CustomJS(code="foo")
        second = CustomJS(code="bar")
        model = SomeModel()
        model.js_on_event("some", first)
        assert model.js_event_callbacks == {"some": [first]}
        model.js_on_event("some", second)
        assert model.js_event_callbacks == {"some": [first, second]}

    def test_ignores_dupe_callbacks(self) -> None:
        callback = CustomJS(code="foo")
        model = SomeModel()
        model.js_on_event("some", callback, callback)
        assert model.js_event_callbacks == {"some": [callback]}

    def test_ignores_dupe_callbacks_separately(self) -> None:
        callback = CustomJS(code="foo")
        model = SomeModel()
        model.js_on_event("some", callback)
        assert model.js_event_callbacks == {"some": [callback]}
        # Re-registering the same callback must not duplicate it.
        model.js_on_event("some", callback)
        assert model.js_event_callbacks == {"some": [callback]}
class Test_js_link:
    """Unit tests for Model.js_link: validation and the generated CustomJS."""
    def test_value_error_on_bad_attr(self) -> None:
        m1 = SomeModel()
        m2 = SomeModel()
        with pytest.raises(ValueError) as e:
            m1.js_link('junk', m2, 'b')
        assert str(e.value).endswith("%r is not a property of self (%r)" % ("junk", m1))
    def test_value_error_on_bad_other(self) -> None:
        m1 = SomeModel()
        with pytest.raises(ValueError) as e:
            m1.js_link('a', 'junk', 'b')
        assert str(e.value).endswith("'other' is not a Bokeh model: %r" % "junk")
    def test_value_error_on_bad_other_attr(self) -> None:
        m1 = SomeModel()
        m2 = SomeModel()
        with pytest.raises(ValueError) as e:
            m1.js_link('a', m2, 'junk')
        assert str(e.value).endswith("%r is not a property of other (%r)" % ("junk", m2))
    def test_creates_customjs(self) -> None:
        """Without attr_selector, the generated code assigns the whole value."""
        m1 = SomeModel()
        m2 = SomeModel()
        assert len(m1.js_property_callbacks) == 0
        m1.js_link('a', m2, 'b')
        assert len(m1.js_property_callbacks) == 1
        assert "change:a" in m1.js_property_callbacks
        cbs = m1.js_property_callbacks["change:a"]
        assert len(cbs) == 1
        cb = cbs[0]
        assert isinstance(cb, CustomJS)
        assert cb.args == dict(other=m2)
        assert cb.code == "other.b = this.a"
    def test_attr_selector_creates_customjs_int(self) -> None:
        """An integer attr_selector becomes a numeric index in the JS code."""
        m1 = SomeModel()
        m2 = SomeModel()
        assert len(m1.js_property_callbacks) == 0
        m1.js_link('a', m2, 'b', 1)
        assert len(m1.js_property_callbacks) == 1
        assert "change:a" in m1.js_property_callbacks
        cbs = m1.js_property_callbacks["change:a"]
        assert len(cbs) == 1
        cb = cbs[0]
        assert isinstance(cb, CustomJS)
        assert cb.args == dict(other=m2)
        assert cb.code == "other.b = this.a[1]"
    def test_attr_selector_creates_customjs_with_zero(self) -> None:
        # Index 0 is falsy in Python; make sure it is not dropped.
        m1 = SomeModel()
        m2 = SomeModel()
        assert len(m1.js_property_callbacks) == 0
        m1.js_link('a', m2, 'b', 0)
        assert len(m1.js_property_callbacks) == 1
        assert "change:a" in m1.js_property_callbacks
        cbs = m1.js_property_callbacks["change:a"]
        assert len(cbs) == 1
        cb = cbs[0]
        assert isinstance(cb, CustomJS)
        assert cb.args == dict(other=m2)
        assert cb.code == "other.b = this.a[0]"
    def test_attr_selector_creates_customjs_str(self) -> None:
        """A string attr_selector becomes a quoted key in the JS code."""
        m1 = SomeModel()
        m2 = SomeModel()
        assert len(m1.js_property_callbacks) == 0
        m1.js_link('a', m2, 'b', "test")
        assert len(m1.js_property_callbacks) == 1
        assert "change:a" in m1.js_property_callbacks
        cbs = m1.js_property_callbacks["change:a"]
        assert len(cbs) == 1
        cb = cbs[0]
        assert isinstance(cb, CustomJS)
        assert cb.args == dict(other=m2)
        assert cb.code == "other.b = this.a['test']"
def test_all_builtin_models_default_constructible() -> None:
    """Every registered Model subclass must be constructible with no args."""
    failures = []
    for name, cls in Model.model_class_reverse_map.items():
        try:
            cls()
        except Exception:
            failures.append(name)
    # An empty failure list gives a readable diff when something breaks.
    assert failures == []
def test_select() -> None:
    """Exercise select/select_one/set_select on documents and models."""
    # we aren't trying to replace test_query here, only test
    # our wrappers around it, so no need to try every kind of
    # query
    d = document.Document()
    root1 = SomeModel(a=42, name='a')
    root2 = SomeModel(a=43, name='c')
    root3 = SomeModel(a=44, name='d')
    root4 = SomeModel(a=45, name='d')
    d.add_root(root1)
    d.add_root(root2)
    d.add_root(root3)
    d.add_root(root4)
    # select()
    assert {root1} == set(root1.select(dict(a=42)))
    assert {root1} == set(root1.select(dict(name="a")))
    assert {root2} == set(root2.select(dict(name="c")))
    assert set() == set(root1.select(dict(name="nope")))
    # select() on object
    assert set() == set(root3.select(dict(name='a')))
    assert {root3} == set(root3.select(dict(a=44)))
    # select_one()
    assert root3 == root3.select_one(dict(name='d'))
    assert root1.select_one(dict(name='nope')) is None
    # Two roots share name 'd', so a document-level select_one must fail.
    with pytest.raises(ValueError) as e:
        d.select_one(dict(name='d'))
    assert 'Found more than one' in repr(e)
    # select_one() on object
    assert root3.select_one(dict(name='a')) is None
    assert root3.select_one(dict(name='c')) is None
    # set_select()
    root1.set_select(dict(a=42), dict(name="c", a=44))
    assert {root1} == set(root1.select(dict(name="c")))
    assert {root1} == set(root1.select(dict(a=44)))
    # set_select() on object
    root3.set_select(dict(name='d'), dict(a=57))
    assert {root3} == set(root3.select(dict(a=57)))
    # set_select() on class
    root2.set_select(SomeModel, dict(name='new_name'))
    assert {root2} == set(root2.select(dict(name="new_name")))
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| 35.434307 | 89 | 0.523638 |
from __future__ import annotations
import pytest ; pytest
from bokeh.core.properties import Int, List, String
from bokeh.models import *
from bokeh.models import CustomJS
from bokeh.plotting import *
from bokeh.document import document
from bokeh.model import Model
class SomeModel(Model):
a = Int(12)
b = String("hello")
c = List(Int, [1, 2, 3])
class Test_js_on_change:
def test_exception_for_no_callbacks(self) -> None:
m = SomeModel()
with pytest.raises(ValueError):
m.js_on_change('foo')
def test_exception_for_bad_callbacks(self) -> None:
m = SomeModel()
for val in [10, "bar", None, [1], {}, 10.2]:
with pytest.raises(ValueError):
m.js_on_change('foo', val)
def test_with_propname(self) -> None:
cb = CustomJS(code="")
m0 = SomeModel()
for name in m0.properties():
m = SomeModel()
m.js_on_change(name, cb)
assert m.js_property_callbacks == {"change:%s" % name: [cb]}
def test_with_non_propname(self) -> None:
cb = CustomJS(code="")
m1 = SomeModel()
m1.js_on_change('foo', cb)
assert m1.js_property_callbacks == {"foo": [cb]}
m2 = SomeModel()
m2.js_on_change('change:b', cb)
assert m2.js_property_callbacks == {"change:b": [cb]}
def test_with_multple_callbacks(self) -> None:
cb1 = CustomJS(code="")
cb2 = CustomJS(code="")
m = SomeModel()
m.js_on_change('foo', cb1, cb2)
assert m.js_property_callbacks == {"foo": [cb1, cb2]}
def test_with_multple_callbacks_separately(self) -> None:
cb1 = CustomJS(code="")
cb2 = CustomJS(code="")
m = SomeModel()
m.js_on_change('foo', cb1)
assert m.js_property_callbacks == {"foo": [cb1]}
m.js_on_change('foo', cb2)
assert m.js_property_callbacks == {"foo": [cb1, cb2]}
def test_ignores_dupe_callbacks(self) -> None:
cb = CustomJS(code="")
m = SomeModel()
m.js_on_change('foo', cb, cb)
assert m.js_property_callbacks == {"foo": [cb]}
class Test_js_on_event:
def test_with_multple_callbacks(self) -> None:
cb1 = CustomJS(code="foo")
cb2 = CustomJS(code="bar")
m = SomeModel()
m.js_on_event("some", cb1, cb2)
assert m.js_event_callbacks == {"some": [cb1, cb2]}
def test_with_multple_callbacks_separately(self) -> None:
cb1 = CustomJS(code="foo")
cb2 = CustomJS(code="bar")
m = SomeModel()
m.js_on_event("some", cb1)
assert m.js_event_callbacks == {"some": [cb1]}
m.js_on_event("some", cb2)
assert m.js_event_callbacks == {"some": [cb1, cb2]}
def test_ignores_dupe_callbacks(self) -> None:
cb = CustomJS(code="foo")
m = SomeModel()
m.js_on_event("some", cb, cb)
assert m.js_event_callbacks == {"some": [cb]}
def test_ignores_dupe_callbacks_separately(self) -> None:
cb = CustomJS(code="foo")
m = SomeModel()
m.js_on_event("some", cb)
assert m.js_event_callbacks == {"some": [cb]}
m.js_on_event("some", cb)
assert m.js_event_callbacks == {"some": [cb]}
class Test_js_link:
def test_value_error_on_bad_attr(self) -> None:
m1 = SomeModel()
m2 = SomeModel()
with pytest.raises(ValueError) as e:
m1.js_link('junk', m2, 'b')
assert str(e.value).endswith("%r is not a property of self (%r)" % ("junk", m1))
def test_value_error_on_bad_other(self) -> None:
m1 = SomeModel()
with pytest.raises(ValueError) as e:
m1.js_link('a', 'junk', 'b')
assert str(e.value).endswith("'other' is not a Bokeh model: %r" % "junk")
def test_value_error_on_bad_other_attr(self) -> None:
m1 = SomeModel()
m2 = SomeModel()
with pytest.raises(ValueError) as e:
m1.js_link('a', m2, 'junk')
assert str(e.value).endswith("%r is not a property of other (%r)" % ("junk", m2))
def test_creates_customjs(self) -> None:
m1 = SomeModel()
m2 = SomeModel()
assert len(m1.js_property_callbacks) == 0
m1.js_link('a', m2, 'b')
assert len(m1.js_property_callbacks) == 1
assert "change:a" in m1.js_property_callbacks
cbs = m1.js_property_callbacks["change:a"]
assert len(cbs) == 1
cb = cbs[0]
assert isinstance(cb, CustomJS)
assert cb.args == dict(other=m2)
assert cb.code == "other.b = this.a"
def test_attr_selector_creates_customjs_int(self) -> None:
m1 = SomeModel()
m2 = SomeModel()
assert len(m1.js_property_callbacks) == 0
m1.js_link('a', m2, 'b', 1)
assert len(m1.js_property_callbacks) == 1
assert "change:a" in m1.js_property_callbacks
cbs = m1.js_property_callbacks["change:a"]
assert len(cbs) == 1
cb = cbs[0]
assert isinstance(cb, CustomJS)
assert cb.args == dict(other=m2)
assert cb.code == "other.b = this.a[1]"
def test_attr_selector_creates_customjs_with_zero(self) -> None:
m1 = SomeModel()
m2 = SomeModel()
assert len(m1.js_property_callbacks) == 0
m1.js_link('a', m2, 'b', 0)
assert len(m1.js_property_callbacks) == 1
assert "change:a" in m1.js_property_callbacks
cbs = m1.js_property_callbacks["change:a"]
assert len(cbs) == 1
cb = cbs[0]
assert isinstance(cb, CustomJS)
assert cb.args == dict(other=m2)
assert cb.code == "other.b = this.a[0]"
def test_attr_selector_creates_customjs_str(self) -> None:
m1 = SomeModel()
m2 = SomeModel()
assert len(m1.js_property_callbacks) == 0
m1.js_link('a', m2, 'b', "test")
assert len(m1.js_property_callbacks) == 1
assert "change:a" in m1.js_property_callbacks
cbs = m1.js_property_callbacks["change:a"]
assert len(cbs) == 1
cb = cbs[0]
assert isinstance(cb, CustomJS)
assert cb.args == dict(other=m2)
assert cb.code == "other.b = this.a['test']"
def test_all_builtin_models_default_constructible() -> None:
bad = []
for name, cls in Model.model_class_reverse_map.items():
try:
cls()
except Exception:
bad.append(name)
assert bad == []
def test_select() -> None:
    """Exercise the select/select_one/set_select wrappers.

    The underlying query machinery has its own tests, so only a handful of
    representative queries are checked here.
    """
    doc = document.Document()
    root1 = SomeModel(a=42, name='a')
    root2 = SomeModel(a=43, name='c')
    root3 = SomeModel(a=44, name='d')
    root4 = SomeModel(a=45, name='d')
    for root in (root1, root2, root3, root4):
        doc.add_root(root)

    # select() by attribute value and by name
    assert set(root1.select(dict(a=42))) == {root1}
    assert set(root1.select(dict(name="a"))) == {root1}
    assert set(root2.select(dict(name="c"))) == {root2}
    assert set(root1.select(dict(name="nope"))) == set()

    # select() on an object that does / does not match
    assert set(root3.select(dict(name='a'))) == set()
    assert set(root3.select(dict(a=44))) == {root3}

    # select_one() returns a single match or None; ambiguity raises
    assert root3 == root3.select_one(dict(name='d'))
    assert root1.select_one(dict(name='nope')) is None
    with pytest.raises(ValueError) as e:
        doc.select_one(dict(name='d'))
    assert 'Found more than one' in repr(e)

    # select_one() on objects that do not match
    assert root3.select_one(dict(name='a')) is None
    assert root3.select_one(dict(name='c')) is None

    # set_select() updates every object matched by the selector
    root1.set_select(dict(a=42), dict(name="c", a=44))
    assert set(root1.select(dict(name="c"))) == {root1}
    assert set(root1.select(dict(a=44))) == {root1}

    # set_select() on a single object
    root3.set_select(dict(name='d'), dict(a=57))
    assert set(root3.select(dict(a=57))) == {root3}

    # set_select() with a class selector
    root2.set_select(SomeModel, dict(name='new_name'))
    assert set(root2.select(dict(name="new_name"))) == {root2}
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| true | true |
f71d39a91275757173fe4357be8edb16867f5575 | 3,663 | py | Python | src/opnsense/scripts/systemhealth/activity.py | nowstuseeit/core | 522c21e780342567c41fe262a99d73f4c0c37c2c | [
"BSD-2-Clause"
] | 2 | 2021-02-09T13:05:49.000Z | 2021-02-28T16:56:36.000Z | src/opnsense/scripts/systemhealth/activity.py | nowstuseeit/core | 522c21e780342567c41fe262a99d73f4c0c37c2c | [
"BSD-2-Clause"
] | null | null | null | src/opnsense/scripts/systemhealth/activity.py | nowstuseeit/core | 522c21e780342567c41fe262a99d73f4c0c37c2c | [
"BSD-2-Clause"
] | 1 | 2020-12-31T18:57:24.000Z | 2020-12-31T18:57:24.000Z | #!/usr/local/bin/python3
"""
Copyright (c) 2015-2019 Ad Schellevis <ad@opnsense.org>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------------------
returns system activity (top)
"""
import collections
import tempfile
import subprocess
import os
import sys
import ujson
if __name__ == '__main__':
    # Column names from top's table header; None until the header row is seen.
    fieldnames = None
    # Widest value seen per column, used to size the plain-text output.
    field_max_width = dict()
    result = {'headers': [], 'details': []}
    # Run top once in batch mode (-n) for all threads (-H) incl. system (-S).
    sp = subprocess.run(['/usr/bin/top','-aHSn','999999'], capture_output=True, text=True)
    is_header = True
    for line in sp.stdout.strip().split('\n'):
        # end of header, start of top detection: the table header row
        # contains both USERNAME and COMMAND column titles
        if line.find('USERNAME') > -1 and line.find('COMMAND') > -1:
            is_header = False
        if is_header:
            # parse headers from top command, add to result
            if len(line.strip()) > 0:
                result['headers'].append(line)
        else:
            # parse details including fieldnames (leave original)
            if fieldnames is None:
                fieldnames = line.split()
            else:
                tmp = line.split()
                record = dict()
                for field_id in range(len(fieldnames)):
                    fieldname = fieldnames[field_id]
                    if field_id == len(fieldnames)-1:
                        # last column (COMMAND) may contain spaces: join the rest
                        record[fieldname] = ' '.join(tmp[field_id:])
                    else:
                        record[fieldname] = tmp[field_id]
                    # track the widest value per column for aligned output
                    if fieldname not in field_max_width or field_max_width[fieldname] < len(record[fieldname]):
                        field_max_width[fieldname] = len(record[fieldname])
                result['details'].append(record)
    if len(sys.argv) > 1 and sys.argv[1] == 'json':
        # output as json
        print(ujson.dumps(result))
    else:
        # output plain (reconstruct data) using a %-format string whose field
        # widths come from the widest observed value per column
        for header_line in result['headers']:
            print (header_line)
        print ("\n")
        if fieldnames is not None:
            format_str = ""
            header_fields = {}
            for fieldname in fieldnames:
                format_str = '%s %%(%s)-%ds'%(format_str,fieldname, field_max_width[fieldname]+1)
                header_fields[fieldname] = fieldname
            print (format_str % header_fields)
            for detail_line in result['details']:
                print (format_str % detail_line)
import collections
import tempfile
import subprocess
import os
import sys
import ujson
if __name__ == '__main__':
fieldnames = None
field_max_width = dict()
result = {'headers': [], 'details': []}
sp = subprocess.run(['/usr/bin/top','-aHSn','999999'], capture_output=True, text=True)
is_header = True
for line in sp.stdout.strip().split('\n'):
if line.find('USERNAME') > -1 and line.find('COMMAND') > -1:
is_header = False
if is_header:
if len(line.strip()) > 0:
result['headers'].append(line)
else:
if fieldnames is None:
fieldnames = line.split()
else:
tmp = line.split()
record = dict()
for field_id in range(len(fieldnames)):
fieldname = fieldnames[field_id]
if field_id == len(fieldnames)-1:
record[fieldname] = ' '.join(tmp[field_id:])
else:
record[fieldname] = tmp[field_id]
if fieldname not in field_max_width or field_max_width[fieldname] < len(record[fieldname]):
field_max_width[fieldname] = len(record[fieldname])
result['details'].append(record)
if len(sys.argv) > 1 and sys.argv[1] == 'json':
print(ujson.dumps(result))
else:
for header_line in result['headers']:
print (header_line)
print ("\n")
if fieldnames is not None:
format_str = ""
header_fields = {}
for fieldname in fieldnames:
format_str = '%s %%(%s)-%ds'%(format_str,fieldname, field_max_width[fieldname]+1)
header_fields[fieldname] = fieldname
print (format_str % header_fields)
for detail_line in result['details']:
print (format_str % detail_line)
| true | true |
f71d3adca28b056b99bf96ade7d5794bf109d89e | 975 | py | Python | compss/programming_model/bindings/python/src/pycompss/runtime/mpi/keys.py | alexbarcelo/compss | d619faa70ac5a933543c6f8ef65e8acd18ae37a0 | [
"Apache-2.0"
] | 31 | 2018-03-06T09:30:03.000Z | 2022-03-23T09:51:05.000Z | compss/programming_model/bindings/python/src/pycompss/runtime/mpi/keys.py | alexbarcelo/compss | d619faa70ac5a933543c6f8ef65e8acd18ae37a0 | [
"Apache-2.0"
] | 3 | 2020-08-28T17:16:50.000Z | 2021-11-11T21:58:02.000Z | compss/programming_model/bindings/python/src/pycompss/runtime/mpi/keys.py | alexbarcelo/compss | d619faa70ac5a933543c6f8ef65e8acd18ae37a0 | [
"Apache-2.0"
] | 15 | 2018-06-07T10:03:27.000Z | 2022-02-23T14:59:42.000Z | #!/usr/bin/python
#
# Copyright 2002-2021 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: utf-8 -*-
"""
PyCOMPSs runtime - MPI - Keys
============================
This file contains the MPI Collection layout keys.
"""
class MPILayoutKeys(object):
    """String keys used when describing an MPI collection layout."""

    # Number of blocks in the layout.
    block_count = "block_count"
    # Number of elements per block.
    block_length = "block_length"
    # Distance between the starts of consecutive blocks.
    stride = "stride"
| 28.676471 | 75 | 0.691282 |
class MPILayoutKeys(object):
block_count = 'block_count'
block_length = 'block_length'
stride = 'stride'
| true | true |
f71d3b5df39acb05289fd0a6e4d5d0cce131b026 | 4,394 | py | Python | algomorphism/datasets/graph_base.py | efthymis-mcl/algomorphism | 69a41e98e10458ac333da1350fc39da8a00b80d3 | [
"MIT"
] | null | null | null | algomorphism/datasets/graph_base.py | efthymis-mcl/algomorphism | 69a41e98e10458ac333da1350fc39da8a00b80d3 | [
"MIT"
] | null | null | null | algomorphism/datasets/graph_base.py | efthymis-mcl/algomorphism | 69a41e98e10458ac333da1350fc39da8a00b80d3 | [
"MIT"
] | null | null | null | from typing import List
import numpy as np
class GraphBaseDataset(object):
    """Helpers for batching, merging and renormalizing graph (X, A) pairs.

    X is a node-feature matrix, A the corresponding adjacency matrix.
    """

    def __init__(self):
        # Fixed: this method was originally spelled `__int__`, which defines
        # the int() conversion protocol instead of the constructor.
        pass

    @staticmethod
    def numpy_to_mega_batch(x_list, a_list):
        """
        List of numpy arrays to mega batch array.

        Graphs smaller than the largest one are zero-padded so every
        (x, a) pair shares the same node dimension before stacking.

        Args:
            x_list (`list[np.ndarray]`): feature matrixes.
            a_list (`list[np.ndarray]`): adjency matrixes.

        Returns:
            `tuple[np.ndarray, np.ndarray]`: batched x, a lists

        Examples:
            >>> graph_base = GraphBaseDataset()
            >>> x_list = [np.random.rand(6,4) for _ in range(6)]+[np.random.rand(3,4) for _ in range(6)]
            >>> a_list = [np.random.rand(6,6) for _ in range(6)]+[np.random.rand(3,3) for _ in range(6)]
            >>> x, a = graph_base.numpy_to_mega_batch(x_list, a_list)
            >>> print(a.shape)
            (12, 6, 6)
            >>> print(x.shape)
            (12, 6, 4)
        """
        def a_post_concat(a):
            # pad columns, then rows, with zeros up to max_d
            a_con = np.concatenate([a, np.zeros((a.shape[0], max_d - a.shape[1]))], axis=1)
            a_con = np.concatenate([a_con, np.zeros((max_d - a_con.shape[0], a_con.shape[1]))], axis=0)
            return a_con

        def x_post_concat(x):
            # pad node rows with zeros up to max_d; feature width is unchanged
            x_con = np.concatenate([x, np.zeros((max_d - x.shape[0], x.shape[1]))], axis=0)
            return x_con

        max_d = max([a.shape[0] for a in a_list])
        mega_batch_a = []
        mega_batch_x = []
        for (x, a) in zip(x_list, a_list):
            if a.shape[0] < max_d:
                a = a_post_concat(a)
                x = x_post_concat(x)
            mega_batch_a.append(a)
            mega_batch_x.append(x)
        mega_batch_a = np.array(mega_batch_a)
        mega_batch_x = np.stack(mega_batch_x, axis=0)

        return mega_batch_x, mega_batch_a

    @staticmethod
    def numpy_to_disjoint(x_list, a_list):
        """
        Merge graphs into one disjoint (block-diagonal) graph.

        Args:
            x_list (`List[np.ndarray]`): feature matrixes,
            a_list (`List[np.ndarray]`): adajence matrixes.

        Returns:
            `tuple[np.ndarray, np.ndarray]`: disjoint matrixes of x_list, a_list.

        Examples:
            >>> x_list = [np.random.rand(6,4) for _ in range(6)]+[np.random.rand(3,4) for _ in range(6)]
            >>> a_list = [np.random.rand(6,6) for _ in range(6)]+[np.random.rand(3,3) for _ in range(6)]
            >>> gbd = GraphBaseDataset()
            >>> x, a = gbd.numpy_to_disjoint(x_list,a_list)
            >>> print(a.shape)
            (54, 54)
            >>> print(x.shape)
            (54, 48)
        """
        def zero_padding_concat(x, x_disjoint, nx, ndx):
            # grow the accumulated matrix right-ward, shift the new one, stack below
            x_disjoint = np.concatenate([x_disjoint, np.zeros((x_disjoint.shape[0], nx))], axis=1)
            x = np.concatenate([np.zeros((x.shape[0], ndx)), x], axis=1)
            x_disjoint = np.concatenate([x_disjoint, x], axis=0)
            return x_disjoint

        a_disjoint = a_list[0]
        x_disjoint = x_list[0]
        for a, x in zip(a_list[1:], x_list[1:]):
            na = a.shape[1]
            nda = a_disjoint.shape[1]
            nx = x.shape[1]
            ndx = x_disjoint.shape[1]
            a_disjoint = zero_padding_concat(a, a_disjoint, na, nda)
            x_disjoint = zero_padding_concat(x, x_disjoint, nx, ndx)

        return x_disjoint, a_disjoint

    @staticmethod
    def renormalization(a):
        """
        Give an adjacency matrix and returns the renormalized
        D^{-1/2} (A + I) D^{-1/2}.

        Args:
            a: A ndarray, adjacency matrix.

        Returns:
            atld: A ndarray, renormalized adjacency matrix.

        Examples:
            >>> grapbase = GraphBaseDataset()
            >>> a = np.array([[[0,1,1], [1,0,0], [1,0,0]]])
            >>> atld = grapbase.renormalization(a)
            >>> print(atld)
            [[[0.33333333 0.40824829 0.40824829]
              [0.40824829 0.5        0.        ]
              [0.40824829 0.         0.5       ]]]

        References:
            Thomas N. Kipf, Max Welling. Semi-supervised classification with graph convolutional networks,
            https://arxiv.org/pdf/1609.02907.pdf
        """
        ai = a + np.eye(a.shape[-1])
        # degree matrix of A + I, kept diagonal so it is invertible
        degree = np.sum(ai, axis=-1)
        degree = np.eye(a.shape[-1]) * degree
        degree_inv = np.linalg.inv(degree)
        degree_inv = np.power(degree_inv, 0.5)
        atld = np.matmul(degree_inv, ai)
        atld = np.matmul(atld, degree_inv)
        return atld
| 34.328125 | 106 | 0.533227 | from typing import List
import numpy as np
class GraphBaseDataset(object):
def __int__(self):
pass
@staticmethod
def numpy_to_mega_batch(x_list, a_list):
def a_post_concat(a):
a_con = np.concatenate([a, np.zeros((a.shape[0], max_d - a.shape[1]))], axis=1)
a_con = np.concatenate([a_con, np.zeros((max_d - a_con.shape[0], a_con.shape[1]))], axis=0)
return a_con
def x_post_concat(x):
x_con = np.concatenate([x, np.zeros((max_d - x.shape[0], x.shape[1]))], axis=0)
return x_con
max_d = max([a.shape[0] for a in a_list])
mega_batch_a = []
mega_batch_x = []
for (x, a) in zip(x_list, a_list):
if a.shape[0] < max_d:
a = a_post_concat(a)
x = x_post_concat(x)
mega_batch_a.append(a)
mega_batch_x.append(x)
mega_batch_a = np.array(mega_batch_a)
mega_batch_x = np.stack(mega_batch_x, axis=0)
return mega_batch_x, mega_batch_a
@staticmethod
def numpy_to_disjoint(x_list, a_list):
def zero_padding_concat(x, x_disjoint, nx, ndx):
x_disjoint = np.concatenate([x_disjoint, np.zeros((x_disjoint.shape[0], nx))], axis=1)
x = np.concatenate([np.zeros((x.shape[0], ndx)), x], axis=1)
x_disjoint = np.concatenate([x_disjoint, x], axis=0)
return x_disjoint
a_disjoint = a_list[0]
x_disjoint = x_list[0]
for a, x in zip(a_list[1:], x_list[1:]):
na = a.shape[1]
nda = a_disjoint.shape[1]
nx = x.shape[1]
ndx = x_disjoint.shape[1]
a_disjoint = zero_padding_concat(a, a_disjoint, na, nda)
x_disjoint = zero_padding_concat(x, x_disjoint, nx, ndx)
return x_disjoint, a_disjoint
@staticmethod
def renormalization(a):
ai = a + np.eye(a.shape[-1])
degree = np.sum(ai, axis=-1)
degree = np.eye(a.shape[-1]) * degree
degree_inv = np.linalg.inv(degree)
degree_inv = np.power(degree_inv, 0.5)
atld = np.matmul(degree_inv, ai)
atld = np.matmul(atld, degree_inv)
return atld
| true | true |
f71d3b6de6e139e51d7cb437f408413aac4abb5d | 2,912 | py | Python | reader/gitlab.py | goncalovalverde/seshat | deff5cdd985f81ac2b4ebd077eea11f7c4f4118f | [
"MIT"
] | 1 | 2020-12-22T13:23:00.000Z | 2020-12-22T13:23:00.000Z | reader/gitlab.py | goncalovalverde/seshat | deff5cdd985f81ac2b4ebd077eea11f7c4f4118f | [
"MIT"
] | 5 | 2020-12-22T13:36:30.000Z | 2021-02-27T05:42:18.000Z | reader/gitlab.py | goncalovalverde/seshat | deff5cdd985f81ac2b4ebd077eea11f7c4f4118f | [
"MIT"
] | null | null | null | import gitlab
import dateutil.parser
import reader.cache
import hashlib
import logging
from pandas import DataFrame, NaT
from datetime import datetime
class Gitlab:
    """Reads issues from a GitLab project or group and exposes them as a
    pandas DataFrame, optionally cached on disk."""

    def __init__(self, gitlab_config: dict, workflow: dict):
        self.gitlab_config = gitlab_config
        self.workflow = workflow
        # Cache is keyed on token/url/workflow/project so different setups
        # never collide. (This assignment originally sat unreachable after a
        # `return` inside cache_name(), so self.cache was never set even
        # though get_data()/refresh_data() rely on it.)
        self.cache = reader.cache.Cache(self.cache_name())

    def cache_name(self):
        """Build a stable cache key from token, url, workflow and project/group id."""
        token = self.gitlab_config["token"]
        workflow = str(self.workflow)
        url = self.gitlab_config["url"]
        # Prefer the project id; fall back to the group id.
        project_id = (
            self.gitlab_config.get("project_id")
            if self.gitlab_config.get("project_id")
            else self.gitlab_config.get("group_id")
        )
        name_hashed = hashlib.md5(
            (token + url + workflow + str(project_id)).encode("utf-8")
        )
        return name_hashed.hexdigest()

    def get_gitlab_instance(self):
        """Create and authenticate a python-gitlab client."""
        gl = gitlab.Gitlab(
            self.gitlab_config["url"], private_token=self.gitlab_config["token"]
        )
        gl.auth()
        return gl

    def get_issue_data(self, issue):
        """Map a GitLab issue object to the flat record used by the DataFrame."""
        issue_data = {
            "Key": issue.id,
            "Type": "issue",
            "Creator": issue.author["name"],
            "Created": dateutil.parser.parse(issue.created_at).replace(tzinfo=None),
            # "Done" reflects when the issue was closed. The original code
            # parsed created_at here (guarded by `if issue.created_at`, which
            # is always truthy), making every issue look finished the moment
            # it was opened.
            "Done": (
                dateutil.parser.parse(issue.closed_at).replace(tzinfo=None)
                if issue.closed_at
                else NaT
            ),
        }
        return issue_data

    def get_issues(self):
        """Fetch issues for the configured project or group.

        Raises:
            Exception: if neither project_id nor group_id is configured.
        """
        gl = self.get_gitlab_instance()
        if self.gitlab_config.get("project_id"):
            project = gl.projects.get(self.gitlab_config["project_id"])
            issues = project.issues.list()
        elif self.gitlab_config.get("group_id"):
            group = gl.groups.get(self.gitlab_config["group_id"])
            issues = group.issues.list()
        else:
            raise Exception("No valid project_id or group_id found!")

        return issues

    def get_data(self) -> DataFrame:
        """Return issue records as a DataFrame, served from cache when valid."""
        if self.gitlab_config["cache"] and self.cache.is_valid():
            logging.debug("Getting gitlab data from cache")
            df_issue_data = self.cache.read()
            return df_issue_data

        issues = self.get_issues()
        issues_data = [self.get_issue_data(issue) for issue in issues]
        df_issues_data = DataFrame(issues_data)

        if self.gitlab_config["cache"]:
            logging.debug("Storing gitlab issue data in cache")
            self.cache.write(df_issues_data)

        return df_issues_data

    def refresh_data(self, date: datetime) -> DataFrame:
        """Invalidate any cached data and re-fetch.

        NOTE(review): the `date` parameter is currently unused; kept for
        interface compatibility with other readers.
        """
        if self.gitlab_config["cache"] and self.cache.is_valid():
            self.cache.clean()
        return self.get_data()
| 30.978723 | 88 | 0.587569 | import gitlab
import dateutil.parser
import reader.cache
import hashlib
import logging
from pandas import DataFrame, NaT
from datetime import datetime
class Gitlab:
def __init__(self, gitlab_config: dict, workflow: dict):
self.gitlab_config = gitlab_config
self.workflow = workflow
def cache_name(self):
token = self.gitlab_config["token"]
workflow = str(self.workflow)
url = self.gitlab_config["url"]
project_id = (
self.gitlab_config.get("project_id")
if self.gitlab_config.get("project_id")
else self.gitlab_config.get("group_id")
)
name_hashed = hashlib.md5(
(token + url + workflow + str(project_id)).encode("utf-8")
)
return name_hashed.hexdigest()
self.cache = reader.cache.Cache(cache_name(self))
def get_gitlab_instance(self):
gl = gitlab.Gitlab(
self.gitlab_config["url"], private_token=self.gitlab_config["token"]
)
gl.auth()
return gl
def get_issue_data(self, issue):
issue_data = {
"Key": issue.id,
"Type": "issue",
"Creator": issue.author["name"],
"Created": dateutil.parser.parse(issue.created_at).replace(tzinfo=None),
"Done": (
dateutil.parser.parse(issue.created_at).replace(tzinfo=None)
if issue.created_at
else NaT
),
}
return issue_data
def get_issues(self):
gl = self.get_gitlab_instance()
if self.gitlab_config.get("project_id"):
project = gl.projects.get(self.gitlab_config["project_id"])
issues = project.issues.list()
elif self.gitlab_config.get("group_id"):
group = gl.groups.get(self.gitlab_config["group_id"])
issues = group.issues.list()
else:
raise Exception("No valid project_id or group_id found!")
return issues
def get_data(self) -> DataFrame:
if self.gitlab_config["cache"] and self.cache.is_valid():
logging.debug("Getting gitlab data from cache")
df_issue_data = self.cache.read()
return df_issue_data
issues = self.get_issues()
issues_data = [self.get_issue_data(issue) for issue in issues]
df_issues_data = DataFrame(issues_data)
if self.gitlab_config["cache"]:
logging.debug("Storing gitlab issue data in cache")
self.cache.write(df_issues_data)
return df_issues_data
def refresh_data(self, date: datetime) -> DataFrame:
if self.gitlab_config["cache"] and self.cache.is_valid():
self.cache.clean()
return self.get_data()
| true | true |
f71d3c1b03ec24f2d9be14a9d7866efa08692cd5 | 1,886 | py | Python | mmtfPyspark/tests/filters/test_containsDSaccharideChain.py | sbliven/mmtf-pyspark | 3d444178bdc0d5128aafdb1326fec12b5d7634b5 | [
"Apache-2.0"
] | 59 | 2018-01-28T06:50:56.000Z | 2022-02-10T06:07:12.000Z | mmtfPyspark/tests/filters/test_containsDSaccharideChain.py | sbliven/mmtf-pyspark | 3d444178bdc0d5128aafdb1326fec12b5d7634b5 | [
"Apache-2.0"
] | 101 | 2018-02-01T20:51:10.000Z | 2022-01-24T00:50:29.000Z | mmtfPyspark/tests/filters/test_containsDSaccharideChain.py | sbliven/mmtf-pyspark | 3d444178bdc0d5128aafdb1326fec12b5d7634b5 | [
"Apache-2.0"
] | 29 | 2018-01-29T10:09:51.000Z | 2022-01-23T18:53:28.000Z | #!/usr/bin/env python
import unittest
from pyspark.sql import SparkSession
from mmtfPyspark.io.mmtfReader import download_mmtf_files
from mmtfPyspark.filters import ContainsDSaccharideChain
from mmtfPyspark.mappers import *
class ContainsDSaccharideChainTest(unittest.TestCase):
    """The filter should match nothing here: as of PDBx/mmCIF V5, saccharides
    are represented as monomers rather than polysaccharide chains."""

    def setUp(self):
        self.spark = SparkSession.builder.master("local[*]") \
                                 .appName("ContainsDSaccharideChainTest") \
                                 .getOrCreate()

        # 2ONX: only L-protein chain
        # 1JLP: single L-protein chains with non-polymer capping group (NH2)
        # 5X6H: L-protein and L-DNA chain
        # 5L2G: L-DNA chain
        # 2MK1: previously contained saccharides (see class docstring)
        self.pdb = download_mmtf_files(['2ONX', '1JLP', '5X6H', '5L2G', '2MK1'])

    def test1(self):
        filtered = self.pdb.filter(ContainsDSaccharideChain())
        matched_ids = filtered.keys().collect()
        for pdb_id in ('2ONX', '1JLP', '5X6H', '5L2G', '2MK1'):
            self.assertFalse(pdb_id in matched_ids)

    def test2(self):
        chains = self.pdb.flatMap(StructureToPolymerChains())
        chains = chains.filter(ContainsDSaccharideChain())
        matched_ids = chains.keys().collect()
        for chain_id in ('2ONX.A', '1JLP.A', '5X6H.B', '5L2G.A', '5L2G.B', '2MK1.A'):
            self.assertFalse(chain_id in matched_ids)

    def tearDown(self):
        self.spark.stop()
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| 34.925926 | 88 | 0.647932 |
import unittest
from pyspark.sql import SparkSession
from mmtfPyspark.io.mmtfReader import download_mmtf_files
from mmtfPyspark.filters import ContainsDSaccharideChain
from mmtfPyspark.mappers import *
class ContainsDSaccharideChainTest(unittest.TestCase):
def setUp(self):
self.spark = SparkSession.builder.master("local[*]") \
.appName("ContainsDSaccharideChainTest") \
.getOrCreate()
pdbIds = ['2ONX', '1JLP', '5X6H', '5L2G', '2MK1']
self.pdb = download_mmtf_files(pdbIds)
def test1(self):
pdb_1 = self.pdb.filter(ContainsDSaccharideChain())
results_1 = pdb_1.keys().collect()
self.assertFalse('2ONX' in results_1)
self.assertFalse('1JLP' in results_1)
self.assertFalse('5X6H' in results_1)
self.assertFalse('5L2G' in results_1)
self.assertFalse('2MK1' in results_1)
def test2(self):
pdb_2 = self.pdb.flatMap(StructureToPolymerChains())
pdb_2 = pdb_2.filter(ContainsDSaccharideChain())
results_2 = pdb_2.keys().collect()
self.assertFalse('2ONX.A' in results_2)
self.assertFalse('1JLP.A' in results_2)
self.assertFalse('5X6H.B' in results_2)
self.assertFalse('5L2G.A' in results_2)
self.assertFalse('5L2G.B' in results_2)
self.assertFalse('2MK1.A' in results_2)
def tearDown(self):
self.spark.stop()
if __name__ == '__main__':
unittest.main()
| true | true |
f71d3c203b2c6f45e6c4757bcfb5b8c221278039 | 3,273 | py | Python | server/server.py | ma-he-sh/MovieMetaPredict | 800ac82542a9cdf027f827fad135b052d01e83ba | [
"MIT"
] | null | null | null | server/server.py | ma-he-sh/MovieMetaPredict | 800ac82542a9cdf027f827fad135b052d01e83ba | [
"MIT"
] | null | null | null | server/server.py | ma-he-sh/MovieMetaPredict | 800ac82542a9cdf027f827fad135b052d01e83ba | [
"MIT"
] | null | null | null | from flask import Flask, render_template
from flask_json import FlaskJSON, JsonError, json_response, as_json
from flask_cors import CORS
import os
import sys
from modules.db import DB
from modules.redis import REDIS
from config import APP_CONFIG
# Initialize persistence and ensure the schema exists before serving requests.
dbconn = DB()
dbconn.create_table()
# Redis connection (caching layer).
redisConn = REDIS()
# Flask application with JSON response helpers.
app = Flask(__name__)
FlaskJSON(app)
# set CORS to accept all origins on the API namespace
cors = CORS(app, resources={"/metaserver/api/*": {"origins": "*"}})
# URL prefix shared by every API route below.
NAMESPACE='metaserver/api'
# Liveness probe: responds with a static pong payload.
@app.route(f"/{NAMESPACE}/ping", methods=["POST", "GET"])
def index():
    return json_response(callback={'msg': 'pong'})
# get movie metadata by title
@app.route(f"/{NAMESPACE}/title/<movietitle>", methods=["GET"])
def get_title(movietitle):
    """Return (currently static) movie metadata for the requested title.

    The payload below is a hard-coded sample illustrating the shape a real
    lookup would return. Fixed: the cast list originally contained a bare
    `...` (Ellipsis) element, which is not JSON-serializable and made this
    endpoint fail when the response was encoded.
    """
    sample_payload = {
        "id": "tt0068646",
        "href": "https://imdb.com/title/tt0068646",
        "title": "The Godfather",
        "year": "1972",
        "meta": {
            "certificate": "18A",
            "runtime": "175 min",
            "genre": ["Crime", "Drama"],
            "meta_score": "100",
            "description": "An organized crime ...",
            "directors": [
                "Francis Ford Coppola"
            ],
            "votes": "1,628,276",
            "gross": "$134.97M",
            "awards": {
                "Actors": "5 Stars",
                "Direction": "5 Stars",
                "Screenplay": "5 Stars",
                "Oscars": "3",
                "Oscar Nominations": "11",
                "BAFTA Awards": "0",
                "BAFTA Nominations": "4",
                "Golden Globes": "6",
                "Golden Globe Nominations": "8"
            },
            "cast": [
                {
                    "actor": "Marlon Brando",
                    "actor_link": "/name/nm0000008/",
                    "character": "Don Vito Corleone",
                    "character_link": "/title/tt0068646/characters/nm0000008"
                }
            ],
            "Country:": ["USA"],
            "Language:": ["English","Italian","Latin"],
            "Release Date:": "24 March 1972 (Canada)",
            "Also Known As:": "Le parrain",
            "Filming Locations:": [
                "NY Eye and Ear Infirmary, 2nd Avenue & East 13th Street, New York City, New York, USA"
            ],
            "Budget:": "$6,000,000 (estimated)",
            "Opening Weekend USA:": "$302,393,19 March 1972",
            "Gross USA:": "$134,966,411",
            "Cumulative Worldwide Gross:": "$246,120,986",
            "Production Co:": ["Paramount Pictures", "Alfran Productions"],
            "Runtime:": "175 min",
            "Sound Mix:": ["DTS", "Mono"],
            "Color:": ["Color"],
            "Aspect Ratio:": "1.85 : 1",
            "storyline": "The Godfather ....",
            "rating": "18A"
        }
    }
    payload = {
        'title' : movietitle,
        'payload': sample_payload,
    }
    return json_response( callback=payload )
# Registered for 404 (was 400): the handler name and message describe a
# missing page, and Flask signals that with 404 Not Found, not 400.
@app.errorhandler(404)
def page_not_found(e):
    """JSON error body for unknown routes; preserves the 404 status code."""
    payload = {
        'success' : False,
        'error' : 'Page Not Found',
    }
    return json_response( status_=404, resp=payload )
if __name__ == '__main__':
    # Development server: listen on all interfaces so container port mapping works.
    app.run( debug=True, host='0.0.0.0', port=8282, threaded=True )
| 30.027523 | 103 | 0.499542 | from flask import Flask, render_template
from flask_json import FlaskJSON, JsonError, json_response, as_json
from flask_cors import CORS
import os
import sys
from modules.db import DB
from modules.redis import REDIS
from config import APP_CONFIG
dbconn = DB()
dbconn.create_table()
redisConn = REDIS()
app = Flask(__name__)
FlaskJSON(app)
cors = CORS(app, resources={"/metaserver/api/*": {"origins": "*"}})
NAMESPACE='metaserver/api'
@app.route(f"/{NAMESPACE}/ping", methods=["POST", "GET"])
def index():
payload = {
'msg' : 'pong'
}
return json_response( callback=payload )
@app.route(f"/{NAMESPACE}/title/<movietitle>", methods=["GET"])
def get_title(movietitle):
sample_payload = {
"id": "tt0068646",
"href": "https://imdb.com/title/tt0068646",
"title": "The Godfather",
"year": "1972",
"meta": {
"certificate": "18A",
"runtime": "175 min",
"genre": ["Crime", "Drama"],
"meta_score": "100",
"description": "An organized crime ...",
"directors": [
"Francis Ford Coppola"
],
"votes": "1,628,276",
"gross": "$134.97M",
"awards": {
"Actors": "5 Stars",
"Direction": "5 Stars",
"Screenplay": "5 Stars",
"Oscars": "3",
"Oscar Nominations": "11",
"BAFTA Awards": "0",
"BAFTA Nominations": "4",
"Golden Globes": "6",
"Golden Globe Nominations": "8"
},
"cast": [
{
"actor": "Marlon Brando",
"actor_link": "/name/nm0000008/",
"character": "Don Vito Corleone",
"character_link": "/title/tt0068646/characters/nm0000008"
},
...
],
"Country:": ["USA"],
"Language:": ["English","Italian","Latin"],
"Release Date:": "24 March 1972 (Canada)",
"Also Known As:": "Le parrain",
"Filming Locations:": [
"NY Eye and Ear Infirmary, 2nd Avenue & East 13th Street, New York City, New York, USA"
],
"Budget:": "$6,000,000 (estimated)",
"Opening Weekend USA:": "$302,393,19 March 1972",
"Gross USA:": "$134,966,411",
"Cumulative Worldwide Gross:": "$246,120,986",
"Production Co:": ["Paramount Pictures", "Alfran Productions"],
"Runtime:": "175 min",
"Sound Mix:": ["DTS", "Mono"],
"Color:": ["Color"],
"Aspect Ratio:": "1.85 : 1",
"storyline": "The Godfather ....",
"rating": "18A"
}
}
payload = {
'title' : movietitle,
'payload': sample_payload,
}
return json_response( callback=payload )
@app.errorhandler(400)
def page_not_found(e):
payload = {
'success' : False,
'error' : 'Page Not Found',
}
return json_response( resp=payload )
if __name__ == '__main__':
app.run( debug=True, host='0.0.0.0', port=8282, threaded=True )
| true | true |
f71d3c6ceffdf1df60e58be8ba03d0a46747639d | 17,549 | py | Python | pdfplumber/utils.py | guo1017138/pdfplumber | 42564e074ed6ae738333fbbd046f43238faece12 | [
"MIT"
] | 1 | 2021-08-03T16:36:36.000Z | 2021-08-03T16:36:36.000Z | pdfplumber/utils.py | guo1017138/pdfplumber | 42564e074ed6ae738333fbbd046f43238faece12 | [
"MIT"
] | null | null | null | pdfplumber/utils.py | guo1017138/pdfplumber | 42564e074ed6ae738333fbbd046f43238faece12 | [
"MIT"
] | null | null | null | from pdfminer.utils import PDFDocEncoding
from pdfminer.psparser import PSLiteral
from pdfminer.pdftypes import PDFObjRef
from decimal import Decimal, ROUND_HALF_UP
import numbers
from operator import itemgetter
import itertools
from functools import lru_cache as cache
DEFAULT_X_TOLERANCE = 3
DEFAULT_Y_TOLERANCE = 3
def cluster_list(xs, tolerance=0):
    """Group sorted values into clusters whose neighbors differ by <= tolerance."""
    tolerance = decimalize(tolerance)
    ordered = sorted(xs)
    # Zero tolerance (or fewer than two values): every value is its own cluster.
    if tolerance == Decimal(0) or len(ordered) < 2:
        return [[value] for value in ordered]
    clusters = [[ordered[0]]]
    previous = ordered[0]
    for value in ordered[1:]:
        if value <= previous + tolerance:
            clusters[-1].append(value)
        else:
            clusters.append([value])
        previous = value
    return clusters
def make_cluster_dict(values, tolerance):
    """Map each distinct value to the index of the cluster it belongs to."""
    clusters = cluster_list(set(values), decimalize(tolerance))
    cluster_dict = {}
    for index, cluster in enumerate(clusters):
        for value in cluster:
            cluster_dict[value] = index
    return cluster_dict
def cluster_objects(objs, attr, tolerance):
    """Cluster objects by an attribute/key value within the given tolerance."""
    attr_getter = itemgetter(attr) if isinstance(attr, (str, int)) else attr
    objs = to_list(objs)
    cluster_dict = make_cluster_dict(map(attr_getter, objs), tolerance)
    # Pair every object with its cluster index, then group by that index.
    pairs = sorted(
        ((obj, cluster_dict.get(attr_getter(obj))) for obj in objs),
        key=itemgetter(1),
    )
    return [
        [pair[0] for pair in group]
        for _, group in itertools.groupby(pairs, key=itemgetter(1))
    ]
def decode_text(s):
    """Decode a PDFDocEncoding string (or UTF-16BE bytes) to unicode.

    Adds py3 compatibility to pdfminer's version.
    """
    if type(s) == bytes and s.startswith(b"\xfe\xff"):
        # BOM-prefixed UTF-16BE: drop the two BOM bytes and decode directly.
        return str(s[2:], "utf-16be", "ignore")
    code_points = (c if type(c) != str else ord(c) for c in s)
    return "".join(PDFDocEncoding[point] for point in code_points)
def decode_psl_list(_list):
    """Replace PSLiteral entries in a list with their decoded names."""
    decoded = []
    for value in _list:
        if isinstance(value, PSLiteral):
            value = decode_text(value.name)
        decoded.append(value)
    return decoded
def resolve(x):
    """Resolve a PDFObjRef to its target; pass anything else through unchanged."""
    return x.resolve() if type(x) == PDFObjRef else x
def get_dict_type(d):
    """Return the decoded "Type" entry of a dict, or None for non-dicts."""
    if type(d) is not dict:
        return None
    type_entry = d.get("Type")
    if type(type_entry) is PSLiteral:
        return decode_text(type_entry.name)
    return type_entry
def resolve_all(x):
    """Recursively resolve PDFObjRefs inside x (lists, tuples, dicts)."""
    t = type(x)
    if t == PDFObjRef:
        resolved = x.resolve()
        # Page objects reference their parents/siblings; stop here to avoid
        # infinite recursion through the page tree.
        if get_dict_type(resolved) == "Page":
            return x
        return resolve_all(resolved)
    if t in (list, tuple):
        return t(resolve_all(v) for v in x)
    if t == dict:
        # Annotation "Parent" entries point back up the tree; leave them alone.
        skip = ["Parent"] if get_dict_type(x) == "Annot" else []
        return {k: (v if k in skip else resolve_all(v)) for k, v in x.items()}
    return x
@cache(maxsize=int(10e4))
def _decimalize(v, q=None):
# Convert int-like
if isinstance(v, numbers.Integral):
return Decimal(int(v))
# Convert float-like
elif isinstance(v, numbers.Real):
if q is not None:
return Decimal(repr(v)).quantize(Decimal(repr(q)), rounding=ROUND_HALF_UP)
else:
return Decimal(repr(v))
else:
raise ValueError(f"Cannot convert {v} to Decimal.")
def decimalize(v, q=None):
    """Decimal-ize a value, or recursively a tuple/list of values."""
    # Already a Decimal: nothing to do.
    if type(v) == Decimal:
        return v
    # Tuples/lists are converted element-wise, preserving the container type.
    if isinstance(v, (tuple, list)):
        return type(v)(decimalize(item, q) for item in v)
    return _decimalize(v, q)
def is_dataframe(collection):
cls = collection.__class__
name = ".".join([cls.__module__, cls.__name__])
return name == "pandas.core.frame.DataFrame"
def to_list(collection):
if is_dataframe(collection):
return collection.to_dict("records") # pragma: nocover
else:
return list(collection)
def dedupe_chars(chars, tolerance=1):
"""
Removes duplicate chars — those sharing the same text, fontname, size,
and positioning (within `tolerance`) as other characters in the set.
"""
key = itemgetter("fontname", "size", "upright", "text")
pos_key = itemgetter("doctop", "x0")
t = decimalize(tolerance)
def yield_unique_chars(chars):
sorted_chars = sorted(chars, key=key)
for grp, grp_chars in itertools.groupby(sorted_chars, key=key):
for y_cluster in cluster_objects(grp_chars, "doctop", t):
for x_cluster in cluster_objects(y_cluster, "x0", t):
yield sorted(x_cluster, key=pos_key)[0]
deduped = yield_unique_chars(chars)
return sorted(deduped, key=chars.index)
def collate_line(line_chars, tolerance=DEFAULT_X_TOLERANCE):
tolerance = decimalize(tolerance)
coll = ""
last_x1 = None
for char in sorted(line_chars, key=itemgetter("x0")):
if (last_x1 is not None) and (char["x0"] > (last_x1 + tolerance)):
coll += " "
last_x1 = char["x1"]
coll += char["text"]
return coll
def objects_to_rect(objects):
    """Return the rect dict ("x0", "x1", "top", "bottom") enclosing *objects*."""
    x0s, x1s, tops, bottoms = zip(
        *((o["x0"], o["x1"], o["top"], o["bottom"]) for o in objects)
    )
    return {
        "x0": min(x0s),
        "x1": max(x1s),
        "top": min(tops),
        "bottom": max(bottoms),
    }
def objects_to_bbox(objects):
    """Return the (x0, top, x1, bottom) bounding box enclosing all *objects*."""
    return (
        min(obj["x0"] for obj in objects),
        min(obj["top"] for obj in objects),
        max(obj["x1"] for obj in objects),
        max(obj["bottom"] for obj in objects),
    )
obj_to_bbox = itemgetter("x0", "top", "x1", "bottom")
def bbox_to_rect(bbox):
return {"x0": bbox[0], "top": bbox[1], "x1": bbox[2], "bottom": bbox[3]}
DEFAULT_WORD_EXTRACTION_SETTINGS = dict(
x_tolerance=DEFAULT_X_TOLERANCE,
y_tolerance=DEFAULT_Y_TOLERANCE,
keep_blank_chars=False,
use_text_flow=False,
horizontal_ltr=True, # Should words be read left-to-right?
vertical_ttb=True, # Should vertical words be read top-to-bottom?
extra_attrs=[],
)
class WordExtractor:
    """Groups character objects into word dicts.

    Configuration comes from keyword settings (see
    DEFAULT_WORD_EXTRACTION_SETTINGS); tolerances are stored as Decimal.

    NOTE(review): __init__ only sets attributes for the settings actually
    passed, so instances rely on callers supplying the full settings dict
    (as extract_words does by merging in the defaults first).
    """
    def __init__(self, **settings):
        for s, val in settings.items():
            # Reject unknown settings early with a descriptive error.
            if s not in DEFAULT_WORD_EXTRACTION_SETTINGS:
                raise ValueError(f"{s} is not a valid WordExtractor parameter")
            if s in ["x_tolerance", "y_tolerance"]:
                # Tolerances participate in Decimal-based geometry math.
                val = decimalize(val)
            setattr(self, s, val)
    def merge_chars(self, ordered_chars):
        """Collapse an ordered run of chars into a single word dict."""
        x0, top, x1, bottom = objects_to_bbox(ordered_chars)
        upright = ordered_chars[0]["upright"]
        # direction: +1 when the word reads in the configured natural
        # order (ltr for upright, ttb for rotated), -1 otherwise.
        direction = 1 if (self.horizontal_ltr if upright else self.vertical_ttb) else -1
        word = {
            "text": "".join(map(itemgetter("text"), ordered_chars)),
            "x0": x0,
            "x1": x1,
            "top": top,
            "bottom": bottom,
            "upright": upright,
            "direction": direction,
        }
        # Extra attributes are copied from the word's first char.
        for key in self.extra_attrs:
            word[key] = ordered_chars[0][key]
        return word
    def char_begins_new_word(self, current_chars, next_char):
        """Return True when next_char falls outside the current word's tolerance box."""
        upright = current_chars[0]["upright"]
        # Axis roles swap for rotated (non-upright) text.
        intraline_tol = self.x_tolerance if upright else self.y_tolerance
        interline_tol = self.y_tolerance if upright else self.x_tolerance
        word_x0, word_top, word_x1, word_bottom = objects_to_bbox(current_chars)
        return (
            (next_char["x0"] > word_x1 + intraline_tol)
            or (next_char["x1"] < word_x0 - intraline_tol)
            or (next_char["top"] > word_bottom + interline_tol)
            or (next_char["bottom"] < word_top - interline_tol)
        )
    def iter_chars_to_words(self, chars):
        """Yield lists of chars, one list per word."""
        current_word = []
        for char in chars:
            if not self.keep_blank_chars and char["text"].isspace():
                # Whitespace terminates the current word (unless kept).
                if current_word:
                    yield current_word
                    current_word = []
            elif current_word and self.char_begins_new_word(current_word, char):
                yield current_word
                current_word = [char]
            else:
                current_word.append(char)
        if current_word:
            yield current_word
    def iter_sort_chars(self, chars):
        """Yield chars in reading order: upright chars first, line by line."""
        def upright_key(x):
            return -int(x["upright"])
        for upright_cluster in cluster_objects(chars, upright_key, 0):
            upright = upright_cluster[0]["upright"]
            cluster_key = "doctop" if upright else "x0"
            # Cluster by line
            subclusters = cluster_objects(
                upright_cluster, cluster_key, self.y_tolerance
            )
            for sc in subclusters:
                # Sort within line
                sort_key = "x0" if upright else "doctop"
                sc = sorted(sc, key=itemgetter(sort_key))
                # Reverse order if necessary
                if not (self.horizontal_ltr if upright else self.vertical_ttb):
                    sc = reversed(sc)
                yield from sc
    def iter_extract(self, chars):
        """Yield word dicts for *chars*, grouped by uprightness + extra attrs."""
        if not self.use_text_flow:
            chars = self.iter_sort_chars(chars)
        grouping_key = itemgetter("upright", *self.extra_attrs)
        grouped = itertools.groupby(chars, grouping_key)
        for keyvals, char_group in grouped:
            for word_chars in self.iter_chars_to_words(char_group):
                yield self.merge_chars(word_chars)
    def extract(self, chars):
        """Return all extracted words as a list."""
        return list(self.iter_extract(chars))
def extract_words(chars, **kwargs):
settings = dict(DEFAULT_WORD_EXTRACTION_SETTINGS)
settings.update(kwargs)
return WordExtractor(**settings).extract(chars)
def extract_text(
chars, x_tolerance=DEFAULT_X_TOLERANCE, y_tolerance=DEFAULT_Y_TOLERANCE
):
if len(chars) == 0:
return None
chars = to_list(chars)
doctop_clusters = cluster_objects(chars, "doctop", y_tolerance)
lines = (collate_line(line_chars, x_tolerance) for line_chars in doctop_clusters)
coll = "\n".join(lines)
return coll
collate_chars = extract_text
def filter_objects(objs, fn):
if isinstance(objs, dict):
return dict((k, filter_objects(v, fn)) for k, v in objs.items())
initial_type = type(objs)
objs = to_list(objs)
filtered = filter(fn, objs)
return initial_type(filtered)
def get_bbox_overlap(a, b):
a_left, a_top, a_right, a_bottom = decimalize(a)
b_left, b_top, b_right, b_bottom = decimalize(b)
o_left = max(a_left, b_left)
o_right = min(a_right, b_right)
o_bottom = min(a_bottom, b_bottom)
o_top = max(a_top, b_top)
o_width = o_right - o_left
o_height = o_bottom - o_top
if o_height >= 0 and o_width >= 0 and o_height + o_width > 0:
return (o_left, o_top, o_right, o_bottom)
else:
return None
def calculate_area(bbox):
    """Return the area of an (x0, top, x1, bottom) bounding box."""
    x0, top, x1, bottom = bbox
    width = x1 - x0
    height = bottom - top
    if width < 0 or height < 0:
        raise ValueError(f"{bbox} has a negative width or height.")
    return width * height
def clip_obj(obj, bbox):
    """Clip *obj* to *bbox*, returning an adjusted copy.

    Returns None when the object does not intersect the bounding box.
    The copy's x0/top/x1/bottom become the intersection's edges; doctop
    shifts by the same amount as top, and width/height are recomputed.
    """
    bbox = decimalize(bbox)
    overlap = get_bbox_overlap(obj_to_bbox(obj), bbox)
    if overlap is None:
        return None
    dims = bbox_to_rect(overlap)
    copy = dict(obj)
    for attr in ["x0", "top", "x1", "bottom"]:
        copy[attr] = dims[attr]
    # BUG FIX: the original guard compared dims["top"] against
    # obj["bottom"] twice (same clause duplicated). The doctop shift is
    # simply the top-edge delta, which is 0 when top is unchanged, so it
    # is safe (and clearer) to apply it unconditionally.
    diff = dims["top"] - obj["top"]
    copy["doctop"] = obj["doctop"] + diff
    copy["width"] = copy["x1"] - copy["x0"]
    copy["height"] = copy["bottom"] - copy["top"]
    return copy
def intersects_bbox(objs, bbox):
"""
Filters objs to only those intersecting the bbox
"""
initial_type = type(objs)
objs = to_list(objs)
matching = [
obj for obj in objs if get_bbox_overlap(obj_to_bbox(obj), bbox) is not None
]
return initial_type(matching)
def within_bbox(objs, bbox):
"""
Filters objs to only those fully within the bbox
"""
if isinstance(objs, dict):
return dict((k, within_bbox(v, bbox)) for k, v in objs.items())
initial_type = type(objs)
objs = to_list(objs)
matching = [
obj
for obj in objs
if get_bbox_overlap(obj_to_bbox(obj), bbox) == obj_to_bbox(obj)
]
return initial_type(matching)
def crop_to_bbox(objs, bbox):
"""
Filters objs to only those intersecting the bbox,
and crops the extent of the objects to the bbox.
"""
if isinstance(objs, dict):
return dict((k, crop_to_bbox(v, bbox)) for k, v in objs.items())
initial_type = type(objs)
objs = to_list(objs)
cropped = list(filter(None, (clip_obj(obj, bbox) for obj in objs)))
return initial_type(cropped)
def move_object(obj, axis, value):
    """Return a copy of *obj* translated by *value* along *axis* ("h" or "v").

    Horizontal moves shift x0/x1. Vertical moves shift top/bottom (and
    doctop when present), while y0/y1 are shifted the opposite way —
    they live in the inverted (bottom-up) coordinate system.
    """
    assert axis in ("h", "v")
    if axis == "h":
        updates = [("x0", obj["x0"] + value), ("x1", obj["x1"] + value)]
    else:
        updates = [
            ("top", obj["top"] + value),
            ("bottom", obj["bottom"] + value),
        ]
        if "doctop" in obj:
            updates.append(("doctop", obj["doctop"] + value))
        if "y0" in obj:
            updates.append(("y0", obj["y0"] - value))
            updates.append(("y1", obj["y1"] - value))
    return obj.__class__(tuple(obj.items()) + tuple(updates))
def snap_objects(objs, attr, tolerance):
axis = {"x0": "h", "x1": "h", "top": "v", "bottom": "v"}[attr]
clusters = cluster_objects(objs, attr, tolerance)
avgs = [sum(map(itemgetter(attr), objs)) / len(objs) for objs in clusters]
snapped_clusters = [
[move_object(obj, axis, avg - obj[attr]) for obj in cluster]
for cluster, avg in zip(clusters, avgs)
]
return list(itertools.chain(*snapped_clusters))
def resize_object(obj, key, value):
    """Return a copy of *obj* with edge *key* moved to *value*.

    Dependent fields (width/height, doctop, y0/y1) are recomputed so the
    copy stays internally consistent. Asserts that the move does not
    invert the box.
    """
    assert key in ("x0", "x1", "top", "bottom")
    diff = value - obj[key]
    updates = [(key, value)]
    if key == "x0":
        assert value <= obj["x1"]
        updates.append(("width", obj["x1"] - value))
    elif key == "x1":
        assert value >= obj["x0"]
        updates.append(("width", value - obj["x0"]))
    elif key == "top":
        assert value <= obj["bottom"]
        updates.append(("doctop", obj["doctop"] + diff))
        updates.append(("height", obj["height"] - diff))
        if "y1" in obj:
            updates.append(("y1", obj["y1"] - diff))
    else:  # key == "bottom"
        assert value >= obj["top"]
        updates.append(("height", obj["height"] + diff))
        if "y0" in obj:
            updates.append(("y0", obj["y0"] - diff))
    return obj.__class__(tuple(obj.items()) + tuple(updates))
def curve_to_edges(curve):
    """Decompose a curve into edge dicts, one per consecutive point pair.

    Each edge records its bounding geometry; "orientation" is "v" for a
    vertical segment, "h" for a horizontal one, and None otherwise.
    """
    doc_offset = curve["doctop"] - curve["top"]
    edges = []
    for p0, p1 in zip(curve["points"], curve["points"][1:]):
        if p0[0] == p1[0]:
            orientation = "v"
        elif p0[1] == p1[1]:
            orientation = "h"
        else:
            orientation = None
        edges.append(
            {
                "x0": min(p0[0], p1[0]),
                "x1": max(p0[0], p1[0]),
                "top": min(p0[1], p1[1]),
                "doctop": min(p0[1], p1[1]) + doc_offset,
                "bottom": max(p0[1], p1[1]),
                "width": abs(p0[0] - p1[0]),
                "height": abs(p0[1] - p1[1]),
                "orientation": orientation,
            }
        )
    return edges
def rect_to_edges(rect):
top, bottom, left, right = [dict(rect) for x in range(4)]
top.update(
{
"object_type": "rect_edge",
"height": decimalize(0),
"y0": rect["y1"],
"bottom": rect["top"],
"orientation": "h",
}
)
bottom.update(
{
"object_type": "rect_edge",
"height": decimalize(0),
"y1": rect["y0"],
"top": rect["top"] + rect["height"],
"doctop": rect["doctop"] + rect["height"],
"orientation": "h",
}
)
left.update(
{
"object_type": "rect_edge",
"width": decimalize(0),
"x1": rect["x0"],
"orientation": "v",
}
)
right.update(
{
"object_type": "rect_edge",
"width": decimalize(0),
"x0": rect["x1"],
"orientation": "v",
}
)
return [top, bottom, left, right]
def line_to_edge(line):
    """Copy a line object, tagging it "h" (horizontal) or "v" (vertical)."""
    horizontal = line["top"] == line["bottom"]
    edge = dict(line)
    edge["orientation"] = "h" if horizontal else "v"
    return edge
def obj_to_edges(obj):
return {
"line": lambda x: [line_to_edge(x)],
"rect": rect_to_edges,
"rect_edge": rect_to_edges,
"curve": curve_to_edges,
}[obj["object_type"]](obj)
def filter_edges(edges, orientation=None, edge_type=None, min_length=1):
    """Filter *edges* by orientation, object_type, and minimum length.

    An edge's length is its "height" when vertical, its "width"
    otherwise. Passing orientation=None / edge_type=None disables that
    filter.
    """
    if orientation not in ("v", "h", None):
        raise ValueError("Orientation must be 'v' or 'h'")
    kept = []
    for edge in edges:
        length_key = "height" if edge["orientation"] == "v" else "width"
        if edge_type is not None and edge["object_type"] != edge_type:
            continue
        if orientation is not None and edge["orientation"] != orientation:
            continue
        if edge[length_key] >= min_length:
            kept.append(edge)
    return kept
| 28.674837 | 88 | 0.587669 | from pdfminer.utils import PDFDocEncoding
from pdfminer.psparser import PSLiteral
from pdfminer.pdftypes import PDFObjRef
from decimal import Decimal, ROUND_HALF_UP
import numbers
from operator import itemgetter
import itertools
from functools import lru_cache as cache
DEFAULT_X_TOLERANCE = 3
DEFAULT_Y_TOLERANCE = 3
def cluster_list(xs, tolerance=0):
tolerance = decimalize(tolerance)
if tolerance == Decimal(0):
return [[x] for x in sorted(xs)]
if len(xs) < 2:
return [[x] for x in sorted(xs)]
groups = []
xs = list(sorted(xs))
current_group = [xs[0]]
last = xs[0]
for x in xs[1:]:
if x <= (last + tolerance):
current_group.append(x)
else:
groups.append(current_group)
current_group = [x]
last = x
groups.append(current_group)
return groups
def make_cluster_dict(values, tolerance):
tolerance = decimalize(tolerance)
clusters = cluster_list(set(values), tolerance)
nested_tuples = [
[(val, i) for val in value_cluster] for i, value_cluster in enumerate(clusters)
]
cluster_dict = dict(itertools.chain(*nested_tuples))
return cluster_dict
def cluster_objects(objs, attr, tolerance):
if isinstance(attr, (str, int)):
attr_getter = itemgetter(attr)
else:
attr_getter = attr
objs = to_list(objs)
values = map(attr_getter, objs)
cluster_dict = make_cluster_dict(values, tolerance)
get_0, get_1 = itemgetter(0), itemgetter(1)
cluster_tuples = sorted(
((obj, cluster_dict.get(attr_getter(obj))) for obj in objs), key=get_1
)
grouped = itertools.groupby(cluster_tuples, key=get_1)
clusters = [list(map(get_0, v)) for k, v in grouped]
return clusters
def decode_text(s):
if type(s) == bytes and s.startswith(b"\xfe\xff"):
return str(s[2:], "utf-16be", "ignore")
else:
ords = (ord(c) if type(c) == str else c for c in s)
return "".join(PDFDocEncoding[o] for o in ords)
def decode_psl_list(_list):
return [
decode_text(value.name) if isinstance(value, PSLiteral) else value
for value in _list
]
def resolve(x):
if type(x) == PDFObjRef:
return x.resolve()
else:
return x
def get_dict_type(d):
if type(d) is not dict:
return None
t = d.get("Type")
if type(t) is PSLiteral:
return decode_text(t.name)
else:
return t
def resolve_all(x):
t = type(x)
if t == PDFObjRef:
resolved = x.resolve()
if get_dict_type(resolved) == "Page":
return x
return resolve_all(resolved)
elif t in (list, tuple):
return t(resolve_all(v) for v in x)
elif t == dict:
if get_dict_type(x) == "Annot":
exceptions = ["Parent"]
else:
exceptions = []
return dict((k, v if k in exceptions else resolve_all(v)) for k, v in x.items())
else:
return x
@cache(maxsize=int(10e4))
def _decimalize(v, q=None):
if isinstance(v, numbers.Integral):
return Decimal(int(v))
elif isinstance(v, numbers.Real):
if q is not None:
return Decimal(repr(v)).quantize(Decimal(repr(q)), rounding=ROUND_HALF_UP)
else:
return Decimal(repr(v))
else:
raise ValueError(f"Cannot convert {v} to Decimal.")
def decimalize(v, q=None):
if type(v) == Decimal:
return v
if isinstance(v, (tuple, list)):
return type(v)(decimalize(x, q) for x in v)
else:
return _decimalize(v, q)
def is_dataframe(collection):
cls = collection.__class__
name = ".".join([cls.__module__, cls.__name__])
return name == "pandas.core.frame.DataFrame"
def to_list(collection):
if is_dataframe(collection):
return collection.to_dict("records")
else:
return list(collection)
def dedupe_chars(chars, tolerance=1):
key = itemgetter("fontname", "size", "upright", "text")
pos_key = itemgetter("doctop", "x0")
t = decimalize(tolerance)
def yield_unique_chars(chars):
sorted_chars = sorted(chars, key=key)
for grp, grp_chars in itertools.groupby(sorted_chars, key=key):
for y_cluster in cluster_objects(grp_chars, "doctop", t):
for x_cluster in cluster_objects(y_cluster, "x0", t):
yield sorted(x_cluster, key=pos_key)[0]
deduped = yield_unique_chars(chars)
return sorted(deduped, key=chars.index)
def collate_line(line_chars, tolerance=DEFAULT_X_TOLERANCE):
tolerance = decimalize(tolerance)
coll = ""
last_x1 = None
for char in sorted(line_chars, key=itemgetter("x0")):
if (last_x1 is not None) and (char["x0"] > (last_x1 + tolerance)):
coll += " "
last_x1 = char["x1"]
coll += char["text"]
return coll
def objects_to_rect(objects):
return {
"x0": min(map(itemgetter("x0"), objects)),
"x1": max(map(itemgetter("x1"), objects)),
"top": min(map(itemgetter("top"), objects)),
"bottom": max(map(itemgetter("bottom"), objects)),
}
def objects_to_bbox(objects):
return (
min(map(itemgetter("x0"), objects)),
min(map(itemgetter("top"), objects)),
max(map(itemgetter("x1"), objects)),
max(map(itemgetter("bottom"), objects)),
)
obj_to_bbox = itemgetter("x0", "top", "x1", "bottom")
def bbox_to_rect(bbox):
return {"x0": bbox[0], "top": bbox[1], "x1": bbox[2], "bottom": bbox[3]}
DEFAULT_WORD_EXTRACTION_SETTINGS = dict(
x_tolerance=DEFAULT_X_TOLERANCE,
y_tolerance=DEFAULT_Y_TOLERANCE,
keep_blank_chars=False,
use_text_flow=False,
horizontal_ltr=True,
vertical_ttb=True,
extra_attrs=[],
)
class WordExtractor:
def __init__(self, **settings):
for s, val in settings.items():
if s not in DEFAULT_WORD_EXTRACTION_SETTINGS:
raise ValueError(f"{s} is not a valid WordExtractor parameter")
if s in ["x_tolerance", "y_tolerance"]:
val = decimalize(val)
setattr(self, s, val)
def merge_chars(self, ordered_chars):
x0, top, x1, bottom = objects_to_bbox(ordered_chars)
upright = ordered_chars[0]["upright"]
direction = 1 if (self.horizontal_ltr if upright else self.vertical_ttb) else -1
word = {
"text": "".join(map(itemgetter("text"), ordered_chars)),
"x0": x0,
"x1": x1,
"top": top,
"bottom": bottom,
"upright": upright,
"direction": direction,
}
for key in self.extra_attrs:
word[key] = ordered_chars[0][key]
return word
def char_begins_new_word(self, current_chars, next_char):
upright = current_chars[0]["upright"]
intraline_tol = self.x_tolerance if upright else self.y_tolerance
interline_tol = self.y_tolerance if upright else self.x_tolerance
word_x0, word_top, word_x1, word_bottom = objects_to_bbox(current_chars)
return (
(next_char["x0"] > word_x1 + intraline_tol)
or (next_char["x1"] < word_x0 - intraline_tol)
or (next_char["top"] > word_bottom + interline_tol)
or (next_char["bottom"] < word_top - interline_tol)
)
def iter_chars_to_words(self, chars):
current_word = []
for char in chars:
if not self.keep_blank_chars and char["text"].isspace():
if current_word:
yield current_word
current_word = []
elif current_word and self.char_begins_new_word(current_word, char):
yield current_word
current_word = [char]
else:
current_word.append(char)
if current_word:
yield current_word
def iter_sort_chars(self, chars):
def upright_key(x):
return -int(x["upright"])
for upright_cluster in cluster_objects(chars, upright_key, 0):
upright = upright_cluster[0]["upright"]
cluster_key = "doctop" if upright else "x0"
subclusters = cluster_objects(
upright_cluster, cluster_key, self.y_tolerance
)
for sc in subclusters:
sort_key = "x0" if upright else "doctop"
sc = sorted(sc, key=itemgetter(sort_key))
if not (self.horizontal_ltr if upright else self.vertical_ttb):
sc = reversed(sc)
yield from sc
def iter_extract(self, chars):
if not self.use_text_flow:
chars = self.iter_sort_chars(chars)
grouping_key = itemgetter("upright", *self.extra_attrs)
grouped = itertools.groupby(chars, grouping_key)
for keyvals, char_group in grouped:
for word_chars in self.iter_chars_to_words(char_group):
yield self.merge_chars(word_chars)
def extract(self, chars):
return list(self.iter_extract(chars))
def extract_words(chars, **kwargs):
settings = dict(DEFAULT_WORD_EXTRACTION_SETTINGS)
settings.update(kwargs)
return WordExtractor(**settings).extract(chars)
def extract_text(
chars, x_tolerance=DEFAULT_X_TOLERANCE, y_tolerance=DEFAULT_Y_TOLERANCE
):
if len(chars) == 0:
return None
chars = to_list(chars)
doctop_clusters = cluster_objects(chars, "doctop", y_tolerance)
lines = (collate_line(line_chars, x_tolerance) for line_chars in doctop_clusters)
coll = "\n".join(lines)
return coll
collate_chars = extract_text
def filter_objects(objs, fn):
if isinstance(objs, dict):
return dict((k, filter_objects(v, fn)) for k, v in objs.items())
initial_type = type(objs)
objs = to_list(objs)
filtered = filter(fn, objs)
return initial_type(filtered)
def get_bbox_overlap(a, b):
a_left, a_top, a_right, a_bottom = decimalize(a)
b_left, b_top, b_right, b_bottom = decimalize(b)
o_left = max(a_left, b_left)
o_right = min(a_right, b_right)
o_bottom = min(a_bottom, b_bottom)
o_top = max(a_top, b_top)
o_width = o_right - o_left
o_height = o_bottom - o_top
if o_height >= 0 and o_width >= 0 and o_height + o_width > 0:
return (o_left, o_top, o_right, o_bottom)
else:
return None
def calculate_area(bbox):
left, top, right, bottom = bbox
if left > right or top > bottom:
raise ValueError(f"{bbox} has a negative width or height.")
return (right - left) * (bottom - top)
def clip_obj(obj, bbox):
bbox = decimalize(bbox)
overlap = get_bbox_overlap(obj_to_bbox(obj), bbox)
if overlap is None:
return None
dims = bbox_to_rect(overlap)
copy = dict(obj)
for attr in ["x0", "top", "x1", "bottom"]:
copy[attr] = dims[attr]
if dims["top"] != obj["bottom"] or dims["top"] != obj["bottom"]:
diff = dims["top"] - obj["top"]
copy["doctop"] = obj["doctop"] + diff
copy["width"] = copy["x1"] - copy["x0"]
copy["height"] = copy["bottom"] - copy["top"]
return copy
def intersects_bbox(objs, bbox):
initial_type = type(objs)
objs = to_list(objs)
matching = [
obj for obj in objs if get_bbox_overlap(obj_to_bbox(obj), bbox) is not None
]
return initial_type(matching)
def within_bbox(objs, bbox):
if isinstance(objs, dict):
return dict((k, within_bbox(v, bbox)) for k, v in objs.items())
initial_type = type(objs)
objs = to_list(objs)
matching = [
obj
for obj in objs
if get_bbox_overlap(obj_to_bbox(obj), bbox) == obj_to_bbox(obj)
]
return initial_type(matching)
def crop_to_bbox(objs, bbox):
if isinstance(objs, dict):
return dict((k, crop_to_bbox(v, bbox)) for k, v in objs.items())
initial_type = type(objs)
objs = to_list(objs)
cropped = list(filter(None, (clip_obj(obj, bbox) for obj in objs)))
return initial_type(cropped)
def move_object(obj, axis, value):
assert axis in ("h", "v")
if axis == "h":
new_items = (
("x0", obj["x0"] + value),
("x1", obj["x1"] + value),
)
if axis == "v":
new_items = [
("top", obj["top"] + value),
("bottom", obj["bottom"] + value),
]
if "doctop" in obj:
new_items += [("doctop", obj["doctop"] + value)]
if "y0" in obj:
new_items += [
("y0", obj["y0"] - value),
("y1", obj["y1"] - value),
]
return obj.__class__(tuple(obj.items()) + tuple(new_items))
def snap_objects(objs, attr, tolerance):
axis = {"x0": "h", "x1": "h", "top": "v", "bottom": "v"}[attr]
clusters = cluster_objects(objs, attr, tolerance)
avgs = [sum(map(itemgetter(attr), objs)) / len(objs) for objs in clusters]
snapped_clusters = [
[move_object(obj, axis, avg - obj[attr]) for obj in cluster]
for cluster, avg in zip(clusters, avgs)
]
return list(itertools.chain(*snapped_clusters))
def resize_object(obj, key, value):
assert key in ("x0", "x1", "top", "bottom")
old_value = obj[key]
diff = value - old_value
new_items = [
(key, value),
]
if key == "x0":
assert value <= obj["x1"]
new_items.append(("width", obj["x1"] - value))
elif key == "x1":
assert value >= obj["x0"]
new_items.append(("width", value - obj["x0"]))
elif key == "top":
assert value <= obj["bottom"]
new_items.append(("doctop", obj["doctop"] + diff))
new_items.append(("height", obj["height"] - diff))
if "y1" in obj:
new_items.append(("y1", obj["y1"] - diff))
elif key == "bottom":
assert value >= obj["top"]
new_items.append(("height", obj["height"] + diff))
if "y0" in obj:
new_items.append(("y0", obj["y0"] - diff))
return obj.__class__(tuple(obj.items()) + tuple(new_items))
def curve_to_edges(curve):
point_pairs = zip(curve["points"], curve["points"][1:])
return [
{
"x0": min(p0[0], p1[0]),
"x1": max(p0[0], p1[0]),
"top": min(p0[1], p1[1]),
"doctop": min(p0[1], p1[1]) + (curve["doctop"] - curve["top"]),
"bottom": max(p0[1], p1[1]),
"width": abs(p0[0] - p1[0]),
"height": abs(p0[1] - p1[1]),
"orientation": "v" if p0[0] == p1[0] else ("h" if p0[1] == p1[1] else None),
}
for p0, p1 in point_pairs
]
def rect_to_edges(rect):
top, bottom, left, right = [dict(rect) for x in range(4)]
top.update(
{
"object_type": "rect_edge",
"height": decimalize(0),
"y0": rect["y1"],
"bottom": rect["top"],
"orientation": "h",
}
)
bottom.update(
{
"object_type": "rect_edge",
"height": decimalize(0),
"y1": rect["y0"],
"top": rect["top"] + rect["height"],
"doctop": rect["doctop"] + rect["height"],
"orientation": "h",
}
)
left.update(
{
"object_type": "rect_edge",
"width": decimalize(0),
"x1": rect["x0"],
"orientation": "v",
}
)
right.update(
{
"object_type": "rect_edge",
"width": decimalize(0),
"x0": rect["x1"],
"orientation": "v",
}
)
return [top, bottom, left, right]
def line_to_edge(line):
edge = dict(line)
edge["orientation"] = "h" if (line["top"] == line["bottom"]) else "v"
return edge
def obj_to_edges(obj):
return {
"line": lambda x: [line_to_edge(x)],
"rect": rect_to_edges,
"rect_edge": rect_to_edges,
"curve": curve_to_edges,
}[obj["object_type"]](obj)
def filter_edges(edges, orientation=None, edge_type=None, min_length=1):
if orientation not in ("v", "h", None):
raise ValueError("Orientation must be 'v' or 'h'")
def test(e):
dim = "height" if e["orientation"] == "v" else "width"
et_correct = e["object_type"] == edge_type if edge_type is not None else True
orient_correct = orientation is None or e["orientation"] == orientation
return et_correct and orient_correct and (e[dim] >= min_length)
edges = filter(test, edges)
return list(edges)
| true | true |
f71d3cc15d33fdc97cccb70b2307f687317cb7a6 | 11,427 | py | Python | main.py | ahkarimi/MMTOD | d8160f643a0ee1943630b45fa094617dd2237c7e | [
"MIT"
] | null | null | null | main.py | ahkarimi/MMTOD | d8160f643a0ee1943630b45fa094617dd2237c7e | [
"MIT"
] | null | null | null | main.py | ahkarimi/MMTOD | d8160f643a0ee1943630b45fa094617dd2237c7e | [
"MIT"
] | null | null | null | from flask import Flask, request, jsonify, render_template, session
import os
import pickle
import datetime
import time
import pandas as pd
import numpy as np
import random
import logging
##__________________________________ GPT-3 code __________________________________________##
from colorama import Fore, Back, Style
import torch
from transformers import GPT2Tokenizer, GPT2LMHeadModel, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer
import sys, os
import pprint
import numpy as np
import torch
from image_handler import Handler
img_handler_obj = Handler()
# args = ArgsParser().parse()
# device = "cuda" if torch.cuda.is_available() else "cpu"
# n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
pp = pprint.PrettyPrinter(indent=4)
prev_beliefs = {}
domain_queue = []
# sys.stdout.flush()
model_checkpoint = "./output/checkpoint-108420"
decoding = "DECODING METHOD HERE"
## if decoding == 'nucleus':
## TOP_P = float(sys.argv[3])
delay = 0.5
## multiwoz_db = MultiWozDB()
print('\nLoading Model', end="")
if 'openai' in model_checkpoint:
tokenizer = OpenAIGPTTokenizer.from_pretrained(model_checkpoint)
model = OpenAIGPTLMHeadModel.from_pretrained(model_checkpoint)
else:
tokenizer = GPT2Tokenizer.from_pretrained(model_checkpoint)
model = GPT2LMHeadModel.from_pretrained(model_checkpoint)
# model.load_state_dict(torch.load(model_checkpoint))
model.eval()
model.to('cpu')
break_tokens = tokenizer.encode(tokenizer.eos_token) + tokenizer.encode('?') + tokenizer.encode('!')
# break_tokens = tokenizer.encode(tokenizer.eos_token)
MAX_LEN = model.config.n_ctx
if 'openai-gpt' in model_checkpoint:
tokenizer.add_special_tokens({'bos_token': '<|endoftext|>'})
tokenizer.add_special_tokens({'eos_token': '<|endoftext|>'})
sample = 1
#print()
#print('\n What would you like to ask?')
# history = []
context = ''
input_text = ''
turn = 0
# dbmatch = 0
def get_belief_new_dbsearch(sent):
    """Extract the belief-state items from a generated sequence.

    Returns the deduplicated (order-preserving) list of comma-separated
    items found between <|belief|> and <|endofbelief|>, or [] when the
    belief marker is absent.
    """
    if '<|belief|>' not in sent:
        return []
    segment = sent.strip(' ').split('<|belief|>')[-1].split('<|endofbelief|>')[0]
    segment = segment.strip(' .,')
    segment = segment.replace('<|endofbelief|>', '').replace('<|endoftext|>', '')
    unique = []
    for item in segment.split(','):
        item = item.strip(' .,')
        if item not in unique:
            unique.append(item)
    return unique
def convert_belief(belief):
    """Convert flat belief strings into a {domain: {slot: value}} dict.

    Each item looks like "<domain> <slot> <value...>" or
    "<domain> book <slot> <value...>" (booking slots span two words).
    Blank or malformed (single-token) items are skipped instead of
    raising IndexError as the original did.
    """
    dic = {}
    for bs in belief:
        if bs in [' ', '']:
            continue
        parts = bs.split(' ')
        if len(parts) < 2:
            # Robustness: a bare domain with no slot cannot be parsed.
            continue
        domain = parts[0]
        if parts[1] == 'book':
            slot = ' '.join(parts[1:3])
            value = ' '.join(parts[3:])
        else:
            slot = parts[1]
            value = ' '.join(parts[2:])
        # setdefault replaces the original's dead try/except around the
        # assignment (a plain dict item-set cannot fail here).
        dic.setdefault(domain, {})[slot] = value
    return dic
def get_turn_domain(beliefs, q):
    """Return the current turn's domain, updating the domain queue *q*.

    The first domain in *beliefs* that is not yet in *q* is appended and
    returned as the new active domain; otherwise the most recently seen
    domain (q[-1]) is returned. Returns None when neither beliefs nor
    the queue contain any domain (first turn) — the original raised
    IndexError in that case.
    """
    for domain in beliefs.keys():
        if domain not in q:
            q.append(domain)
            return domain
    return q[-1] if q else None  # guard against an empty queue
def get_action_new(sent):
    """Extract the system-action items from a generated sequence.

    Returns the deduplicated list of comma-separated actions between
    <|action|> and <|response|>, or [] when no action marker is present.
    (The original's two extraction branches are equivalent: splitting on
    an absent <|belief|> marker is a no-op, so one expression suffices.)
    """
    if '<|action|>' not in sent:
        return []
    tmp = sent.split('<|belief|>')[-1].split('<|response|>')[0].split('<|action|>')[-1].strip()
    tmp = tmp.strip(' .,')
    tmp = tmp.replace('<|endofaction|>', '').replace('<|endoftext|>', '')
    actions = []
    for act in tmp.split(','):
        if act == '':
            continue
        act = act.strip(' .,')
        if act not in actions:
            actions.append(act)
    return actions
def get_response_new(sent, venuename):
    """Extract the delexicalised response text from a generated sequence.

    Strips the belief/action sections and the response markers, removes
    any eos tokens via a tokenizer encode/decode round-trip, and
    substitutes *venuename* for the [venuename] placeholder. Returns ''
    when no <|response|> marker is present.

    NOTE(review): depends on the module-level `tokenizer` global.
    """
    if '<|response|>' in sent:
        tmp = sent.split('<|belief|>')[-1].split('<|action|>')[-1].split('<|response|>')[-1]
    else:
        return ''
    tmp = tmp.strip(' .,')
    tmp = tmp.replace('<|endofresponse|>', '')
    tmp = tmp.replace('<|endoftext|>', '')
    # Round-trip through the tokenizer to drop any embedded eos tokens.
    tokens = tokenizer.encode(tmp)
    new_tokens = []
    for tok in tokens:
        if tok in tokenizer.encode(tokenizer.eos_token):
            continue
        new_tokens.append(tok)
    response = tokenizer.decode(new_tokens).strip(' ,.')
    response = response.replace('[venuename]', '{}'.format(venuename))
    return response
def get_venuename(bs):
    """Extract the venue name from the first action string, if any.

    Returns '' when the action list is empty or the first action carries
    no 'venuename' slot. The last whitespace token after 'venuename' is
    treated as the trailing action word and dropped.

    BUG FIX: the original indexed bs[0] unconditionally and raised
    IndexError when get_action_new returned an empty list.
    """
    if not bs or 'venuename' not in bs[0]:
        return ''
    tokens = bs[0].split('venuename')[-1].split(' ')
    return ' '.join(tokens[:-1])
def get_open_span(bs):
    """Collect (name, action) pairs for 'open span' slots in the first action string.

    The final whitespace token after 'open span' is taken as the action
    word; the text before it (including surrounding spaces) is the span
    name. Returns [] when the action list is empty.

    BUG FIX: the original indexed bs[0] unconditionally and raised
    IndexError when get_action_new returned an empty list.
    """
    if not bs:
        return []
    pairs = []
    for segment in bs[0].split(';'):
        if 'open span' in segment:
            tail = segment.split('open span')[-1]
            action = tail.split(' ')[-1]
            name = tail.split(action)[0]
            pairs.append((name, action))
    return pairs
##____________________________ End of GPT-3 code __________________________________________##
logging.basicConfig(level=logging.DEBUG)
app = Flask(__name__)
app.secret_key = 'MY_SECRET_KEY'
def label_Message(message):
    """Classify *message* with the pickled tf-idf + classifier pipeline.

    Returns the first predicted label from model.predict.

    NOTE(review): both pickles are re-loaded from disk on every call and
    the file handles are never closed — consider loading once at startup
    and using `with open(...)`.
    """
    logging.warning('In label_Message')
    # load the model from disk
    model_filename = 'model/model.pkl'
    tfidf_filename = 'model/tfidf.pkl'
    model = pickle.load(open(model_filename, 'rb'))
    tfidf = pickle.load(open(tfidf_filename, 'rb'))
    pred = model.predict(tfidf.transform([message]))
    message_label = pred[0]
    logging.warning('Out label_Message')
    return message_label
def label_to_persian(label):
    """Map a sentiment label ('HAPPY'/'SAD') to its Persian display string.

    Returns '' for unknown labels.

    BUG FIX: the original ended with a bare `return`, discarding `res`
    and always yielding None.
    """
    res = ''
    if label == 'HAPPY':
        res = 'خوشحال'
    elif label == 'SAD':
        res = 'ناراحت'
    return res
def Create_message(message):
    """Run one dialogue turn through the GPT model and build the response dict.

    Appends the user utterance to the global dialogue context, greedily
    decodes the belief state, then continues decoding the action and
    response, post-processes them, attaches a venue image when a venue
    name was produced, and returns the session `result` dict.

    NOTE(review): relies on module-level globals (context, turn, model,
    tokenizer, break_tokens, domain_queue, img_handler_obj) and the
    Flask `session`; state is shared across requests and is not safe
    for concurrent users.
    """
    global context
    global turn
    logging.warning('In create message')
    global result
    label = session['label']
    state = session['state']
    result = session['result']
    result['response'] = ''
    result['status'] = 'on'
    result['has_image'] = 'False'
    raw_text = message
    input_text = raw_text.replace('you> ', '')
    if input_text in ['q', 'quit']:
        return "Ok, bye. Just for now!"
    # Append the user turn to the running dialogue context and build the
    # model input in the "<|context|> ... <|endofcontext|>" format.
    user = '<|user|> {}'.format(input_text)
    context = context + ' ' + user
    text = '<|endoftext|> <|context|> {} <|endofcontext|>'.format(context)
    # print(context)
    text = text.strip()
    indexed_tokens = tokenizer.encode(text)
    if len(indexed_tokens) > MAX_LEN:
        # Keep only the most recent MAX_LEN tokens of context.
        indexed_tokens = indexed_tokens[-1 * MAX_LEN:]
    # Convert indexed tokens in a PyTorch tensor
    tokens_tensor = torch.tensor([indexed_tokens])
    # If you have a GPU, put everything on cuda
    tokens_tensor = tokens_tensor.to('cpu')
    predicted_index = indexed_tokens[-1]
    # Stage 1: greedy decoding of the belief state only (stops at
    # <|endofbelief|> or when the length limit is reached).
    with torch.no_grad():
        # Greedy decoding
        while predicted_index not in break_tokens:
            outputs = model(tokens_tensor)
            predictions = outputs[0]
            predicted_index = torch.argmax(predictions[0, -1, :]).item()
            indexed_tokens += [predicted_index]
            tokens_tensor = torch.tensor([indexed_tokens]).to('cpu')
            if len(indexed_tokens) > MAX_LEN:
                break
            if tokenizer.decode(indexed_tokens).endswith('<|endofbelief|>'):
                break
    tmp_pred = tokenizer.decode(indexed_tokens)
    print('\ntmp_pred:\n', tmp_pred)
    belief_text = get_belief_new_dbsearch(tmp_pred)
    print('\nbelief_text:\n', belief_text)
    beliefs = convert_belief(belief_text)
    # domain = list(beliefs.keys())[0]
    domain = get_turn_domain(beliefs, domain_queue)
    # Convert indexed tokens in a PyTorch tensor
    tokens_tensor = torch.tensor([indexed_tokens])
    # If you have a GPU, put everything on cuda
    tokens_tensor = tokens_tensor.to('cpu')
    predicted_index = indexed_tokens[-1]
    truncate_action = False
    # Stage 2: continue greedy decoding through the action and response.
    # Predict all tokens
    with torch.no_grad():
        while predicted_index not in break_tokens:
            outputs = model(tokens_tensor)
            predictions = outputs[0]
            predicted_index = torch.argmax(predictions[0, -1, :]).item()
            indexed_tokens += [predicted_index]
            if len(indexed_tokens) > MAX_LEN:
                break
            predicted_text = tokenizer.decode(indexed_tokens)
            if '<|action|>' in predicted_text:
                generated_actions = predicted_text.split('<|action|>')[-1].split('<|endofaction|>')[0].split(',')
                new_actions = []
                for a in generated_actions:
                    if a in ['', ' ']:
                        continue
                    new_actions.append(a.strip())
                len_actions = len(new_actions)
                # NOTE(review): a set can never be LARGER than the list it
                # was built from, so the first clause of this condition is
                # never true — the duplicate-action rewrite only triggers
                # on the `len_actions > 10` clause. Likely meant `<`.
                if len(list(set(new_actions))) > len(new_actions) or (len_actions > 10 and not truncate_action):
                    # ipdb.set_trace()
                    actions = '<|action|> {} <|endofaction|>'.format(' , '.join(list(set(new_actions))))
                    indexed_tokens = tokenizer.encode('{} {}'.format(predicted_text.split('<|action|>')[0], actions))
                    # print('action truncated')
                    truncate_action = True
            tokens_tensor = torch.tensor([indexed_tokens]).to('cpu')
    predicted_text = tokenizer.decode(indexed_tokens)
    print('\npredicted_text:\n', predicted_text)
    action_text = get_action_new(predicted_text)
    print('\naction_text:\n', action_text)
    venuename = get_venuename(action_text)
    #print('\nVenuename:\n', venuename)
    response_text = get_response_new(predicted_text, venuename)
    print('\nresponse_text:\n', response_text)
    #print(predicted_text)
    open_spans = get_open_span(action_text)
    print('\open_spans:\n', open_spans)
    # handling images
    if venuename:
        result['has_image'] = 'True'
        # NOTE(review): missing space before "in Singapore" — query reads
        # e.g. "ABCin Singapore". Confirm intended query format.
        images = img_handler_obj.get_imgs_url(query=venuename + "in Singapore", num_of_img=5)
        result['image'] = images[0]
        print(images)
    # Append the delexicalised system turn to the context for next time.
    delex_system = '{}'.format(response_text)
    context = context + ' ' + delex_system
    turn += 1
    # NOTE(review): this assigns a LOCAL prev_beliefs; without a `global`
    # declaration the module-level prev_beliefs is never updated.
    prev_beliefs = beliefs
    result['response'] = response_text
    session['result'] = result
    return result
@app.route('/')
def index():
    """Serve the chat UI, resetting all per-session conversation state."""
    for key, initial in (('state', 'start'), ('label', ''), ('result', {})):
        session[key] = initial
    return render_template('index2.html')
@app.route('/send_message', methods=['POST'])
def send_message():
    """Handle one chat turn: run the posted message through the model and return the result as JSON."""
    return jsonify(Create_message(request.form['message']))
| 28.929114 | 117 | 0.608734 | from flask import Flask, request, jsonify, render_template, session
import os
import pickle
import datetime
import time
import pandas as pd
import numpy as np
import random
import logging
T2LMHeadModel, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer
import sys, os
import pprint
import numpy as np
import torch
from image_handler import Handler
img_handler_obj = Handler()
pp = pprint.PrettyPrinter(indent=4)
prev_beliefs = {}
domain_queue = []
model_checkpoint = "./output/checkpoint-108420"
decoding = "DECODING METHOD HERE"
tokenizer = OpenAIGPTTokenizer.from_pretrained(model_checkpoint)
model = OpenAIGPTLMHeadModel.from_pretrained(model_checkpoint)
else:
tokenizer = GPT2Tokenizer.from_pretrained(model_checkpoint)
model = GPT2LMHeadModel.from_pretrained(model_checkpoint)
model.eval()
model.to('cpu')
break_tokens = tokenizer.encode(tokenizer.eos_token) + tokenizer.encode('?') + tokenizer.encode('!')
MAX_LEN = model.config.n_ctx
if 'openai-gpt' in model_checkpoint:
tokenizer.add_special_tokens({'bos_token': '<|endoftext|>'})
tokenizer.add_special_tokens({'eos_token': '<|endoftext|>'})
sample = 1
context = ''
input_text = ''
turn = 0
def get_belief_new_dbsearch(sent):
if '<|belief|>' in sent:
tmp = sent.strip(' ').split('<|belief|>')[-1].split('<|endofbelief|>')[0]
else:
return []
tmp = tmp.strip(' .,')
tmp = tmp.replace('<|endofbelief|>', '')
tmp = tmp.replace('<|endoftext|>', '')
belief = tmp.split(',')
new_belief = []
for bs in belief:
bs = bs.strip(' .,')
if bs not in new_belief:
new_belief.append(bs)
return new_belief
def convert_belief(belief):
dic = {}
for bs in belief:
if bs in [' ', '']:
continue
domain = bs.split(' ')[0]
slot = bs.split(' ')[1]
if slot == 'book':
slot = ' '.join(bs.split(' ')[1:3])
value = ' '.join(bs.split(' ')[3:])
else:
value = ' '.join(bs.split(' ')[2:])
if domain not in dic:
dic[domain] = {}
try:
dic[domain][slot] = value
except:
print(domain)
print(slot)
return dic
def get_turn_domain(beliefs, q):
for k in beliefs.keys():
if k not in q:
q.append(k)
turn_domain = k
return turn_domain
return q[-1]
def get_action_new(sent):
if '<|action|>' not in sent:
return []
elif '<|belief|>' in sent:
tmp = sent.split('<|belief|>')[-1].split('<|response|>')[0].split('<|action|>')[-1].strip()
elif '<|action|>' in sent:
tmp = sent.split('<|response|>')[0].split('<|action|>')[-1].strip()
else:
return []
tmp = tmp.strip(' .,')
tmp = tmp.replace('<|endofaction|>', '')
tmp = tmp.replace('<|endoftext|>', '')
action = tmp.split(',')
new_action = []
for act in action:
if act == '':
continue
act = act.strip(' .,')
if act not in new_action:
new_action.append(act)
return new_action
def get_response_new(sent, venuename):
if '<|response|>' in sent:
tmp = sent.split('<|belief|>')[-1].split('<|action|>')[-1].split('<|response|>')[-1]
else:
return ''
tmp = tmp.strip(' .,')
tmp = tmp.replace('<|endofresponse|>', '')
tmp = tmp.replace('<|endoftext|>', '')
tokens = tokenizer.encode(tmp)
new_tokens = []
for tok in tokens:
if tok in tokenizer.encode(tokenizer.eos_token):
continue
new_tokens.append(tok)
response = tokenizer.decode(new_tokens).strip(' ,.')
response = response.replace('[venuename]', '{}'.format(venuename))
return response
def get_venuename(bs):
name = ''
if 'venuename' in bs[0]:
tmp_list = bs[0].split('venuename')[-1].split(' ')
name = ' '. join(tmp_list[:-1])
return name
def get_open_span(bs):
action_names = []
for tmp in bs[0].split(';'):
if 'open span' in tmp:
action = tmp.split('open span')[-1].split(' ')[-1]
name = tmp.split('open span')[-1].split(action)[0]
action_names.append((name, action))
return action_names
KEY'
def label_Message(message):
logging.warning('In label_Message')
model_filename = 'model/model.pkl'
tfidf_filename = 'model/tfidf.pkl'
model = pickle.load(open(model_filename, 'rb'))
tfidf = pickle.load(open(tfidf_filename, 'rb'))
pred = model.predict(tfidf.transform([message]))
message_label = pred[0]
logging.warning('Out label_Message')
return message_label
def label_to_persian(label):
res = ''
if label == 'HAPPY':
res = 'خوشحال'
elif label == 'SAD':
res = 'ناراحت'
return
def Create_message(message):
global context
global turn
logging.warning('In create message')
global result
label = session['label']
state = session['state']
result = session['result']
result['response'] = ''
result['status'] = 'on'
result['has_image'] = 'False'
raw_text = message
input_text = raw_text.replace('you> ', '')
if input_text in ['q', 'quit']:
return "Ok, bye. Just for now!"
user = '<|user|> {}'.format(input_text)
context = context + ' ' + user
text = '<|endoftext|> <|context|> {} <|endofcontext|>'.format(context)
text = text.strip()
indexed_tokens = tokenizer.encode(text)
if len(indexed_tokens) > MAX_LEN:
indexed_tokens = indexed_tokens[-1 * MAX_LEN:]
tokens_tensor = torch.tensor([indexed_tokens])
tokens_tensor = tokens_tensor.to('cpu')
predicted_index = indexed_tokens[-1]
with torch.no_grad():
while predicted_index not in break_tokens:
outputs = model(tokens_tensor)
predictions = outputs[0]
predicted_index = torch.argmax(predictions[0, -1, :]).item()
indexed_tokens += [predicted_index]
tokens_tensor = torch.tensor([indexed_tokens]).to('cpu')
if len(indexed_tokens) > MAX_LEN:
break
if tokenizer.decode(indexed_tokens).endswith('<|endofbelief|>'):
break
tmp_pred = tokenizer.decode(indexed_tokens)
print('\ntmp_pred:\n', tmp_pred)
belief_text = get_belief_new_dbsearch(tmp_pred)
print('\nbelief_text:\n', belief_text)
beliefs = convert_belief(belief_text)
domain = get_turn_domain(beliefs, domain_queue)
tokens_tensor = torch.tensor([indexed_tokens])
tokens_tensor = tokens_tensor.to('cpu')
predicted_index = indexed_tokens[-1]
truncate_action = False
with torch.no_grad():
while predicted_index not in break_tokens:
outputs = model(tokens_tensor)
predictions = outputs[0]
predicted_index = torch.argmax(predictions[0, -1, :]).item()
indexed_tokens += [predicted_index]
if len(indexed_tokens) > MAX_LEN:
break
predicted_text = tokenizer.decode(indexed_tokens)
if '<|action|>' in predicted_text:
generated_actions = predicted_text.split('<|action|>')[-1].split('<|endofaction|>')[0].split(',')
new_actions = []
for a in generated_actions:
if a in ['', ' ']:
continue
new_actions.append(a.strip())
len_actions = len(new_actions)
if len(list(set(new_actions))) > len(new_actions) or (len_actions > 10 and not truncate_action):
actions = '<|action|> {} <|endofaction|>'.format(' , '.join(list(set(new_actions))))
indexed_tokens = tokenizer.encode('{} {}'.format(predicted_text.split('<|action|>')[0], actions))
truncate_action = True
tokens_tensor = torch.tensor([indexed_tokens]).to('cpu')
predicted_text = tokenizer.decode(indexed_tokens)
print('\npredicted_text:\n', predicted_text)
action_text = get_action_new(predicted_text)
print('\naction_text:\n', action_text)
venuename = get_venuename(action_text)
response_text = get_response_new(predicted_text, venuename)
print('\nresponse_text:\n', response_text)
open_spans = get_open_span(action_text)
print('\open_spans:\n', open_spans)
if venuename:
result['has_image'] = 'True'
images = img_handler_obj.get_imgs_url(query=venuename + "in Singapore", num_of_img=5)
result['image'] = images[0]
print(images)
delex_system = '{}'.format(response_text)
context = context + ' ' + delex_system
turn += 1
prev_beliefs = beliefs
result['response'] = response_text
session['result'] = result
return result
@app.route('/')
def index():
session['state'] = 'start'
session['label'] = ''
session['result'] = {}
return render_template('index2.html')
@app.route('/send_message', methods=['POST'])
def send_message():
message = request.form['message']
response_text = Create_message(message)
return jsonify(response_text)
| true | true |
f71d3cdd4e0a67724f010c5aed3dd6ac0db2e50a | 1,812 | py | Python | tests/test_rules_sqs_policy_public.py | ocrawford555/cfripper | 93b039293ea76fcb28d32b497fb0507042557b5b | [
"Apache-2.0"
] | null | null | null | tests/test_rules_sqs_policy_public.py | ocrawford555/cfripper | 93b039293ea76fcb28d32b497fb0507042557b5b | [
"Apache-2.0"
] | null | null | null | tests/test_rules_sqs_policy_public.py | ocrawford555/cfripper | 93b039293ea76fcb28d32b497fb0507042557b5b | [
"Apache-2.0"
] | null | null | null | """
Copyright 2018 Skyscanner Ltd
Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import pytest
import os
import pycfmodel
from cfripper.rules.SQSQueuePolicyPublicRule import SQSQueuePolicyPublicRule
from cfripper.s3_adapter import S3Adapter
from cfripper.model.result import Result
class TestSQSQueuePolicyPublicRule:
    """Tests that SQSQueuePolicyPublicRule flags every public SQS queue policy."""

    @pytest.fixture(scope="class")
    def template(self):
        """Load and parse the public-SQS-policy test template (once per test class)."""
        dir_path = os.path.dirname(os.path.realpath(__file__))
        # Use a context manager so the template file handle is closed
        # deterministically (the original version leaked it).
        with open('{}/test_templates/sqs_policy_public.json'.format(dir_path)) as cf_script:
            cf_template = S3Adapter().convert_json_or_yaml_to_dict(cf_script.read())
        return pycfmodel.parse(cf_template)

    def test_public(self, template):
        """All four public queue policies in the template must fail the rule."""
        result = Result()
        rule = SQSQueuePolicyPublicRule(None, result)
        rule.invoke(template.resources)

        assert not result.valid
        assert len(result.failed_rules) == 4
        # The template defines QueuePolicyPublic1 .. QueuePolicyPublic4; each
        # must be reported, in order, with the standard reason message.
        for i in range(1, 5):
            assert result.failed_rules[i - 1]['reason'] == \
                'SQS Queue policy QueuePolicyPublic{} should not be public'.format(i)
| 38.553191 | 109 | 0.75 |
import pytest
import os
import pycfmodel
from cfripper.rules.SQSQueuePolicyPublicRule import SQSQueuePolicyPublicRule
from cfripper.s3_adapter import S3Adapter
from cfripper.model.result import Result
class TestSQSQueuePolicyPublicRule:
@pytest.fixture(scope="class")
def template(self):
dir_path = os.path.dirname(os.path.realpath(__file__))
cf_script = open('{}/test_templates/sqs_policy_public.json'.format(dir_path))
cf_template = S3Adapter().convert_json_or_yaml_to_dict(cf_script.read())
return pycfmodel.parse(cf_template)
def test_public(self, template):
result = Result()
rule = SQSQueuePolicyPublicRule(None, result)
rule.invoke(template.resources)
assert not result.valid
assert len(result.failed_rules) == 4
assert result.failed_rules[0]['reason'] == 'SQS Queue policy QueuePolicyPublic1 should not be public'
assert result.failed_rules[1]['reason'] == 'SQS Queue policy QueuePolicyPublic2 should not be public'
assert result.failed_rules[2]['reason'] == 'SQS Queue policy QueuePolicyPublic3 should not be public'
assert result.failed_rules[3]['reason'] == 'SQS Queue policy QueuePolicyPublic4 should not be public'
| true | true |
f71d3dbe0d00b7ccbb10b00fcb79eb056d1a9840 | 762 | py | Python | ProjectEulerPython/problems/problem_021.py | geo-desic/project-euler | 8065ee082a6948447ef961c9aa960c90a815a3ab | [
"MIT"
] | null | null | null | ProjectEulerPython/problems/problem_021.py | geo-desic/project-euler | 8065ee082a6948447ef961c9aa960c90a815a3ab | [
"MIT"
] | null | null | null | ProjectEulerPython/problems/problem_021.py | geo-desic/project-euler | 8065ee082a6948447ef961c9aa960c90a815a3ab | [
"MIT"
] | null | null | null | import math_helpers
from problems.problem import Problem
class Problem021(Problem):
    """Project Euler problem 21: sum all amicable numbers below 10000.

    Numbers a != b are amicable when d(a) == b and d(b) == a, where d(n) is
    the sum of the proper divisors of n.
    """

    def __init__(self):
        super().__init__()
        # Memoizes sum_proper_divisors results, keyed by n.
        self._cache = {}

    def calculate_answer(self) -> int:
        """Return the sum of every amicable number below 10000."""
        answer = 0
        n = 10000
        primes = math_helpers.primes_below(n)
        for i in range(2, n):
            sum_1 = self.sum_proper_divisors(i, primes)
            sum_2 = self.sum_proper_divisors(sum_1, primes)
            # i is amicable when d(d(i)) == i and the pair members differ
            # (perfect numbers, where d(i) == i, are excluded).
            if i == sum_2 and i != sum_1:
                answer += i
                self.print_detail(f"amicable number: {i} ({sum_1}); sum = {answer}")
        return answer

    def sum_proper_divisors(self, n: int, primes: list) -> int:
        """Return the sum of the proper divisors of n, memoized in self._cache."""
        if n in self._cache:
            return self._cache[n]
        value = sum(math_helpers.divisors(n, True, primes))
        self._cache[n] = value
        # Fixed: this return line had been garbled by fused dataset metadata.
        return value
from problems.problem import Problem
class Problem021(Problem):
def __init__(self):
super().__init__()
self._cache = {}
def calculate_answer(self) -> int:
answer = 0
n = 10000
primes = math_helpers.primes_below(n)
for i in range(2, n):
sum_1 = self.sum_proper_divisors(i, primes)
sum_2 = self.sum_proper_divisors(sum_1, primes)
if i == sum_2 and i != sum_1:
answer += i
self.print_detail(f"amicable number: {i} ({sum_1}); sum = {answer}")
return answer
def sum_proper_divisors(self, n: int, primes: [int]) -> int:
if n in self._cache:
return self._cache[n]
value = sum(math_helpers.divisors(n, True, primes))
self._cache[n] = value
return value | true | true |
f71d3e61c2e89c13b3a370bc25b2b344042de73e | 2,278 | py | Python | ptsites/sites/abtorrents.py | kbnq/flexget_qbittorrent_mod | e52d9726b80aab94cf3d9ee6c382b6721b757d3b | [
"MIT"
] | null | null | null | ptsites/sites/abtorrents.py | kbnq/flexget_qbittorrent_mod | e52d9726b80aab94cf3d9ee6c382b6721b757d3b | [
"MIT"
] | null | null | null | ptsites/sites/abtorrents.py | kbnq/flexget_qbittorrent_mod | e52d9726b80aab94cf3d9ee6c382b6721b757d3b | [
"MIT"
] | null | null | null | import ast
import hashlib
from urllib.parse import urljoin
from ..schema.site_base import Work, SignState, NetworkState
from ..schema.xbt import XBT
class MainClass(XBT):
    """Sign-in support for the AbTorrents private tracker.

    Login flow: load the login page, fetch the simpleCaptcha data, pick the
    captcha image whose digest matches the requested description, then POST
    the credentials together with the selected captcha hash.
    """

    URL = 'https://abtorrents.me/'
    USER_CLASSES = {
        'uploaded': [536870912000],
        'share_ratio': [1.5],
        'days': [90],
    }

    # Maps each captcha description text to the shake_128 4-byte hexdigest of
    # the matching captcha image.
    CAPTCHA_DIGESTS = {
        'light bulb': '44c7285b',
        'house': 'b9a403b9',
        'musical note': '3a8441da',
        'key': '2faefa2b',
        'bug': 'c2ba10a5',
        'heart': 'bed5a0e2',
        'clock': '99d86267',
        'world': 'ededf171',
    }

    def build_workflow(self, entry, config):
        """Return the ordered Work steps that make up the sign-in workflow."""
        return [
            Work(
                url='/login.php?returnto=%2F',
                method='get',
                check_state=('network', NetworkState.SUCCEED),
            ),
            Work(
                url='/simpleCaptcha.php',
                method='get',
                check_state=('network', NetworkState.SUCCEED),
            ),
            Work(
                url='/takelogin.php',
                method='password',
                succeed_regex='Logout',
                check_state=('final', SignState.SUCCEED),
                is_base_content=True,
                response_urls=['/']
            )
        ]

    def sign_in_by_password(self, entry, config, work, last_content):
        """POST the login form, solving the image-selection captcha first.

        ``last_content`` is the stringified dict returned by
        /simpleCaptcha.php: a 'text' description plus an 'images' list of
        candidate image hashes.
        """
        login = entry['site_config'].get('login')
        if not login:
            entry.fail_with_prefix('Login data not found!')
            return
        captcha = ast.literal_eval(last_content)
        target = self.CAPTCHA_DIGESTS[captcha['text']]
        candidates = captcha['images']
        if not candidates:
            # Previously this fell through to a NameError; fail cleanly instead.
            entry.fail_with_prefix('Captcha images not found!')
            return
        # Fall back to the last candidate when none matches, mirroring the
        # original behaviour of leaving the loop variable at its final value.
        selected_hash = candidates[-1]
        # Renamed loop variable: the original shadowed the built-in `hash`.
        for img_hash in candidates:
            image = self._request(entry, 'get', urljoin(entry['url'], '/simpleCaptcha.php?hash=' + img_hash))
            if hashlib.shake_128(image.content).hexdigest(4) == target:
                selected_hash = img_hash
                break
        data = {
            'username': login['username'],
            'password': login['password'],
            'remember': 1,
            'captchaSelection': selected_hash,
            'submitme': 'X',
            'returnto': '/'
        }
        login_response = self._request(entry, 'post', work.url, data=data)
        login_network_state = self.check_network_state(entry, work, login_response)
        if login_network_state != NetworkState.SUCCEED:
            return
        return login_response
| 35.59375 | 118 | 0.533802 | import ast
import hashlib
from urllib.parse import urljoin
from ..schema.site_base import Work, SignState, NetworkState
from ..schema.xbt import XBT
class MainClass(XBT):
URL = 'https://abtorrents.me/'
USER_CLASSES = {
'uploaded': [536870912000],
'share_ratio': [1.5],
'days': [90],
}
def build_workflow(self, entry, config):
return [
Work(
url='/login.php?returnto=%2F',
method='get',
check_state=('network', NetworkState.SUCCEED),
),
Work(
url='/simpleCaptcha.php',
method='get',
check_state=('network', NetworkState.SUCCEED),
),
Work(
url='/takelogin.php',
method='password',
succeed_regex='Logout',
check_state=('final', SignState.SUCCEED),
is_base_content=True,
response_urls=['/']
)
]
def sign_in_by_password(self, entry, config, work, last_content):
login = entry['site_config'].get('login')
if not login:
entry.fail_with_prefix('Login data not found!')
return
last_content = ast.literal_eval(last_content)
target = {'light bulb': '44c7285b', 'house': 'b9a403b9', 'musical note': '3a8441da', 'key': '2faefa2b', 'bug':
'c2ba10a5', 'heart': 'bed5a0e2', 'clock': '99d86267', 'world': 'ededf171'}[last_content['text']]
for hash in last_content['images']:
if hashlib.shake_128(self._request(entry, 'get', urljoin(entry['url'], '/simpleCaptcha.php?hash=' + hash))
.content).hexdigest(4) == target:
break
data = {
'username': login['username'],
'password': login['password'],
'remember': 1,
'captchaSelection': hash,
'submitme': 'X',
'returnto': '/'
}
login_response = self._request(entry, 'post', work.url, data=data)
login_network_state = self.check_network_state(entry, work, login_response)
if login_network_state != NetworkState.SUCCEED:
return
return login_response
| true | true |
f71d3e9c75c3721c2fe3bf9687f4cf71a6d3d676 | 559 | py | Python | packs/vdx/actions/interface_set_ip.py | userlocalhost2000/st2contrib | 1a5f759e76401743ed9023d298a3d767e3885db1 | [
"Apache-2.0"
] | 164 | 2015-01-17T16:08:33.000Z | 2021-08-03T02:34:07.000Z | packs/vdx/actions/interface_set_ip.py | userlocalhost2000/st2contrib | 1a5f759e76401743ed9023d298a3d767e3885db1 | [
"Apache-2.0"
] | 442 | 2015-01-01T11:19:01.000Z | 2017-09-06T23:26:17.000Z | packs/vdx/actions/interface_set_ip.py | userlocalhost2000/st2contrib | 1a5f759e76401743ed9023d298a3d767e3885db1 | [
"Apache-2.0"
] | 202 | 2015-01-13T00:37:40.000Z | 2020-11-07T11:30:10.000Z | from pynos import device
from st2actions.runners.pythonrunner import Action
class interface_set_ip(Action):
    """StackStorm action that assigns an IP address to a VDX switch interface."""

    def run(self, **kwargs):
        """Connect to the device described by kwargs and apply the IP settings.

        Connection parameters (ip, port, username, password, test, callback)
        are consumed from kwargs; everything left over is forwarded to
        ``dev.interface.set_ip``.
        """
        host = str(kwargs.pop('ip'))
        port = str(kwargs.pop('port'))
        user = str(kwargs.pop('username'))
        passwd = str(kwargs.pop('password'))
        dry_run = kwargs.pop('test', False)
        cb = kwargs.pop('callback', None)
        with device.Device(
            conn=(host, port), auth=(user, passwd),
            test=dry_run,
            callback=cb
        ) as dev:
            dev.interface.set_ip(**kwargs)
        return 0
| 31.055556 | 73 | 0.59034 | from pynos import device
from st2actions.runners.pythonrunner import Action
class interface_set_ip(Action):
def run(self, **kwargs):
conn = (str(kwargs.pop('ip')), str(kwargs.pop('port')))
auth = (str(kwargs.pop('username')), str(kwargs.pop('password')))
test = kwargs.pop('test', False)
callback = kwargs.pop('callback', None)
with device.Device(
conn=conn, auth=auth,
test=test,
callback=callback
) as dev:
dev.interface.set_ip(**kwargs)
return 0
| true | true |
f71d3f2d291d9fcbb9b50687282f5924ef62d1c9 | 4,685 | py | Python | vice/version.py | rcooke-ast/VICE | 762911eb4192c7206ce2ae36b645d120ed889cb7 | [
"MIT"
] | 22 | 2018-09-26T21:02:51.000Z | 2022-03-24T18:07:03.000Z | vice/version.py | rcooke-ast/VICE | 762911eb4192c7206ce2ae36b645d120ed889cb7 | [
"MIT"
] | 2 | 2019-05-03T13:08:27.000Z | 2021-02-17T20:11:37.000Z | vice/version.py | rcooke-ast/VICE | 762911eb4192c7206ce2ae36b645d120ed889cb7 | [
"MIT"
] | 3 | 2019-05-10T19:26:31.000Z | 2021-11-10T08:13:42.000Z | r"""
This file implements the version_info class.
"""
from __future__ import absolute_import
from . import version_breakdown
import sys
if sys.version_info[:3] < tuple(
[int(_) for _ in version_breakdown.MIN_PYTHON_VERSION.split('.')]):
raise RuntimeError("""This version of VICE requires python >= %s. \
Current version: %d.%d.%d""" % (version_breakdown.MIN_PYTHON_VERSION,
sys.version_info.major, sys.version_info.minor,
sys.version_info.micro))
else: pass
class version_info:
    r"""
    **VICE Version Information**

    VICE's version string is accessible via ``vice.__version__``, or by
    type-casting this object to a string (``str(vice.version)``). Version
    numbers follow the semantic versioning scheme described in PEP 440 [1]_.

    Attributes
    ----------
    major : ``int``
        The major version number of this release.
    minor : ``int``
        The minor version number of this release.
    micro : ``int``
        The micro version number of this release (also known as patch number).
    dev : ``int``
        The development version number of this release. ``None`` if this is
        not a development release.
    alpha : ``int``
        The alpha version number of this release. ``None`` if this is not an
        alpha release.
    beta : ``int``
        The beta version number of this release. ``None`` if this is not a
        beta release.
    rc : ``int``
        The release candidate number of this release. ``None`` if this is not
        a release candidate.
    post : ``int``
        The post number of this release. ``None`` if this is not a post
        release.
    isreleased : ``bool``
        Whether or not this version has been released. If False, users are
        advised to contact a contributor to VICE if they are not a
        contributor themselves.

    .. note:: At most one of the attributes ``dev``, ``alpha``, ``beta``,
        ``rc``, and ``post`` will not be ``None``.

    Notes
    -----
    This object can be type-cast to a tuple of the form:
    ``(major, minor, micro, dev, alpha, beta, rc, post)``

    Alternatively, the information can be obtained in dictionary format via
    ``vice.version.todict()``.

    .. [1] https://www.python.org/dev/peps/pep-0440/
    """

    # Pre-/post-release designators in precedence order, paired with their
    # PEP 440 suffix formats. At most one of these attributes is non-None.
    _SUFFIX_FORMATS = (
        ("dev", ".dev%d"),
        ("alpha", "a%d"),
        ("beta", "b%d"),
        ("rc", "rc%d"),
        ("post", ".post%d"),
    )

    # Attribute names in the order produced by __iter__ / tuple(self).
    _FIELDS = ("major", "minor", "micro", "dev", "alpha", "beta", "rc", "post")

    def __repr__(self):
        rep = "%d.%d.%d" % (self.major, self.minor, self.micro)
        for attr, fmt in self._SUFFIX_FORMATS:
            value = getattr(self, attr)
            if value is None:
                continue
            assert isinstance(value, int), "Invalid version information"
            rep += fmt % (value,)
            break
        return rep

    def __iter__(self):
        for attr in self._FIELDS:
            yield getattr(self, attr)

    def __getitem__(self, key):
        return tuple(self)[key]

    def todict(self):
        r"""
        Convert this object into a dictionary.
        """
        return dict(zip(self._FIELDS, tuple(self)))

    @property
    def major(self):
        r"""
        The major version number.
        """
        return version_breakdown.MAJOR

    @property
    def minor(self):
        r"""
        The minor version number.
        """
        return version_breakdown.MINOR

    @property
    def micro(self):
        r"""
        The micro version number (also known as patch number).
        """
        return version_breakdown.MICRO

    @property
    def dev(self):
        r"""
        The development number for this release. ``None`` if this is not a
        development release.
        """
        return version_breakdown.DEV

    @property
    def alpha(self):
        r"""
        The alpha number for this release. ``None`` if this is not an alpha
        release.
        """
        return version_breakdown.ALPHA

    @property
    def beta(self):
        r"""
        The beta number for this release. ``None`` if this is not a beta
        release.
        """
        return version_breakdown.BETA

    @property
    def rc(self):
        r"""
        The rc number for this release. ``None`` if this is not a release
        candidate.
        """
        return version_breakdown.RC

    @property
    def post(self):
        r"""
        The post number for this release. ``None`` if this is not a post
        release.
        """
        return version_breakdown.POST

    @property
    def isreleased(self):
        r"""
        If True, this version of VICE has been released.
        """
        return version_breakdown.ISRELEASED
version = version_info()
| 24.657895 | 76 | 0.682177 |
from __future__ import absolute_import
from . import version_breakdown
import sys
if sys.version_info[:3] < tuple(
[int(_) for _ in version_breakdown.MIN_PYTHON_VERSION.split('.')]):
raise RuntimeError("""This version of VICE requires python >= %s. \
Current version: %d.%d.%d""" % (version_breakdown.MIN_PYTHON_VERSION,
sys.version_info.major, sys.version_info.minor,
sys.version_info.micro))
else: pass
class version_info:
def __repr__(self):
rep = "%d.%d.%d" % (self.major, self.minor, self.micro)
if self.dev is not None:
assert isinstance(self.dev, int), "Invalid version information"
rep += ".dev%d" % (self.dev)
elif self.alpha is not None:
assert isinstance(self.alpha, int), "Invalid version information"
rep += "a%d" % (self.alpha)
elif self.beta is not None:
assert isinstance(self.beta, int), "Invalid version information"
rep += "b%d" % (self.beta)
elif self.rc is not None:
assert isinstance(self.rc, int), "Invalid version information"
rep += "rc%d" % (self.rc)
elif self.post is not None:
assert isinstance(self.post, int), "Invalid version information"
rep += ".post%d" % (self.post)
else: pass
return rep
def __iter__(self):
yield self.major
yield self.minor
yield self.micro
yield self.dev
yield self.alpha
yield self.beta
yield self.rc
yield self.post
def __getitem__(self, key):
return tuple(self).__getitem__(key)
def todict(self):
return {
"major": self.major,
"minor": self.minor,
"micro": self.micro,
"dev": self.dev,
"alpha": self.alpha,
"beta": self.beta,
"rc": self.rc,
"post": self.post
}
@property
def major(self):
return version_breakdown.MAJOR
@property
def minor(self):
return version_breakdown.MINOR
@property
def micro(self):
return version_breakdown.MICRO
@property
def dev(self):
return version_breakdown.DEV
@property
def alpha(self):
return version_breakdown.ALPHA
@property
def beta(self):
return version_breakdown.BETA
@property
def rc(self):
return version_breakdown.RC
@property
def post(self):
return version_breakdown.POST
@property
def isreleased(self):
return version_breakdown.ISRELEASED
version = version_info()
| true | true |
f71d3f2ea6d57155cccf341e8d69f798c14957e6 | 6,459 | py | Python | ding/entry/serial_entry_reward_model.py | konnase/DI-engine | f803499cad191e9277b10e194132d74757bcfc8e | [
"Apache-2.0"
] | null | null | null | ding/entry/serial_entry_reward_model.py | konnase/DI-engine | f803499cad191e9277b10e194132d74757bcfc8e | [
"Apache-2.0"
] | null | null | null | ding/entry/serial_entry_reward_model.py | konnase/DI-engine | f803499cad191e9277b10e194132d74757bcfc8e | [
"Apache-2.0"
] | null | null | null | from typing import Union, Optional, List, Any, Tuple
import os
import torch
import logging
from functools import partial
from tensorboardX import SummaryWriter
from ding.envs import get_vec_env_setting, create_env_manager
from ding.worker import BaseLearner, InteractionSerialEvaluator, BaseSerialCommander, create_buffer, \
create_serial_collector
from ding.config import read_config, compile_config
from ding.policy import create_policy, PolicyFactory
from ding.reward_model import create_reward_model
from ding.utils import set_pkg_seed
def serial_pipeline_reward_model(
        input_cfg: Union[str, Tuple[dict, dict]],
        seed: int = 0,
        env_setting: Optional[List[Any]] = None,
        model: Optional[torch.nn.Module] = None,
        max_iterations: Optional[int] = int(1e10),
) -> 'Policy':  # noqa
    """
    Overview:
        Serial pipeline entry with reward model: the same collect/train loop as
        the plain serial pipeline, but collected data also trains a reward
        model, which then re-estimates the reward of sampled training data
        before the policy learns from it.
    Arguments:
        - input_cfg (:obj:`Union[str, Tuple[dict, dict]]`): Config in dict type. \
            ``str`` type means config file path. \
            ``Tuple[dict, dict]`` type means [user_config, create_cfg].
        - seed (:obj:`int`): Random seed.
        - env_setting (:obj:`Optional[List[Any]]`): A list with 3 elements: \
            ``BaseEnv`` subclass, collector env config, and evaluator env config.
        - model (:obj:`Optional[torch.nn.Module]`): Instance of torch.nn.Module.
        - max_iterations (:obj:`Optional[int]`): Learner's max iteration. Pipeline will stop \
            when reaching this iteration.
    Returns:
        - policy (:obj:`Policy`): Converged policy.
    """
    if isinstance(input_cfg, str):
        cfg, create_cfg = read_config(input_cfg)
    else:
        cfg, create_cfg = input_cfg
    # The '_command' suffix selects the command-mode variant of the policy.
    create_cfg.policy.type = create_cfg.policy.type + '_command'
    env_fn = None if env_setting is None else env_setting[0]
    cfg = compile_config(cfg, seed=seed, env=env_fn, auto=True, create_cfg=create_cfg, save_cfg=True)
    # Create main components: env, policy
    if env_setting is None:
        env_fn, collector_env_cfg, evaluator_env_cfg = get_vec_env_setting(cfg.env)
    else:
        env_fn, collector_env_cfg, evaluator_env_cfg = env_setting
    collector_env = create_env_manager(cfg.env.manager, [partial(env_fn, cfg=c) for c in collector_env_cfg])
    evaluator_env = create_env_manager(cfg.env.manager, [partial(env_fn, cfg=c) for c in evaluator_env_cfg])
    collector_env.seed(cfg.seed)
    # Evaluator env keeps a fixed seed (dynamic_seed=False) so evaluation
    # episodes are repeatable across iterations.
    evaluator_env.seed(cfg.seed, dynamic_seed=False)
    set_pkg_seed(cfg.seed, use_cuda=cfg.policy.cuda)
    policy = create_policy(cfg.policy, model=model, enable_field=['learn', 'collect', 'eval', 'command'])

    # Create worker components: learner, collector, evaluator, replay buffer, commander.
    tb_logger = SummaryWriter(os.path.join('./{}/log/'.format(cfg.exp_name), 'serial'))
    learner = BaseLearner(cfg.policy.learn.learner, policy.learn_mode, tb_logger, exp_name=cfg.exp_name)
    collector = create_serial_collector(
        cfg.policy.collect.collector,
        env=collector_env,
        policy=policy.collect_mode,
        tb_logger=tb_logger,
        exp_name=cfg.exp_name
    )
    evaluator = InteractionSerialEvaluator(
        cfg.policy.eval.evaluator, evaluator_env, policy.eval_mode, tb_logger, exp_name=cfg.exp_name
    )
    replay_buffer = create_buffer(cfg.policy.other.replay_buffer, tb_logger=tb_logger, exp_name=cfg.exp_name)
    commander = BaseSerialCommander(
        cfg.policy.other.commander, learner, collector, evaluator, replay_buffer, policy.command_mode
    )
    reward_model = create_reward_model(cfg.reward_model, policy.collect_mode.get_attribute('device'), tb_logger)
    # ==========
    # Main loop
    # ==========
    # Learner's before_run hook.
    learner.call_hook('before_run')

    # Accumulate plenty of data at the beginning of training.
    if cfg.policy.get('random_collect_size', 0) > 0:
        action_space = collector_env.env_info().act_space
        random_policy = PolicyFactory.get_random_policy(policy.collect_mode, action_space=action_space)
        collector.reset_policy(random_policy)
        collect_kwargs = commander.step()
        new_data = collector.collect(n_sample=cfg.policy.random_collect_size, policy_kwargs=collect_kwargs)
        replay_buffer.push(new_data, cur_collector_envstep=0)
        # Restore the real policy after the random warm-up collection.
        collector.reset_policy(policy.collect_mode)
    for _ in range(max_iterations):
        collect_kwargs = commander.step()
        # Evaluate policy performance
        if evaluator.should_eval(learner.train_iter):
            stop, reward = evaluator.eval(learner.save_checkpoint, learner.train_iter, collector.envstep)
            if stop:
                break
        new_data_count, target_new_data_count = 0, cfg.reward_model.get('target_new_data_count', 1)
        while new_data_count < target_new_data_count:
            new_data = collector.collect(train_iter=learner.train_iter, policy_kwargs=collect_kwargs)
            new_data_count += len(new_data)
            # collect data for reward_model training
            reward_model.collect_data(new_data)
            replay_buffer.push(new_data, cur_collector_envstep=collector.envstep)
        # update reward_model, then drop its accumulated training data
        reward_model.train()
        reward_model.clear_data()
        # Learn policy from collected data
        for i in range(cfg.policy.learn.update_per_collect):
            # Learner will train ``update_per_collect`` times in one iteration.
            train_data = replay_buffer.sample(learner.policy.get_attribute('batch_size'), learner.train_iter)
            if train_data is None:
                # It is possible that replay buffer's data count is too few to train ``update_per_collect`` times
                logging.warning(
                    "Replay buffer's data can only train for {} steps. ".format(i) +
                    "You can modify data collect config, e.g. increasing n_sample, n_episode."
                )
                break
            # update train_data reward using the (just retrained) reward model
            reward_model.estimate(train_data)
            learner.train(train_data, collector.envstep)
            if learner.policy.get_attribute('priority'):
                replay_buffer.update(learner.priority_info)
        if cfg.policy.on_policy:
            # On-policy algorithm must clear the replay buffer.
            replay_buffer.clear()

    # Learner's after_run hook.
    learner.call_hook('after_run')
    return policy
| 48.56391 | 113 | 0.690355 | from typing import Union, Optional, List, Any, Tuple
import os
import torch
import logging
from functools import partial
from tensorboardX import SummaryWriter
from ding.envs import get_vec_env_setting, create_env_manager
from ding.worker import BaseLearner, InteractionSerialEvaluator, BaseSerialCommander, create_buffer, \
create_serial_collector
from ding.config import read_config, compile_config
from ding.policy import create_policy, PolicyFactory
from ding.reward_model import create_reward_model
from ding.utils import set_pkg_seed
def serial_pipeline_reward_model(
input_cfg: Union[str, Tuple[dict, dict]],
seed: int = 0,
env_setting: Optional[List[Any]] = None,
model: Optional[torch.nn.Module] = None,
max_iterations: Optional[int] = int(1e10),
) -> 'Policy':
if isinstance(input_cfg, str):
cfg, create_cfg = read_config(input_cfg)
else:
cfg, create_cfg = input_cfg
create_cfg.policy.type = create_cfg.policy.type + '_command'
env_fn = None if env_setting is None else env_setting[0]
cfg = compile_config(cfg, seed=seed, env=env_fn, auto=True, create_cfg=create_cfg, save_cfg=True)
if env_setting is None:
env_fn, collector_env_cfg, evaluator_env_cfg = get_vec_env_setting(cfg.env)
else:
env_fn, collector_env_cfg, evaluator_env_cfg = env_setting
collector_env = create_env_manager(cfg.env.manager, [partial(env_fn, cfg=c) for c in collector_env_cfg])
evaluator_env = create_env_manager(cfg.env.manager, [partial(env_fn, cfg=c) for c in evaluator_env_cfg])
collector_env.seed(cfg.seed)
evaluator_env.seed(cfg.seed, dynamic_seed=False)
set_pkg_seed(cfg.seed, use_cuda=cfg.policy.cuda)
policy = create_policy(cfg.policy, model=model, enable_field=['learn', 'collect', 'eval', 'command'])
tb_logger = SummaryWriter(os.path.join('./{}/log/'.format(cfg.exp_name), 'serial'))
learner = BaseLearner(cfg.policy.learn.learner, policy.learn_mode, tb_logger, exp_name=cfg.exp_name)
collector = create_serial_collector(
cfg.policy.collect.collector,
env=collector_env,
policy=policy.collect_mode,
tb_logger=tb_logger,
exp_name=cfg.exp_name
)
evaluator = InteractionSerialEvaluator(
cfg.policy.eval.evaluator, evaluator_env, policy.eval_mode, tb_logger, exp_name=cfg.exp_name
)
replay_buffer = create_buffer(cfg.policy.other.replay_buffer, tb_logger=tb_logger, exp_name=cfg.exp_name)
commander = BaseSerialCommander(
cfg.policy.other.commander, learner, collector, evaluator, replay_buffer, policy.command_mode
)
reward_model = create_reward_model(cfg.reward_model, policy.collect_mode.get_attribute('device'), tb_logger)
learner.call_hook('before_run')
# Accumulate plenty of data at the beginning of training.
if cfg.policy.get('random_collect_size', 0) > 0:
action_space = collector_env.env_info().act_space
random_policy = PolicyFactory.get_random_policy(policy.collect_mode, action_space=action_space)
collector.reset_policy(random_policy)
collect_kwargs = commander.step()
new_data = collector.collect(n_sample=cfg.policy.random_collect_size, policy_kwargs=collect_kwargs)
replay_buffer.push(new_data, cur_collector_envstep=0)
collector.reset_policy(policy.collect_mode)
for _ in range(max_iterations):
collect_kwargs = commander.step()
# Evaluate policy performance
if evaluator.should_eval(learner.train_iter):
stop, reward = evaluator.eval(learner.save_checkpoint, learner.train_iter, collector.envstep)
if stop:
break
new_data_count, target_new_data_count = 0, cfg.reward_model.get('target_new_data_count', 1)
while new_data_count < target_new_data_count:
new_data = collector.collect(train_iter=learner.train_iter, policy_kwargs=collect_kwargs)
new_data_count += len(new_data)
# collect data for reward_model training
reward_model.collect_data(new_data)
replay_buffer.push(new_data, cur_collector_envstep=collector.envstep)
# update reward_model
reward_model.train()
reward_model.clear_data()
# Learn policy from collected data
for i in range(cfg.policy.learn.update_per_collect):
# Learner will train ``update_per_collect`` times in one iteration.
train_data = replay_buffer.sample(learner.policy.get_attribute('batch_size'), learner.train_iter)
if train_data is None:
# It is possible that replay buffer's data count is too few to train ``update_per_collect`` times
logging.warning(
"Replay buffer's data can only train for {} steps. ".format(i) +
"You can modify data collect config, e.g. increasing n_sample, n_episode."
)
break
# update train_data reward
reward_model.estimate(train_data)
learner.train(train_data, collector.envstep)
if learner.policy.get_attribute('priority'):
replay_buffer.update(learner.priority_info)
if cfg.policy.on_policy:
# On-policy algorithm must clear the replay buffer.
replay_buffer.clear()
# Learner's after_run hook.
learner.call_hook('after_run')
return policy
| true | true |
f71d3fe83ab81cbaa275dcdf79598ad722259587 | 6,853 | py | Python | gen_version.py | Skrity/blobmoji | 137924759529d6d4032df7381e72cdb5a70329a3 | [
"Apache-2.0"
] | 1 | 2021-07-22T20:56:28.000Z | 2021-07-22T20:56:28.000Z | gen_version.py | Skrity/blobmoji | 137924759529d6d4032df7381e72cdb5a70329a3 | [
"Apache-2.0"
] | null | null | null | gen_version.py | Skrity/blobmoji | 137924759529d6d4032df7381e72cdb5a70329a3 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
#
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate version string for NotoColorEmoji.
This parses the color emoji template file and updates the lines
containing version string info, writing a new file.
The nameID 5 field in the emoji font should reflect the commit/date
of the repo it was built from. This will build a string of the following
format:
Version 1.39;GOOG;noto-emoji:20170220:a8a215d2e889'
This is intended to indicate that it was built by Google from noto-emoji
at commit a8a215d2e889 and date 20170220 (since dates are a bit easier
to locate in time than commit hashes).
For building with external data we don't include the commit id as we
might be using different resoruces. Instead the version string is:
Version 1.39;GOOG;noto-emoji:20170518;BETA <msg>
Here the date is the current date, and the message after 'BETA ' is
provided using the '-b' flag. There's no commit hash. This also
bypasses some checks about the state of the repo.
The relase number should have 2 or 3 minor digits. Right now we've been
using 2 but at the next major relase we probably want to use 3. This
supports both. It will bump the version number if none is provided,
maintaining the minor digit length.
"""
import argparse
import datetime
import re
from nototools import tool_utils
# These are not very lenient, we expect to be applied to the noto color
# emoji template ttx file which matches these. Why then require the
# input argument, you ask? Um... testing?
_nameid_re = re.compile(r'\s*<namerecord nameID="5"')
_version_re = re.compile(r'\s*Version\s(\d+.\d{2,3})')
_headrev_re = re.compile(r'\s*<fontRevision value="(\d+.\d{2,3})"/>')
def _get_existing_version(lines):
"""Scan lines for all existing version numbers, and ensure they match.
Return the matched version number string."""
version = None
def check_version(new_version):
if version is not None and new_version != version:
raise Exception(
'version %s and namerecord version %s do not match' % (
version, new_version))
return new_version
saw_nameid = False
for line in lines:
if saw_nameid:
saw_nameid = False
m = _version_re.match(line)
if not m:
raise Exception('could not match line "%s" in namerecord' % line)
version = check_version(m.group(1))
elif _nameid_re.match(line):
saw_nameid = True
else:
m = _headrev_re.match(line)
if m:
version = check_version(m.group(1))
return version
def _version_to_mm(version):
majs, mins = version.split('.')
minor_len = len(mins)
return int(majs), int(mins), minor_len
def _mm_to_version(major, minor, minor_len):
fmt = '%%d.%%0%dd' % minor_len
return fmt % (major, minor)
def _version_compare(lhs, rhs):
lmaj, lmin, llen = _version_to_mm(lhs)
rmaj, rmin, rlen = _version_to_mm(rhs)
# if major versions differ, we don't care about the minor length, else
# they should be the same
if lmaj != rmaj:
return lmaj - rmaj
if llen != rlen:
raise Exception('minor version lengths differ: "%s" and "%s"' % (lhs, rhs))
return lmin - rmin
def _version_bump(version):
major, minor, minor_len = _version_to_mm(version)
minor = (minor + 1) % (10 ** minor_len)
if minor == 0:
raise Exception('cannot bump version "%s", requires new major' % version)
return _mm_to_version(major, minor, minor_len)
def _get_repo_version_str(beta):
"""See above for description of this string."""
if beta is not None:
date_str = datetime.date.today().strftime('%Y%m%d')
return 'GOOG;noto-emoji:%s;BETA %s' % (date_str, beta)
p = tool_utils.resolve_path('[emoji]')
commit, date, _ = tool_utils.git_head_commit(p)
if not tool_utils.git_check_remote_commit(p, commit):
raise Exception('emoji not on upstream master branch')
date_re = re.compile(r'(\d{4})-(\d{2})-(\d{2})')
m = date_re.match(date)
if not m:
raise Exception('could not match "%s" with "%s"' % (date, date_re.pattern))
ymd = ''.join(m.groups())
return 'GOOG;noto-emoji:%s:%s' % (ymd, commit[:12])
def _replace_existing_version(lines, version, version_str):
  """Rewrite the version strings in lines (modified in place).

  The line following each nameID 5 record is replaced with version_str,
  and each fontRevision line is replaced with the bare version number.
  Original leading whitespace is preserved on every rewritten line.
  """
  expect_version_line = False
  for idx, line in enumerate(lines):
    if expect_version_line:
      expect_version_line = False
      indent = line[:len(line) - len(line.lstrip())]
      lines[idx] = indent + version_str + '\n'
    elif _nameid_re.match(line):
      expect_version_line = True
    elif _headrev_re.match(line):
      indent = line[:len(line) - len(line.lstrip())]
      lines[idx] = indent + '<fontRevision value="%s"/>\n' % version
def update_version(srcfile, dstfile, version, beta):
  """Update the version in srcfile and write the result to dstfile.

  If version is falsy, the current version found in srcfile is bumped;
  otherwise version must compare strictly greater than the current one.
  """
  with open(srcfile, 'r') as src:
    lines = src.readlines()

  current_version = _get_existing_version(lines)
  if not version:
    version = _version_bump(current_version)
  elif version and _version_compare(version, current_version) <= 0:
    raise Exception('new version %s is <= current version %s' % (
        version, current_version))

  version_str = 'Version %s;%s' % (version, _get_repo_version_str(beta))
  _replace_existing_version(lines, version, version_str)

  with open(dstfile, 'w') as dst:
    dst.writelines(lines)
def main():
  """Command-line entry point: parse flags and rewrite the ttx file."""
  parser = argparse.ArgumentParser()
  parser.add_argument('-v', '--version', metavar='ver',
                      help='version number, default bumps the current version')
  parser.add_argument('-s', '--src', metavar='file', required=True,
                      help='ttx file with name and head tables')
  parser.add_argument('-d', '--dst', metavar='file', required=True,
                      help='name of edited ttx file to write')
  parser.add_argument('-b', '--beta',
                      help='beta tag if font is built using external resources')
  args = parser.parse_args()
  update_version(args.src, args.dst, args.version, args.beta)


if __name__ == '__main__':
  main()
| 35.507772 | 81 | 0.679119 |
import argparse
import datetime
import re
from nototools import tool_utils
_nameid_re = re.compile(r'\s*<namerecord nameID="5"')
_version_re = re.compile(r'\s*Version\s(\d+.\d{2,3})')
_headrev_re = re.compile(r'\s*<fontRevision value="(\d+.\d{2,3})"/>')
def _get_existing_version(lines):
version = None
def check_version(new_version):
if version is not None and new_version != version:
raise Exception(
'version %s and namerecord version %s do not match' % (
version, new_version))
return new_version
saw_nameid = False
for line in lines:
if saw_nameid:
saw_nameid = False
m = _version_re.match(line)
if not m:
raise Exception('could not match line "%s" in namerecord' % line)
version = check_version(m.group(1))
elif _nameid_re.match(line):
saw_nameid = True
else:
m = _headrev_re.match(line)
if m:
version = check_version(m.group(1))
return version
def _version_to_mm(version):
majs, mins = version.split('.')
minor_len = len(mins)
return int(majs), int(mins), minor_len
def _mm_to_version(major, minor, minor_len):
fmt = '%%d.%%0%dd' % minor_len
return fmt % (major, minor)
def _version_compare(lhs, rhs):
lmaj, lmin, llen = _version_to_mm(lhs)
rmaj, rmin, rlen = _version_to_mm(rhs)
# they should be the same
if lmaj != rmaj:
return lmaj - rmaj
if llen != rlen:
raise Exception('minor version lengths differ: "%s" and "%s"' % (lhs, rhs))
return lmin - rmin
def _version_bump(version):
major, minor, minor_len = _version_to_mm(version)
minor = (minor + 1) % (10 ** minor_len)
if minor == 0:
raise Exception('cannot bump version "%s", requires new major' % version)
return _mm_to_version(major, minor, minor_len)
def _get_repo_version_str(beta):
if beta is not None:
date_str = datetime.date.today().strftime('%Y%m%d')
return 'GOOG;noto-emoji:%s;BETA %s' % (date_str, beta)
p = tool_utils.resolve_path('[emoji]')
commit, date, _ = tool_utils.git_head_commit(p)
if not tool_utils.git_check_remote_commit(p, commit):
raise Exception('emoji not on upstream master branch')
date_re = re.compile(r'(\d{4})-(\d{2})-(\d{2})')
m = date_re.match(date)
if not m:
raise Exception('could not match "%s" with "%s"' % (date, date_re.pattern))
ymd = ''.join(m.groups())
return 'GOOG;noto-emoji:%s:%s' % (ymd, commit[:12])
def _replace_existing_version(lines, version, version_str):
saw_nameid = False
for i in range(len(lines)):
line = lines[i]
if saw_nameid:
saw_nameid = False
# preserve indentation
lead_ws = len(line) - len(line.lstrip())
lines[i] = line[:lead_ws] + version_str + '\n'
elif _nameid_re.match(line):
saw_nameid = True
elif _headrev_re.match(line):
lead_ws = len(line) - len(line.lstrip())
lines[i] = line[:lead_ws] + '<fontRevision value="%s"/>\n' % version
def update_version(srcfile, dstfile, version, beta):
with open(srcfile, 'r') as f:
lines = f.readlines()
current_version = _get_existing_version(lines)
if not version:
version = _version_bump(current_version)
elif version and _version_compare(version, current_version) <= 0:
raise Exception('new version %s is <= current version %s' % (
version, current_version))
version_str = 'Version %s;%s' % (version, _get_repo_version_str(beta))
_replace_existing_version(lines, version, version_str)
with open(dstfile, 'w') as f:
for line in lines:
f.write(line)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'-v', '--version', help='version number, default bumps the current '
'version', metavar='ver')
parser.add_argument(
'-s', '--src', help='ttx file with name and head tables',
metavar='file', required=True)
parser.add_argument(
'-d', '--dst', help='name of edited ttx file to write',
metavar='file', required=True)
parser.add_argument(
'-b', '--beta', help='beta tag if font is built using external resources')
args = parser.parse_args()
update_version(args.src, args.dst, args.version, args.beta)
if __name__ == '__main__':
main()
| true | true |
f71d4000f2d0dd6ef26eda02699bb789c6f1b62b | 622 | py | Python | add_devices.py | hollowpoint/hollowpoint | 6f9d6c6f7147a3cec20d6e772567a29cebe2a365 | [
"Apache-2.0"
] | 1 | 2016-03-31T19:52:03.000Z | 2016-03-31T19:52:03.000Z | add_devices.py | hollowpoint/hollowpoint | 6f9d6c6f7147a3cec20d6e772567a29cebe2a365 | [
"Apache-2.0"
] | null | null | null | add_devices.py | hollowpoint/hollowpoint | 6f9d6c6f7147a3cec20d6e772567a29cebe2a365 | [
"Apache-2.0"
] | null | null | null | import ast
from inventory.models import TaskState, NetDevice
def associate_tasks():
all_tasks = TaskState.objects.all()
# Walk every task and see if it's got devices...
for task in all_tasks:
kwargs = ast.literal_eval(task.kwargs)
devices = kwargs.get('devices', [])
# Convert the device names to objects
objects = NetDevice.objects.filter(node_name__in=devices)
# Associate the devices (if any) w/ the current task
task.devices.add(*objects)
if task.devices.exists():
print 'Added', task.devices.count(), 'devices to task', task.task_id
| 32.736842 | 80 | 0.662379 | import ast
from inventory.models import TaskState, NetDevice
def associate_tasks():
all_tasks = TaskState.objects.all()
for task in all_tasks:
kwargs = ast.literal_eval(task.kwargs)
devices = kwargs.get('devices', [])
# Convert the device names to objects
objects = NetDevice.objects.filter(node_name__in=devices)
# Associate the devices (if any) w/ the current task
task.devices.add(*objects)
if task.devices.exists():
print 'Added', task.devices.count(), 'devices to task', task.task_id
| false | true |
f71d4091699cbd805815ffe4c19da81088333f08 | 976 | py | Python | datan.py | Anton-Mu/finance_sentiment_analysis | e319073646f8b11a3f6b5140137a7f0205918c19 | [
"MIT"
] | null | null | null | datan.py | Anton-Mu/finance_sentiment_analysis | e319073646f8b11a3f6b5140137a7f0205918c19 | [
"MIT"
] | null | null | null | datan.py | Anton-Mu/finance_sentiment_analysis | e319073646f8b11a3f6b5140137a7f0205918c19 | [
"MIT"
] | 1 | 2022-02-08T06:11:51.000Z | 2022-02-08T06:11:51.000Z | import datetime
import time

import tradedate  # project helper: fetches the trading-day calendar from Baidu
import pandas as pd
import pymysql
import pandas

# Companies whose per-trading-day average sentiment score we aggregate.
companys = ['隆基股份', '森特股份', '三峡能源']
# Trading days for January 2022.
work_days = tradedate.catch_url_from_baidu('2022', '1')
db = pymysql.connect(host='localhost', port=3306, user='root', password='', database='spider',
                     charset='utf8')
cur = db.cursor()

# company -> list of per-trading-day average scores.
# BUG FIX: the original stored results via locals()['avg_' + str(i)], but the
# counter 'i' was shadowed by the inner 'for i in range(...)' loop, and the
# final print referenced undefined names avgs_0/avgs_1/avgs_2 (the locals
# trick created avg_<i> names).  A plain dict fixes both problems.
avgs_by_company = {}
for company in companys:
    avgs = []
    for work_day in work_days:
        # Parameterize both values instead of concatenating the date into the
        # SQL text (avoids quoting mistakes and SQL injection).
        sql = "SELECT score FROM test WHERE DATE(date)=%s AND company = %s"
        cur.execute(sql, (str(work_day), company))
        rows = cur.fetchall()
        scores = [row[0] for row in rows]
        if scores:
            avgs.append(sum(scores) / len(scores))
        else:
            # No scored articles for this company on this trading day; the
            # original hit ZeroDivisionError here and logged the same info.
            print(work_day, rows, company)
    avgs_by_company[company] = avgs
db.close()
print(avgs_by_company)
| 27.111111 | 98 | 0.564549 | import datetime
import time
import tradedate
import pandas as pd
import pymysql
import pandas
companys = ['隆基股份', '森特股份', '三峡能源']
work_days = tradedate.catch_url_from_baidu('2022', '1')
db = pymysql.connect(host='localhost', port=3306, user='root', password='', database='spider',
charset='utf8')
cur = db.cursor()
names = locals()
i = 0
for company in companys:
avgs = []
for work_day in work_days:
sql = "SELECT score FROM test WHERE DATE(date)= '" + str(work_day) + "' AND company = %s"
cur.execute(sql, company)
score = cur.fetchall()
scores = []
try:
for i in range(len(score)):
scores.append(score[i][0])
avg = sum(scores) / len(scores)
avgs.append(avg)
except:
print(work_day, score, company)
names['avg_' + str(i)] = avgs
i += 1
db.close()
print(avgs_0, avgs_1, avgs_2)
| true | true |
f71d40bcf651b54021979c451aaa44ec6a2a88bd | 9,898 | py | Python | lstm_predictor.py | akash13singh/thesis | c9c1aae3545e0412a042f5838e81622c82e8668a | [
"MIT"
] | 215 | 2017-09-25T01:26:32.000Z | 2022-01-25T04:35:04.000Z | lstm_predictor.py | akash13singh/thesis | c9c1aae3545e0412a042f5838e81622c82e8668a | [
"MIT"
] | 8 | 2018-08-04T09:55:45.000Z | 2021-05-10T07:55:44.000Z | lstm_predictor.py | akash13singh/thesis | c9c1aae3545e0412a042f5838e81622c82e8668a | [
"MIT"
] | 95 | 2017-11-22T10:59:51.000Z | 2022-02-08T18:26:39.000Z | import numpy as np
import tensorflow as tf
import random as rn
np.random.seed(123)
rn.seed(123)
#single thread
session_conf = tf.ConfigProto(
intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
from keras import backend as K
tf.set_random_seed(123)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
import models.lstm as lstm
import configuration.config as cfg
import matplotlib
if cfg.run_config['Xserver'] == False:
print "No X-server"
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
import time
from keras.utils import plot_model
import utilities.utils as util
import numpy as np
import logging
# import plotly
# import plotly.plotly as py
# import plotly.graph_objs as go
# plotly.tools.set_credentials_file(username='aakashsingh', api_key='iMfR7hS1dbnmJ9XB17XO')
import seaborn as sns
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pylab as pylab
params = {'legend.fontsize': 'x-large',
'figure.figsize': (15, 5),
'axes.labelsize': 'x-large',
'axes.titlesize':'x-large',
'xtick.labelsize':'x-large',
'ytick.labelsize':'x-large'}
pylab.rcParams.update(params)
sns.set_style("whitegrid")
def make_plots(context,predictions_timesteps,true_values,look_ahead,title,path,save_figure,Xserver):
    """Plot true vs. predicted series (plus absolute error) per horizon.

    For look_ahead > 1 only the first and last horizons are plotted;
    figures are optionally saved under path and/or shown on screen.
    """
    # Stride of look_ahead-1 visits exactly horizons 0 and look_ahead-1.
    stride = look_ahead - 1 if look_ahead > 1 else 1
    for horizon in np.arange(0, look_ahead, stride):
        fig = plt.figure()
        plt.xlabel("Time step")
        plt.ylabel("Power Consumption")
        plt.plot(true_values, label="True value", linewidth=1, color=sns.xkcd_rgb["denim blue"])
        plt.plot(predictions_timesteps[:, horizon], label="Predicted value", linewidth=1,
                 linestyle="--", color=sns.xkcd_rgb["medium green"])
        # Absolute prediction error at this horizon.
        abs_error = abs(true_values - predictions_timesteps[:, horizon])
        plt.plot(abs_error, label='Error', color=sns.xkcd_rgb["pale red"], linewidth=0.5)
        plt.legend(bbox_to_anchor=(1, .99))
        plt.tight_layout()
        if save_figure:
            util.save_figure(path, "%s_timestep_%d" % (context, horizon), fig)
        if Xserver:
            plt.show()
def get_predictions(context,model,X,y,train_scaler,batch_size,look_ahead,look_back,epochs,experiment_id):
    """Predict with `model` on X, un-scale, realign multi-step outputs by
    horizon, log per-horizon RMSE plus a naive-shift baseline, plot, and
    return (predictions_timesteps, y_true).

    predictions_timesteps[t, k] is the prediction for time t made at horizon
    k; y_true is the un-scaled first-horizon ground truth.
    `context` labels the data split ("Train", "Test", ...) in logs/plots.
    """
    predictions = model.predict(X, batch_size=batch_size)
    print predictions.shape
    # Undo the training-set scaling so errors are reported in original units.
    predictions = train_scaler.inverse_transform(predictions)
    y = train_scaler.inverse_transform(y)
    # extract first timestep for true values
    y_true = y[:, 0].flatten()
    # diagonals contains a reading's values calculated at different points in time
    diagonals = util.get_diagonals(predictions)
    # the top left and bottom right predictions do not contain predictions for all timesteps
    # fill the missing prediction values in diagonals. currently using the first predicted value for all missing timesteps
    for idx, diagonal in enumerate(diagonals):
        diagonal = diagonal.flatten()
        # missing value filled with the first value
        diagonals[idx] = np.hstack((diagonal, np.full(look_ahead - len(diagonal), diagonal[0])))
    predictions_timesteps = np.asarray(diagonals)
    # RMSE per prediction horizon (sqrt of sklearn's MSE).
    for i in range(look_ahead):
        logging.info("%s RMSE on %d timestep prediction %f" % ( context,
                                                            (i + 1), mean_squared_error(y_true, predictions_timesteps[:, i]) ** 0.5))
    # Baseline: RMSE of simply repeating the previous reading.
    shifted_1 = util.shift_time_series(y_true, 1)
    logging.info(" %s RMSE Naive One Timestep Shift %f",context,
                 mean_squared_error(y_true[1:], shifted_1[1:]) ** 0.5)
    title = "Prediction on %s data. %d epochs, look back %d, look_ahead %d & batch_size %d." % (
        context, epochs, look_back, look_ahead, batch_size)
    path = "%s/%s/"%("imgs",experiment_id)
    make_plots(context,predictions_timesteps,y_true,look_ahead,title,path,cfg.run_config['save_figure'],
               cfg.run_config['Xserver'])
    return predictions_timesteps, y_true
def run():
    """Full training/evaluation pipeline: load config and data, build and
    train the multi-step LSTM (early-stopped on validation1), evaluate on
    validation2 and test, and save per-split predictions/labels as .npy.
    """
    #load config settings
    experiment_id = cfg.run_config['experiment_id']
    data_folder = cfg.run_config['data_folder']
    look_back = cfg.multi_step_lstm_config['look_back']
    look_ahead = cfg.multi_step_lstm_config['look_ahead']
    batch_size = cfg.multi_step_lstm_config['batch_size']
    epochs = cfg.multi_step_lstm_config['n_epochs']
    dropout = cfg.multi_step_lstm_config['dropout']
    layers = cfg.multi_step_lstm_config['layers']
    loss = cfg.multi_step_lstm_config['loss']
    # optimizer = cfg.multi_step_lstm_config['optimizer']
    shuffle = cfg.multi_step_lstm_config['shuffle']
    patience = cfg.multi_step_lstm_config['patience']
    validation = cfg.multi_step_lstm_config['validation']
    learning_rate = cfg.multi_step_lstm_config['learning_rate']
    logging.info("----------------------------------------------------")
    logging.info('Run id %s' % (experiment_id))
    logging.info(" HYPERPRAMRAMS : %s" % (str(locals())))
    train_scaler, X_train, y_train, X_validation1, y_validation1, X_validation2, y_validation2, validation2_labels, \
    X_test, y_test, test_labels = util.load_data(data_folder, look_back, look_ahead)

    multistep_lstm = lstm.MultiStepLSTM( look_back=look_back, look_ahead=look_ahead,
                                        layers=layers,
                                        dropout=dropout, loss=loss, learning_rate=learning_rate)
    model = multistep_lstm.build_model()
    if cfg.run_config['save_figure']:
        plot_model(model, to_file="imgs/%s_lstm.png"%(experiment_id), show_shapes=True, show_layer_names=True)
    # train model on training set. validation1 set is used for early stopping
    fig = plt.figure()
    history = lstm.train_model(model, X_train, y_train, batch_size, epochs, shuffle, validation, (X_validation1, y_validation1), patience)
    # Training/validation loss curves.
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper left')
    plt.show()
    if cfg.run_config['save_figure']:
        util.save_figure("%s/%s/" % ("imgs", experiment_id), "train_errors", fig)

    # Scaled-space losses on the held-out splits.
    validation2_loss = model.evaluate(X_validation2, y_validation2, batch_size=batch_size, verbose=2)
    print "Validation2 Loss %s" % (validation2_loss)
    logging.info("Validation2 Loss %s" % (validation2_loss))
    test_loss = model.evaluate(X_test, y_test, batch_size=batch_size, verbose=2)
    print "Test Loss %s" % (test_loss)
    logging.info("Test Loss %s" % (test_loss))

    # Per-split predictions (un-scaled) saved for downstream anomaly scoring.
    predictions_train, y_true_train = get_predictions("Train", model, X_train, y_train, train_scaler,
                                                      batch_size, look_ahead, look_back, epochs, experiment_id,
                                                      )
    np.save(data_folder + "train_predictions", predictions_train)
    np.save(data_folder + "train_true",y_true_train)

    predictions_validation1, y_true_validation1 = get_predictions("Validation1", model, X_validation1, y_validation1,
                                                                  train_scaler, batch_size, look_ahead, look_back,
                                                                  epochs, experiment_id,
                                                                  )
    # Re-scale predictions to compare against Keras' scaled-space loss.
    predictions_validation1_scaled = train_scaler.transform(predictions_validation1)
    print "Calculated validation1 loss %f" % (mean_squared_error(
        np.reshape(y_validation1, [len(y_validation1), look_ahead]),
        np.reshape(predictions_validation1_scaled, [len(predictions_validation1_scaled), look_ahead])))
    np.save(data_folder + "validation1_predictions", predictions_validation1)
    np.save(data_folder + "validation1_true", y_true_validation1)
    # NOTE(review): validation1_labels is saved from validation2_labels --
    # util.load_data returns no separate validation1 labels; confirm intended.
    np.save(data_folder + "validation1_labels", validation2_labels)

    predictions_validation2, y_true_validation2 = get_predictions("Validation2", model, X_validation2, y_validation2,
                                                                  train_scaler, batch_size, look_ahead, look_back,
                                                                  epochs, experiment_id,
                                                                  )
    predictions_validation2_scaled = train_scaler.transform(predictions_validation2)
    print "Calculated validation2 loss %f"%(mean_squared_error(
        np.reshape(y_validation2, [len(y_validation2), look_ahead]),
        np.reshape(predictions_validation2_scaled, [len(predictions_validation2_scaled), look_ahead])))
    np.save(data_folder + "validation2_predictions", predictions_validation2)
    np.save(data_folder + "validation2_true", y_true_validation2)
    np.save(data_folder + "validation2_labels", validation2_labels)

    predictions_test, y_true_test = get_predictions("Test", model, X_test, y_test, train_scaler, batch_size, look_ahead,
                                                    look_back, epochs, experiment_id,
                                                    )
    predictions_test_scaled = train_scaler.transform(predictions_test)
    print "Calculated test loss %f" % (mean_squared_error( np.reshape(y_test, [len(y_test),look_ahead]),
                                                           np.reshape(predictions_test_scaled, [len(predictions_test_scaled),look_ahead])))
    np.save(data_folder + "test_predictions", predictions_test)
    np.save(data_folder + "test_true", y_true_test)
    np.save(data_folder + "test_labels", test_labels)

    logging.info("-------------------------run complete----------------------------------------------")
if __name__ == "__main__":
    # load config params
    FORMAT = '%(asctime)-15s. %(message)s'
    # BUG FIX: logging.basicConfig() returns None; the old code bound that
    # None to a 'logger' name that was never used.  Configure the root
    # logger directly -- module-level logging.info calls pick it up.
    logging.basicConfig(filename=cfg.run_config['log_file'], level=logging.INFO, format=FORMAT)
    run()
    logging.info("")
| 46.688679 | 138 | 0.66569 | import numpy as np
import tensorflow as tf
import random as rn
np.random.seed(123)
rn.seed(123)
session_conf = tf.ConfigProto(
intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
from keras import backend as K
tf.set_random_seed(123)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
import models.lstm as lstm
import configuration.config as cfg
import matplotlib
if cfg.run_config['Xserver'] == False:
print "No X-server"
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
import time
from keras.utils import plot_model
import utilities.utils as util
import numpy as np
import logging
import seaborn as sns
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pylab as pylab
params = {'legend.fontsize': 'x-large',
'figure.figsize': (15, 5),
'axes.labelsize': 'x-large',
'axes.titlesize':'x-large',
'xtick.labelsize':'x-large',
'ytick.labelsize':'x-large'}
pylab.rcParams.update(params)
sns.set_style("whitegrid")
def make_plots(context,predictions_timesteps,true_values,look_ahead,title,path,save_figure,Xserver):
step = 1
if look_ahead > 1:
step = look_ahead - 1
for idx, i in enumerate(np.arange(0, look_ahead, step)):
fig = plt.figure()
plt.xlabel("Time step")
plt.ylabel("Power Consumption")
plt.plot(true_values, label="True value", linewidth=1,color=sns.xkcd_rgb["denim blue"])
plt.plot(predictions_timesteps[:, i], label="Predicted value", linewidth=1, linestyle="--",color=sns.xkcd_rgb["medium green"])
error = abs(true_values - predictions_timesteps[:, i])
plt.plot(error, label='Error',color=sns.xkcd_rgb["pale red"], linewidth=0.5)
plt.legend(bbox_to_anchor=(1, .99))
plt.tight_layout()
if save_figure:
util.save_figure(path,"%s_timestep_%d"%(context,i), fig)
if Xserver:
plt.show()
def get_predictions(context,model,X,y,train_scaler,batch_size,look_ahead,look_back,epochs,experiment_id):
predictions = model.predict(X, batch_size=batch_size)
print predictions.shape
predictions = train_scaler.inverse_transform(predictions)
y = train_scaler.inverse_transform(y)
y_true = y[:, 0].flatten()
diagonals = util.get_diagonals(predictions)
# the top left and bottom right predictions do not contain predictions for all timesteps
# fill the missing prediction values in diagonals. curenttly using the first predicted value for all missing timesteps
for idx, diagonal in enumerate(diagonals):
diagonal = diagonal.flatten()
# missing value filled with the first value
diagonals[idx] = np.hstack((diagonal, np.full(look_ahead - len(diagonal), diagonal[0])))
predictions_timesteps = np.asarray(diagonals)
for i in range(look_ahead):
logging.info("%s RMSE on %d timestep prediction %f" % ( context,
(i + 1), mean_squared_error(y_true, predictions_timesteps[:, i]) ** 0.5))
shifted_1 = util.shift_time_series(y_true, 1)
logging.info(" %s RMSE Naive One Timestep Shift %f",context,
mean_squared_error(y_true[1:], shifted_1[1:]) ** 0.5)
title = "Prediction on %s data. %d epochs, look back %d, look_ahead %d & batch_size %d." % (
context, epochs, look_back, look_ahead, batch_size)
path = "%s/%s/"%("imgs",experiment_id)
make_plots(context,predictions_timesteps,y_true,look_ahead,title,path,cfg.run_config['save_figure'],
cfg.run_config['Xserver'])
return predictions_timesteps, y_true
def run():
    """End-to-end experiment: load config and data, train the multi-step LSTM,
    evaluate on both validation sets and the test set, and persist the
    (unscaled) predictions/targets as .npy files next to the input data."""
    #load config settings
    experiment_id = cfg.run_config['experiment_id']
    data_folder = cfg.run_config['data_folder']
    look_back = cfg.multi_step_lstm_config['look_back']
    look_ahead = cfg.multi_step_lstm_config['look_ahead']
    batch_size = cfg.multi_step_lstm_config['batch_size']
    epochs = cfg.multi_step_lstm_config['n_epochs']
    dropout = cfg.multi_step_lstm_config['dropout']
    layers = cfg.multi_step_lstm_config['layers']
    loss = cfg.multi_step_lstm_config['loss']
    # optimizer = cfg.multi_step_lstm_config['optimizer']
    shuffle = cfg.multi_step_lstm_config['shuffle']
    patience = cfg.multi_step_lstm_config['patience']
    validation = cfg.multi_step_lstm_config['validation']
    learning_rate = cfg.multi_step_lstm_config['learning_rate']
    logging.info("----------------------------------------------------")
    logging.info('Run id %s' % (experiment_id))
    # logs every local variable defined so far as the hyperparameter record
    logging.info(" HYPERPRAMRAMS : %s" % (str(locals())))
    train_scaler, X_train, y_train, X_validation1, y_validation1, X_validation2, y_validation2, validation2_labels, \
    X_test, y_test, test_labels = util.load_data(data_folder, look_back, look_ahead)
    multistep_lstm = lstm.MultiStepLSTM( look_back=look_back, look_ahead=look_ahead,
                                        layers=layers,
                                        dropout=dropout, loss=loss, learning_rate=learning_rate)
    model = multistep_lstm.build_model()
    if cfg.run_config['save_figure']:
        plot_model(model, to_file="imgs/%s_lstm.png"%(experiment_id), show_shapes=True, show_layer_names=True)
    # train model on training set. validation1 set is used for early stopping
    fig = plt.figure()
    history = lstm.train_model(model, X_train, y_train, batch_size, epochs, shuffle, validation, (X_validation1, y_validation1), patience)
    # learning curves (training vs. early-stopping validation loss)
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper left')
    plt.show()
    if cfg.run_config['save_figure']:
        util.save_figure("%s/%s/" % ("imgs", experiment_id), "train_errors", fig)
    # scaled-space losses straight from the model, for reference
    validation2_loss = model.evaluate(X_validation2, y_validation2, batch_size=batch_size, verbose=2)
    print "Validation2 Loss %s" % (validation2_loss)
    logging.info("Validation2 Loss %s" % (validation2_loss))
    test_loss = model.evaluate(X_test, y_test, batch_size=batch_size, verbose=2)
    print "Test Loss %s" % (test_loss)
    logging.info("Test Loss %s" % (test_loss))
    predictions_train, y_true_train = get_predictions("Train", model, X_train, y_train, train_scaler,
                                                      batch_size, look_ahead, look_back, epochs, experiment_id,
                                                      )
    np.save(data_folder + "train_predictions", predictions_train)
    np.save(data_folder + "train_true",y_true_train)
    predictions_validation1, y_true_validation1 = get_predictions("Validation1", model, X_validation1, y_validation1,
                                                                  train_scaler, batch_size, look_ahead, look_back,
                                                                  epochs, experiment_id,
                                                                  )
    # re-scale the predictions to compare against model.evaluate's scaled loss
    predictions_validation1_scaled = train_scaler.transform(predictions_validation1)
    print "Calculated validation1 loss %f" % (mean_squared_error(
        np.reshape(y_validation1, [len(y_validation1), look_ahead]),
        np.reshape(predictions_validation1_scaled, [len(predictions_validation1_scaled), look_ahead])))
    np.save(data_folder + "validation1_predictions", predictions_validation1)
    np.save(data_folder + "validation1_true", y_true_validation1)
    # NOTE(review): this saves the *validation2* labels under the validation1
    # name -- load_data returns no separate validation1 labels; confirm intent.
    np.save(data_folder + "validation1_labels", validation2_labels)
    predictions_validation2, y_true_validation2 = get_predictions("Validation2", model, X_validation2, y_validation2,
                                                                  train_scaler, batch_size, look_ahead, look_back,
                                                                  epochs, experiment_id,
                                                                  )
    predictions_validation2_scaled = train_scaler.transform(predictions_validation2)
    print "Calculated validation2 loss %f"%(mean_squared_error(
        np.reshape(y_validation2, [len(y_validation2), look_ahead]),
        np.reshape(predictions_validation2_scaled, [len(predictions_validation2_scaled), look_ahead])))
    np.save(data_folder + "validation2_predictions", predictions_validation2)
    np.save(data_folder + "validation2_true", y_true_validation2)
    np.save(data_folder + "validation2_labels", validation2_labels)
    predictions_test, y_true_test = get_predictions("Test", model, X_test, y_test, train_scaler, batch_size, look_ahead,
                                                    look_back, epochs, experiment_id,
                                                    )
    predictions_test_scaled = train_scaler.transform(predictions_test)
    print "Calculated test loss %f" % (mean_squared_error( np.reshape(y_test, [len(y_test),look_ahead]),
                                                           np.reshape(predictions_test_scaled, [len(predictions_test_scaled),look_ahead])))
    np.save(data_folder + "test_predictions", predictions_test)
    np.save(data_folder + "test_true", y_true_test)
    np.save(data_folder + "test_labels", test_labels)
    logging.info("-------------------------run complete----------------------------------------------")
if __name__ == "__main__":
    # Configure root logging to write to the file named in the run config.
    FORMAT = '%(asctime)-15s. %(message)s'
    logging.basicConfig(filename=cfg.run_config['log_file'], level=logging.INFO, format=FORMAT)
    # basicConfig() returns None, so the original `logger = logging.basicConfig(...)`
    # stored a useless value; fetch a real logger object instead.
    logger = logging.getLogger(__name__)
    run()
    logging.info("")
| false | true |
f71d40ea4ac12f604898bb136a6e2b9b0a5f4e83 | 1,146 | py | Python | out/euler04.py | Melyodas/metalang | 399a9f1a71402c979d7f8024d4f98f081c80e771 | [
"BSD-2-Clause"
] | 22 | 2017-04-24T10:00:45.000Z | 2021-04-01T10:11:05.000Z | out/euler04.py | Melyodas/metalang | 399a9f1a71402c979d7f8024d4f98f081c80e771 | [
"BSD-2-Clause"
] | 12 | 2017-03-26T18:34:21.000Z | 2019-03-21T19:13:03.000Z | out/euler04.py | Melyodas/metalang | 399a9f1a71402c979d7f8024d4f98f081c80e771 | [
"BSD-2-Clause"
] | 7 | 2017-10-14T13:33:33.000Z | 2021-03-18T15:18:50.000Z | import math
def mod(x, y):
    """Remainder of x/y under truncated (round-toward-zero) division."""
    quotient = math.trunc(x / y)
    return x - y * quotient
"""(a + b * 10 + c * 100) * (d + e * 10 + f * 100) =
a * d + a * e * 10 + a * f * 100 +
10 * (b * d + b * e * 10 + b * f * 100)+
100 * (c * d + c * e * 10 + c * f * 100) =
a * d + a * e * 10 + a * f * 100 +
b * d * 10 + b * e * 100 + b * f * 1000 +
c * d * 100 + c * e * 1000 + c * f * 10000 =
a * d +
10 * ( a * e + b * d) +
100 * (a * f + b * e + c * d) +
(c * e + b * f) * 1000 +
c * f * 10000"""
def chiffre(c, m):
    """Return decimal digit number c (0 = least significant) of m."""
    if c != 0:
        # Drop the last digit and look one position lower.
        return chiffre(c - 1, math.trunc(m / 10))
    return mod(m, 10)
# Largest palindromic product of two three-digit numbers (Project Euler 4).
# First factor is a + 10*b + 100*c (c >= 1), second is d + 10*e + 100*f
# (f >= 1); `mul` is the product expanded digit-by-digit (see derivation above).
m = 1
for a in range(0, 10):
    for f in range(1, 10):
        for d in range(0, 10):
            for c in range(1, 10):
                for b in range(0, 10):
                    for e in range(0, 10):
                        mul = a * d + 10 * (a * e + b * d) + 100 * (a * f + b * e + c * d) + 1000 * (c * e + b * f) + 10000 * c * f
                        # palindrome test: compare digits 0/5, 1/4 and 2/3
                        if chiffre(0, mul) == chiffre(5, mul) and chiffre(1, mul) == chiffre(4, mul) and chiffre(2, mul) == chiffre(3, mul):
                            m = max(mul, m)
print("%d\n" % m, end='')
| 30.972973 | 140 | 0.383944 | import math
def mod(x, y):
    """Truncated-division remainder: x minus trunc(x / y) times y."""
    return x - math.trunc(x / y) * y
def chiffre(c, m):
    """Decimal digit number c (0 = least significant) of m, extracted
    recursively by truncating one decimal place per step."""
    if c == 0:
        return mod(m, 10)
    else:
        return chiffre(c - 1, math.trunc(m / 10))
# Largest palindromic product of two three-digit numbers (Project Euler 4);
# factors are a+10b+100c and d+10e+100f, `mul` is their expanded product.
m = 1
for a in range(0, 10):
    for f in range(1, 10):
        for d in range(0, 10):
            for c in range(1, 10):
                for b in range(0, 10):
                    for e in range(0, 10):
                        mul = a * d + 10 * (a * e + b * d) + 100 * (a * f + b * e + c * d) + 1000 * (c * e + b * f) + 10000 * c * f
                        # palindrome test on the six digits: 0/5, 1/4, 2/3
                        if chiffre(0, mul) == chiffre(5, mul) and chiffre(1, mul) == chiffre(4, mul) and chiffre(2, mul) == chiffre(3, mul):
                            m = max(mul, m)
print("%d\n" % m, end='')
| true | true |
f71d40f2d3759b44ca62a6228b24e192e7a00be9 | 1,008 | py | Python | pro_mqtt/mqtt_le1.py | yongfang117/pro_useful_code | 63ae6891d3be243c5c46329e65fcf47133c5890f | [
"MIT"
] | null | null | null | pro_mqtt/mqtt_le1.py | yongfang117/pro_useful_code | 63ae6891d3be243c5c46329e65fcf47133c5890f | [
"MIT"
] | null | null | null | pro_mqtt/mqtt_le1.py | yongfang117/pro_useful_code | 63ae6891d3be243c5c46329e65fcf47133c5890f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created: 2019/11/24 16:29
Author: lyf
Change log:
Notes:
"""
# Key commands:
# # 1. import the package
# import paho.mqtt.client as mqtt
# # 2. create a client object
# client = mqtt.Client(id)
# # 3. connect
# client.connect(host, post)
# # 4. subscribe
# client.subscribe(topic)
# client.on_message=func  # handler invoked when a message is received
# # 5. publish
# client.publish(topic,payload)
import paho.mqtt.client as mqtt
import sys
# host="192.168.45.3"
host="127.0.0.1"  # MQTT broker address (localhost)
topic_sub = "Question"  # topic this client subscribes to
topic_pub = "temperature"  # topic replies are published on
def on_connect(client, userdata, flags, rc):
    """Connection callback: report the result code, then subscribe."""
    message = "Connected with result code " + str(rc)
    print(message)
    client.subscribe(topic_sub)
def on_message(client, userdata, msg):
    """Message callback: print the payload, then publish a reply."""
    payload = msg.payload
    print(payload)
    client.publish(topic_pub, "37°")
def main(argv=None):  # argv mirrors the sys-module CLI convention (unused here)
    """Create an MQTT client, connect to the broker and serve forever."""
    # Instantiate the client
    mqtt_client = mqtt.Client()
    # Reach the broker on the standard port with a 60 s keepalive
    mqtt_client.connect(host, 1883, 60)
    # Handlers fired on successful connection / incoming messages
    mqtt_client.on_connect = on_connect
    mqtt_client.on_message = on_message
    mqtt_client.loop_forever()
# if __name__ == "__main__":
#     sys.exit(main())
# NOTE(review): main() runs unconditionally, even on import; the commented-out
# guard above is the conventional entry point -- confirm before reusing as a module.
main()
sys
host="127.0.0.1"  # MQTT broker address (localhost)
topic_sub = "Question"  # inbound topic
topic_pub = "temperature"  # outbound topic
def on_connect(client, userdata, flags, rc):
    # Connection callback: report the result code and subscribe.
    print("Connected with result code " + str(rc))
    client.subscribe(topic_sub)
def on_message(client, userdata, msg):
    # Message callback: echo the payload and publish a fixed reading.
    print(msg.payload)
    client.publish(topic_pub, "37°")
def main(argv=None):
    # Build the client, wire the callbacks, and block in the network loop.
    client=mqtt.Client()
    client.connect(host,1883,60)
    client.on_connect = on_connect
    client.on_message = on_message
    client.loop_forever()
# Runs immediately on import/execution (no __main__ guard in this copy).
main()
f71d41eed59231c857476d3caab9e2db2e5c6a47 | 6,089 | py | Python | test.py | jssprz/attentive-visual-semantic-specialized-network-for-video-captioning | 00815884ba892c00db2d3778bd0083618ff6d2d7 | [
"MIT"
] | 10 | 2020-10-18T04:35:38.000Z | 2021-02-01T13:01:10.000Z | test.py | jssprz/attentive-visual-semantic-specialized-network-for-video-captioning | 00815884ba892c00db2d3778bd0083618ff6d2d7 | [
"MIT"
] | 6 | 2020-11-06T03:07:47.000Z | 2021-02-15T15:39:49.000Z | test.py | jssprz/attentive-visual-semantic-specialized-network-for-video-captioning | 00815884ba892c00db2d3778bd0083618ff6d2d7 | [
"MIT"
] | 2 | 2021-05-03T08:26:29.000Z | 2022-03-09T09:20:39.000Z | import os
import argparse
import pickle
from utils import decode_from_tokens
from vocabulary import Vocabulary
from configuration_file import ConfigurationFile
from model.encoder import Encoder
from model.decoder import AVSSNDecoder
import h5py
import torch
import numpy as np
if __name__ == '__main__':
    # Generate captions for the test split of a video-captioning dataset and
    # write them (one "<video index>\t<sentence>" per line) to predictions.txt.
    parser = argparse.ArgumentParser(description='Generate captions por test samples')
    parser.add_argument('-chckpt', '--checkpoint_path', type=str, default='pretrain/chckpt.pt',
                        help='Set the path to pre-trained model (default is pretrain/chckpt.pt).')
    parser.add_argument('-data', '--dataset_folder', type=str, default='data/MSVD',
                        help='Set the path to dataset folder (default is data/MSVD).')
    parser.add_argument('-out', '--output_folder', type=str, default='results/MSVD',
                        help='Set the path to output folder (default is results/MSVD).')
    args = parser.parse_args()
    # load vocabulary
    with open(os.path.join(args.dataset_folder, 'corpus.pkl'), "rb") as f:
        corpus = pickle.load(f)
    idx2word_dict = corpus[4]
    vocab = Vocabulary.from_idx2word_dict(idx2word_dict, False)
    print('Size of vocabulary: {}'.format(len(vocab)))
    # Pretrained Embedding
    pretrained_embedding = torch.Tensor(corpus[5])
    #max_frames = 20 #30
    # feature dimensionalities and model sizes (fixed for this setup)
    cnn_feature_size = 2048
    c3d_feature_size = 4096
    i3d_feature_size = 400
    res_eco_features_size = 3584
    projected_size = 512
    hidden_size = 1024 # Number of hidden layer units of the cyclic network
    mid_size = 128 # The middle of the boundary detection layer represents the dimension
    n_tags = 300
    global_tagger_hidden_size = 1024
    specific_tagger_hidden_size = 128
    # NOTE(review): hidden_size is re-assigned the same value here; redundant.
    hidden_size = 1024
    embedding_size = 300 #1024
    rnn_in_size = 300 #1024
    rnn_hidden_size = 1024
    config = ConfigurationFile(os.path.join(args.dataset_folder, 'config.ini'), 'attn-vscn-max')
    # Models
    encoder = Encoder(cnn_feature_size=cnn_feature_size,
                      c3d_feature_size=c3d_feature_size,
                      i3d_feature_size=i3d_feature_size,
                      n_tags=n_tags,
                      hidden_size=hidden_size,
                      global_tagger_hidden_size=global_tagger_hidden_size,
                      specific_tagger_hidden_size=specific_tagger_hidden_size,
                      n_layers=config.encoder_num_layers,
                      input_dropout_p=config.encoder_dropout_p,
                      rnn_dropout_p=config.encoder_dropout_p,
                      bidirectional=config.encoder_bidirectional,
                      rnn_cell=config.encoder_rnn_cell,
                      device='cpu')
    decoder = AVSSNDecoder(in_seq_length=config.max_frames,
                           out_seq_length=config.max_words,
                           n_feats=res_eco_features_size + 512,
                           n_tags=n_tags,
                           embedding_size=embedding_size,
                           pretrained_embedding=pretrained_embedding,
                           hidden_size=hidden_size,
                           rnn_in_size=rnn_in_size,
                           rnn_hidden_size=rnn_hidden_size,
                           vocab=vocab,
                           device='cpu',
                           rnn_cell=config.decoder_rnn_cell,
                           encoder_num_layers=config.encoder_num_layers,
                           encoder_bidirectional=config.encoder_bidirectional,
                           num_layers=config.decoder_num_layers,
                           dropout_p=config.decoder_dropout_p,
                           beam_size=config.decoder_beam_size,
                           temperature=config.decoder_temperature,
                           train_sample_max=config.decoder_train_sample_max,
                           test_sample_max=config.decoder_test_sample_max,
                           beam_search_logic = config.decoder_beam_search_logic)
    # Checkpoint
    checkpoint = torch.load(args.checkpoint_path, map_location='cpu')
    # 1. filter out unnecessary keys for encoder
    chckpt_dict = {k: v for k, v in checkpoint['encoder'].items() if k not in ['fc1.weight', 'fc1.bias', 'fc2.weight', 'fc2.bias']}
    encoder_dict = encoder.state_dict()
    encoder_dict.update(chckpt_dict)
    encoder.load_state_dict(encoder_dict)
    decoder.load_state_dict(checkpoint['decoder'])
    #load test set features
    test_vidxs = sorted(list(set(corpus[2][1])))
    with h5py.File(os.path.join(args.dataset_folder, config.features_path), 'r') as feats_file:
        print('loading visual feats...')
        dataset = feats_file[config.dataset_name]
        cnn_feats = torch.from_numpy(dataset['cnn_features'][test_vidxs]).float()
        c3d_feats = torch.from_numpy(dataset['c3d_features'][test_vidxs]).float()
        # zero placeholder replacing the stored cnn_globals (see commented call)
        cnn_globals = torch.zeros(cnn_feats.size(0), 512) # torch.from_numpy(dataset['cnn_globals'][test_vidxs]).float()
        # NOTE(review): cnn_sem_globals and f_counts are loaded but never
        # referenced again in this script.
        cnn_sem_globals = torch.from_numpy(dataset['cnn_sem_globals'][test_vidxs]).float()
        f_counts = dataset['count_features'][test_vidxs]
        print('visual feats loaded')
    res_eco_globals = torch.from_numpy(np.load(os.path.join(args.dataset_folder, 'resnext_eco.npy'))[test_vidxs])
    tags_globals = torch.from_numpy(np.load(os.path.join(args.dataset_folder, 'tag_feats.npy'))[test_vidxs])
    # inference mode: disable dropout/batchnorm updates and gradient tracking
    encoder.eval()
    decoder.eval()
    with torch.no_grad():
        video_encoded = encoder(cnn_feats, c3d_feats, cnn_globals, tags_globals, res_eco_globals)
        logits, tokens = decoder(video_encoded, None, teacher_forcing_ratio=0)
    # per-video confidence: mean over time of the max logit at each step
    scores = logits.max(dim=2)[0].mean(dim=1)
    confidences, sentences = [], []
    for score, seq in zip(scores, tokens):
        s = decode_from_tokens(seq, vocab)
        print(score, s)
        sentences.append(s)
        confidences.append(score)
    if not os.path.exists(args.output_folder):
        os.makedirs(args.output_folder)
    with open(os.path.join(args.output_folder, 'predictions.txt'), 'w') as fo:
        for vidx, sentence in zip(test_vidxs, sentences):
            fo.write(f'{vidx}\t{sentence}\n')
| 42.880282 | 129 | 0.654459 | import os
import argparse
import pickle
from utils import decode_from_tokens
from vocabulary import Vocabulary
from configuration_file import ConfigurationFile
from model.encoder import Encoder
from model.decoder import AVSSNDecoder
import h5py
import torch
import numpy as np
if __name__ == '__main__':
    # Caption the test split with a pre-trained encoder/decoder and write the
    # results to predictions.txt (comment-stripped duplicate of the script above).
    parser = argparse.ArgumentParser(description='Generate captions por test samples')
    parser.add_argument('-chckpt', '--checkpoint_path', type=str, default='pretrain/chckpt.pt',
                        help='Set the path to pre-trained model (default is pretrain/chckpt.pt).')
    parser.add_argument('-data', '--dataset_folder', type=str, default='data/MSVD',
                        help='Set the path to dataset folder (default is data/MSVD).')
    parser.add_argument('-out', '--output_folder', type=str, default='results/MSVD',
                        help='Set the path to output folder (default is results/MSVD).')
    args = parser.parse_args()
    # vocabulary and pretrained word embeddings come from the pickled corpus
    with open(os.path.join(args.dataset_folder, 'corpus.pkl'), "rb") as f:
        corpus = pickle.load(f)
    idx2word_dict = corpus[4]
    vocab = Vocabulary.from_idx2word_dict(idx2word_dict, False)
    print('Size of vocabulary: {}'.format(len(vocab)))
    pretrained_embedding = torch.Tensor(corpus[5])
    # feature dimensionalities and model sizes (fixed for this setup)
    cnn_feature_size = 2048
    c3d_feature_size = 4096
    i3d_feature_size = 400
    res_eco_features_size = 3584
    projected_size = 512
    hidden_size = 1024
    mid_size = 128
    n_tags = 300
    global_tagger_hidden_size = 1024
    specific_tagger_hidden_size = 128
    # NOTE(review): hidden_size re-assigned the same value; redundant.
    hidden_size = 1024
    embedding_size = 300
    rnn_in_size = 300
    rnn_hidden_size = 1024
    config = ConfigurationFile(os.path.join(args.dataset_folder, 'config.ini'), 'attn-vscn-max')
    # build the encoder/decoder pair on CPU
    encoder = Encoder(cnn_feature_size=cnn_feature_size,
                      c3d_feature_size=c3d_feature_size,
                      i3d_feature_size=i3d_feature_size,
                      n_tags=n_tags,
                      hidden_size=hidden_size,
                      global_tagger_hidden_size=global_tagger_hidden_size,
                      specific_tagger_hidden_size=specific_tagger_hidden_size,
                      n_layers=config.encoder_num_layers,
                      input_dropout_p=config.encoder_dropout_p,
                      rnn_dropout_p=config.encoder_dropout_p,
                      bidirectional=config.encoder_bidirectional,
                      rnn_cell=config.encoder_rnn_cell,
                      device='cpu')
    decoder = AVSSNDecoder(in_seq_length=config.max_frames,
                           out_seq_length=config.max_words,
                           n_feats=res_eco_features_size + 512,
                           n_tags=n_tags,
                           embedding_size=embedding_size,
                           pretrained_embedding=pretrained_embedding,
                           hidden_size=hidden_size,
                           rnn_in_size=rnn_in_size,
                           rnn_hidden_size=rnn_hidden_size,
                           vocab=vocab,
                           device='cpu',
                           rnn_cell=config.decoder_rnn_cell,
                           encoder_num_layers=config.encoder_num_layers,
                           encoder_bidirectional=config.encoder_bidirectional,
                           num_layers=config.decoder_num_layers,
                           dropout_p=config.decoder_dropout_p,
                           beam_size=config.decoder_beam_size,
                           temperature=config.decoder_temperature,
                           train_sample_max=config.decoder_train_sample_max,
                           test_sample_max=config.decoder_test_sample_max,
                           beam_search_logic = config.decoder_beam_search_logic)
    # load the checkpoint, dropping fc1/fc2 keys not present in this encoder
    checkpoint = torch.load(args.checkpoint_path, map_location='cpu')
    chckpt_dict = {k: v for k, v in checkpoint['encoder'].items() if k not in ['fc1.weight', 'fc1.bias', 'fc2.weight', 'fc2.bias']}
    encoder_dict = encoder.state_dict()
    encoder_dict.update(chckpt_dict)
    encoder.load_state_dict(encoder_dict)
    decoder.load_state_dict(checkpoint['decoder'])
    # test-split video indices and their visual features
    test_vidxs = sorted(list(set(corpus[2][1])))
    with h5py.File(os.path.join(args.dataset_folder, config.features_path), 'r') as feats_file:
        print('loading visual feats...')
        dataset = feats_file[config.dataset_name]
        cnn_feats = torch.from_numpy(dataset['cnn_features'][test_vidxs]).float()
        c3d_feats = torch.from_numpy(dataset['c3d_features'][test_vidxs]).float()
        # zero placeholder standing in for stored cnn_globals
        cnn_globals = torch.zeros(cnn_feats.size(0), 512)
        # NOTE(review): cnn_sem_globals and f_counts are never used below.
        cnn_sem_globals = torch.from_numpy(dataset['cnn_sem_globals'][test_vidxs]).float()
        f_counts = dataset['count_features'][test_vidxs]
        print('visual feats loaded')
    res_eco_globals = torch.from_numpy(np.load(os.path.join(args.dataset_folder, 'resnext_eco.npy'))[test_vidxs])
    tags_globals = torch.from_numpy(np.load(os.path.join(args.dataset_folder, 'tag_feats.npy'))[test_vidxs])
    # inference mode, no gradient tracking
    encoder.eval()
    decoder.eval()
    with torch.no_grad():
        video_encoded = encoder(cnn_feats, c3d_feats, cnn_globals, tags_globals, res_eco_globals)
        logits, tokens = decoder(video_encoded, None, teacher_forcing_ratio=0)
    # per-video confidence: mean over time of the max logit at each step
    scores = logits.max(dim=2)[0].mean(dim=1)
    confidences, sentences = [], []
    for score, seq in zip(scores, tokens):
        s = decode_from_tokens(seq, vocab)
        print(score, s)
        sentences.append(s)
        confidences.append(score)
    if not os.path.exists(args.output_folder):
        os.makedirs(args.output_folder)
    with open(os.path.join(args.output_folder, 'predictions.txt'), 'w') as fo:
        for vidx, sentence in zip(test_vidxs, sentences):
            fo.write(f'{vidx}\t{sentence}\n')
| true | true |
f71d42f855d41d0b78af8b30cec0f6bd6a6b4519 | 97,512 | py | Python | external/synple/synple.py | dnidever/apogee | 83ad7496a0b4193df9e2c01b06dc36cb879ea6c1 | [
"BSD-3-Clause"
] | null | null | null | external/synple/synple.py | dnidever/apogee | 83ad7496a0b4193df9e2c01b06dc36cb879ea6c1 | [
"BSD-3-Clause"
] | null | null | null | external/synple/synple.py | dnidever/apogee | 83ad7496a0b4193df9e2c01b06dc36cb879ea6c1 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""Python wrapper for synspec
Calculation of synthetic spectra of stars and convolution with a rotational/Gaussian kernel.
Makes the use of synspec simpler, and retains the main functionalities (when used from
python). The command line interface is even simpler but fairly limited.
For information on
synspec visit http://nova.astro.umd.edu/Synspec43/synspec.html.
Example
-------
To compute the solar spectrum between 6160 and 6164 angstroms, using a model atmosphere in
the file sun.mod (provided with the distribution), with the output going into the file
sun.syn
$synple.py sun.mod 6160. 6164.
To force a micro of 1.1 km/s, and convolve the spectrum with a Gaussian kernel with a fwhm
of 0.1 angstroms
$synple.py sun.mod 6160. 6164. 1.1 0.1
To perform the calculations above in python and compare the emergent normalized profiles
>>> from synple import syn
>>> x, y, z = syn('sun.mod', (6160.,6164.))
>>> x2, y2, z2 = syn('sun.mod', (6160.,6164.), vmicro=1.1, fwhm=0.1)
in plain python
>>> import matplotlib.pyplot as plt
>>> plt.ion()
>>> plt.plot(x,y/z, x2, y2/z2)
or ipython
In [1]: %pylab
In [2]: plot(x,y/z, x2, y2/z2)
"""
import os
import sys
import subprocess
import numpy as np
import glob
import time
import copy
import gzip
from scipy import interpolate
import matplotlib.pyplot as plt
from itertools import product
#configuration
#synpledir = /home/callende/synple
synpledir = os.path.dirname(os.path.realpath(__file__))  # install dir of this module

#relative paths
modeldir = synpledir + "/models"        # bundled model atmospheres
modelatomdir = synpledir + "/data"      # model atoms / opacity data
linelistdir = synpledir + "/linelists"  # line lists
bindir = synpledir + "/bin"             # compiled fortran executables
synspec = bindir + "/s54d"              # synspec spectral-synthesis binary
rotin = bindir + "/rotin3"              # rotation/Gaussian convolution binary

#other stuff
clight = 299792.458  # speed of light (km/s)
epsilon = 0.6 #clv coeff. (center-to-limb variation / limb darkening)
bolk = 1.38054e-16 # erg/ K  -- Boltzmann constant
# presumably fixed-width integer flags used when composing fortran input
# files elsewhere in the module -- confirm against the writer routines
zero = " 0 "
one =  " 1 "
two =  " 2 "
def syn(modelfile, wrange, dw=None, strength=1e-4, vmicro=None, abu=None, \
    linelist=['gfallx3_bpo.19','kmol3_0.01_30.20'], atom='ap18', vrot=0.0, fwhm=0.0, \
    steprot=0.0, stepfwhm=0.0, clean=True, save=False, synfile=None,
    compute=True, tmpdir=None):

  """Computes a synthetic spectrum

  Interface to the fortran codes synspec/rotin that only requires two mandatory inputs:
  a model atmosphere (modelfile) and the limits of the spectral range (wrange). The code
  recognizes Kurucz, MARCS and Phoenix LTE model atmospheres. The sampling of the frequency
  grid is chosen internally, but can also be set by adding a constant wavelength step (dw).
  The abundances and microturbulence velocity can be set through the abu and vmicro
  parameters, but default values will be taken from the model atmosphere. Rotational and
  Gaussian broadening can be introduced (vrot and fwhm parameters). The computed spectrum
  can be written to a file (save == True).

  Parameters
  ----------
  modelfile : str
      file with a model atmosphere
  wrange: tuple or list of two floats
      initial and ending wavelengths (angstroms)
  dw: float, optional
      wavelength step for the output fluxes
      this will be the maximum interval for the radiative
      transfer, and will trigger interpolation at the end
      (default is None for automatic selection)
  strength: float, optional
      threshold in the line-to-continuum opacity ratio for
      selecting lines (default is 1e-4)
  vmicro: float, optional
      microturbulence (km/s)
      (default is taken from the model atmosphere)
  abu: array of floats (99 elements), optional
      chemical abundances relative to hydrogen (N(X)/N(H))
      (default taken from input model atmosphere)
  linelist: array of str
      filenames of the line lists, the first one corresponds to
      the atomic lines and all the following ones (optional) to
      molecular lines
      (default ['gfallx3_bpo.19','kmol3_0.01_30.20'] from Allende Prieto+ 2018)
  atom: str
      'ap18' -- generic opacities used in Allende Prieto+ 2018
      'yo19' -- restricted set for NLTE calculations for APOGEE 2019 (Osorio+ 2019)
      'hhm' -- continuum opacity is simplified to H and H-
      (default 'ap18')
  vrot: float
      projected rotational velocity (km/s)
      (default 0.)
  steprot: float
      wavelength step for convolution with rotational kernel (angstroms)
      set to 0. for automatic adjustment (default 0.)
  fwhm: float
      Gaussian broadening: macroturbulence, instrumental, etc. (angstroms)
      (default 0.)
  stepfwhm: float
      wavelength step for Gaussian convolution (angstroms)
      set to 0. for automatic adjustment (default 0.)
  clean: bool
      True by the default, set to False to avoid the removal of the synspec
      temporary files/links (default True)
  save: bool
      set to True to save the computed spectrum to a file (default False)
      the root of the model atmosphere file, with an extension ".syn" will be used
      but see the parameter synfile to change that
  synfile: str
      when save is True, this can be used to set the name of the output file
      (default None)
  compute: bool
      set to False to skip the actual synspec run, triggering clean=False
      (default True)
  tmpdir: string
      when is not None a temporary directory with this name will be created to store
      the temporary synspec input/output files, and the synple log file (usually named
      syn.log) will be named as tmpdir_syn.log.

  Returns
  -------
  wave: numpy array of floats
      wavelengths (angstroms)
  flux: numpy array of floats
      flux (H_lambda in ergs/s/cm2/A)
  cont: numpy array of floats
      continuum flux (same units as flux)

  """

  #basic checks on the line list and model atmosphere
  checksynspec(linelist,modelfile)

  #read model atmosphere
  atmostype, teff, logg, vmicro2, abu2, nd, atmos = read_model(modelfile)

  # fall back on the model-atmosphere values when not given explicitly.
  # 'is None' (rather than '== None') avoids an ambiguous elementwise
  # comparison when abu is passed in as a numpy array
  if vmicro is None: vmicro = vmicro2
  if abu is None: abu = abu2
  if dw is None:
    # automatic sampling: ~1/3 of the thermal+microturbulent Doppler width
    # at the coolest layer of the model
    space = np.mean(wrange) * np.sqrt(9.12e-15 * np.min(atmos['t']) + vmicro** 2) / clight / 3.
  else:
    space = dw

  #check input parameters are valid
  imode = checkinput(wrange, vmicro, linelist)

  print ('teff,logg,vmicro=',teff,logg,vmicro)

  logfile = 'syn.log'
  if tmpdir is not None:
    # run in a scratch directory; the log keeps the tmpdir name as a prefix
    startdir = os.getcwd()
    logfile = os.path.join(startdir,os.path.split(tmpdir)[-1]) + "_" + logfile
    try:
      os.mkdir(tmpdir)
    except OSError:
      print( "cannot create tmpdir %s " % (tmpdir) )
    try:
      os.chdir(tmpdir)
    except OSError:
      print("cannot enter tmpdir %s " % (tmpdir) )

  cleanup()

  writetas('tas',nd,linelist)                           #non-std param. file
  write5(teff,logg,abu,atom)                            #abundance/opacity file
  write8(teff,logg,nd,atmos,atmostype)                  #model atmosphere
  write55(wrange,space,imode,2,strength,vmicro,linelist,atmostype) #synspec control file
  create_links(linelist)                                #auxiliary data

  if compute == False:

    # per the docstring, skipping the synspec run implies keeping the
    # freshly written input files around (compute=False triggers clean=False)
    clean = False

    wave = None
    flux = None
    cont = None

  else:

    synin = open('fort.5')
    synout = open(logfile,'w')

    start = time.time()
    p = subprocess.Popen([synspec], stdin=synin, stdout = synout, stderr= synout, shell=True)
    p.wait()

    synout.flush()
    synout.close()
    synin.close()

    # str(tmpdir) keeps the assertion message from raising a TypeError
    # when tmpdir is None and the assertion fires
    assert (os.path.isfile('fort.7')), 'Error: I cannot read the file *fort.7* in '+str(tmpdir)+' -- looks like synspec has crashed, please look at syn.log'
    assert (os.path.isfile('fort.17')), 'Error: I cannot read the file *fort.17* in '+str(tmpdir)+' -- looks like synspec has crashed, please look at syn.log'

    wave, flux = np.loadtxt('fort.7', unpack=True)
    wave2, flux2 = np.loadtxt('fort.17', unpack=True)
    if dw is None and fwhm <= 0. and vrot <= 0.: cont = np.interp(wave, wave2, flux2)
    end = time.time()
    print('syn ellapsed time ',end - start, 'seconds')

    if fwhm > 0. or vrot > 0.:
      start = time.time()
      print( vrot, fwhm, space, steprot, stepfwhm)
      wave, flux = call_rotin (wave, flux, vrot, fwhm, space, steprot, stepfwhm, clean=False, reuseinputfiles=True)
      if dw is None: cont = np.interp(wave, wave2, flux2)
      end = time.time()
      print('convol ellapsed time ',end - start, 'seconds')

    if (dw is not None):
      # resample the spectrum (cubic spline) and the continuum (linear)
      # onto a uniform grid with step dw
      nsamples = int((wrange[1] - wrange[0])/dw) + 1
      wave3 = np.arange(nsamples)*dw + wrange[0]
      flux = interp_spl(wave3, wave, flux)
      cont = np.interp(wave3, wave2, flux2)
      wave = wave3

  if clean == True: cleanup()

  if tmpdir is not None:
    try:
      os.chdir(startdir)
    except OSError:
      print("cannot change directory from tmpdir %s to startdir %s" % (tmpdir,startdir) )
    if clean == True:
      try:
        os.rmdir(tmpdir)
      except OSError:
        print("cannot remove directory tmpdir %s" % (tmpdir) )

  if save == True:
    if synfile is None:
      tmpstr = os.path.split(modelfile)[-1]
      synfile = tmpstr[:tmpstr.rfind('.')]+'.syn'
    np.savetxt(synfile,(wave,flux,cont))

  return(wave, flux, cont)
def mpsyn(modelfile, wrange, dw=None, strength=1e-4, vmicro=None, abu=None, \
    linelist=['gfallx3_bpo.19','kmol3_0.01_30.20'],atom='ap18', vrot=0.0, fwhm=0.0, \
    steprot=0.0, stepfwhm=0.0, clean=True, save=False, synfile=None,
    compute=True, nthreads=1):

  """Computes a synthetic spectrum, splitting the spectral range in nthreads parallel calculations

  Wrapper for syn, using multiprocessing, to speed-up the calculation of a broad spectral range

  Parameters
  ----------
  modelfile : str
      file with a model atmosphere
  wrange: tuple or list of two floats
      initial and ending wavelengths (angstroms)
  dw: float, optional
      wavelength step for the output fluxes
      this will be the maximum interval for the radiative
      transfer, and will trigger interpolation at the end
      (default is None for automatic selection)
  strength: float, optional
      threshold in the line-to-continuum opacity ratio for
      selecting lines (default is 1e-4)
  vmicro: float, optional
      microturbulence (km/s)
      (default is taken from the model atmosphere)
  abu: array of floats (99 elements), optional
      chemical abundances relative to hydrogen (N(X)/N(H))
      (default taken from input model atmosphere)
  linelist: array of str
      filenames of the line lists, the first one corresponds to
      the atomic lines and all the following ones (optional) to
      molecular lines
      (default ['gfallx3_bpo.19','kmol3_0.01_30.20'] from Allende Prieto+ 2018)
  atom: str
      'ap18' -- generic opacities used in Allende Prieto+ 2018
      'yo19' -- restricted set for NLTE calculations for APOGEE 2019 (Osorio+ 2019)
      'hhm' -- continuum opacity is simplified to H and H-
      (default 'ap18')
  vrot: float
      projected rotational velocity (km/s)
      (default 0.)
  steprot: float
      wavelength step for convolution with rotational kernel (angstroms)
      set to 0. for automatic adjustment (default 0.)
  fwhm: float
      Gaussian broadening: macroturbulence, instrumental, etc. (angstroms)
      (default 0.)
  stepfwhm: float
      wavelength step for Gaussian convolution (angstroms)
      set to 0. for automatic adjustment (default 0.)
  clean: bool
      True by the default, set to False to avoid the removal of the synspec
      temporary files/links (default True)
  save: bool
      set to True to save the computed spectrum to a file (default False)
      the root of the model atmosphere file, with an extension ".syn" will be used
      but see the parameter synfile to change that
  synfile: str
      when save is True, this can be used to set the name of the output file
      (default None)
  compute: bool
      set to False to skip the actual synspec run, triggering clean=False
      (default True)
  nthreads: int
      choose the number of cores to use in the calculation
      (default 1, 0 has the meaning that the code should take all the cores available)

  Returns
  -------
  wave: numpy array of floats
      wavelengths (angstroms)
  flux: numpy array of floats
      flux (H_lambda in ergs/s/cm2/A)
  cont: numpy array of floats
      continuum flux (same units as flux)

  """

  from multiprocessing import Pool, cpu_count

  # nthreads == 0 means "use every available core"
  if nthreads == 0:
    nthreads = cpu_count()

  # split the requested range into nthreads contiguous chunks; each worker
  # runs syn() in its own scratch directory ('par0', 'par1', ...)
  delta = (wrange[1] - wrange[0]) / nthreads
  pars = []
  for i in range(nthreads):
    wrange1 = (wrange[0] + delta * i, wrange[0] + delta * (i + 1))
    pars.append([modelfile, wrange1, dw, strength, vmicro, abu,
                 linelist, atom, vrot, fwhm,
                 steprot, stepfwhm, clean, save, synfile,
                 compute, 'par' + str(i)])

  pool = Pool(nthreads)
  results = pool.starmap(syn, pars)
  pool.close()
  pool.join()

  # stitch the chunks back together; consecutive chunks nominally share their
  # boundary wavelength, so the first sample of every chunk after the first
  # is dropped to avoid duplication
  x, y, z = results[0]
  for xi, yi, zi in results[1:]:
    x = np.concatenate((x, xi[1:]))
    y = np.concatenate((y, yi[1:]))
    z = np.concatenate((z, zi[1:]))

  return (x, y, z)
def raysyn(modelfile, wrange, dw=None, strength=1e-4, vmicro=None, abu=None, \
    linelist=['gfallx3_bpo.19','kmol3_0.01_30.20'], atom='ap18', vrot=0.0, fwhm=0.0, \
    steprot=0.0, stepfwhm=0.0, clean=True, save=False, synfile=None,
    compute=True, nthreads=1):

  """Computes a synthetic spectrum, splitting the spectral range in nthreads parallel calculations

  Wrapper for syn, using ray, to speed-up the calculation of a broad spectral range.
  The range is split into nthreads contiguous chunks, each computed by syn in its
  own working directory ('par0', 'par1', ...), and the pieces are stitched together.

  Parameters
  ----------
  modelfile : str
      file with a model atmosphere
  wrange: tuple or list of two floats
      initial and ending wavelengths (angstroms)
  dw: float, optional
      wavelength step for the output fluxes
      (default None for automatic selection)
  strength: float, optional
      threshold in the line-to-continuum opacity ratio for
      selecting lines (default 1e-4)
  vmicro: float, optional
      microturbulence (km/s) (default: taken from the model atmosphere)
  abu: array of floats (99 elements), optional
      chemical abundances relative to hydrogen (N(X)/N(H))
      (default taken from input model atmosphere)
  linelist: array of str
      filenames of the line lists: the atomic one first, then (optionally)
      molecular ones (default ['gfallx3_bpo.19','kmol3_0.01_30.20'])
  atom: str
      'ap18', 'yo19' or 'hhm' opacity setup (default 'ap18')
  vrot: float
      projected rotational velocity (km/s) (default 0.)
  fwhm: float
      Gaussian broadening: macroturbulence, instrumental, etc. (angstroms)
      (default 0.)
  steprot: float
      wavelength step for convolution with rotational kernel (angstroms)
      set to 0. for automatic adjustment (default 0.)
  stepfwhm: float
      wavelength step for Gaussian convolution (angstroms)
      set to 0. for automatic adjustment (default 0.)
  clean: bool
      set to False to keep the synspec temporary files/links (default True)
  save: bool
      set to True to save the computed spectrum to a file (default False)
  synfile: str
      output file name used when save is True (default None)
  compute: bool
      set to False to skip the actual synspec run, triggering clean=False
      (default True)
  nthreads: int
      number of cores to use (default 1; 0 means all physical cores)

  Returns
  -------
  wave: numpy array of floats
      wavelengths (angstroms)
  flux: numpy array of floats
      flux (H_lambda in ergs/s/cm2/A)
  cont: numpy array of floats
      continuum flux (same units as flux)
  """

  import psutil
  import ray

  @ray.remote
  def fun(vari, cons):
    # per-chunk variables: wavelength sub-range and the working directory name
    wrange1, tmpdir = vari
    # parameters common to all chunks, shared through the ray object store
    modelfile, dw, strength, vmicro, abu, linelist, \
      atom, vrot, fwhm, steprot, stepfwhm, clean, save, synfile, compute = cons

    x, y, z = syn(modelfile, wrange1, dw, strength, vmicro, abu, \
                  linelist, atom, vrot, fwhm, \
                  steprot, stepfwhm, clean, save, synfile,
                  compute, tmpdir)

    return (x, y, z)

  if nthreads == 0:
    # physical cores only (logical=False skips hyperthreads)
    nthreads = psutil.cpu_count(logical=False)

  print('nthreads=', nthreads)

  ray.init(num_cpus=nthreads)

  # place the shared constants once in the object store instead of per task
  rest = [modelfile, dw, strength, vmicro, abu, linelist, \
          atom, vrot, fwhm, steprot, stepfwhm, clean, save, synfile, compute]
  constants = ray.put(rest)

  delta = (wrange[1] - wrange[0]) / nthreads
  pars = []
  for i in range(nthreads):
    # i-th contiguous wavelength chunk, computed in directory 'par<i>'
    wrange1 = (wrange[0] + delta * i, wrange[0] + delta * (i + 1))
    pars.append([wrange1, 'par' + str(i)])

  results = ray.get([fun.remote(pars[i], constants) for i in range(nthreads)])

  # stitch the chunks together; the first point of every chunk after the first
  # is dropped (presumably it duplicates the previous chunk's endpoint -- the
  # sub-ranges share their boundary wavelengths)
  x = results[0][0]
  y = results[0][1]
  z = results[0][2]

  if len(results) > 1:
    for i in range(len(results) - 1):
      x = np.concatenate((x, results[i + 1][0][1:]))
      y = np.concatenate((y, results[i + 1][1][1:]))
      z = np.concatenate((z, results[i + 1][2][1:]))

  return (x, y, z)
def multisyn(modelfiles, wrange, dw=None, strength=1e-4, abu=None, \
    vmicro=None, vrot=0.0, fwhm=0.0, nfe=0.0, \
    linelist=['gfallx3_bpo.19','kmol3_0.01_30.20'], atom='ap18', \
    steprot=0.0, stepfwhm=0.0, clean=True, save=None, nthreads=1):

  """Computes synthetic spectra for a list of files. The values of vmicro, vrot,
  fwhm, and nfe can be iterables. Whether or not dw is specified the results will be
  placed on a common wavelength scale by interpolation. When not specified, dw will be
  chosen as appropriate for the first model in modelfiles.

  Parameters
  ----------
  modelfiles : list of str
      files with model atmospheres
  wrange: tuple or list of two floats
      initial and ending wavelengths (angstroms)
  dw: float
      wavelength step for the output fluxes; interpolation to a constant step
      will always be done
      (default is None for automatic selection based on the first spectrum)
  strength: float, optional
      threshold in the line-to-continuum opacity ratio for
      selecting lines (default is 1e-4)
  abu: array of floats (99 elements), optional
      chemical abundances relative to hydrogen (N(X)/N(H))
      (default taken from input model atmosphere)
  vmicro: float, optional, can be an iterable
      microturbulence (km/s) (default is taken from the model atmosphere)
  vrot: float, can be an iterable
      projected rotational velocity (km/s) (default 0.)
  fwhm: float, can be an iterable
      Gaussian broadening: macroturbulence, instrumental, etc. (angstroms)
      (default 0.)
  nfe: float, can be an iterable
      [N/Fe] nitrogen abundance change from the one specified in 'abu' (dex)
      (default 0.)
  linelist: array of str
      filenames of the line lists: the atomic one first, then (optionally)
      molecular ones (default ['gfallx3_bpo.19','kmol3_0.01_30.20'])
  atom: str
      'ap18', 'yo19' or 'hhm' opacity setup (default 'ap18')
  steprot: float
      wavelength step for convolution with rotational kernel (angstroms)
      set to 0. for automatic adjustment (default 0.)
  stepfwhm: float
      wavelength step for Gaussian convolution (angstroms)
      set to 0. for automatic adjustment (default 0.)
  clean: bool
      set to False to keep the synspec temporary files/links (default True)
  save: bool
      set to True to save the computed spectra to files (default None)
  nthreads: int
      number of cores to use (default 1; 0 means all available)

  Returns
  -------
  wave: numpy array of floats (1D)
      wavelengths (angstroms)
  flux: numpy array of floats (2D -- as many rows as spectra computed)
      flux (H_lambda in ergs/s/cm2/A)
  cont: numpy array of floats (2D -- as many rows as spectra computed)
      continuum flux (same units as flux)
  """

  #when vmicro, vrot, fwhm or nitrogen are not iterables, we create ones, otherwise we copy them
  try:
    nvmicro = len(vmicro)
    vmicros = vmicro
  except TypeError:
    nvmicro = 1
    vmicros = [ vmicro ]
  try:
    nvrot = len(vrot)
    vrots = vrot   # bug fix: was 'vrots = vrots' (NameError for iterable vrot)
  except TypeError:
    nvrot = 1
    vrots = [ vrot ]
  try:
    nfwhm = len(fwhm)
    fwhms = fwhm
  except TypeError:
    nfwhm = 1
    fwhms = [ fwhm ]
  try:
    nnfe = len(nfe)
    nfes = nfe     # bug fix: was 'nnfes = nfe', leaving 'nfes' undefined below
  except TypeError:
    nnfe = 1
    nfes = [ nfe ]

  assert (len(modelfiles) > 0), 'multisyn needs at least one model to work with'
  wave = None
  flux = None
  cont = None

  for entry in modelfiles:
    for vmicro1 in vmicros:
      for nfe1 in nfes:

        abu1 = copy.copy(abu)

        #if need be, adjust nitrogen abundance according to nfe
        if (abs(nfe1) > 1e-7):
          # 'is None' rather than '== None': abu1 may be a numpy array,
          # for which '==' is elementwise and ambiguous in a bool context
          if abu1 is None:
            checksynspec(linelist,entry)
            atmostype, teff, logg, vmicro2, abu1, nd, atmos = read_model(entry)
          abu1[6] = abu1[6] * 10.**nfe1

        x, y, z = mpsyn(entry, wrange, dw=None, strength=strength, \
                        vmicro=vmicro1, abu=abu1, linelist=linelist, atom=atom, \
                        clean=clean, save=save, nthreads=nthreads)

        space = np.mean(np.diff(x))

        for vrot1 in vrots:
          for fwhm1 in fwhms:
            if fwhm1 > 0. or vrot1 > 0.:
              start = time.time()
              print( entry, vmicro1, nfe1, vrot1, fwhm1, space)
              # bug fix: convolve with the loop values vrot1/fwhm1, not the
              # (possibly iterable) input parameters vrot/fwhm
              x2, y2 = call_rotin (x, y, vrot1, fwhm1, space, steprot, stepfwhm, \
                                   clean=False, reuseinputfiles=True)
              z2 = np.interp(x2, x, z)
              end = time.time()
              print('convol ellapsed time ',end - start, 'seconds')
            else:
              x2, y2, z2 = x, y, z

            if entry == modelfiles[0] and vmicro1 == vmicros[0] and vrot1 == vrots[0] and fwhm1 == fwhms[0] and nfe1 == nfes[0]:
              # first spectrum defines the common wavelength grid
              if dw is None: dw = np.median(np.diff(x2))
              nsamples = int((wrange[1] - wrange[0])/dw) + 1
              wave = np.arange(nsamples)*dw + wrange[0]
              flux = interp_spl(wave, x2, y2)
              cont = np.interp(wave, x2, z2)
            else:
              # bug fix: stack the convolved spectrum (x2,y2,z2), not the
              # unconvolved one (x,y,z)
              flux = np.vstack ( (flux, interp_spl(wave, x2, y2) ) )
              cont = np.vstack ( (cont, np.interp(wave, x2, z2) ) )

  return(wave, flux, cont)
def polysyn(modelfiles, wrange, dw=None, strength=1e-4, abu=None, \
    vmicro=None, vrot=0.0, fwhm=0.0, nfe=0.0, \
    linelist=['gfallx3_bpo.19','kmol3_0.01_30.20'],atom='ap18', \
    steprot=0.0, stepfwhm=0.0, clean=True, save=None):

  """Sets up a directory tree for computing synthetic spectra for a list of files in
  parallel. The values of vmicro, vrot, fwhm, and nfe can be iterables.

  One numbered directory ('hyd0000001', ...) is created per combination of model,
  vmicro and nfe, each containing the synspec input files and a slurm job script
  that runs synspec and (when vrot/fwhm require it) rotin. Nothing is computed
  here: the returned arrays are always (None, None, None).

  Parameters
  ----------
  modelfiles : list of str
      files with model atmospheres (entries equal to 'missing' are skipped)
  wrange: tuple or list of two floats
      initial and ending wavelengths (angstroms)
  dw: float
      wavelength step for the output fluxes
      (default is None for automatic selection)
  strength: float, optional
      threshold in the line-to-continuum opacity ratio for
      selecting lines (default is 1e-4)
  abu: array of floats (99 elements), optional
      chemical abundances relative to hydrogen (N(X)/N(H))
      (default taken from input model atmosphere)
  vmicro: float, optional, can be an iterable
      microturbulence (km/s) (default is taken from the model atmosphere)
  vrot: float, can be an iterable
      projected rotational velocity (km/s) (default 0.)
  fwhm: float, can be an iterable
      Gaussian broadening: macroturbulence, instrumental, etc. (angstroms)
      (default 0.)
  nfe: float, can be an iterable
      [N/Fe] nitrogen abundance change from the one specified in 'abu' (dex)
      (default 0.)
  linelist: array of str
      filenames of the line lists: the atomic one first, then (optionally)
      molecular ones (default ['gfallx3_bpo.19','kmol3_0.01_30.20'])
  atom: str
      'ap18', 'yo19' or 'hhm' opacity setup (default 'ap18')
  steprot: float
      wavelength step for convolution with rotational kernel (angstroms)
      set to 0. for automatic adjustment (default 0.)
  stepfwhm: float
      wavelength step for Gaussian convolution (angstroms)
      set to 0. for automatic adjustment (default 0.)
  clean: bool
      set to False to keep the synspec temporary files/links (default True)
  save: bool
      set to True to save the computed spectra to files (default None)

  Returns
  -------
  (None, None, None)
  """

  #synspec does not currently run in parallel
  nthreads = 1

  #when vmicro, vrot, fwhm or nitrogen are not iterables, we create ones, otherwise we copy them
  try:
    nvmicro = len(vmicro)
    vmicros = vmicro
  except TypeError:
    nvmicro = 1
    vmicros = [ vmicro ]
  try:
    nvrot = len(vrot)
    vrots = vrot   # bug fix: was 'vrots = vrots' (NameError for iterable vrot)
  except TypeError:
    nvrot = 1
    vrots = [ vrot ]
  try:
    nfwhm = len(fwhm)
    fwhms = fwhm
  except TypeError:
    nfwhm = 1
    fwhms = [ fwhm ]
  try:
    nnfe = len(nfe)
    nfes = nfe     # bug fix: was 'nnfes = nfe', leaving 'nfes' undefined below
  except TypeError:
    nnfe = 1
    nfes = [ nfe ]

  idir = 0
  for entry in modelfiles:
    for vmicro1 in vmicros:
      for nfe1 in nfes:

        idir = idir + 1
        # dirname instead of 'dir' to avoid shadowing the builtin
        dirname = ( "hyd%07d" % (idir) )
        try:
          os.mkdir(dirname)
        except OSError:
          print( "cannot create dir hyd%07d" % (idir) )
        try:
          os.chdir(dirname)
        except OSError:
          print( "cannot change dir to hyd%07d" % (idir) )

        if entry == 'missing':
          pass
        else:
          #setup the slurm script
          sfile = dirname+".job"
          now=time.strftime("%c")
          s = open(sfile ,"w")
          s.write("#!/bin/bash \n")
          s.write("#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# \n")
          s.write("#This script was written by synple on "+now+" \n")
          s.write("#SBATCH -J "+dirname+" \n")
          s.write("#SBATCH -o "+dirname+"_%j.out"+" \n")
          s.write("#SBATCH -e "+dirname+"_%j.err"+" \n")
          s.write("#SBATCH -n "+str(nthreads)+" \n")
          s.write("#SBATCH -t 04:00:00"+" \n") #hh:mm:ss
          s.write("#SBATCH -D "+os.path.abspath(os.curdir)+" \n")
          s.write("#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# \n\n\n")

          abu1 = copy.copy(abu)

          #if need be, adjust nitrogen abundance according to nfe
          if (abs(nfe1) > 1e-7):
            # 'is None' rather than '== None': abu1 may be a numpy array
            if abu1 is None:
              checksynspec(linelist,entry)
              atmostype, teff, logg, vmicro2, abu1, nd, atmos = read_model(entry)
            abu1[6] = abu1[6] * 10.**nfe1

          # compute=False only writes the synspec input files (fort.5, fort.55, ...)
          x, y, z = syn(entry, wrange, dw=None, strength=strength, vmicro=vmicro1, \
                        abu=abu1, linelist=linelist, atom=atom, compute=False)

          s.write(synspec+" < "+"fort.5"+"\n")

          # read the wavelength step synspec will use (6th line, 6th entry of fort.55)
          si = open("fort.55",'r')
          for i in range(6): line = si.readline()
          entries = line.split()
          space = float(entries[5])
          si.close()

          iconv = 0
          for vrot1 in vrots:
            for fwhm1 in fwhms:

              print('iconv=',iconv)

              iconv = iconv + 1
              inconv = ("%07dfort.5" % (iconv) )
              outconv = ("'%07dfort.7'" % (iconv) )
              if fwhm1 > 0. or vrot1 > 0.:
                f = open(inconv,'w')
                f.write( ' %s %s %s \n' % ("'fort.7'", "'fort.17'", outconv) )
                f.write( ' %f %f %f \n' % (vrot1, space, steprot) )
                f.write( ' %f %f \n' % (fwhm1, stepfwhm) )
                print('stepfwhm=',stepfwhm)
                f.write( ' %f %f %i \n' % (wrange[0], wrange[1], 0) )
                f.close()
                s.write(rotin+" < "+inconv+"\n")
              else:
                # no convolution needed: just copy synspec's output
                s.write("cp "+" fort.7 "+outconv[1:-1]+"\n")

          s.close()
          os.chmod(sfile ,0o755)

        try:
          os.chdir('..')
        except OSError:
          print( "cannot exit dir hyd%07d" % (idir) )

  return(None,None,None)
def polyopt(wrange=(9.e2,1.e5),dw=0.1,strength=1e-3, linelist=['gfallx3_bpo.19','kmol3_0.01_30.20'], \
    tlt = (20,3.08,0.068), tlrho = (20,-14.0,0.59), \
    tfeh=(1,0.0,0.0), tafe=(1,0.0,0.0), tcfe=(1,0.0,0.0), tnfe=(1,0.0,0.0), \
    tofe=(1,0.0,0.0), trfe=(1,0.0,0.0), tsfe=(1,0.0,0.0), tvmicro=(1,1.0,0.0), \
    zexclude=None, atom='ap18'):

  """Sets up a directory tree for computing opacity tables for TLUSTY. The table collection forms
  a regular grid defined by triads in various parameters. Each triad has three values
  (n, llimit, step) that define an array x = np.arange(n)*step + llimit.

  One numbered directory ('hyd0000001', ...) is created per abundance/vmicro
  combination, each containing the synspec input files and a slurm job script.
  Nothing is computed here.

  Parameters
  ----------
  wrange: tuple or list of two floats
      initial and ending wavelengths (angstroms)
  dw: float
      wavelength step for the opacity table (default 0.1)
  strength: float, optional
      threshold in the line-to-continuum opacity ratio for
      selecting lines (default is 1e-3)
  linelist: array of str
      filenames of the line lists: the atomic one first, then (optionally)
      molecular ones (default ['gfallx3_bpo.19','kmol3_0.01_30.20'])
  tlt: tuple
      log10(T) triad (n, llimit, step) for the opacity grid
      (default chosen for lt = np.arange(20)*0.068 + 3.08,
      covering the DR16 APOGEE MARCS grids)
  tlrho: tuple
      log10(rho) triad (n, llimit, step) for the opacity grid
      (default chosen for lrho = np.arange(20)*0.59 - 14.0,
      covering the DR16 APOGEE MARCS grids)
  tfeh: tuple
      [Fe/H] triad
  tafe: tuple
      [alpha/Fe] triad
  tcfe: tuple
      [C/Fe] triad
  tnfe: tuple
      [N/Fe] triad
  tofe: tuple
      [O/Fe] triad
  trfe: tuple
      [r/Fe] triad (r-elements abundance ratio)
  tsfe: tuple
      [s/Fe] triad (s-elements abundance ratio)
  tvmicro: tuple
      microturbulence (km/s) triad
  zexclude: list
      atomic numbers of the elements whose opacity is NOT to be
      included in the table (default None)
  atom: str
      'ap18', 'yo19' or 'hhm' opacity setup (default 'ap18')
  """

  #synspec does not currently run in parallel
  nthreads = 1

  #expanding the triads t* into iterables; a scalar triad raises TypeError in
  #len(), a wrong-length one fails the assert (which deliberately propagates)
  try:
    nfeh = len(tfeh)
    assert (nfeh == 3), 'Error: feh triad must have three elements (n, llimit, step)'
    fehs = np.arange(tfeh[0])*tfeh[2] + tfeh[1]
  except TypeError:
    print('Error: feh triad must have three elements (n, llimit, step)')
    return ()
  try:
    nafe = len(tafe)
    assert (nafe == 3), 'Error: afe triad must have three elements (n, llimit, step)'
    afes = np.arange(tafe[0])*tafe[2] + tafe[1]
  except TypeError:
    print('Error: afe triad must have three elements (n, llimit, step)')
    return ()
  try:
    ncfe = len(tcfe)
    assert (ncfe == 3), 'Error: cfe triad must have three elements (n, llimit, step)'
    cfes = np.arange(tcfe[0])*tcfe[2] + tcfe[1]
  except TypeError:
    print('Error: cfe triad must have three elements (n, llimit, step)')
    return ()
  try:
    nnfe = len(tnfe)
    assert (nnfe == 3), 'Error: nfe triad must have three elements (n, llimit, step)'
    nfes = np.arange(tnfe[0])*tnfe[2] + tnfe[1]
  except TypeError:
    print('Error: nfe triad must have three elements (n, llimit, step)')
    return ()
  try:
    nofe = len(tofe)
    assert (nofe == 3), 'Error: ofe triad must have three elements (n, llimit, step)'
    ofes = np.arange(tofe[0])*tofe[2] + tofe[1]
  except TypeError:
    print('Error: ofe triad must have three elements (n, llimit, step)')
    return ()
  try:
    nrfe = len(trfe)
    assert (nrfe == 3), 'Error: rfe triad must have three elements (n, llimit, step)'
    rfes = np.arange(trfe[0])*trfe[2] + trfe[1]
  except TypeError:
    print('Error: rfe triad must have three elements (n, llimit, step)')
    return ()
  try:
    nsfe = len(tsfe)
    assert (nsfe == 3), 'Error: sfe triad must have three elements (n, llimit, step)'
    sfes = np.arange(tsfe[0])*tsfe[2] + tsfe[1]
  except TypeError:
    print('Error: sfe triad must have three elements (n, llimit, step)')
    return ()
  try:
    nvmicro = len(tvmicro)
    assert (nvmicro == 3), 'Error: vmicro triad must have three elements (n, llimit, step)'
    vmicros = np.arange(tvmicro[0])*tvmicro[2] + tvmicro[1]
  except TypeError:
    print('Error: vmicro triad must have three elements (n, llimit, step)')
    return ()

  #ranges for the opacity table
  try:
    nlt = len(tlt)
    assert (nlt == 3), 'Error: lt triad must have three elements (n, llimit, step)'
    lt = np.arange(tlt[0])*tlt[2] + tlt[1] #log10(T)
  except TypeError:
    print('Error: tlt triad must have three elements (n, llimit, step)')
    return ()
  try:
    nlrho = len(tlrho)
    assert (nlrho == 3), 'Error: lrho triad must have three elements (n, llimit, step)'
    lrho = np.arange(tlrho[0])*tlrho[2] + tlrho[1] #log10(density)
  except TypeError:
    print('Error: tlrho triad must have three elements (n, llimit, step)')
    return ()

  symbol, mass, sol = elements()
  z_metals = np.arange(97,dtype=int) + 3
  #Ar usually included among alphas in MARCS and not in Kurucz/Meszaros
  z_alphas = np.array([8,10,12,14,16,18,20,22],dtype=int)
  # rs increases: notes and data below from comments in the MARCS code (provided by B.Edvardsson)
  # Fractional r-process abundance for Ga-Bi (r+s simply assumed == 100%) | Date 2000-01-18
  # (Note: Ga-Sr (31-38) was just copied from Kaeppeler et al. 1989, below)
  # s-process from Stellar models: Arlandini C., Kaeppeler F., Wisshak K.,
  # Gallino R., Busso M., Straniero O., 1999, Astrophys J. 525, 886-900
  # Fractions corrected to the revised meteoritic abundances
  # of Grevesse N., Sauval A.J. 1998, Space Science Review 85, 161-174
  # -0.99 is assigned to unstable elements
  z_rs = np.arange(62,dtype=int) + 31
  rfrac= np.array([.43, .47, .81, .85, .39, .47,
                   .41, .11, .08, .17, .15, .50,-.99, .68, .86,
                   .54, .80, .48, .65, .35, .75, .83, .80, .80,
                   .85, .19, .38, .23, .51, .44,-.99, .71, .93,
                   .85, .93, .85, .92, .83, .87, .67, .80, .44,
                   .59, .44, .91, .91, .99, .95, .94, .41, .24,
                   .54, .95,-.99,-.99,-.99,-.99,-.99,-.99, 1.0,
                   -.99, 1.0], dtype=float)

  idir = 0
  for feh in fehs:
    for afe in afes:
      for cfe in cfes:
        for nfe in nfes:
          for ofe in ofes:
            for rfe in rfes:
              for sfe in sfes:
                for vmicro in vmicros:

                  print(feh,afe,cfe,nfe,ofe,rfe,sfe)

                  idir = idir + 1
                  dir = ( "hyd%07d" % (idir) )
                  try:
                    os.mkdir(dir)
                  except OSError:
                    print( "cannot create dir hyd%07d" % (idir) )
                  try:
                    os.chdir(dir)
                  except OSError:
                    print( "cannot change dir to hyd%07d" % (idir) )

                  #check input parameters are valid (imode itself is unused here)
                  imode = checkinput(wrange, vmicro, linelist)

                  #setup the slurm script
                  sfile = dir+".job"
                  now=time.strftime("%c")
                  s = open(sfile ,"w")
                  s.write("#!/bin/bash \n")
                  s.write("#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# \n")
                  s.write("#This script was written by synple on "+now+" \n")
                  s.write("#SBATCH -J "+dir+" \n")
                  s.write("#SBATCH -o "+dir+"_%j.out"+" \n")
                  s.write("#SBATCH -e "+dir+"_%j.err"+" \n")
                  s.write("#SBATCH -n "+str(nthreads)+" \n")
                  s.write("#SBATCH --ntasks-per-node "+str(4)+" \n")
                  s.write("#SBATCH -t 48:00:00"+" \n") #hh:mm:ss
                  s.write("#SBATCH -D "+os.path.abspath(os.curdir)+" \n")
                  s.write("#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# \n\n\n")

                  #scale the solar abundances by the requested ratios
                  abu = copy.copy(sol)
                  if (abs(feh) > 1e-7):
                    for i in range(len(z_metals)):
                      abu[z_metals[i] - 1] = abu[z_metals[i] - 1] * 10.**feh
                  if (abs(afe) > 1e-7):
                    for i in range(len(z_alphas)):
                      abu[z_alphas[i] - 1] = abu[z_alphas[i] - 1] * 10.**afe
                  if (abs(cfe) > 1e-7): abu[5] = abu[5] * 10.**cfe
                  if (abs(nfe) > 1e-7): abu[6] = abu[6] * 10.**nfe
                  if (abs(ofe) > 1e-7): abu[7] = abu[7] * 10.**ofe
                  if (abs(rfe) > 1e-7):
                    for i in range(len(z_rs)):
                      if rfrac[i] > 0.0: abu[z_rs[i] - 1] = abu[z_rs[i] - 1] * rfrac[i] * 10.**rfe
                  if (abs(sfe) > 1e-7):
                    for i in range(len(z_rs)):
                      if rfrac[i] > 0.0: abu[z_rs[i] - 1] = abu[z_rs[i] - 1] * (1.0 - rfrac[i]) * 10.**sfe

                  #write the synspec input files for an opacity-table run (imode=-3)
                  write55(wrange,dw=dw,imode=-3,hydprf=0, strength=strength, vmicro=vmicro, linelist=linelist)
                  write5(9999.,9.9,abu,atom)
                  writetas('tas',1,linelist)
                  write2(lt,lrho,wrange,filename='opt.dat', \
                         strength=strength,inttab=1)
                  # 'is not None': identity test is the correct check for None
                  if zexclude is not None:
                    write3(zexclude)
                  create_links(linelist)

                  s.write('time ' + synspec + " < "+"fort.5"+"\n")
                  s.close()
                  os.chmod(sfile ,0o755)

                  try:
                    os.chdir('..')
                  except OSError:
                    print( "cannot exit dir hyd%07d" % (idir) )

  return()
def collect_marcs(modeldir=modeldir, tteff=None, tlogg=None, tfeh=(1,0.0,0.0), tafe=(1,0.0,0.0), \
    tcfe=(1,0.0,0.0), tnfe=(1,0.0,0.0), tofe=(1,0.0,0.0), trfe=(1,0.0,0.0), tsfe=(1,0.0,0.0), \
    ignore_missing_models=False):

  """Collects all the MARCS models in modeldir that are part of a regular grid defined
  by triads in various parameters. Each triad has three values (n, llimit, step)
  that define an array x = np.arange(n)*step + llimit. Triads in teff (tteff) and logg
  (tlogg) are mandatory. Triads in [Fe/H] (tfeh), [alpha/Fe] (tafe), [C/Fe] (tcfe),
  [N/Fe] (tnfe), [O/Fe] (tofe), [r/Fe] (trfe), and [s/Fe] (tsfe) are optional since
  arrays with just one 0.0 are included by default.

  A summary is also written to 'files.txt' in the current directory.

  Parameters
  ----------
  modeldir: str
      directory where model atmosphere files are
  tteff: tuple
      Teff triad (n, llimit, step)
  tlogg: tuple
      logg triad (n, llimit, step)
  tfeh: tuple
      [Fe/H] triad
  tafe: tuple
      [alpha/Fe] triad
  tcfe: tuple
      [C/Fe] triad
  tnfe: tuple
      [N/Fe] triad
  tofe: tuple
      [O/Fe] triad
  trfe: tuple
      [r/Fe] triad (r-elements abundance ratio)
  tsfe: tuple
      [s/Fe] triad (s-elements abundance ratio)
  ignore_missing_models: bool
      set to True to avoid stopping when a model is missing,
      in which case the string 'missing' is entered in the returned list

  Returns
  -------
  files: list of str
      file names with MARCS models that are in modeldir and match
      the parameters in the requested grid
  """

  #expanding the triads t* into iterables
  try:
    nteff = len(tteff)
    assert (nteff == 3), 'Error: Teff triad must have three elements (n, llimit, step)'
    teffs = np.arange(tteff[0])*tteff[2] + tteff[1]
  except TypeError:
    print('Error: Teff triad must have three elements (n, llimit, step)')
    return ()
  try:
    nlogg = len(tlogg)
    assert (nlogg == 3), 'Error: logg triad must have three elements (n, llimit, step)'
    loggs = np.arange(tlogg[0])*tlogg[2] + tlogg[1]
  except TypeError:
    print('Error: logg triad must have three elements (n, llimit, step)')
    return ()
  try:
    nfeh = len(tfeh)
    assert (nfeh == 3), 'Error: feh triad must have three elements (n, llimit, step)'
    fehs = np.arange(tfeh[0])*tfeh[2] + tfeh[1]
  except TypeError:
    print('Error: feh triad must have three elements (n, llimit, step)')
    return ()
  try:
    nafe = len(tafe)
    assert (nafe == 3), 'Error: afe triad must have three elements (n, llimit, step)'
    afes = np.arange(tafe[0])*tafe[2] + tafe[1]
  except TypeError:
    print('Error: afe triad must have three elements (n, llimit, step)')
    return ()
  try:
    ncfe = len(tcfe)
    assert (ncfe == 3), 'Error: cfe triad must have three elements (n, llimit, step)'
    cfes = np.arange(tcfe[0])*tcfe[2] + tcfe[1]
  except TypeError:
    print('Error: cfe triad must have three elements (n, llimit, step)')
    return ()
  try:
    nnfe = len(tnfe)
    assert (nnfe == 3), 'Error: nfe triad must have three elements (n, llimit, step)'
    nfes = np.arange(tnfe[0])*tnfe[2] + tnfe[1]
  except TypeError:
    print('Error: nfe triad must have three elements (n, llimit, step)')
    return ()
  try:
    nofe = len(tofe)
    assert (nofe == 3), 'Error: ofe triad must have three elements (n, llimit, step)'
    ofes = np.arange(tofe[0])*tofe[2] + tofe[1]
  except TypeError:
    print('Error: ofe triad must have three elements (n, llimit, step)')
    return ()
  try:
    nrfe = len(trfe)
    assert (nrfe == 3), 'Error: rfe triad must have three elements (n, llimit, step)'
    rfes = np.arange(trfe[0])*trfe[2] + trfe[1]
  except TypeError:
    print('Error: rfe triad must have three elements (n, llimit, step)')
    return ()
  try:
    nsfe = len(tsfe)
    assert (nsfe == 3), 'Error: sfe triad must have three elements (n, llimit, step)'
    sfes = np.arange(tsfe[0])*tsfe[2] + tsfe[1]
  except TypeError:
    print('Error: sfe triad must have three elements (n, llimit, step)')
    return ()

  files = []

  fi = open('files.txt','w')

  for teff in teffs:
    for logg in loggs:
      for feh in fehs:
        for afe in afes:
          for cfe in cfes:
            for nfe in nfes:
              for ofe in ofes:
                for rfe in rfes:
                  for sfe in sfes:

                    print(teff,logg,feh,afe,cfe,nfe,ofe,rfe,sfe)

                    code = 'm*_t*_x3'
                    # 'p' (plane-parallel) for high gravity, 's' (spherical) otherwise
                    # -- NOTE(review): presumably following MARCS naming; confirm
                    if logg >= 3.5:
                      a1 = 'p'
                    else:
                      a1 = 's'

                    filename = ("%s%4i_g%+.1f_%s_z%+.2f_a%+.2f_c%+.2f_n%+.2f_o%+.2f_r%+.2f_s%+.2f.mod*" % (a1,teff,logg,code,feh,afe,cfe,nfe,ofe,rfe,sfe) )

                    file = glob.glob(os.path.join(modeldir,filename))

                    if not ignore_missing_models:
                      assert len(file) > 0, 'Cannot find model '+filename+' in modeldir '+modeldir
                      assert len(file) == 1, 'More than one model matches '+filename+' in modeldir '+modeldir
                      # bug fix: the strict branch never appended, so the
                      # fi.write below crashed with IndexError on files[-1]
                      files.append(file[0])
                    else:
                      if len(file) == 0:
                        files.append('missing')
                      else:
                        # take the first match if several exist
                        files.append(file[0])

                    # bug fix: format had 11 specifiers (a stray %s) for 10 args
                    fi.write( "%s %4i %+.1f %+.2f %+.2f %+.2f %+.2f %+.2f %+.2f %+.2f\n" % (files[-1],teff,logg,feh,afe,cfe,nfe,ofe,rfe,sfe) )

  fi.close()

  return(files)
def collect_k2odfnew(modeldir=modeldir, tteff=None, tlogg=None, tfeh=(1,0.0,0.0), tafe=(1,0.0,0.0), \
    ignore_missing_models=False):

  """Collects all the ODFNEW Castelli/Kurucz models in modeldir that are part of a regular grid defined
  by triads in various parameters. Each triad has three values (n, llimit, step)
  that define an array x = np.arange(n)*step + llimit. Triads in teff (tteff) and logg
  (tlogg) are mandatory. Triads in [Fe/H] (tfeh), and [alpha/Fe] (tafe) are optional since
  arrays with just one 0.0 are included by default.

  NOTE: There are ODFNEW models with only afe=[alpha/Fe]=0.0 or 0.4. The latter are used whenever
  afe takes values > 0.0, while the afe=0.0 models are used otherwise.

  A summary is also written to 'files.txt' in the current directory.

  Parameters
  ----------
  modeldir: str
      directory where model atmosphere files are
  tteff: tuple
      Teff triad (n, llimit, step)
  tlogg: tuple
      logg triad (n, llimit, step)
  tfeh: tuple
      [Fe/H] triad
  tafe: tuple
      [alpha/Fe] triad
  ignore_missing_models: bool
      set to True to avoid stopping when a model is missing,
      in which case the string 'missing' is entered in the returned list

  Returns
  -------
  files: list of str
      file names with Kurucz ODFNEWS models that are in modeldir and match
      the parameters in the requested grid
  """

  #expanding the triads t* into iterables
  try:
    nteff = len(tteff)
    assert (nteff == 3), 'Error: Teff triad must have three elements (n, llimit, step)'
    teffs = np.arange(tteff[0])*tteff[2] + tteff[1]
  except TypeError:
    print('Error: Teff triad must have three elements (n, llimit, step)')
    return ()
  try:
    nlogg = len(tlogg)
    assert (nlogg == 3), 'Error: logg triad must have three elements (n, llimit, step)'
    loggs = np.arange(tlogg[0])*tlogg[2] + tlogg[1]
  except TypeError:
    print('Error: logg triad must have three elements (n, llimit, step)')
    return ()
  try:
    nfeh = len(tfeh)
    assert (nfeh == 3), 'Error: feh triad must have three elements (n, llimit, step)'
    fehs = np.arange(tfeh[0])*tfeh[2] + tfeh[1]
  except TypeError:
    print('Error: feh triad must have three elements (n, llimit, step)')
    return ()
  try:
    nafe = len(tafe)
    assert (nafe == 3), 'Error: afe triad must have three elements (n, llimit, step)'
    afes = np.arange(tafe[0])*tafe[2] + tafe[1]
  except TypeError:
    print('Error: afe triad must have three elements (n, llimit, step)')
    return ()

  files = []

  fi = open('files.txt','w')

  for teff in teffs:
    for logg in loggs:
      for feh in fehs:
        for afe in afes:

          print(teff,logg,feh,afe)

          code = 'k2odfnew.dat'
          # alpha-enhanced ('a' prefix) files only exist for afe=0.4; they are
          # used for any afe > 0.0
          if afe > 0.0:
            a1 = 'a'
          else:
            a1 = ''
          # 'am'/'ap' encodes the sign of [Fe/H] in the Kurucz file names
          if feh < 0.0:
            a2 = 'am'
          else:
            a2 = 'ap'

          filename = ("t%05ig%.1f%s%02i%s" % (teff,logg,a2,int(abs(feh)*10),a1+code) )

          file = glob.glob(os.path.join(modeldir,filename))

          if not ignore_missing_models:
            assert len(file) > 0, 'Cannot find model '+filename+' in modeldir '+modeldir
            assert len(file) == 1, 'More than one model matches '+filename+' in modeldir '+modeldir
            # bug fix: the strict branch never appended, so the fi.write
            # below crashed with IndexError on files[-1]
            files.append(file[0])
          else:
            if len(file) == 0:
              files.append('missing')
            else:
              # take the first match if several exist
              files.append(file[0])

          fi.write( "%s %4i %+.1f %+.2f %+.2f \n" % (files[-1],teff,logg,feh,afe) )

  fi.close()

  return(files)
def getallt(modelfiles):

  """Collects all the values for temperature, density and electron number density
  in a list of files with model atmospheres

  Parameters
  ----------
  modelfiles : list of str
      files with model atmospheres (read with read_marcs_model2)

  Returns
  -------
  t: list
      all temperatures ('t' column) from all layers of the input models
  rho: list
      all values of the 'rho' column (density) from all layers of the input
      models -- the original docstring called this "gas pressure", which does
      not match the 'rho' key actually read
  ne: list
      all values of electron number density ('ne' column) from all layers of
      the input models
  """

  t = []
  rho = []
  ne = []
  for entry in modelfiles:
    print('reading ',entry)
    teff, logg, vmicro, abu, nd, atmos = read_marcs_model2(entry)
    # extend() appends each layer's value, same as the element-wise loop
    t.extend(atmos['t'])
    rho.extend(atmos['rho'])
    ne.extend(atmos['ne'])

  return(t,rho,ne)
def call_rotin(wave=None, flux=None, vrot=0.0, fwhm=0.0, space=1e-2, steprot=0.0, stepfwhm=0.0, clean=True, reuseinputfiles=False):
    """Convolves a synthetic spectrum with a rotation and/or Gaussian kernel.

    Interface to the fortran code rotin.

    Parameters
    ----------
    wave: numpy array of floats
        wavelengths (angstroms)
    flux: numpy array of floats
        flux
    vrot: float
        projected rotational velocity (km/s)
        (default 0.)
    space: float, optional
        characteristic wavelength scale for variations in the spectrum (angstroms)
        (default is 1e-2)
    steprot: float
        wavelength step for convolution with rotational kernel (angstroms)
        set to 0. for automatic adjustment (default 0.)
    fwhm: float
        Gaussian broadening: macroturbulence, instrumental, etc. (angstroms)
        (default 0.)
    stepfwhm: float
        wavelength step for Gaussian convolution (angstroms)
        set to 0. for automatic adjustment (default 0.)
    clean: bool
        True by default, set to False to avoid the removal of the rotin
        temporary files (default True)
    reuseinputfiles: bool
        set to take the input data from the output synspec file (fort.7) rather than
        from the input arrays (wave, flux)

    Returns
    -------
    wave2: numpy array of floats
        wavelengths (angstroms)
    flux2: numpy array of floats
        flux
    """

    if reuseinputfiles == False:
        # dump the spectrum (fort.7) and a flat continuum proxy at the
        # maximum flux (fort.17) for rotin to read
        f = open('fort.7','w')
        f2 = open('fort.17','w')
        maxflux = np.max(flux)
        for i in range(len(wave)):
            f.write( ' %f %f \n' % (wave[i], flux[i]) )
            f2.write( ' %f %f \n' % (wave[i], maxflux) )
        f.close()
        f2.close()

    # rotin control file (fort.5): unit names, rotation setup,
    # Gaussian setup, and the output wavelength window
    f = open('fort.5','w')
    f.write( ' %s %s %s \n' % ("'fort.7'", "'fort.17'", "'fort.11'") )
    f.write( ' %f %f %f \n' % (vrot, space, steprot) )
    f.write( ' %f %f \n' % (fwhm, stepfwhm) )
    print('stepfwhm=',stepfwhm)
    f.write( ' %f %f %i \n' % (np.min(wave), np.max(wave), 0) )
    f.close()

    # run the rotin binary, appending its terminal output to syn.log
    synin = open('fort.5')
    synout = open('syn.log','a')
    p = subprocess.Popen([rotin], stdin=synin, stdout = synout, stderr = synout)
    p.wait()
    synout.flush()
    synout.close()
    synin.close()
    assert (os.path.isfile('fort.11')), 'Error: I cannot read the file *fort.11* in '+tmpdir+' -- looks like rotin has crashed, please look at syn.log'

    # fort.11 holds the convolved spectrum
    wave2, flux2 = np.loadtxt('fort.11', unpack=True)
    print(len(wave),len(wave2))
    if clean == True: cleanup()

    return(wave2, flux2)
def read_model(modelfile):
    """Reads a model atmosphere into a structure.

    Parameters
    ----------
    modelfile : str
        file with a model atmosphere; searched for in the current
        directory and then in modeldir

    Returns
    -------
    atmostype : str
        type of model atmosphere (kurucz/marcs/phoenix)
    teff : float
        effective temperature (K)
    logg : float
        log10 of the surface gravity (cm s-2)
    vmicro : float
        microturbulence velocity (km/s)
    abu : list
        abundances, number densities of nuclei relative to hydrogen N(X)/N(H)
        for elements Z=1,99 (H to Es)
    nd: int
        number of depths (layers) of the model
    atmos: numpy structured array
        array with the run with depth of column mass, temperature, gas pressure
        and electron density
    """

    # resolve the file name against modeldir when not found as given
    if not os.path.isfile(modelfile):
        candidate = os.path.join(modeldir, modelfile)
        if os.path.isfile(candidate):
            modelfile = candidate

    atmostype = identify_atmostype(modelfile)

    # dispatch to the reader matching the detected format
    readers = {'kurucz': read_kurucz_model,
               'marcs': read_marcs_model2,
               'phoenix': read_phoenix_model}
    teff, logg, vmicro, abu, nd, atmos = readers[atmostype](modelfile)

    return (atmostype, teff, logg, vmicro, abu, nd, atmos)
def identify_atmostype(modelfile):
    """Identifies the type of model atmosphere in an input file.

    Valid options are kurucz, marcs or phoenix.

    Parameters
    ----------
    modelfile: str
        file with a model atmosphere

    Returns
    -------
    atmostype: str
        can take the value 'kurucz', 'marcs' or 'phoenix'
        ('tlusty' soon to be added!)
    """

    # Phoenix models are recognized from the file name alone (FITS files)
    if ('PHOENIX' in modelfile and 'fits' in modelfile):
        atmostype = 'phoenix'
    else:
        # plain-text models: inspect the first line of the (possibly
        # gzipped) file -- Kurucz headers contain 'TEFF', otherwise
        # the model is assumed to be MARCS
        if modelfile[-3:] == '.gz':
            f = gzip.open(modelfile, 'rt')
        else:
            f = open(modelfile, 'r')
        line = f.readline()
        if ('TEFF' in line):
            atmostype = 'kurucz'
        else:
            atmostype = 'marcs'
        f.close()
    return (atmostype)
def checksynspec(linelist,modelfile):
    """Checking that executables and data are where they should be.

    Parameters
    ----------
    linelist: array of str
        file names of the line lists to be used. The first string should correspond
        to the atomic line list and is mandatory. The remainder are optional and
        correspond to molecular line lists. All files should be in synspec format.
        (see documentation at http://nova.astro.umd.edu/Synspec43/synspec.html)
    modelfile: str
        file with the model atmosphere; searched for in the current
        directory and then under modeldir

    Returns
    -------
    True when every directory, executable, line list and the model
    atmosphere file exist (an AssertionError is raised otherwise)
    """

    # required directories (module-level configuration globals)
    dirs = [synpledir,modelatomdir,linelistdir,bindir]
    for entry in dirs: assert (os.path.isdir(entry)), 'dir '+entry+' missing'

    # required executables plus the line lists, resolved against linelistdir
    files = [synspec,rotin]
    for entry in linelist:
        if not os.path.isfile(entry):
            ll = os.path.join(linelistdir,entry)
            if os.path.isfile(ll): files.append(ll)
    for entry in files: assert (os.path.isfile(entry)), 'file '+entry+' missing'

    # NOTE(review): this reassignment is local only -- the resolved path is
    # not returned, so callers must repeat the modeldir lookup themselves
    if not os.path.isfile(modelfile):
        mf = os.path.join(modeldir,modelfile)
        if os.path.isfile(mf): modelfile = mf

    print(modeldir)
    print(modelfile)
    assert (os.path.isfile(modelfile)),'model atmosphere file '+modelfile+' missing'

    return(True)
def checkinput(wrange, vmicro, linelist):
    """Checking input parameters from user.

    Parameters
    ----------
    wrange: tuple or list of two floats
        initial and ending wavelengths (angstroms)
    vmicro: float, optional
        microturbulence (km/s)
        (default is taken from the model atmosphere)
    linelist: array of str
        filenames of the line lists, the first one corresponds to
        the atomic lines and all the following ones (optional) to
        molecular lines
        (default ['gfallx3_bpo.19','kmol3_0.01_30.20'] from Allende Prieto+ 2018)
        NOTE: linelist[0] is replaced in place by its resolved path when
        the file is found under linelistdir

    Returns
    ------
    imode: int
        appropriate value for the variable imode, which specifies whether
        one will use many atomic lines (imode=0), just a few (imode=1),
        or none (H lines are an exception; imode=2)
    """

    #determine imode
    # imode = 0 is default, atoms and molecules, at least 2 line lists
    # synple sets IFMOL = 1 in 'tas' when an input molecular line list is used
    # but does not set it when only an atomic line list is given
    # imode = 2 for pure continuum
    # imode = 1 for few-lines mode
    # imode = -3 for regular opacity tables (TLUSTY)
    if len(linelist) == 0:
        imode = 2  # no atomic or molecular line list -> pure continuum and no molecules
    else:
        #find range of atomic line list
        if not os.path.isfile(linelist[0]):
            ll = os.path.join(linelistdir,linelist[0])
            if os.path.isfile(ll): linelist[0] = ll
        nlines, minlambda, maxlambda = getlinelistrange(linelist[0])

        # check the requested window against the line-list coverage;
        # very short lists (<= 10 lines) switch to few-lines mode instead
        if nlines > 10:
            assert (wrange[0] > minlambda-1 and wrange[1] < maxlambda+1),'wrange exceeds the allow range ('+str(minlambda)+' to '+str(maxlambda)+')'
            imode = 0
        else:
            imode = 1

    assert (vmicro >= 0.0),'vmicro = '+str(vmicro)+' but cannot < 0.'

    return(imode)
def getlinelistrange(atomiclinelist):
    """Estimate the number of lines and the wavelength coverage of a
    synspec atomic line list.

    The first wavelength is taken from the first line of the file and the
    last one is read after seeking to 103 bytes before the end of the
    file (line-list records have a fixed width). Wavelengths in the file
    are in nm and are returned in angstroms. The line count is estimated
    from the file size at roughly 100 bytes per record.
    """
    fsize = os.path.getsize(atomiclinelist)
    with open(atomiclinelist, 'r') as fh:
        # first token of the first record: starting wavelength (nm -> A)
        minlambda = float(fh.readline().split()[0]) * 10.
        # jump close to the end to pick up the last record's wavelength
        fh.seek(fsize - 103)
        maxlambda = float(fh.readline().split()[0]) * 10.
    nlines = int(0.01 * fsize)
    return (nlines, minlambda, maxlambda)
def writetas(filename,nd,linelist):
    """Write the synspec non-standard parameter file.

    filename -- str  -- name of the non-std. param. file to create
    nd       -- int  -- number of layers in the model
    linelist -- list -- names of the line-list files (atomic first, then
                        one or more molecular ones); molecules (IFMOL)
                        are enabled only when more than one list is given
    """
    with open(filename, 'w') as fh:
        fh.write("ND= "+str(nd)+" \n")
        if len(linelist) > 1:
            fh.write("IFMOL= "+one+" \n")
        fh.write("TMOLIM= 8000. \n")
    return ()
def write3(zexclude):
    """Write fort.3 listing the atomic numbers whose abundance is zeroed out."""
    with open('fort.3', 'w') as fh:
        for znum in zexclude:
            fh.write(" %d %10.4e \n" % (znum, 0.0))
    return ()
def write2(lt,lrho,wrange, filename='opt.data', dlw=2e-5, binary=False,strength=1e-4,inttab=1):
    """Write fort.2, the input controlling the opacity-table grid for TLUSTY.

    lt / lrho are the log10 temperature and log10 density grids, wrange
    is the wavelength interval (angstroms) and dlw the sampling step in
    log10(lambda). The table itself is written to *filename*, in binary
    form when *binary* is set.
    """
    # number of wavelength samples implied by the step dlw
    nsamples = int((np.log10(wrange[1]) - np.log10(wrange[0])) / dlw) + 1
    ibingr = 1 if binary == True else 0   # 1 = binary table, 0 = ascii
    quoted = "'" + filename + "'"
    with open('fort.2', 'w') as fh:
        fh.write(" %d %10.4e %10.4e \n" % (len(lt), 10.**lt[0], 10.**lt[-1]))
        fh.write(" %d \n" % (1))
        fh.write(" %d %10.4e %10.4e \n" % (len(lrho), 10.**lrho[0], 10.**lrho[-1]))
        fh.write(" %d %d %10.4e %10.4e \n" % (nsamples, inttab, wrange[0], wrange[1]))
        fh.write(" %s %d \n" % (quoted, ibingr))
    return ()
def write55(wrange,dw=1e-2,imode=0,hydprf=2,strength=1e-4,vmicro=0.0, \
    linelist=['gfallx3_bpo.19','kmol3_0.01_30.20'], atmostype='kurucz'):
    """Write fort.55, the main synspec control file.

    Card layout (one card per line):
      imode,idst,iprin
      inmod,zero,ichang,ichemc
      lyman,zero,zero,zero,zero
      one,nlte,icontl,zero,ifhe2
      ihydpr,ihe1pr,ihe2pr
      wstart,wend,cutoff,zero,strength,wdist
    """

    # inmod: 1 for tlusty/marcs structures in fort.8, 0 for kurucz-style
    if (atmostype == 'tlusty' or atmostype == 'marcs'): inmod = 1
    else: inmod = 0

    f = open('fort.55','w')
    f.write(" "+str(imode)+" "+2*zero+"\n")
    f.write(" "+str(inmod)+3*zero+"\n")
    f.write(5*zero+"\n")
    f.write(one+4*zero+"\n")
    f.write(str(hydprf)+2*zero+"\n")
    # for opacity tables (imode=-3) the ending wavelength is written with a
    # negative sign and a 100 A cutoff; otherwise the cutoff is 200 A
    if imode == -3:
        f.write( ' %f %f %f %i %e %f \n ' % (wrange[0], -wrange[1], 100., 2000, strength, dw) )
    else:
        f.write( ' %f %f %f %i %e %f \n ' % (wrange[0], wrange[1], 200., 2000, strength, dw) )
    # molecular line lists (beyond the first, atomic one) are attached to
    # fortran units 20, 21, ...
    ll = len(linelist)
    if ll < 2: f.write(2*zero)
    else: f.write(str(ll-1) + ' ' + ' '.join(map(str,np.arange(ll-1)+20)))
    f.write("\n")
    f.write( ' %f \n' % (vmicro) )
    f.close()
def write5(teff,logg,abu, atom='ap18', ofile='fort.5', nlte=False, tl=False):
    """Write the synspec/tlusty main input file (fort.5 by default).

    Parameters
    ----------
    teff : float
        effective temperature (K)
    logg : float
        log10 of the surface gravity (cm s-2)
    abu : list
        abundances, number densities of nuclei relative to hydrogen N(X)/N(H)
    atom : str
        model-atom setup: 'hhm' (H and H- only), 'yo19' (NLTE set of
        Osorio+ 2019) or 'ap18' (set of Allende Prieto+ 2018)
    ofile : str
        name of the output file (default 'fort.5')
    nlte : bool
        when True the LTE flag is written as F and H gets flag -3
    tl : bool
        when True only the first 30 elements are listed (TLUSTY mode)
    """

    symbol, mass, sol = elements()

    f = open(ofile,'w')
    # NOTE(review): str(logg).format('%7.4f') is a no-op on a plain string --
    # logg is written with default str() formatting; confirm intent
    f.write(' '+str(teff)+" "+str(logg).format('%7.4f')+" ! TEFF, GRAV \n")
    if nlte:
        f.write(" F F ! LTE, GRAY \n")
    else:
        f.write(" T F ! LTE, GRAY \n")
    f.write(" 'tas' ! name of non-standard flags \n")
    f.write(" 50 ! frequencies \n")

    # TLUSTY runs carry only the first 30 elements
    if tl:
        natom = 30
    else:
        natom = len(abu)
    f.write(" "+str(natom)+" ! NATOMS \n")

    assert (atom == 'hhm' or atom == 'ap18' or atom == 'yo19'), 'atom must be one of: hhm/ap18/yo19!'
    # per-element mode flags: 2 for elements in zex (explicit, contributing
    # continuum opacity), 1 otherwise, and -3 for H when nlte is set
    ex = np.ones(natom)
    if atom == 'hhm' :
        zex = [1]  #atomic numbers of elements included explicitly (contributing cont. opacity)
    elif atom == 'yo19':
        zex = [1,11,12,19,20]
    elif atom == 'ap18':
        zex = [1,2,6,7,8,11,12,13,14,20,26]
    for i in zex: ex[i-1] = 2
    if nlte: ex[0] = -3

    for i in range(natom):
        f.write(' %2d %e %i %s\n' % (ex[i], abu[i], 0, ' ! ' +symbol[i]) )
    for i in range(3): f.write("* \n")

    if atom == 'hhm':  # highly simplified continuum opacities -- just H and H-
        f.write(" 1 -1 1 0 0 1 ' H 1' 'data/hm.dat' \n" )
        f.write(" 0 0 3 0 \n")
        f.write(" 1 0 9 0 0 0 ' H 1' 'data/h1s.dat' \n")
        f.write(" 1 1 1 1 0 0 ' H 2' ' ' \n")
        f.write(" 0 0 0 -1 0 0 ' ' ' ' \n")
    elif atom == "yo19":  # set for NLTE calculations for APOGEE (see Osorio+ 2019 A&A paper)
        f.write("* ../data_atom for ions \n")
        f.write(" 1 -1 1 0 0 1 ' H 0' 'data_atom/hm.dat' \n")
        f.write(" 0 0 3 0 \n")
        f.write(" 1 0 16 0 0 0 ' H 1' 'data_atom/h1_16lev2.dat' \n")
        f.write(" 1 1 1 1 0 0 ' H 2' ' ' \n")
        f.write(" 11 0 42 0 0 0 'Na 1' 'data_atom/NaIkas.tl' \n")
        f.write(" 11 1 1 1 0 0 'Na 2' '' \n")
        f.write(" 12 0 96 0 0 0 'Mg 1' 'data_atom/Mg1kas_F_ccc.tl' \n")
        f.write(" 12 1 29 0 0 0 'Mg 2' 'data_atom/Mg2kas_F_ccc.tl' \n")
        f.write(" 12 2 1 1 0 0 'Mg 3' ' ' \n")
        f.write(" 19 0 31 0 0 0 'K 1' 'data_atom/KIkas.tl' \n")
        f.write(" 19 1 1 1 0 0 'K 2' '' \n")
        f.write(" 20 0 66 0 0 0 'Ca 1' 'data_atom/Ca1kas_F_zat.tl' \n")
        f.write(" 20 1 24 0 0 0 'Ca 2' 'data_atom/Ca2kas_F_zat.tl' \n")
        f.write(" 20 2 1 1 0 0 'Ca 3' ' ' \n")
        f.write(" 0 0 0 -1 0 0 ' ' ' ' \n")
    elif atom == 'ap18':  # generic set used in Allende Prieto+ (2018) A&A paper
        f.write("* ../data for ions \n")
        f.write(" 1 -1 1 0 0 1 ' H 1' 'data/hm.dat' \n")
        f.write(" 0 0 3 0 \n")
        f.write(" 1 0 9 0 0 0 ' H 1' 'data/h1s.dat' \n")
        f.write(" 1 1 1 1 0 0 ' H 2' ' ' \n")
        f.write(" 2 0 14 0 0 0 'He 1' 'data/he1.dat' \n")
        f.write(" 2 1 14 0 0 0 'He 2' 'data/he2.dat ' \n")
        f.write(" 2 2 1 1 0 0 'He 3' ' ' \n")
        f.write(" 6 0 104 0 0 0 ' C 1' 'data/c1.t' \n")
        f.write(" 6 1 40 0 0 0 ' C 2' 'data/c2.t' \n")
        f.write(" 6 2 1 1 0 0 ' C 3' ' ' \n")
        f.write(" 7 0 89 0 0 0 ' N 1' 'data/n1.t' \n")
        f.write(" 7 1 51 0 0 0 ' N 2' 'data/n2.t' \n")
        f.write(" 7 2 1 1 0 0 ' N 3' ' ' \n")
        f.write(" 8 0 54 0 0 0 ' O 1' 'data/o1.t' \n")
        f.write(" 8 1 74 0 0 0 ' O 2' 'data/o2.t' \n")
        f.write(" 8 2 1 1 0 0 ' O 3' ' ' \n")
        f.write(" 11 0 32 0 0 0 'Na 1' 'data/na1.t' \n")
        f.write(" 11 1 8 0 0 0 'Na 2' 'data/na2.t' \n")
        f.write(" 11 2 1 1 0 0 'Na 3' ' ' \n")
        f.write(" 12 0 71 0 0 0 'Mg 1' 'data/mg1.t' \n")
        f.write(" 12 1 31 0 0 0 'Mg 2' 'data/mg2.t' \n")
        f.write(" 12 2 1 1 0 0 'Mg 3' ' ' \n")
        f.write(" 13 0 33 0 0 0 'Al 1' 'data/al1.t' \n")
        f.write(" 13 1 81 0 0 0 'Al 2' 'data/al2.t' \n")
        f.write(" 13 2 1 1 0 0 'Al 3' ' ' \n")
        f.write(" 14 0 57 0 0 0 'Si 1' 'data/si1.t' \n")
        f.write(" 14 1 46 0 0 0 'Si 2' 'data/si2.t' \n")
        f.write(" 14 2 1 1 0 0 'Si 3' ' ' \n")
        f.write(" 20 0 79 0 0 0 'Ca 1' 'data/ca1.t' \n")
        f.write(" 20 1 32 0 0 0 'Ca 2' 'data/ca2.t' \n")
        f.write(" 20 2 1 1 0 0 'Ca 3' ' ' \n")
        f.write(" 26 0 49 0 0 0 'Fe 1' 'data/tlusty_fe1_topmod.dat' \n")
        f.write(" 26 1 41 0 0 0 'Fe 2' 'data/tlusty_fe2_topmod.dat' \n")
        f.write(" 26 2 1 1 0 0 'Fe 3' ' ' \n")
        f.write(" 0 0 0 -1 0 0 ' ' ' ' \n")

    f.write("* \n")
    f.write("* end \n")
    f.close()
def write8(teff, logg, nd, atmos, atmostype, ofile='fort.8'):
    """Write the model atmosphere structure to a synspec fort.8 file.

    The layout depends on atmostype:
      tlusty -- header '<nd> 3', column masses, then t/ne/rho per layer
      marcs  -- header '<nd> -4', column masses, then t/ne/rho/ntotal
                (total particle density computed from rho, mmw and ne)
      other  -- kurucz-style deck: TEFF/GRAVITY header, 21 blank lines,
                READ DECK6 header, then dm/t/p/ne per layer
    """
    fh = open(ofile, 'w')
    if atmostype == 'tlusty':
        fh.write(" "+str(nd)+" "+str(3)+"\n")
        for k in range(nd):
            fh.write(' %e ' % atmos['dm'][k])
        fh.write("\n")
        for k in range(nd):
            fh.write('%f %e %e \n' % (atmos['t'][k], atmos['ne'][k], atmos['rho'][k]))
        fh.close()
    elif atmostype == 'marcs':
        fh.write(" "+str(nd)+" "+str(-4)+"\n")
        for k in range(nd):
            fh.write(' %e ' % atmos['dm'][k])
        fh.write("\n")
        for k in range(nd):
            # total number density = nuclei (rho / (mmw * amu)) + electrons
            ntotal = atmos['rho'][k]/atmos['mmw'][k]/1.67333e-24 + atmos['ne'][k]
            fh.write('%f %e %e %e \n' % (atmos['t'][k], atmos['ne'][k], atmos['rho'][k], ntotal))
        fh.close()
    else:
        fh.write('TEFF %7.0f GRAVITY %7.5f LTE \n' % (teff, logg))
        for k in range(21):
            fh.write('\n')
        fh.write('READ DECK6%3i RHOX,T,P,XNE \n' % nd)
        for k in range(nd):
            fh.write('%e %f %e %e \n' % (atmos['dm'][k], atmos['t'][k], atmos['p'][k], atmos['ne'][k]))
        fh.close()
    return ()
def create_links(linelist):
    """Create the soft links synspec expects: fort.19 for the atomic line
    list, fort.20/21/... for molecular ones, and 'data' for the model-atom
    directory. Line-list entries are resolved (in place) against
    linelistdir when not found as given."""
    for k, entry in enumerate(linelist):
        if not os.path.isfile(entry):
            candidate = os.path.join(linelistdir, entry)
            if os.path.isfile(candidate):
                linelist[k] = entry = candidate
        if k == 0:
            os.symlink(entry, 'fort.19')
        else:
            os.symlink(entry, 'fort.' + str(20 - 1 + k))
    os.symlink(modelatomdir, './data')
    return ()
def cleanup():
    """Remove the temporary fort* files/links and the 'data' and 'tas'
    entries that a synspec/rotin run leaves in the current directory."""
    for entry in os.listdir('.'):
        if entry.startswith('fort'):
            if os.path.islink(entry):
                os.unlink(entry)
            elif os.path.isfile(entry):
                os.remove(entry)
    if os.path.islink('data'):
        os.unlink('data')
    if os.path.isfile('tas'):
        os.remove('tas')
    # a real 'data' directory would collide with the link created later
    assert (not os.path.isdir('data')), 'A subdirectory *data* exists in this folder, and that prevents the creation of a link to the data directory for synple'
    return ()
def read_kurucz_model(modelfile):
    """Reads a Kurucz model atmosphere.

    Parameters
    ----------
    modelfile: str
        file name

    Returns
    -------
    teff : float
        effective temperature (K)
    logg : float
        log10 of the surface gravity (cm s-2)
    vmicro : float
        microturbulence velocity (km/s)
    abu : list
        abundances, number densities of nuclei relative to hydrogen N(X)/N(H)
        for elements Z=1,99 (H to Es)
    nd: int
        number of depths (layers) of the model
    atmos: numpy structured array
        array with the run with depth of column mass, temperature, gas pressure
        and electron density
    """

    f = open(modelfile,'r')
    line = f.readline()
    entries = line.split()
    assert (entries[0] == 'TEFF' and entries[2] == 'GRAVITY'), 'Cannot find Teff and logg in the file header'
    teff = float(entries[1])
    logg = float(entries[3])

    # skip forward to the abundance section
    while entries[0] != 'ABUNDANCE':
        line = f.readline()
        entries = line.split()

    abu = []

    if entries[1] == 'SCALE':
        scale = float(entries[2])

    # collect abundances: H and He (Z < 3) are given as number fractions,
    # heavier elements as log10 values to be scaled and normalized to H
    while entries[0] == 'ABUNDANCE':
        i = 0
        for word in entries:
            if (word == 'CHANGE'): w = i
            i = i + 1
        for i in range(int((len(entries)-w-1)/2)):
            z = int(entries[w+1+2*i])
            if (z == 1): nhntot = float(entries[w+2+2*i])
            if (z < 3): abu.append(float(entries[w+2+2*i]) / nhntot)
            else: abu.append(scale*10.**(float(entries[w+2+2*i])) / nhntot)
        line = f.readline()
        entries = line.split()

    assert (entries[0] == 'READ'), 'I cannot find the header of the atmospheric table in the input Kurucz model'

    # NOTE: the first depth point in the file is read and discarded,
    # so nd is one less than the value given in the READ header
    nd = int(entries[2]) - 1
    line = f.readline()
    entries = line.split()
    line = f.readline()
    entries = line.split()
    vmicro = float(entries[6])/1e5

    dm = [ float(entries[0]) ]
    t = [ float(entries[1]) ]
    p = [ float(entries[2]) ]
    ne = [ float(entries[3]) ]

    for i in range(nd-1):
        line = f.readline()
        entries = line.split()
        dm.append( float(entries[0]))
        t.append( float(entries[1]))
        p.append( float(entries[2]))
        ne.append( float(entries[3]))

    # bug fix: the input file was previously never closed (resource leak)
    f.close()

    atmos = np.zeros(nd, dtype={'names':('dm', 't', 'p','ne'),
                                'formats':('f', 'f', 'f','f')})
    atmos['dm'] = dm
    atmos['t'] = t
    atmos['p'] = p
    atmos['ne'] = ne

    return (teff,logg,vmicro,abu,nd,atmos)
def read_marcs_model(modelfile):
    """Reads a MARCS model atmosphere.

    Parameters
    ----------
    modelfile: str
        file name. It can be a gzipped (.gz) file

    Returns
    -------
    teff : float
        effective temperature (K)
    logg : float
        log10 of the surface gravity (cm s-2)
    vmicro : float
        microturbulence velocity (km/s)
    abu : list
        abundances, number densities of nuclei relative to hydrogen N(X)/N(H)
        for elements Z=1,99 (H to Es)
    nd: int
        number of depths (layers) of the model
    atmos: numpy structured array
        array with the run with depth of column mass, temperature, gas pressure
        and electron density
    """

    if modelfile[-3:] == '.gz':
        f = gzip.open(modelfile,'rt')
    else:
        f = open(modelfile,'r')

    line = f.readline()
    line = f.readline()
    entries = line.split()
    assert (entries[1] == 'Teff'), 'Cannot find Teff in the file header'
    teff = float(entries[0])
    line = f.readline()
    line = f.readline()
    entries = line.split()
    assert (entries[1] == 'Surface' and entries[2] == 'gravity'), 'Cannot find logg in the file header'
    logg = np.log10(float(entries[0]))
    line = f.readline()
    entries = line.split()
    assert (entries[1] == 'Microturbulence'), 'Cannot find vmicro in the file header'
    vmicro = float(entries[0])

    # skip forward to the logarithmic abundance section
    while entries[0] != 'Logarithmic':
        line = f.readline()
        entries = line.split()

    abu = []
    line = f.readline()
    entries = line.split()

    # read log abundances (H=12 scale) until the 'Number of depth points' line
    i = 0
    while entries[1] != 'Number':
        for word in entries:
            abu.append( 10.**(float(word)-12.0) )
            i = i + 1
        line = f.readline()
        entries = line.split()

    # pad missing elements up to Z=99 with a negligible abundance
    if i < 99:
        for j in range(99-i):
            abu.append(1e-111)
            i = i + 1

    nd = int(entries[0])
    line = f.readline()
    entries = line.split()
    assert (entries[0] == 'Model'), 'I cannot find the header of the atmospheric table in the input MARCS model'

    # first structure table: T and Pe (converted to ne via ne = Pe/(k*T)) and Pg
    line = f.readline()
    line = f.readline()
    entries = line.split()
    t = [ float(entries[4]) ]
    p = [ float(entries[6]) ]
    ne = [ float(entries[5]) / bolk / float(entries[4]) ]
    for i in range(nd-1):
        line = f.readline()
        entries = line.split()
        t.append( float(entries[4]))
        p.append( float(entries[6]))
        ne.append( float(entries[5]) / bolk / float(entries[4]))

    # second structure table: column mass (last column on the first row)
    line = f.readline()
    line = f.readline()
    entries = line.split()
    dm = [ float(entries[-1]) ]
    for i in range(nd-1):
        line = f.readline()
        entries = line.split()
        dm.append( float(entries[7]))

    # bug fix: the input file was previously never closed (resource leak)
    f.close()

    atmos = np.zeros(nd, dtype={'names':('dm', 't', 'p','ne'),
                                'formats':('f', 'f', 'f','f')})
    atmos['dm'] = dm
    atmos['t'] = t
    atmos['p'] = p
    atmos['ne'] = ne

    return (teff,logg,vmicro,abu,nd,atmos)
def read_marcs_model2(modelfile):
    """Reads a MARCS model atmosphere.

    While read_marcs_model returns T, Pg and Ne in the structure 'atmos',
    read_marcs_model2 returns T, rho, mmw, and Ne.

    Parameters
    ----------
    modelfile: str
        file name. It can be a gzipped (.gz) file

    Returns
    -------
    teff : float
        effective temperature (K)
    logg : float
        log10 of the surface gravity (cm s-2)
    vmicro : float
        microturbulence velocity (km/s)
    abu : list
        abundances, number densities of nuclei relative to hydrogen N(X)/N(H)
        for elements Z=1,99 (H to Es)
    nd: int
        number of depths (layers) of the model
    atmos: numpy structured array
        array with the run with depth of column mass, temperature, density,
        mean molecular weight and electron number density
    """

    if modelfile[-3:] == '.gz':
        f = gzip.open(modelfile,'rt')
    else:
        f = open(modelfile,'r')

    line = f.readline()
    line = f.readline()
    entries = line.split()
    assert (entries[1] == 'Teff'), 'Cannot find Teff in the file header'
    teff = float(entries[0])
    line = f.readline()
    line = f.readline()
    entries = line.split()
    assert (entries[1] == 'Surface' and entries[2] == 'gravity'), 'Cannot find logg in the file header'
    logg = np.log10(float(entries[0]))
    line = f.readline()
    entries = line.split()
    assert (entries[1] == 'Microturbulence'), 'Cannot find vmicro in the file header'
    vmicro = float(entries[0])

    # skip forward to the logarithmic abundance section
    while entries[0] != 'Logarithmic':
        line = f.readline()
        entries = line.split()

    abu = []
    line = f.readline()
    entries = line.split()

    # read log abundances (H=12 scale) until the 'Number of depth points' line
    i = 0
    while entries[1] != 'Number':
        for word in entries:
            abu.append( 10.**(float(word)-12.0) )
            i = i + 1
        line = f.readline()
        entries = line.split()

    # pad missing elements up to Z=99 with a negligible abundance
    if i < 99:
        for j in range(99-i):
            abu.append(1e-111)
            i = i + 1

    nd = int(entries[0])
    line = f.readline()
    entries = line.split()
    assert (entries[0] == 'Model'), 'I cannot find the header of the atmospheric table in the input MARCS model'

    # first structure table: T and Pe (converted to ne via ne = Pe/(k*T)) and Pg
    line = f.readline()
    line = f.readline()
    entries = line.split()
    t = [ float(entries[4]) ]
    p = [ float(entries[6]) ]
    ne = [ float(entries[5]) / bolk / float(entries[4]) ]
    for i in range(nd-1):
        line = f.readline()
        entries = line.split()
        t.append( float(entries[4]))
        p.append( float(entries[6]))
        ne.append( float(entries[5]) / bolk / float(entries[4]))

    # second structure table: density, mean molecular weight and column mass
    line = f.readline()
    line = f.readline()
    entries = line.split()
    rho = [ float(entries[3]) ]
    dm = [ float(entries[7]) ]
    mmw = [ float(entries[4]) ]
    for i in range(nd-1):
        line = f.readline()
        entries = line.split()
        rho.append( float(entries[3]))
        dm.append( float(entries[7]))
        mmw.append( float(entries[4]))

    # bug fix: the input file was previously never closed (resource leak)
    f.close()

    atmos = np.zeros(nd, dtype={'names':('dm', 't', 'rho','mmw','ne'),
                                'formats':('f', 'f', 'f','f','f')})
    atmos['dm'] = dm
    atmos['t'] = t
    atmos['rho'] = rho
    atmos['mmw'] = mmw
    atmos['ne'] = ne

    return (teff,logg,vmicro,abu,nd,atmos)
def read_phoenix_model(modelfile):
    """Reads a FITS Phoenix model atmosphere.

    Parameters
    ----------
    modelfile: str
        file name

    Returns
    -------
    teff : float
        effective temperature (K)
    logg : float
        log10 of the surface gravity (cm s-2)
    vmicro : float
        microturbulence velocity (km/s)
    abu : list
        abundances, number densities of nuclei relative to hydrogen N(X)/N(H)
        for elements Z=1,99 (H to Es)
    nd: int
        number of depths (layers) of the model
    atmos: numpy structured array
        array with the run with depth of column mass, temperature, gas pressure
        and electron density
    """

    from astropy.io import fits

    hdr = fits.open(modelfile)[0].header
    data = fits.open(modelfile)[1].data

    ndepths = len(data['temp'])
    teff = float(hdr['PHXTEFF'])
    logg = float(hdr['PHXLOGG'])
    vmicro = float(hdr['PHXXI_L'])

    # global metallicity and alpha enhancement of the model (dex)
    m_h = float(hdr['PHXM_H'])
    alpha = float(hdr['PHXALPHA'])

    # start from the Husser et al. solar mixture and apply the scalings
    # NOTE(review): m_h and alpha (dex offsets) are added to the linear
    # N(X)/N(H) values returned by elements() -- looks suspicious, confirm
    symbol, mass, sol = elements(husser=True)
    abu = sol
    for z in np.arange(97, dtype=int) + 3:
        abu[z - 1] = abu[z - 1] + m_h
    for z in np.array([8, 10, 12, 14, 16, 20, 22], dtype=int):
        abu[z - 1] = abu[z - 1] + alpha

    atmos = np.zeros(ndepths, dtype={'names':('dm', 't', 'p','ne'),
                                     'formats':('f', 'f', 'f','f')})
    # Pgas/g used as the column-mass proxy; ne derived from Pe via Pe/(k*T)
    atmos['dm'] = data['pgas'] / 10.**logg
    atmos['t'] = data['temp']
    atmos['p'] = data['pgas']
    atmos['ne'] = data['pe'] / bolk / data['temp']

    return (teff, logg, vmicro, abu, ndepths, atmos)
def read_phoenix_text_model(modelfile):
    """Reads a plain-text Phoenix model atmosphere.

    Parameters
    ----------
    modelfile: str
        file name

    Returns
    -------
    teff : float
        effective temperature (K)
    logg : float
        log10 of the surface gravity (cm s-2)
    vmicro : float
        microturbulence velocity (km/s)
    abu : list
        abundances, number densities of nuclei relative to hydrogen N(X)/N(H)
        for elements Z=1,99 (H to Es)
    nd: int
        number of depths (layers) of the model
    atmos: numpy structured array
        array with the run with depth of column mass, temperature, gas pressure
        and electron density
    """

    # NOTE(review): the fixed-width header-matching strings below compare
    # slices longer than the literals shown here -- the literals look
    # whitespace-mangled; verify against an actual Phoenix text file
    f = open(modelfile,'r')
    line = f.readline()
    while line[0:4] != " no.":
        line = f.readline()
    entries = line.split()
    nd = int(entries[5])
    print('nd=',nd)
    while line[0:14] != " model: teff":
        line = f.readline()
    entries = line.split()
    teff = float(entries[3])
    print('teff=',teff)
    line = f.readline()
    line = f.readline()
    entries = line.split()
    assert (entries[0] == 'log(g):' and entries[2] == '[cm/s**2]'), 'Cannot find logg in the file header'
    logg = float(entries[1])
    print('logg=',logg)
    line = f.readline()
    while line[0:22] != " Element abundances :":
        line = f.readline()

    symbol,mass,sol = elements()

    # collect element symbols and their log abundances from paired
    # 'element:'/'abundance:' lines
    sy = []
    ab = []
    while line[0:29] != " Element abundances relative":
        line = f.readline()
        #print(line)
        if line[0:9] == ' element:':
            entries = line.split()
            for word in entries[1:]: sy.append(word)
        if line[0:11] == ' abundance:':
            entries = line.split()
            for word in entries[1:]: ab.append(word)

    assert (len(sy) == len(ab)), 'different elements in arrays sy (elemental symbols) and ab (abundances)'

    # convert from the log (H=12) scale to N(X)/N(H); unknown symbols are
    # reported and left at the 1e-99 floor
    abu = np.ones(99)*1e-99
    i = 0
    for item in sy:
        try:
            index = symbol.index(item)
            abu[index] = 10.**(float(ab[i])-12.)
        except ValueError:
            print("the symbol ",item," is not recognized as a valid element")
        i = i + 1

    print('abu=',abu)

    # skip forward to the structure table; Fortran D-exponents are
    # converted to E before parsing
    while line[0:72] != " l tstd temperature pgas pe density mu":
        line = f.readline()

    line = f.readline()
    entries = line.split()

    t = [ float(entries[2].replace('D','E')) ]
    p = [ float(entries[3].replace('D','E')) ]
    ne = [ float(entries[4].replace('D','E')) / bolk / float(entries[2].replace('D','E')) ]
    dm = [ float(entries[3].replace('D','E')) / 10.**logg ] #assuming hydrostatic equil. and negligible radiation and turb. pressure

    for i in range(nd-1):
        line = f.readline()
        entries = line.split()
        t.append( float(entries[2].replace('D','E')))
        p.append( float(entries[3].replace('D','E')))
        ne.append( float(entries[4].replace('D','E')) / bolk / float(entries[2]))
        dm.append ( float(entries[3].replace('D','E')) / 10.**logg )

    # vmicro (vturb) is read from the 'greli' line when present
    vmicro = 0.0
    while (line[0:6] != " greli"):
        line = f.readline()
        if line == '':
            print('Cannot find a value for vmicro (vturb) in the model atmosphere file ',modelfile)
            break

    if line != '':
        entries = line.split()
        vmicro = float(entries[5])

    atmos = np.zeros(nd, dtype={'names':('dm', 't', 'p','ne'),
                                'formats':('f', 'f', 'f','f')})
    atmos['dm'] = dm
    atmos['t'] = t
    atmos['p'] = p
    atmos['ne'] = ne

    return (teff,logg,vmicro,abu,nd,atmos)
def interp_spl(xout, x, y):
    """Interpolates in 1D using cubic splines.

    Parameters
    ----------
    x: numpy array or list
        input abscissae
    y: numpy array or list
        input ordinates
    xout: numpy array or list
        array of abscissae to interpolate to

    Returns
    -------
    yout: numpy array or list
        array of interpolated values
    """
    # build the B-spline representation (no smoothing) and evaluate it
    representation = interpolate.splrep(x, y, s=0)
    return interpolate.splev(xout, representation, der=0)
def elements(husser=False):
    """Reads the solar elemental abundances.

    Parameters
    ----------
    husser: bool, optional
        when set the abundances adopted for Phoenix models by Husser et al. (2013)
        are adopted. Otherwise Asplund et al. (2005) are used -- consistent with
        the MARCS (Gustafsson et al. 2008) and Kurucz (Meszaros et al. 2012)
        model atmospheres.

    Returns
    -------
    symbol: numpy array of str
        element symbols
    mass: numpy array of floats
        atomic masses (elements Z=1-99)
    sol: numpy array of floats
        solar abundances N/N(H)
    """

    # element symbols for Z=1..99 (H to Es)
    symbol = [
        'H' ,'He','Li','Be','B' ,'C' ,'N' ,'O' ,'F' ,'Ne',
        'Na','Mg','Al','Si','P' ,'S' ,'Cl','Ar','K' ,'Ca',
        'Sc','Ti','V' ,'Cr','Mn','Fe','Co','Ni','Cu','Zn',
        'Ga','Ge','As','Se','Br','Kr','Rb','Sr','Y' ,'Zr',
        'Nb','Mo','Tc','Ru','Rh','Pd','Ag','Cd','In','Sn',
        'Sb','Te','I' ,'Xe','Cs','Ba','La','Ce','Pr','Nd',
        'Pm','Sm','Eu','Gd','Tb','Dy','Ho','Er','Tm','Yb',
        'Lu','Hf','Ta','W' ,'Re','Os','Ir','Pt','Au','Hg',
        'Tl','Pb','Bi','Po','At','Rn','Fr','Ra','Ac','Th',
        'Pa','U' ,'Np','Pu','Am','Cm','Bk','Cf','Es' ]

    # atomic masses for Z=1..99
    mass = [ 1.00794, 4.00260, 6.941, 9.01218, 10.811, 12.0107, 14.00674, 15.9994,
        18.99840, 20.1797, 22.98977, 24.3050, 26.98154, 28.0855, 30.97376,
        32.066, 35.4527, 39.948, 39.0983, 40.078, 44.95591, 47.867, 50.9415,
        51.9961, 54.93805, 55.845, 58.93320, 58.6934, 63.546, 65.39, 69.723,
        72.61, 74.92160, 78.96, 79.904, 83.80, 85.4678, 87.62, 88.90585,
        91.224, 92.90638, 95.94, 98., 101.07, 102.90550, 106.42, 107.8682,
        112.411, 114.818, 118.710, 121.760, 127.60, 126.90447, 131.29,
        132.90545, 137.327, 138.9055, 140.116, 140.90765, 144.24, 145, 150.36,
        151.964, 157.25, 158.92534, 162.50, 164.93032, 167.26, 168.93421,
        173.04, 174.967, 178.49, 180.9479, 183.84, 186.207, 190.23, 192.217,
        195.078, 196.96655, 200.59, 204.3833, 207.2, 208.98038, 209., 210.,
        222., 223., 226., 227., 232.0381, 231.03588, 238.0289, 237., 244.,
        243., 247., 247., 251., 252. ]

    if not husser:
        #Asplund, Grevesse and Sauval (2005), basically the same as
        #Grevesse N., Asplund M., Sauval A.J. 2007, Space Science Review 130, 205
        sol = [ 0.911, 10.93, 1.05, 1.38, 2.70, 8.39, 7.78, 8.66, 4.56, 7.84,
            6.17, 7.53, 6.37, 7.51, 5.36, 7.14, 5.50, 6.18, 5.08, 6.31,
            3.05, 4.90, 4.00, 5.64, 5.39, 7.45, 4.92, 6.23, 4.21, 4.60,
            2.88, 3.58, 2.29, 3.33, 2.56, 3.28, 2.60, 2.92, 2.21, 2.59,
            1.42, 1.92, -9.99, 1.84, 1.12, 1.69, 0.94, 1.77, 1.60, 2.00,
            1.00, 2.19, 1.51, 2.27, 1.07, 2.17, 1.13, 1.58, 0.71, 1.45,
            -9.99, 1.01, 0.52, 1.12, 0.28, 1.14, 0.51, 0.93, 0.00, 1.08,
            0.06, 0.88, -0.17, 1.11, 0.23, 1.45, 1.38, 1.64, 1.01, 1.13,
            0.90, 2.00, 0.65, -9.99, -9.99, -9.99, -9.99, -9.99, -9.99, 0.06,
            -9.99, -0.52, -9.99, -9.99, -9.99, -9.99, -9.99, -9.99, -9.99 ]
        sol[0] = 1.
    else:
        #a combination of meteoritic/photospheric abundances from Asplund et al. 2009
        #chosen for the Husser et al. (2013) Phoenix model atmospheres
        sol = [ 12.00, 10.93, 3.26, 1.38, 2.79, 8.43, 7.83, 8.69, 4.56, 7.93,
            6.24, 7.60, 6.45, 7.51, 5.41, 7.12, 5.50, 6.40, 5.08, 6.34,
            3.15, 4.95, 3.93, 5.64, 5.43, 7.50, 4.99, 6.22, 4.19, 4.56,
            3.04, 3.65, 2.30, 3.34, 2.54, 3.25, 2.36, 2.87, 2.21, 2.58,
            1.46, 1.88, -9.99, 1.75, 1.06, 1.65, 1.20, 1.71, 0.76, 2.04,
            1.01, 2.18, 1.55, 2.24, 1.08, 2.18, 1.10, 1.58, 0.72, 1.42,
            -9.99, 0.96, 0.52, 1.07, 0.30, 1.10, 0.48, 0.92, 0.10, 0.92,
            0.10, 0.85, -0.12, 0.65, 0.26, 1.40, 1.38, 1.62, 0.80, 1.17,
            0.77, 2.04, 0.65, -9.99, -9.99, -9.99, -9.99, -9.99, -9.99, 0.06,
            -9.99, -0.54, -9.99, -9.99, -9.99, -9.99, -9.99, -9.99, -9.99 ]
        sol[0] = 1.

    # convert from log abundances (H=12 scale) to number densities
    # relative to H; sol[0] has already been set to 1 (H itself)
    for i in range(len(sol)-1): sol[i+1] = 10.**(sol[i+1]-12.0)

    return (symbol,mass,sol)
def lgconv(xinput, yinput, fwhm, ppr=None):
    """Convolution with a Gaussian in linear lambda scale
    for a constant resolution.

    Parameters
    ----------
    xinput: numpy float array
        wavelengths
    yinput: numpy array of floats
        fluxes
    fwhm: float
        FWHM of the Gaussian (same units as for xinput)
    ppr: float, optional
        Points per resolution element to downsample the convolved spectrum
        (default None, to keep the original sampling)

    Returns
    -------
    x: numpy float array
        wavelengths after convolution, will be a subset of xinput when that is linear,
        otherwise a subset of the linearly resampled version
    y: numpy array of floats
        fluxes after convolution
    """

    # resample to a linear lambda wavelength scale if need be
    xx = np.diff(xinput)
    if max(xx) - min(xx) > 1.e-7:  # input not linearly sampled
        nel = len(xinput)
        minx = np.min(xinput)
        maxx = np.max(xinput)
        x = np.linspace(minx, maxx, nel)
        y = interp_spl(x, xinput, yinput)
    else:  # input linearly sampled
        x = xinput
        y = yinput

    step = x[1] - x[0]
    # sigma giving the requested FWHM; kernel spans +/- 3*FWHM/2,
    # forced to an odd number of points
    sigma = fwhm / 2.0 / np.sqrt(-2.0 * np.log(0.5))
    npoints = 2 * int(3 * fwhm / 2. / step) + 1
    half = npoints * step / 2.
    xx = np.linspace(-half, half, npoints)
    kernel = np.exp(-(xx - np.mean(xx))**2 / 2. / sigma**2)
    kernel = kernel / np.sum(kernel)

    y = np.convolve(y, kernel, 'valid')
    # 'valid' mode trims npoints-1 samples; trim x to match
    edge = int(npoints / 2)
    x = x[edge:-edge]

    if ppr is not None:  # bug fix: was '!= None'
        # downsample to ppr points per resolution element
        # (integer division avoids passing a float to np.arange)
        fac = int(fwhm / step / ppr)
        subset = np.arange(x.size // fac, dtype=int) * fac
        x = x[subset]
        y = y[subset]

    # leftover debug prints removed
    return (x, y)
def vgconv(xinput,yinput,fwhm, ppr=None):
    """Convolution with a Gaussian in log lambda scale
    for a constant resolving power.

    Parameters
    ----------
    xinput: numpy float array
        wavelengths
    yinput: numpy array of floats
        fluxes
    fwhm: float
        FWHM of the Gaussian (km/s)
    ppr: float, optional
        Points per resolution element to downsample the convolved spectrum
        (default None, to keep the original sampling)

    Returns
    -------
    x: numpy float array
        wavelengths after convolution, will be a subset of xinput when that is equidistant
        in log lambda, otherwise a subset of the resampled version
    y: numpy array of floats
        fluxes after convolution
    """

    # resample to ln(lambda) if need be
    xx = np.diff(np.log(xinput))
    if max(xx) - min(xx) > 1.e-7:  # input not equidistant in log lambda
        nel = len(xinput)
        minx = np.log(xinput[0])
        maxx = np.log(xinput[-1])
        x = np.linspace(minx, maxx, nel)
        step = x[1] - x[0]
        x = np.exp(x)
        y = interp_spl(x, xinput, yinput)
    else:
        x = xinput
        y = yinput
        step = np.log(xinput[1]) - np.log(xinput[0])

    # FWHM in velocity units -> fraction of c, i.e. inverse resolving power
    fwhm = fwhm / clight
    sigma = fwhm / 2.0 / np.sqrt(-2.0 * np.log(0.5))
    npoints = 2 * int(3 * fwhm / 2. / step) + 1
    half = npoints * step / 2.
    xx = np.linspace(-half, half, npoints)
    kernel = np.exp(-(xx - np.mean(xx))**2 / 2. / sigma**2)
    kernel = kernel / np.sum(kernel)

    y = np.convolve(y, kernel, 'valid')
    # 'valid' mode trims npoints-1 samples; trim x to match
    edge = int(npoints / 2)
    x = x[edge:-edge]

    if ppr is not None:  # bug fix: was '!= None'
        # downsample to ppr points per resolution element
        # (integer division avoids passing a float to np.arange)
        fac = int(fwhm / step / ppr)
        subset = np.arange(x.size // fac, dtype=int) * fac
        x = x[subset]
        y = y[subset]

    # leftover debug prints removed
    return (x, y)
def rotconv(xinput,yinput,vsini, ppr=None):
  """convolution with a Rotation profile

  Parameters
  ----------
  xinput: numpy float array
      wavelengths
  yinput: numpy array of floats
      fluxes
  vsini: float
      projected rotational velocity (km/s)
  ppr: float, optional
      Points per resolution element to downsample the convolved spectrum
      (default None, to keep the original sampling)

  Returns
  -------
  x: numpy float array
      wavelengths after convolution, will be a subset of xinput when that is equidistant
      in log lambda, otherwise a subset of the resampled version
  y: numpy array of floats
      fluxes after convolution
  """

  #resampling to ln(lambda) if need be
  xx = np.diff(np.log(xinput))
  if max(xx) - min(xx) > 1.e-7: #input not equidistant in log lambda
    nel = len(xinput)
    minx = np.min(np.log(xinput))
    maxx = np.max(np.log(xinput))
    x = np.linspace(minx,maxx,nel)
    step = x[1] - x[0]
    x = np.exp(x)
    y = interp_spl( x, xinput, yinput)
  else:
    x = xinput
    y = yinput
    #fixed: step was undefined on this branch, making npoints below fail
    #with a NameError whenever the input was already log-equidistant
    step = np.log(xinput[1])-np.log(xinput[0])

  deltamax=vsini/clight
  npoints = 2*int(deltamax/step)+1
  xx = np.linspace(-deltamax,deltamax,npoints)
  #rotation kernel with linear limb darkening (coefficient epsilon)
  c1=2.0*(1.0-epsilon)/np.pi/(1.0-epsilon/3.0)/deltamax
  c2=0.5*epsilon/(1.0-epsilon/3.0)/deltamax
  r2=(xx/deltamax)**2
  kernel = c1*np.sqrt(1.0-r2)+c2*(1.0-r2)
  kernel = kernel/np.sum(kernel) #normalize to conserve flux

  y = np.convolve(y,kernel,'valid')
  #'valid' mode drops npoints-1 samples; trim x to match
  edge = int(npoints/2)
  x = x[edge:-edge]

  if ppr is not None: #optional downsampling to ppr points per resolution element
    fac = int(deltamax / step / ppr)
    subset = np.arange(x.size / fac, dtype=int) * fac
    x = x[subset]
    y = y[subset]

  return(x,y)
def gsynth(synthfile,fwhm=0.0,outsynthfile=None,ppr=5,wrange=None,freeze=None):
  """Smooth the spectra in a FERRE grid by Gaussian convolution

  Parameters
  ----------
  synthfile: str
      name of the input FERRE synth file
  fwhm: float
      FWHM of the Gaussian kernel (km/s)
      (default is 0.0, which means no convolution is performed)
  outsynthfile: str
      name of the output FERRE synth file
      (default is the input name with its first character replaced by 'n')
  ppr: float, optional
      Points per resolution element to downsample the convolved spectrum
      (default is 5, set to None to keep the original sampling)
  wrange: tuple
      Starting and ending wavelengths (if a smaller range that
      the input's is desired)
      (default None, to keep the original range)
  freeze: dictionary
      Allows to reduce the dimensionality of the grid. The keys are the labels
      of the dimensions to freeze (as given in in the header of the input grid)
      with the values that should be adopted for those 'frozen' dimensions.
      Example: set freeze = {'TEFF': 5000.} to fix that value for the Teff dimension
      in a grid.
      (default None, to retain all the original dimensions)

  Returns
  -------
  writes outsynthfile with the smooth spectra
  """

  if outsynthfile is None: outsynthfile='n'+synthfile[1:]
  logw=0

  #read header, update and write out
  fin = open(synthfile,'r')
  fout = open(outsynthfile,'w')
  hd = []
  labels = []
  line = fin.readline()
  hd.append(line)
  while line[1] != "/":
    line = fin.readline()
    if "N_P" in line: n_p = np.array(line.split()[2:],dtype=int)
    if "STEPS" in line: steps = np.array(line.split()[2:],dtype=float)
    if "LLIMITS" in line: llimits = np.array(line.split()[2:],dtype=float)
    if "LABEL" in line: labels.append(line.split()[-1][1:-1])
    if "NPIX" in line: npix = int(line.split()[2])
    if "N_OF_DIM" in line: ndim = int(line.split()[2])
    if "WAVE" in line: wave = np.array(line.split()[2:],dtype=float)
    if "LOGW" in line: logw = int(line.split()[2])
    if "RESOLUTION" in line: resolution = float(line.split()[2])
    hd.append(line)

  #fixed: the original used '&', which binds tighter than '==' and turned
  #the check into a chained comparison over bitwise ANDs of lengths
  assert (len(n_p) == len(steps) and len(n_p) == len(llimits) and
          len(n_p) == len(labels) and len(n_p) == ndim), \
         'The dimension of the parameters from the header are inconsistent'

  #wavelength array of the input grid (may be log10 or ln sampled)
  x = np.arange(npix)*wave[1]+wave[0]
  if logw == 1: x=10.**x
  if logw == 2: x=np.exp(x)

  #define indices for grid loops, dropping any frozen dimensions
  ll = []
  ind_n_p = []
  i = 0
  for entry in labels:
    if freeze is not None:
      lfkeys = list(freeze.keys())
      if entry not in lfkeys: ind_n_p.append(i)
    else:
      ind_n_p.append(i)
    ll.append(np.arange(n_p[i]))
    i = i + 1
  ind = list(product(*ll))

  if wrange is not None:
    assert (len(wrange) == 2), 'Error: wrange must have two elements'
    #generous margins so the convolution kernel has support at the edges
    section1 = np.where( (x >= wrange[0]*(1.-10.*fwhm/clight)) & (x <= wrange[1]*(1.+10.*fwhm/clight)) )
    x = x[section1]
    npix = len(x)

  if fwhm > 1.e-7:
    #convolve a dummy flat spectrum once just to get the output wavelengths
    y = np.ones(npix)
    xx,yy = vgconv(x,y,fwhm,ppr=ppr)
  else:
    print('Warning -- fwhm <= 1.e-7, no convolution will be performed, ppr will be ignored')
    xx = x

  if wrange is not None:
    section2 = np.where( (xx >= wrange[0]) & (xx <= wrange[1]) )
    xx = xx[section2]

  #update the header records that the smoothing changes and write it out
  jlabel = 0
  for line in hd:
    if "N_OF_DIM" in line: line = " N_OF_DIM = "+str(len(ind_n_p))+"\n"
    if "N_P" in line: line = " N_P = "+' '.join(map(str,n_p[ind_n_p]))+"\n"
    if "STEPS" in line: line = " STEPS = "+' '.join(map(str,steps[ind_n_p]))+"\n"
    if "LLIMITS" in line: line = " LLIMITS = "+' '.join(map(str,llimits[ind_n_p]))+"\n"
    if freeze is not None:
      if "LABEL" in line:
        ilabel = line.split()[-1][1:-1] #drop starting/ending quotes
        if ilabel in lfkeys:
          continue
        else:
          jlabel = jlabel + 1
          line = " LABEL("+str(jlabel)+") = "+ilabel+"\n"
    if "NPIX" in line: line = " NPIX = "+str(len(xx))+"\n"
    if "WAVE" in line: line = " WAVE = "+str(np.log10(xx[0]))+" "+str(np.log10(xx[1])-np.log10(xx[0]))+"\n"
    if "LOGW" in line: line = " LOGW = 1 \n"
    if "RESOLUTION" in line: line = " RESOLUTION = "+str(clight/np.sqrt(clight**2/resolution**2 + fwhm**2))+"\n"
    fout.write(line)

  #smooth and write data
  k = 0
  j = 0
  ntot = np.prod(n_p)
  for i in ind:
    j = j + 1
    print('line ',j,' of ',ntot)
    par = i*steps+llimits
    line = fin.readline()
    if freeze is not None:
      #fixed: keep a row only when ALL frozen dimensions match their target
      #values; the original kept a row when ANY one of them matched
      skip = False
      for entry in lfkeys:
        if (abs(freeze[entry] - par[labels.index(entry)]) >= 1e-6): skip = True
      if skip: continue
    y = np.array(line.split(),dtype=float)
    if wrange is not None: y = y[section1]
    if fwhm > 1.e-7:
      xx,yy = vgconv(x,y,fwhm,ppr=ppr)
    else:
      xx,yy = x, y
    if wrange is not None: yy = yy[section2]
    yy.tofile(fout,sep=" ",format="%0.4e")
    fout.write("\n")
    k = k + 1

  fin.close()
  fout.close()
if __name__ == "__main__":

  #command-line usage: synple.py modelfile wstart wend [vmicro [fwhm [vrot]]]
  npar = len(sys.argv)
  assert (npar >= 4), 'Synple requires at least 3 input parameters (modelfile wstart wend)'
  #fixed: the message listed the order 'vmicro vrot fwhm' but the code
  #below actually reads vmicro, then fwhm, then vrot
  assert (npar <= 7), 'Synple requires at maximum 6 input parameters (modelfile wstart wend vmicro fwhm vrot)'
  vmicro = None
  vrot = 0.0
  fwhm = 0.0
  modelfile = sys.argv[1]
  wstart = float(sys.argv[2])
  wend = float(sys.argv[3])
  if (npar > 4):
    vmicro = float(sys.argv[4])
    if (npar > 5):
      fwhm = float(sys.argv[5])
      if (npar > 6):
        vrot = float(sys.argv[6])

  x, y, z = syn(modelfile, (wstart,wend), save=True, vmicro=vmicro, vrot=vrot, fwhm=fwhm)
| 32.733132 | 173 | 0.590379 |
import os
import sys
import subprocess
import numpy as np
import glob
import time
import copy
import gzip
from scipy import interpolate
import matplotlib.pyplot as plt
from itertools import product
#paths to the synple installation, bundled models, data and binaries
synpledir = os.path.dirname(os.path.realpath(__file__))
modeldir = synpledir + "/models"  # bundled model atmospheres
modelatomdir = synpledir + "/data"  # model atoms
linelistdir = synpledir + "/linelists"  # line lists
bindir = synpledir + "/bin"  # compiled executables
synspec = bindir + "/s54d"  # synspec executable
rotin = bindir + "/rotin3"  # rotational/Gaussian convolution executable
clight = 299792.458  # speed of light (km/s)
epsilon = 0.6  # linear limb-darkening coefficient (used by rotconv)
bolk = 1.38054e-16  # Boltzmann constant (erg/K)
#string tokens used when composing synspec control files
zero = " 0 "
one = " 1 "
two = " 2 "
def syn(modelfile, wrange, dw=None, strength=1e-4, vmicro=None, abu=None, \
    linelist=['gfallx3_bpo.19','kmol3_0.01_30.20'], atom='ap18', vrot=0.0, fwhm=0.0, \
    steprot=0.0, stepfwhm=0.0, clean=True, save=False, synfile=None,
    compute=True, tmpdir=None):

  """Compute a synthetic spectrum with synspec for the model atmosphere in
  modelfile over the wavelength range wrange (a 2-tuple, in AA).

  When vmicro or abu are None they are taken from the model atmosphere.
  When dw is None the sampling step is chosen automatically and the
  continuum is also returned; otherwise the output is resampled to an
  equidistant grid with step dw. vrot (km/s) and fwhm (AA) trigger an
  extra convolution pass through the external rotin program. With
  compute=False only the synspec input files are written. tmpdir, when
  given, is created and used as the working directory. save=True writes
  the result with np.savetxt to synfile (or a name derived from the model).

  Returns (wave, flux, cont); all None when compute=False.
  """

  #check the installation (dirs, binaries, line lists, model file)
  checksynspec(linelist,modelfile)
  #read the model atmosphere: type, parameters, abundances and structure
  atmostype, teff, logg, vmicro2, abu2, nd, atmos = read_model(modelfile)

  #fall back on the model's own microturbulence and abundances
  if vmicro == None: vmicro = vmicro2
  if abu == None: abu = abu2
  if dw == None:
    #automatic step: ~1/3 of the thermal+microturbulent Doppler width
    space = np.mean(wrange) * np.sqrt(9.12e-15 * np.min(atmos['t']) + vmicro** 2) / clight / 3.
  else:
    space = dw
  #choose the synspec operation mode from the line list and inputs
  imode = checkinput(wrange, vmicro, linelist)
  print ('teff,logg,vmicro=',teff,logg,vmicro)
  logfile = 'syn.log'
  if tmpdir is not None:
    #run inside a scratch directory; keep the log next to the start dir
    startdir = os.getcwd()
    logfile = os.path.join(startdir,os.path.split(tmpdir)[-1]) + "_" + logfile
    try:
      os.mkdir(tmpdir)
    except OSError:
      print( "cannot create tmpdir %s " % (tmpdir) )
    try:
      os.chdir(tmpdir)
    except OSError:
      print("cannot enter tmpdir %s " % (tmpdir) )
  #write all the synspec input files
  cleanup()
  writetas('tas',nd,linelist)
  write5(teff,logg,abu,atom)
  write8(teff,logg,nd,atmos,atmostype)
  write55(wrange,space,imode,2,strength,vmicro,linelist,atmostype)
  create_links(linelist)
  if compute == False:
    #only prepare the input files; do not run synspec
    wave = None
    flux = None
    cont = None
  else:
    synin = open('fort.5')
    synout = open(logfile,'w')
    start = time.time()
    #NOTE(review): shell=True combined with a list argument relies on the
    #first element being the command -- confirm this is intentional
    p = subprocess.Popen([synspec], stdin=synin, stdout = synout, stderr= synout, shell=True)
    p.wait()
    synout.flush()
    synout.close()
    synin.close()
    #NOTE(review): when tmpdir is None these messages concatenate str+None
    #and would raise TypeError instead of AssertionError -- verify
    assert (os.path.isfile('fort.7')), 'Error: I cannot read the file *fort.7* in '+tmpdir+' -- looks like synspec has crashed, please look at syn.log'
    assert (os.path.isfile('fort.17')), 'Error: I cannot read the file *fort.17* in '+tmpdir+' -- looks like synspec has crashed, please look at syn.log'
    #fort.7 holds the line+continuum spectrum, fort.17 the continuum alone
    wave, flux = np.loadtxt('fort.7', unpack=True)
    wave2, flux2 = np.loadtxt('fort.17', unpack=True)
    if dw == None and fwhm <= 0. and vrot <= 0.: cont = np.interp(wave, wave2, flux2)
    end = time.time()
    print('syn ellapsed time ',end - start, 'seconds')
    if fwhm > 0. or vrot > 0.:
      #apply rotational and/or Gaussian broadening via the rotin program
      start = time.time()
      print( vrot, fwhm, space, steprot, stepfwhm)
      wave, flux = call_rotin (wave, flux, vrot, fwhm, space, steprot, stepfwhm, clean=False, reuseinputfiles=True)
      if dw == None: cont = np.interp(wave, wave2, flux2)
      end = time.time()
      print('convol ellapsed time ',end - start, 'seconds')
    if (dw != None):
      #resample to the requested equidistant wavelength grid
      nsamples = int((wrange[1] - wrange[0])/dw) + 1
      wave3 = np.arange(nsamples)*dw + wrange[0]
      flux = interp_spl(wave3, wave, flux)
      cont = np.interp(wave3, wave2, flux2)
      wave = wave3
  if clean == True: cleanup()
  if tmpdir is not None:
    #return to the starting directory and optionally remove the scratch dir
    try:
      os.chdir(startdir)
    except OSError:
      print("cannot change directory from tmpdir %s to startdir %s" % (tmpdir,startdir) )
    if clean == True:
      try:
        os.rmdir(tmpdir)
      except OSError:
        print("cannot remove directory tmpdir %s" % (tmpdir) )
  if save == True:
    if synfile == None:
      #default output name: model file name with a .syn extension
      tmpstr = os.path.split(modelfile)[-1]
      synfile = tmpstr[:tmpstr.rfind('.')]+'.syn'
    np.savetxt(synfile,(wave,flux,cont))
  return(wave, flux, cont)
def mpsyn(modelfile, wrange, dw=None, strength=1e-4, vmicro=None, abu=None, \
    linelist=['gfallx3_bpo.19','kmol3_0.01_30.20'],atom='ap18', vrot=0.0, fwhm=0.0, \
    steprot=0.0, stepfwhm=0.0, clean=True, save=False, synfile=None,
    compute=True, nthreads=1):

  """Multiprocessing wrapper around syn: the wavelength range is cut into
  nthreads contiguous pieces, each computed by syn in its own process
  (working in scratch directories 'par0', 'par1', ...), and the pieces
  are stitched back together.

  nthreads=0 uses all available CPUs; every other argument is passed
  through to syn unchanged. Returns (wave, flux, cont) like syn.
  """

  from multiprocessing import Pool,cpu_count

  if nthreads == 0:
    nthreads = cpu_count()

  #one argument list per worker, covering [w0 + i*delta, w0 + (i+1)*delta]
  delta = (wrange[1]-wrange[0])/nthreads
  pars = []
  for i in range(nthreads):
    chunk = (wrange[0]+delta*i, wrange[0]+delta*(i+1))
    pars.append([modelfile, chunk, dw, strength, vmicro, abu,
                 linelist, atom, vrot, fwhm,
                 steprot, stepfwhm, clean, save, synfile,
                 compute, 'par'+str(i)])

  pool = Pool(nthreads)
  results = pool.starmap(syn,pars)
  pool.close()
  pool.join()

  #stitch the chunks, skipping the leading sample of each chunk after the first
  x, y, z = results[0]
  for piece in results[1:]:
    x = np.concatenate((x, piece[0][1:]))
    y = np.concatenate((y, piece[1][1:]))
    z = np.concatenate((z, piece[2][1:]))

  return(x,y,z)
def raysyn(modelfile, wrange, dw=None, strength=1e-4, vmicro=None, abu=None, \
    linelist=['gfallx3_bpo.19','kmol3_0.01_30.20'], atom='ap18', vrot=0.0, fwhm=0.0, \
    steprot=0.0, stepfwhm=0.0, clean=True, save=False, synfile=None,
    compute=True, nthreads=1):

  """Parallel version of syn based on ray: the wavelength range is split
  into nthreads contiguous chunks, each computed by syn in a ray task
  (each in its own 'par<i>' scratch dir), and the results concatenated.

  nthreads=0 uses all physical cores; other arguments are passed through
  to syn unchanged. Returns (wave, flux, cont) like syn.
  """

  import psutil
  import ray

  @ray.remote
  def fun(vari,cons):
    #worker: unpack the per-chunk variables and shared constants, run syn
    wrange,tmpdir = vari

    modelfile,dw,strength,vmicro,abu,linelist, \
    atom,vrot,fwhm,steprot,stepfwhm,clean,save,synfile,compute = cons

    x, y, z = syn(modelfile, wrange, dw, strength, vmicro, abu, \
                  linelist, atom, vrot, fwhm, \
                  steprot, stepfwhm, clean, save, synfile,
                  compute, tmpdir)

    return(x,y,z)

  if nthreads == 0:
    nthreads = psutil.cpu_count(logical=False)

  print('nthreads=',nthreads)

  ray.init(num_cpus=nthreads)

  #arguments shared by all tasks are placed once in the object store
  rest = [ modelfile,dw,strength,vmicro,abu,linelist, \
    atom,vrot,fwhm,steprot,stepfwhm,clean,save,synfile,compute ]

  constants = ray.put(rest)

  #per-task variables: a wavelength chunk and a scratch directory name
  delta = (wrange[1]-wrange[0])/nthreads
  pars = []
  for i in range(nthreads):
    wrange1 = (wrange[0]+delta*i,wrange[0]+delta*(i+1))
    folder = 'par'+str(i)
    pararr = [wrange1, 'par'+str(i) ]
    pars.append(pararr)

  results = ray.get([fun.remote(pars[i],constants) for i in range(nthreads)])

  #concatenate, dropping the first sample of each chunk after the first
  x = results[0][0]
  y = results[0][1]
  z = results[0][2]

  if len(results) > 1:
    for i in range(len(results)-1):
      x = np.concatenate((x, results[i+1][0][1:]) )
      y = np.concatenate((y, results[i+1][1][1:]) )
      z = np.concatenate((z, results[i+1][2][1:]) )

  return(x,y,z)
def multisyn(modelfiles, wrange, dw=None, strength=1e-4, abu=None, \
    vmicro=None, vrot=0.0, fwhm=0.0, nfe=0.0, \
    linelist=['gfallx3_bpo.19','kmol3_0.01_30.20'], atom='ap18', \
    steprot=0.0, stepfwhm=0.0, clean=True, save=None, nthreads=1):

  """Compute spectra with mpsyn for a list of model atmospheres and grids
  of vmicro, [N/Fe], vrot and fwhm values (each may be a scalar or an
  iterable). All spectra are resampled to a common wavelength grid and
  stacked: returns (wave, flux, cont) with flux/cont 2D when more than
  one combination is requested.
  """

  #vmicro/vrot/fwhm/nfe may be scalars or iterables; normalize to sequences
  try:
    nvmicro = len(vmicro)
    vmicros = vmicro
  except TypeError:
    nvmicro = 1
    vmicros = [ vmicro ]

  try:
    nvrot = len(vrot)
    vrots = vrot   #fixed: was 'vrots = vrots' (NameError for iterable vrot)
  except TypeError:
    nvrot = 1
    vrots = [ vrot ]

  try:
    nfwhm = len(fwhm)
    fwhms = fwhm
  except TypeError:
    nfwhm = 1
    fwhms = [ fwhm ]

  try:
    nnfe = len(nfe)
    nfes = nfe     #fixed: was 'nnfes = nfe', leaving nfes undefined below
  except TypeError:
    nnfe = 1
    nfes = [ nfe ]

  assert (len(modelfiles) > 0), 'multisyn needs at least one model to work with'
  wave = None
  flux = None
  cont = None

  for entry in modelfiles:
    for vmicro1 in vmicros:
      for nfe1 in nfes:
        abu1 = copy.copy(abu)
        if (abs(nfe1) > 1e-7):
          #scale the N abundance; read it from the model when not given
          if abu1 is None:
            checksynspec(linelist,entry)
            atmostype, teff, logg, vmicro2, abu1, nd, atmos = read_model(entry)
          abu1[6] = abu1[6] * 10.**nfe1

        x, y, z = mpsyn(entry, wrange, dw=None, strength=strength, \
        vmicro=vmicro1, abu=abu1, linelist=linelist, atom=atom, \
        clean=clean, save=save, nthreads=nthreads)

        space = np.mean(np.diff(x))

        for vrot1 in vrots:
          for fwhm1 in fwhms:
            if fwhm1> 0. or vrot1 > 0.:
              start = time.time()
              print( entry, vmicro1, nfe1, vrot1, fwhm1, space)
              #fixed: convolve with the loop values vrot1/fwhm1
              #(the original passed the raw vrot/fwhm arguments)
              x2, y2 = call_rotin (x, y, vrot1, fwhm1, space, steprot, stepfwhm, \
              clean=False, reuseinputfiles=True)
              z2 = np.interp(x2, x, z)
              end = time.time()
              print('convol ellapsed time ',end - start, 'seconds')
            else:
              x2, y2, z2 = x, y, z

            if entry == modelfiles[0] and vmicro1 == vmicros[0] and vrot1 == vrots[0] and fwhm1 == fwhms[0] and nfe1 == nfes[0]:
              #first combination defines the common output wavelength grid
              if dw == None: dw = np.median(np.diff(x2))
              nsamples = int((wrange[1] - wrange[0])/dw) + 1
              wave = np.arange(nsamples)*dw + wrange[0]
              flux = interp_spl(wave, x2, y2)
              cont = np.interp(wave, x2, z2)
            else:
              #fixed: stack the convolved arrays x2/y2/z2
              #(the original interpolated the unconvolved x/y/z)
              flux = np.vstack ( (flux, interp_spl(wave, x2, y2) ) )
              cont = np.vstack ( (cont, np.interp(wave, x2, z2) ) )

  return(wave, flux, cont)
def polysyn(modelfiles, wrange, dw=None, strength=1e-4, abu=None, \
    vmicro=None, vrot=0.0, fwhm=0.0, nfe=0.0, \
    linelist=['gfallx3_bpo.19','kmol3_0.01_30.20'],atom='ap18', \
    steprot=0.0, stepfwhm=0.0, clean=True, save=None):

  """Prepare directories (hyd0000001, hyd0000002, ...) with synspec input
  files and SLURM job scripts for a list of model atmospheres and grids
  of vmicro, [N/Fe], vrot and fwhm values (each a scalar or an iterable).
  The jobs are written but not executed here.

  Returns (None, None, None).
  """

  nthreads = 1

  #vmicro/vrot/fwhm/nfe may be scalars or iterables; normalize to sequences
  try:
    nvmicro = len(vmicro)
    vmicros = vmicro
  except TypeError:
    nvmicro = 1
    vmicros = [ vmicro ]

  try:
    nvrot = len(vrot)
    vrots = vrot   #fixed: was 'vrots = vrots' (NameError for iterable vrot)
  except TypeError:
    nvrot = 1
    vrots = [ vrot ]

  try:
    nfwhm = len(fwhm)
    fwhms = fwhm
  except TypeError:
    nfwhm = 1
    fwhms = [ fwhm ]

  try:
    nnfe = len(nfe)
    nfes = nfe     #fixed: was 'nnfes = nfe', leaving nfes undefined below
  except TypeError:
    nnfe = 1
    nfes = [ nfe ]

  idir = 0
  for entry in modelfiles:
    for vmicro1 in vmicros:
      for nfe1 in nfes:

        idir = idir + 1
        dir = ( "hyd%07d" % (idir) )
        try:
          os.mkdir(dir)
        except OSError:
          print( "cannot create dir hyd%07d" % (idir) )
        try:
          os.chdir(dir)
        except OSError:
          print( "cannot change dir to hyd%07d" % (idir) )

        if entry == 'missing':
          pass
        else:
          #SLURM job script header
          sfile = dir+".job"
          now=time.strftime("%c")
          s = open(sfile ,"w")
          s.write("#!/bin/bash \n")
          s.write("#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# \n")
          s.write("#This script was written by synple on "+now+" \n")
          s.write("#SBATCH -J "+dir+" \n")
          s.write("#SBATCH -o "+dir+"_%j.out"+" \n")
          s.write("#SBATCH -e "+dir+"_%j.err"+" \n")
          s.write("#SBATCH -n "+str(nthreads)+" \n")
          s.write("#SBATCH -t 04:00:00"+" \n")
          s.write("#SBATCH -D "+os.path.abspath(os.curdir)+" \n")
          s.write("#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# \n\n\n")

          abu1 = copy.copy(abu)
          if (abs(nfe1) > 1e-7):
            #scale the N abundance; read it from the model when not given
            if abu1 is None:
              checksynspec(linelist,entry)
              atmostype, teff, logg, vmicro2, abu1, nd, atmos = read_model(entry)
            abu1[6] = abu1[6] * 10.**nfe1

          #write the synspec input files without running synspec
          x, y, z = syn(entry, wrange, dw=None, strength=strength, vmicro=vmicro1, \
          abu=abu1, linelist=linelist, atom=atom, compute=False)

          s.write(synspec+" < "+"fort.5"+"\n")

          #recover the wavelength step chosen by write55 (6th line of fort.55)
          si = open("fort.55",'r')
          for i in range(6): line = si.readline()
          entries = line.split()
          space = float(entries[5])
          si.close()

          #one rotin input file per (vrot, fwhm) combination
          iconv = 0
          for vrot1 in vrots:
            for fwhm1 in fwhms:

              print('iconv=',iconv)

              iconv = iconv + 1
              inconv = ("%07dfort.5" % (iconv) )
              outconv = ("'%07dfort.7'" % (iconv) )
              if fwhm1> 0. or vrot1 > 0.:
                f = open(inconv,'w')
                f.write( ' %s %s %s \n' % ("'fort.7'", "'fort.17'", outconv) )
                f.write( ' %f %f %f \n' % (vrot1, space, steprot) )
                f.write( ' %f %f \n' % (fwhm1, stepfwhm) )
                print('stepfwhm=',stepfwhm)
                f.write( ' %f %f %i \n' % (wrange[0], wrange[1], 0) )
                f.close()
                s.write(rotin+" < "+inconv+"\n")
              else:
                #no convolution requested: just copy the synspec output
                s.write("cp "+" fort.7 "+outconv[1:-1]+"\n")

          s.close()
          os.chmod(sfile ,0o755)

        try:
          os.chdir('..')
        except OSError:
          print( "cannot exit dir hyd%07d" % (idir) )

  return(None,None,None)
def polyopt(wrange=(9.e2,1.e5),dw=0.1,strength=1e-3, linelist=['gfallx3_bpo.19','kmol3_0.01_30.20'], \
    tlt = (20,3.08,0.068), tlrho = (20,-14.0,0.59), \
    tfeh=(1,0.0,0.0), tafe=(1,0.0,0.0), tcfe=(1,0.0,0.0), tnfe=(1,0.0,0.0), \
    tofe=(1,0.0,0.0), trfe=(1,0.0,0.0), tsfe=(1,0.0,0.0), tvmicro=(1,1.0,0.0), \
    zexclude=None, atom='ap18'):

  """Prepare directories (hyd0000001, ...) with synspec input files and
  SLURM job scripts to compute opacity tables (synspec imode=-3) on grids
  of log10(T) (tlt) and log10(rho) (tlrho), for every combination of the
  composition and microturbulence triads.

  Every t* argument is an (n, llimit, step) triad expanding to n values
  starting at llimit with spacing step. zexclude lists atomic numbers
  whose abundance is zeroed in the calculation. The jobs are written but
  not executed here. Returns ().
  """

  nthreads = 1

  #expand every (n, llimit, step) triad, validating its shape first
  try:
    nfeh = len(tfeh)
    assert (nfeh == 3), 'Error: feh triad must have three elements (n, llimit, step)'
    fehs = np.arange(tfeh[0])*tfeh[2] + tfeh[1]
  except TypeError:
    print('Error: feh triad must have three elements (n, llimit, step)')
    return ()

  try:
    nafe = len(tafe)
    assert (nafe == 3), 'Error: afe triad must have three elements (n, llimit, step)'
    afes = np.arange(tafe[0])*tafe[2] + tafe[1]
  except TypeError:
    print('Error: afe triad must have three elements (n, llimit, step)')
    return ()

  try:
    ncfe = len(tcfe)
    assert (ncfe == 3), 'Error: cfe triad must have three elements (n, llimit, step)'
    cfes = np.arange(tcfe[0])*tcfe[2] + tcfe[1]
  except TypeError:
    print('Error: cfe triad must have three elements (n, llimit, step)')
    return ()

  try:
    nnfe = len(tnfe)
    assert (nnfe == 3), 'Error: nfe triad must have three elements (n, llimit, step)'
    nfes = np.arange(tnfe[0])*tnfe[2] + tnfe[1]
  except TypeError:
    print('Error: nfe triad must have three elements (n, llimit, step)')
    return ()

  try:
    nofe = len(tofe)
    assert (nofe == 3), 'Error: ofe triad must have three elements (n, llimit, step)'
    ofes = np.arange(tofe[0])*tofe[2] + tofe[1]
  except TypeError:
    print('Error: ofe triad must have three elements (n, llimit, step)')
    return ()

  try:
    nrfe = len(trfe)
    assert (nrfe == 3), 'Error: rfe triad must have three elements (n, llimit, step)'
    rfes = np.arange(trfe[0])*trfe[2] + trfe[1]
  except TypeError:
    print('Error: rfe triad must have three elements (n, llimit, step)')
    return ()

  try:
    nsfe = len(tsfe)
    assert (nsfe == 3), 'Error: sfe triad must have three elements (n, llimit, step)'
    sfes = np.arange(tsfe[0])*tsfe[2] + tsfe[1]
  except TypeError:
    print('Error: sfe triad must have three elements (n, llimit, step)')
    return ()

  try:
    nvmicro = len(tvmicro)
    assert (nvmicro == 3), 'Error: vmicro triad must have three elements (n, llimit, step)'
    vmicros = np.arange(tvmicro[0])*tvmicro[2] + tvmicro[1]
  except TypeError:
    print('Error: vmicro triad must have three elements (n, llimit, step)')
    return ()

  try:
    nlt = len(tlt)
    assert (nlt == 3), 'Error: lt triad must have three elements (n, llimit, step)'
    lt = np.arange(tlt[0])*tlt[2] + tlt[1]
  except TypeError:
    print('Error: tlt triad must have three elements (n, llimit, step)')
    return ()

  try:
    nlrho = len(tlrho)
    assert (nlrho == 3), 'Error: lrho triad must have three elements (n, llimit, step)'
    lrho = np.arange(tlrho[0])*tlrho[2] + tlrho[1]
  except TypeError:
    print('Error: tlrho triad must have three elements (n, llimit, step)')
    return ()

  #solar reference abundances and element bookkeeping
  symbol, mass, sol = elements()
  z_metals = np.arange(97,dtype=int) + 3
  z_alphas = np.array([8,10,12,14,16,18,20,22],dtype=int)
  z_rs = np.arange(62,dtype=int) + 31
  #fraction of each z_rs element's solar abundance due to the r-process
  #(negative entries flag elements with no r/s split available)
  rfrac= np.array([.43, .47, .81, .85, .39, .47,
          .41, .11, .08, .17, .15, .50,-.99, .68, .86,
          .54, .80, .48, .65, .35, .75, .83, .80, .80,
          .85, .19, .38, .23, .51, .44,-.99, .71, .93,
          .85, .93, .85, .92, .83, .87, .67, .80, .44,
          .59, .44, .91, .91, .99, .95, .94, .41, .24,
          .54, .95,-.99,-.99,-.99,-.99,-.99,-.99, 1.0,
          -.99, 1.0], dtype=float)

  idir = 0
  for feh in fehs:
    for afe in afes:
      for cfe in cfes:
        for nfe in nfes:
          for ofe in ofes:
            for rfe in rfes:
              for sfe in sfes:
                for vmicro in vmicros:

                  print(feh,afe,cfe,nfe,ofe,rfe,sfe)
                  idir = idir + 1
                  dir = ( "hyd%07d" % (idir) )
                  try:
                    os.mkdir(dir)
                  except OSError:
                    print( "cannot create dir hyd%07d" % (idir) )
                  try:
                    os.chdir(dir)
                  except OSError:
                    print( "cannot change dir to hyd%07d" % (idir) )

                  #validate the wavelength range / line list combination
                  imode = checkinput(wrange, vmicro, linelist)

                  #SLURM job script header
                  sfile = dir+".job"
                  now=time.strftime("%c")
                  s = open(sfile ,"w")
                  s.write("#!/bin/bash \n")
                  s.write("#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# \n")
                  s.write("#This script was written by synple on "+now+" \n")
                  s.write("#SBATCH -J "+dir+" \n")
                  s.write("#SBATCH -o "+dir+"_%j.out"+" \n")
                  s.write("#SBATCH -e "+dir+"_%j.err"+" \n")
                  s.write("#SBATCH -n "+str(nthreads)+" \n")
                  s.write("#SBATCH --ntasks-per-node "+str(4)+" \n")
                  s.write("#SBATCH -t 48:00:00"+" \n")
                  s.write("#SBATCH -D "+os.path.abspath(os.curdir)+" \n")
                  s.write("#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# \n\n\n")

                  #scale the solar abundances: overall metallicity first,
                  #then alphas, C, N, O and the r-/s-process elements
                  abu = copy.copy(sol)
                  if (abs(feh) > 1e-7):
                    for i in range(len(z_metals)):
                      abu[z_metals[i] - 1] = abu[z_metals[i] - 1] * 10.**feh
                  if (abs(afe) > 1e-7):
                    for i in range(len(z_alphas)):
                      abu[z_alphas[i] - 1] = abu[z_alphas[i] - 1] * 10.**afe
                  if (abs(cfe) > 1e-7): abu[5] = abu[5] * 10.**cfe
                  if (abs(nfe) > 1e-7): abu[6] = abu[6] * 10.**nfe
                  if (abs(ofe) > 1e-7): abu[7] = abu[7] * 10.**ofe
                  if (abs(rfe) > 1e-7):
                    for i in range(len(z_rs)):
                      if rfrac[i] > 0.0: abu[z_rs[i] - 1] = abu[z_rs[i] - 1] * rfrac[i] * 10.**rfe
                  if (abs(sfe) > 1e-7):
                    for i in range(len(z_rs)):
                      if rfrac[i] > 0.0: abu[z_rs[i] - 1] = abu[z_rs[i] - 1] * (1.0 - rfrac[i]) * 10.**sfe

                  #write the synspec input files for an opacity-table run
                  write55(wrange,dw=dw,imode=-3,hydprf=0, strength=strength, vmicro=vmicro, linelist=linelist)
                  write5(9999.,9.9,abu,atom)
                  writetas('tas',1,linelist)
                  write2(lt,lrho,wrange,filename='opt.dat', \
                  strength=strength,inttab=1)
                  if zexclude != None:
                    write3(zexclude)
                  create_links(linelist)
                  s.write('time ' + synspec + " < "+"fort.5"+"\n")
                  s.close()
                  os.chmod(sfile ,0o755)

                  try:
                    os.chdir('..')
                  except OSError:
                    print( "cannot exit dir hyd%07d" % (idir) )

  return()
def collect_marcs(modeldir=modeldir, tteff=None, tlogg=None, tfeh=(1,0.0,0.0), tafe=(1,0.0,0.0), \
    tcfe=(1,0.0,0.0), tnfe=(1,0.0,0.0), tofe=(1,0.0,0.0), trfe=(1,0.0,0.0), tsfe=(1,0.0,0.0), \
    ignore_missing_models=False):

  """Collect the names of the MARCS model-atmosphere files in modeldir that
  lie on the regular grid defined by the (n, llimit, step) triads for
  Teff, logg, [Fe/H], [a/Fe], [C/Fe], [N/Fe], [O/Fe], [r/Fe] and [s/Fe].

  Each triad expands to n values starting at llimit with spacing step.
  With ignore_missing_models=True absent models are recorded as the
  string 'missing' instead of raising. The selected names and their
  parameters are also logged to 'files.txt' in the current directory.

  Returns the list of file names, or () when a triad is malformed.
  """

  def _triad2arr(triad, name):
    #expand an (n, llimit, step) triad into its array of values;
    #print an error and return None when triad is not a valid sequence
    try:
      n = len(triad)
      assert (n == 3), 'Error: '+name+' triad must have three elements (n, llimit, step)'
      return np.arange(triad[0])*triad[2] + triad[1]
    except TypeError:
      print('Error: '+name+' triad must have three elements (n, llimit, step)')
      return None

  teffs = _triad2arr(tteff, 'Teff')
  if teffs is None: return ()
  loggs = _triad2arr(tlogg, 'logg')
  if loggs is None: return ()
  fehs = _triad2arr(tfeh, 'feh')
  if fehs is None: return ()
  afes = _triad2arr(tafe, 'afe')
  if afes is None: return ()
  cfes = _triad2arr(tcfe, 'cfe')
  if cfes is None: return ()
  nfes = _triad2arr(tnfe, 'nfe')
  if nfes is None: return ()
  ofes = _triad2arr(tofe, 'ofe')
  if ofes is None: return ()
  rfes = _triad2arr(trfe, 'rfe')
  if rfes is None: return ()
  sfes = _triad2arr(tsfe, 'sfe')
  if sfes is None: return ()

  files = []

  fi = open('files.txt','w')

  for teff in teffs:
    for logg in loggs:
      for feh in fehs:
        for afe in afes:
          for cfe in cfes:
            for nfe in nfes:
              for ofe in ofes:
                for rfe in rfes:
                  for sfe in sfes:

                    print(teff,logg,feh,afe,cfe,nfe,ofe,rfe,sfe)
                    code = 'm*_t*_x3'
                    #'p' (plane-parallel) models for dwarfs, 's' (spherical) for giants
                    if logg >= 3.5:
                      a1 = 'p'
                    else:
                      a1 = 's'

                    filename = ("%s%4i_g%+.1f_%s_z%+.2f_a%+.2f_c%+.2f_n%+.2f_o%+.2f_r%+.2f_s%+.2f.mod*" % (a1,teff,logg,code,feh,afe,cfe,nfe,ofe,rfe,sfe) )

                    file = glob.glob(os.path.join(modeldir,filename))

                    if ignore_missing_models == False:
                      assert len(file) > 0, 'Cannot find model '+filename+' in modeldir '+modeldir
                      assert len(file) == 1, 'More than one model matches '+filename+' in modeldir '+modeldir
                      #fixed: the matched file was never recorded, so the
                      #files[-1] write below raised an IndexError
                      files.append(file[0])
                    else:
                      if (len(file) == 0): files.append('missing')
                      if (len(file) == 1): files.append(file[0])

                    #fixed: the format string had a stray '%s' (11 conversions
                    #for 10 arguments), which raised a TypeError at runtime
                    fi.write( "%s %4i %+.1f %+.2f %+.2f %+.2f %+.2f %+.2f %+.2f %+.2f\n" % (files[-1],teff,logg,feh,afe,cfe,nfe,ofe,rfe,sfe) )

  fi.close()

  return(files)
def collect_k2odfnew(modeldir=modeldir, tteff=None, tlogg=None, tfeh=(1,0.0,0.0), tafe=(1,0.0,0.0), \
    ignore_missing_models=False):

  """Collect the names of the Kurucz ODFNEW model-atmosphere files in
  modeldir on the grid defined by the (n, llimit, step) triads for Teff,
  logg, [Fe/H] and [a/Fe].

  Each triad expands to n values starting at llimit with spacing step.
  With ignore_missing_models=True absent models are recorded as the
  string 'missing' instead of raising. The selected names and their
  parameters are also logged to 'files.txt' in the current directory.

  Returns the list of file names, or () when a triad is malformed.
  """

  def _triad2arr(triad, name):
    #expand an (n, llimit, step) triad into its array of values;
    #print an error and return None when triad is not a valid sequence
    try:
      n = len(triad)
      assert (n == 3), 'Error: '+name+' triad must have three elements (n, llimit, step)'
      return np.arange(triad[0])*triad[2] + triad[1]
    except TypeError:
      print('Error: '+name+' triad must have three elements (n, llimit, step)')
      return None

  teffs = _triad2arr(tteff, 'Teff')
  if teffs is None: return ()
  loggs = _triad2arr(tlogg, 'logg')
  if loggs is None: return ()
  fehs = _triad2arr(tfeh, 'feh')
  if fehs is None: return ()
  afes = _triad2arr(tafe, 'afe')
  if afes is None: return ()

  files = []

  fi = open('files.txt','w')

  for teff in teffs:
    for logg in loggs:
      for feh in fehs:
        for afe in afes:

          print(teff,logg,feh,afe)
          code = 'k2odfnew.dat'
          #'a' marks alpha-enhanced grids; 'am'/'ap' encode the [Fe/H] sign
          if afe > 0.0:
            a1 = 'a'
          else:
            a1 = ''
          if feh < 0.0:
            a2 = 'am'
          else:
            a2 = 'ap'

          filename = ("t%05ig%.1f%s%02i%s" % (teff,logg,a2,int(abs(feh)*10),a1+code) )

          file = glob.glob(os.path.join(modeldir,filename))

          if ignore_missing_models == False:
            assert len(file) > 0, 'Cannot find model '+filename+' in modeldir '+modeldir
            assert len(file) == 1, 'More than one model matches '+filename+' in modeldir '+modeldir
            #fixed: the matched file was never recorded, so the files[-1]
            #write below raised an IndexError
            files.append(file[0])
          else:
            if (len(file) == 0): files.append('missing')
            if (len(file) == 1): files.append(file[0])

          fi.write( "%s %4i %+.1f %+.2f %+.2f \n" % (files[-1],teff,logg,feh,afe) )

  fi.close()

  return(files)
def getallt(modelfiles):

  """Gather the temperature, density and electron-density stratifications
  from a list of MARCS model-atmosphere files, concatenated into three
  flat lists (one value per depth layer per model).
  """

  t = []
  rho = []
  ne = []

  for entry in modelfiles:
    print('reading ',entry)
    teff, logg, vmicro, abu, nd, atmos = read_marcs_model2(entry)
    t.extend(atmos['t'])
    rho.extend(atmos['rho'])
    ne.extend(atmos['ne'])

  return(t,rho,ne)
def call_rotin(wave=None, flux=None, vrot=0.0, fwhm=0.0, space=1e-2, steprot=0.0, stepfwhm=0.0, clean=True, reuseinputfiles=False):

  """Convolve a spectrum with a rotation and/or Gaussian profile by running
  the external rotin program.

  Parameters
  ----------
  wave, flux: numpy float arrays
      input wavelengths and fluxes
  vrot: float
      projected rotational velocity (km/s)
  fwhm: float
      FWHM of the Gaussian kernel (AA)
  space: float
      wavelength step of the input spectrum
  steprot, stepfwhm: float
      output steps for the rotation and Gaussian convolutions
      (0.0 lets rotin choose)
  clean: bool
      remove the intermediate fort.* files afterwards
  reuseinputfiles: bool
      when True, assume fort.7/fort.17 already exist and skip writing them

  Returns
  -------
  wave2, flux2: numpy float arrays
      convolved spectrum read back from fort.11
  """

  if not reuseinputfiles:
    #write the spectrum (fort.7) and a flat continuum at peak flux (fort.17)
    f = open('fort.7','w')
    f2 = open('fort.17','w')
    maxflux = np.max(flux)
    for i in range(len(wave)):
      f.write( ' %f %f \n' % (wave[i], flux[i]) )
      f2.write( ' %f %f \n' % (wave[i], maxflux) )
    f.close()
    f2.close()

  #rotin control file
  f = open('fort.5','w')
  f.write( ' %s %s %s \n' % ("'fort.7'", "'fort.17'", "'fort.11'") )
  f.write( ' %f %f %f \n' % (vrot, space, steprot) )
  f.write( ' %f %f \n' % (fwhm, stepfwhm) )
  f.write( ' %f %f %i \n' % (np.min(wave), np.max(wave), 0) )
  f.close()

  synin = open('fort.5')
  synout = open('syn.log','a')
  p = subprocess.Popen([rotin], stdin=synin, stdout = synout, stderr = synout)
  p.wait()
  synout.flush()
  synout.close()
  synin.close()

  #fixed: the failure message referenced an undefined 'tmpdir', which
  #turned a failed run into a NameError instead of the intended assert
  assert (os.path.isfile('fort.11')), 'Error: I cannot read the file *fort.11* in '+os.getcwd()+' -- looks like rotin has crashed, please look at syn.log'

  wave2, flux2 = np.loadtxt('fort.11', unpack=True)
  if clean == True: cleanup()

  return(wave2, flux2)
def read_model(modelfile):

  """Read a model atmosphere, auto-detecting its format.

  The file is searched in modeldir when the given path does not exist.
  Returns (atmostype, teff, logg, vmicro, abu, nd, atmos) where atmostype
  is 'kurucz', 'marcs' or 'phoenix'.
  """

  #fall back to the bundled model directory when the path is not found
  if not os.path.isfile(modelfile):
    mf = os.path.join(modeldir,modelfile)
    if os.path.isfile(mf): modelfile = mf

  atmostype = identify_atmostype(modelfile)

  #dispatch to the reader matching the detected format
  readers = { 'kurucz': read_kurucz_model,
              'marcs': read_marcs_model2,
              'phoenix': read_phoenix_model }
  teff, logg, vmicro, abu, nd, atmos = readers[atmostype](modelfile)

  return (atmostype,teff,logg,vmicro,abu,nd,atmos)
def identify_atmostype(modelfile):

  """Infer the type of a model-atmosphere file.

  PHOENIX models are recognized from the file name alone (FITS files with
  'PHOENIX' in the name). Otherwise the first line of the (possibly
  gzipped) file is inspected: Kurucz models carry a TEFF record there,
  and anything else is taken to be MARCS.

  Parameters
  ----------
  modelfile: str
      path to the model-atmosphere file

  Returns
  -------
  atmostype: str
      'kurucz', 'marcs' or 'phoenix'
  """

  if ('PHOENIX' in modelfile and 'fits' in modelfile): atmostype = 'phoenix'
  else:
    if modelfile[-3:] == '.gz':
      f = gzip.open(modelfile,'rt')
    else:
      f = open(modelfile,'r')
    line = f.readline()
    print('modelfile / line=',modelfile,line)
    #removed a no-op 'type(line)' statement left over from debugging
    if ('TEFF' in line): atmostype = 'kurucz'
    else: atmostype = 'marcs'
    f.close()

  return(atmostype)
def checksynspec(linelist,modelfile):

  """Verify that the synple installation directories, the synspec and rotin
  binaries, the requested line lists and the model atmosphere all exist.
  Line lists and the model file are also searched in linelistdir and
  modeldir respectively. Raises AssertionError when anything is missing;
  returns True otherwise.
  """

  dirs = [synpledir,modelatomdir,linelistdir,bindir]
  for entry in dirs: assert (os.path.isdir(entry)), 'dir '+entry+' missing'

  files = [synspec,rotin]
  for entry in linelist:
    if not os.path.isfile(entry):
      ll = os.path.join(linelistdir,entry)
      if os.path.isfile(ll): entry = ll
    #fixed: every line list is now added to the check; previously a list
    #that existed as given, or one missing from both locations, was
    #silently skipped and never verified
    files.append(entry)
  for entry in files: assert (os.path.isfile(entry)), 'file '+entry+' missing'

  if not os.path.isfile(modelfile):
    mf = os.path.join(modeldir,modelfile)
    if os.path.isfile(mf): modelfile = mf

  print(modeldir)
  print(modelfile)
  assert (os.path.isfile(modelfile)),'model atmosphere file '+modelfile+' missing'

  return(True)
def checkinput(wrange, vmicro, linelist):

  """Validate the inputs for a synspec run and pick the operation mode.

  Returns imode: 2 when no line list is given (continuum only), 1 when
  the list holds only a handful of lines, and 0 for a regular line list,
  in which case wrange must fall inside the list's wavelength coverage.
  vmicro must be non-negative.
  """

  if not linelist:
    #no line list at all: continuum-only mode
    imode = 2
  else:
    #resolve the atomic line list against linelistdir when needed
    first = linelist[0]
    if not os.path.isfile(first):
      candidate = os.path.join(linelistdir, first)
      if os.path.isfile(candidate):
        linelist[0] = candidate

    nlines, minlambda, maxlambda = getlinelistrange(linelist[0])

    if nlines > 10:
      inside = (wrange[0] > minlambda-1 and wrange[1] < maxlambda+1)
      assert inside,'wrange exceeds the allow range ('+str(minlambda)+' to '+str(maxlambda)+')'
      imode = 0
    else:
      imode = 1

  assert (vmicro >= 0.0),'vmicro = '+str(vmicro)+' but cannot < 0.'

  return(imode)
def getlinelistrange(atomiclinelist):
  """Return (nlines, minlambda, maxlambda) for a synspec atomic line list.

  The wavelengths (nm in the file) of the first and last records are
  converted to Angstroms. The number of lines is *estimated* from the
  file size assuming ~100 characters per record, and the last record is
  located by seeking 103 bytes before the end of the file -- both
  heuristics rely on the fixed-width format of the synspec line lists.
  """
  with open(atomiclinelist, 'r') as f:
    entries = f.readline().split()
    minlambda = float(entries[0])*10.
    fsize = os.path.getsize(atomiclinelist)
    # jump near the end of the file to pick up the last record
    f.seek(fsize-103)
    entries = f.readline().split()
  maxlambda = float(entries[0])*10.
  nlines = int(0.01 * fsize)
  return (nlines, minlambda, maxlambda)
def writetas(filename, nd, linelist):
  """Write the synspec non-standard-flags file ('tas') with *nd* depths."""
  with open(filename, 'w') as tas:
    tas.write("ND= " + str(nd) + " \n")
    # molecules are only switched on when more than one line list is used
    if len(linelist) > 1:
      tas.write("IFMOL= " + one + " \n")
    tas.write("TMOLIM= 8000. \n")
  return ()
def write3(zexclude):
  """Write fort.3, zeroing the abundance of every atomic number in *zexclude*."""
  with open('fort.3', 'w') as out:
    for atomic_number in zexclude:
      out.write(" %d %10.4e \n" % (atomic_number, 0.0))
  return ()
def write2(lt, lrho, wrange, filename='opt.data', dlw=2e-5, binary=False, strength=1e-4, inttab=1):
  """Write fort.2, the input for computing an opacity table.

  *lt* and *lrho* are the log10 temperature and density grids, *wrange*
  the wavelength interval and *dlw* the step in log10(wavelength) used to
  derive the number of samples. The table file name is written quoted,
  together with a 0/1 flag for ascii/binary output.
  """
  nsamples = int((np.log10(wrange[1]) - np.log10(wrange[0])) / dlw) + 1
  ibingr = 1 if binary == True else 0   # 1 -> binary opacity table
  quoted = "'" + filename + "'"
  with open('fort.2', 'w') as out:
    out.write(" %d %10.4e %10.4e \n" % (len(lt), 10.**lt[0], 10.**lt[-1]))
    out.write(" %d \n" % (1))
    out.write(" %d %10.4e %10.4e \n" % (len(lrho), 10.**lrho[0], 10.**lrho[-1]))
    out.write(" %d %d %10.4e %10.4e \n" % (nsamples, inttab, wrange[0], wrange[1]))
    out.write(" %s %d \n" % (quoted, ibingr))
  return ()
def write55(wrange,dw=1e-2,imode=0,hydprf=2,strength=1e-4,vmicro=0.0, \
    linelist=['gfallx3_bpo.19','kmol3_0.01_30.20'], atmostype='kurucz'):
  """Write fort.55, the main synspec control file.

  Encodes the synthesis mode (imode), model-input flag, hydrogen-profile
  treatment, wavelength range/step, line-strength threshold and vmicro.
  Relies on the module-level string constants *zero* and *one* for the
  padded flag fields. NOTE(review): the mutable default *linelist* is
  shared across calls; it is only read here, but callers should not
  mutate it.
  """
  # tlusty/marcs models are passed to synspec in a different input format
  if (atmostype == 'tlusty' or atmostype == 'marcs'): inmod = 1
  else: inmod = 0
  f = open('fort.55','w')
  f.write(" "+str(imode)+" "+2*zero+"\n")
  f.write(" "+str(inmod)+3*zero+"\n")
  f.write(5*zero+"\n")
  f.write(one+4*zero+"\n")
  f.write(str(hydprf)+2*zero+"\n")
  # imode == -3 flags an equidistant-frequency setup (negative wend)
  if imode == -3:
    f.write( ' %f %f %f %i %e %f \n ' % (wrange[0], -wrange[1], 100., 2000, strength, dw) )
  else:
    f.write( ' %f %f %f %i %e %f \n ' % (wrange[0], wrange[1], 200., 2000, strength, dw) )
  # additional (e.g. molecular) line lists are attached as units 20, 21, ...
  ll = len(linelist)
  if ll < 2: f.write(2*zero)
  else: f.write(str(ll-1) + ' ' + ' '.join(map(str,np.arange(ll-1)+20)))
  f.write("\n")
  f.write( ' %f \n' % (vmicro) )
  f.close()
def write5(teff,logg,abu, atom='ap18', ofile='fort.5', nlte=False, tl=False):
  """Write fort.5, the synspec/tlusty input with Teff, logg, abundances
  and the atomic-data (model atom) configuration.

  *atom* selects which set of explicit model atoms is listed:
  'hhm' (H and H- only), 'yo19' or 'ap18'. *nlte* switches the LTE flag
  and marks H for NLTE; *tl* caps the number of atoms at 30 (tlusty).
  """
  symbol, mass, sol = elements()
  f = open(ofile,'w')
  # NOTE(review): str(logg).format('%7.4f') does NOT apply the format
  # (the string contains no '{}'); logg is written with full precision
  f.write(' '+str(teff)+" "+str(logg).format('%7.4f')+" ! TEFF, GRAV \n")
  if nlte:
    f.write(" F F ! LTE, GRAY \n")
  else:
    f.write(" T F ! LTE, GRAY \n")
  f.write(" 'tas' ! name of non-standard flags \n")
  f.write(" 50 ! frequencies \n")
  if tl:
    natom = 30
  else:
    natom = len(abu)
  f.write(" "+str(natom)+" ! NATOMS \n")
  assert (atom == 'hhm' or atom == 'ap18' or atom == 'yo19'), 'atom must be one of: hhm/ap18/yo19!'
  # ex: per-element mode flag (1 = implicit, 2 = explicit model atom,
  # -3 = NLTE for hydrogen)
  ex = np.ones(natom)
  if atom == 'hhm' :
    zex = [1]
  elif atom == 'yo19':
    zex = [1,11,12,19,20]
  elif atom == 'ap18':
    zex = [1,2,6,7,8,11,12,13,14,20,26]
  for i in zex: ex[i-1] = 2
  if nlte: ex[0] = -3
  for i in range(natom):
    f.write(' %2d %e %i %s\n' % (ex[i], abu[i], 0, ' ! ' +symbol[i]) )
  for i in range(3): f.write("* \n")
  # explicit ion / model-atom file table for the chosen atom set
  if atom == 'hhm':
    f.write(" 1 -1 1 0 0 1 ' H 1' 'data/hm.dat' \n" )
    f.write(" 0 0 3 0 \n")
    f.write(" 1 0 9 0 0 0 ' H 1' 'data/h1s.dat' \n")
    f.write(" 1 1 1 1 0 0 ' H 2' ' ' \n")
    f.write(" 0 0 0 -1 0 0 ' ' ' ' \n")
  elif atom == "yo19":
    f.write("* ../data_atom for ions \n")
    f.write(" 1 -1 1 0 0 1 ' H 0' 'data_atom/hm.dat' \n")
    f.write(" 0 0 3 0 \n")
    f.write(" 1 0 16 0 0 0 ' H 1' 'data_atom/h1_16lev2.dat' \n")
    f.write(" 1 1 1 1 0 0 ' H 2' ' ' \n")
    f.write(" 11 0 42 0 0 0 'Na 1' 'data_atom/NaIkas.tl' \n")
    f.write(" 11 1 1 1 0 0 'Na 2' '' \n")
    f.write(" 12 0 96 0 0 0 'Mg 1' 'data_atom/Mg1kas_F_ccc.tl' \n")
    f.write(" 12 1 29 0 0 0 'Mg 2' 'data_atom/Mg2kas_F_ccc.tl' \n")
    f.write(" 12 2 1 1 0 0 'Mg 3' ' ' \n")
    f.write(" 19 0 31 0 0 0 'K 1' 'data_atom/KIkas.tl' \n")
    f.write(" 19 1 1 1 0 0 'K 2' '' \n")
    f.write(" 20 0 66 0 0 0 'Ca 1' 'data_atom/Ca1kas_F_zat.tl' \n")
    f.write(" 20 1 24 0 0 0 'Ca 2' 'data_atom/Ca2kas_F_zat.tl' \n")
    f.write(" 20 2 1 1 0 0 'Ca 3' ' ' \n")
    f.write(" 0 0 0 -1 0 0 ' ' ' ' \n")
  elif atom == 'ap18':
    f.write("* ../data for ions \n")
    f.write(" 1 -1 1 0 0 1 ' H 1' 'data/hm.dat' \n")
    f.write(" 0 0 3 0 \n")
    f.write(" 1 0 9 0 0 0 ' H 1' 'data/h1s.dat' \n")
    f.write(" 1 1 1 1 0 0 ' H 2' ' ' \n")
    f.write(" 2 0 14 0 0 0 'He 1' 'data/he1.dat' \n")
    f.write(" 2 1 14 0 0 0 'He 2' 'data/he2.dat ' \n")
    f.write(" 2 2 1 1 0 0 'He 3' ' ' \n")
    f.write(" 6 0 104 0 0 0 ' C 1' 'data/c1.t' \n")
    f.write(" 6 1 40 0 0 0 ' C 2' 'data/c2.t' \n")
    f.write(" 6 2 1 1 0 0 ' C 3' ' ' \n")
    f.write(" 7 0 89 0 0 0 ' N 1' 'data/n1.t' \n")
    f.write(" 7 1 51 0 0 0 ' N 2' 'data/n2.t' \n")
    f.write(" 7 2 1 1 0 0 ' N 3' ' ' \n")
    f.write(" 8 0 54 0 0 0 ' O 1' 'data/o1.t' \n")
    f.write(" 8 1 74 0 0 0 ' O 2' 'data/o2.t' \n")
    f.write(" 8 2 1 1 0 0 ' O 3' ' ' \n")
    f.write(" 11 0 32 0 0 0 'Na 1' 'data/na1.t' \n")
    f.write(" 11 1 8 0 0 0 'Na 2' 'data/na2.t' \n")
    f.write(" 11 2 1 1 0 0 'Na 3' ' ' \n")
    f.write(" 12 0 71 0 0 0 'Mg 1' 'data/mg1.t' \n")
    f.write(" 12 1 31 0 0 0 'Mg 2' 'data/mg2.t' \n")
    f.write(" 12 2 1 1 0 0 'Mg 3' ' ' \n")
    f.write(" 13 0 33 0 0 0 'Al 1' 'data/al1.t' \n")
    f.write(" 13 1 81 0 0 0 'Al 2' 'data/al2.t' \n")
    f.write(" 13 2 1 1 0 0 'Al 3' ' ' \n")
    f.write(" 14 0 57 0 0 0 'Si 1' 'data/si1.t' \n")
    f.write(" 14 1 46 0 0 0 'Si 2' 'data/si2.t' \n")
    f.write(" 14 2 1 1 0 0 'Si 3' ' ' \n")
    f.write(" 20 0 79 0 0 0 'Ca 1' 'data/ca1.t' \n")
    f.write(" 20 1 32 0 0 0 'Ca 2' 'data/ca2.t' \n")
    f.write(" 20 2 1 1 0 0 'Ca 3' ' ' \n")
    f.write(" 26 0 49 0 0 0 'Fe 1' 'data/tlusty_fe1_topmod.dat' \n")
    f.write(" 26 1 41 0 0 0 'Fe 2' 'data/tlusty_fe2_topmod.dat' \n")
    f.write(" 26 2 1 1 0 0 'Fe 3' ' ' \n")
    f.write(" 0 0 0 -1 0 0 ' ' ' ' \n")
  f.write("* \n")
  f.write("* end \n")
  f.close()
def write8(teff, logg, nd, atmos, atmostype, ofile='fort.8'):
  """Write the model atmosphere to fort.8 in the format synspec expects.

  tlusty models get (dm, t, ne, rho); marcs models additionally carry the
  total particle density derived from rho/mmw; anything else is written
  as a Kurucz-style deck with (dm, t, p, ne).
  """
  f = open(ofile,'w')
  if atmostype == 'tlusty':
    f.write(" "+str(nd)+" "+str(3)+"\n")
    for i in range(nd):
      f.write(' %e ' % atmos['dm'][i])
    f.write("\n")
    for i in range(nd):
      f.write( '%f %e %e \n' % (atmos['t'][i], atmos['ne'][i], atmos['rho'][i] ) )
    f.close()
  else:
    if atmostype == 'marcs':
      f.write(" "+str(nd)+" "+str(-4)+"\n")
      for i in range(nd):
        f.write(' %e ' % atmos['dm'][i])
      f.write("\n")
      # last column: total number density n = rho/(mmw*amu) + ne
      # (1.67333e-24 g is the atomic mass unit used here)
      for i in range(nd):
        f.write( '%f %e %e %e \n' % (atmos['t'][i], atmos['ne'][i], atmos['rho'][i], atmos['rho'][i]/atmos['mmw'][i]/1.67333e-24 + atmos['ne'][i] ) )
      f.close()
    else:
      # Kurucz-style: 21 blank lines pad the header before the data deck
      f.write( 'TEFF %7.0f GRAVITY %7.5f LTE \n' % (teff, logg) )
      for i in range(21): f.write('\n')
      f.write( 'READ DECK6%3i RHOX,T,P,XNE \n' % nd )
      for i in range(nd):
        f.write( '%e %f %e %e \n' % (atmos['dm'][i], atmos['t'][i], atmos['p'][i], atmos['ne'][i]) )
      f.close()
  return()
def create_links(linelist):
  """Symlink the line lists to fort.19, fort.20, ... and the model-atom
  data directory to ./data in the current working directory."""
  for i, entry in enumerate(linelist):
    # resolve entries that are not present locally against linelistdir
    if not os.path.isfile(entry):
      resolved = os.path.join(linelistdir, entry)
      if os.path.isfile(resolved):
        linelist[i] = resolved
    target = 'fort.19' if i == 0 else 'fort.' + str(19 + i)
    os.symlink(linelist[i], target)
  os.symlink(modelatomdir, './data')
  return ()
def cleanup():
  """Remove the fort.* links/files, the ./data link and the 'tas' file
  left in the current directory by a synspec run."""
  for entry in os.listdir('.'):
    if entry.startswith('fort'):
      if os.path.islink(entry):
        os.unlink(entry)
      if os.path.isfile(entry):
        os.remove(entry)
  if os.path.islink('data'): os.unlink('data')
  if os.path.isfile('tas'): os.remove('tas')
  assert (not os.path.isdir('data')), 'A subdirectory *data* exists in this folder, and that prevents the creation of a link to the data directory for synple'
  return ()
def read_kurucz_model(modelfile):
  """Read a Kurucz model atmosphere.

  Returns (teff, logg, vmicro, abu, nd, atmos): abundances per total H
  nuclei (H/He as number fractions, metals scaled by the metallicity
  factor), and a structured array with columns dm, t, p, ne.
  """
  f = open(modelfile,'r')
  line = f.readline()
  entries = line.split()
  assert (entries[0] == 'TEFF' and entries[2] == 'GRAVITY'), 'Cannot find Teff and logg in the file header'
  teff = float(entries[1])
  logg = float(entries[3])
  while entries[0] != 'ABUNDANCE':
    line = f.readline()
    entries = line.split()
  abu = []
  if entries[1] == 'SCALE':
    scale = float(entries[2])
  # walk the ABUNDANCE CHANGE lines: pairs (Z, value) after the CHANGE token;
  # Z=1 fixes the H normalization, Z<3 are linear fractions, Z>=3 are dex
  while entries[0] == 'ABUNDANCE':
    i = 0
    for word in entries:
      if (word == 'CHANGE'): w = i
      i = i + 1
    for i in range(int((len(entries)-w-1)/2)):
      z = int(entries[w+1+2*i])
      if (z == 1): nhntot = float(entries[w+2+2*i])
      if (z < 3): abu.append(float(entries[w+2+2*i]) / nhntot)
      else: abu.append(scale*10.**(float(entries[w+2+2*i])) / nhntot)
    line = f.readline()
    entries = line.split()
  assert (entries[0] == 'READ'), 'I cannot find the header of the atmospheric table in the input Kurucz model'
  # the last depth point is dropped (nd = declared - 1)
  nd = int(entries[2]) - 1
  line = f.readline()
  entries = line.split()
  line = f.readline()
  entries = line.split()
  vmicro = float(entries[6])/1e5
  dm = [ float(entries[0]) ]
  t = [ float(entries[1]) ]
  p = [ float(entries[2]) ]
  ne = [ float(entries[3]) ]
  for i in range(nd-1):
    line = f.readline()
    entries = line.split()
    dm.append( float(entries[0]))
    t.append( float(entries[1]))
    p.append( float(entries[2]))
    ne.append( float(entries[3]))
  atmos = np.zeros(nd, dtype={'names':('dm', 't', 'p','ne'),
                          'formats':('f', 'f', 'f','f')})
  atmos['dm'] = dm
  atmos['t'] = t
  atmos['p'] = p
  atmos['ne'] = ne
  return (teff,logg,vmicro,abu,nd,atmos)
def read_marcs_model(modelfile):
  """Read a MARCS model atmosphere (.mod, possibly gzipped).

  Returns (teff, logg, vmicro, abu, nd, atmos) with atmos columns
  dm, t, p, ne. Electron density is derived from Pe/(k*T).
  """
  if modelfile[-3:] == '.gz':
    f = gzip.open(modelfile,'rt')
  else:
    f = open(modelfile,'r')
  line = f.readline()
  line = f.readline()
  entries = line.split()
  assert (entries[1] == 'Teff'), 'Cannot find Teff in the file header'
  teff = float(entries[0])
  line = f.readline()
  line = f.readline()
  entries = line.split()
  assert (entries[1] == 'Surface' and entries[2] == 'gravity'), 'Cannot find logg in the file header'
  logg = np.log10(float(entries[0]))
  line = f.readline()
  entries = line.split()
  assert (entries[1] == 'Microturbulence'), 'Cannot find vmicro in the file header'
  vmicro = float(entries[0])
  while entries[0] != 'Logarithmic':
    line = f.readline()
    entries = line.split()
  # abundances are listed as log eps (A(X)); convert to linear wrt H
  abu = []
  line = f.readline()
  entries = line.split()
  i = 0
  while entries[1] != 'Number':
    for word in entries:
      abu.append( 10.**(float(word)-12.0) )
      i = i + 1
    line = f.readline()
    entries = line.split()
  # pad the abundance list to 99 elements with a negligible value
  if i < 99:
    for j in range(99-i):
      abu.append(1e-111)
      i = i + 1
  nd = int(entries[0])
  line = f.readline()
  entries = line.split()
  assert (entries[0] == 'Model'), 'I cannot find the header of the atmospheric table in the input MARCS model'
  line = f.readline()
  line = f.readline()
  entries = line.split()
  # first structure table: columns 4..6 hold T, Pe, Pg; ne = Pe/(k*T)
  t = [ float(entries[4]) ]
  p = [ float(entries[6]) ]
  ne = [ float(entries[5]) / bolk / float(entries[4]) ]
  for i in range(nd-1):
    line = f.readline()
    entries = line.split()
    t.append( float(entries[4]))
    p.append( float(entries[6]))
    ne.append( float(entries[5]) / bolk / float(entries[4]))
  line = f.readline()
  line = f.readline()
  entries = line.split()
  # second table: last column of the first row / column 7 holds the mass column
  dm = [ float(entries[-1]) ]
  for i in range(nd-1):
    line = f.readline()
    entries = line.split()
    dm.append( float(entries[7]))
  atmos = np.zeros(nd, dtype={'names':('dm', 't', 'p','ne'),
                          'formats':('f', 'f', 'f','f')})
  atmos['dm'] = dm
  atmos['t'] = t
  atmos['p'] = p
  atmos['ne'] = ne
  return (teff,logg,vmicro,abu,nd,atmos)
def read_marcs_model2(modelfile):
  """Read a MARCS model atmosphere (.mod, possibly gzipped), keeping the
  density and mean molecular weight.

  Like read_marcs_model, but the returned atmos structured array carries
  (dm, t, rho, mmw, ne) -- the layout synspec needs for MARCS input.
  """
  if modelfile[-3:] == '.gz':
    f = gzip.open(modelfile,'rt')
  else:
    f = open(modelfile,'r')
  line = f.readline()
  line = f.readline()
  entries = line.split()
  assert (entries[1] == 'Teff'), 'Cannot find Teff in the file header'
  teff = float(entries[0])
  line = f.readline()
  line = f.readline()
  entries = line.split()
  assert (entries[1] == 'Surface' and entries[2] == 'gravity'), 'Cannot find logg in the file header'
  logg = np.log10(float(entries[0]))
  line = f.readline()
  entries = line.split()
  assert (entries[1] == 'Microturbulence'), 'Cannot find vmicro in the file header'
  vmicro = float(entries[0])
  while entries[0] != 'Logarithmic':
    line = f.readline()
    entries = line.split()
  # abundances are listed as log eps (A(X)); convert to linear wrt H
  abu = []
  line = f.readline()
  entries = line.split()
  i = 0
  while entries[1] != 'Number':
    for word in entries:
      abu.append( 10.**(float(word)-12.0) )
      i = i + 1
    line = f.readline()
    entries = line.split()
  # pad the abundance list to 99 elements with a negligible value
  if i < 99:
    for j in range(99-i):
      abu.append(1e-111)
      i = i + 1
  nd = int(entries[0])
  line = f.readline()
  entries = line.split()
  assert (entries[0] == 'Model'), 'I cannot find the header of the atmospheric table in the input MARCS model'
  line = f.readline()
  line = f.readline()
  entries = line.split()
  # first structure table: T, Pe, Pg in columns 4..6; ne = Pe/(k*T)
  t = [ float(entries[4]) ]
  p = [ float(entries[6]) ]
  ne = [ float(entries[5]) / bolk / float(entries[4]) ]
  for i in range(nd-1):
    line = f.readline()
    entries = line.split()
    t.append( float(entries[4]))
    p.append( float(entries[6]))
    ne.append( float(entries[5]) / bolk / float(entries[4]))
  line = f.readline()
  line = f.readline()
  entries = line.split()
  # second table: density (col 3), mean molecular weight (col 4),
  # mass column (col 7)
  rho = [ float(entries[3]) ]
  dm = [ float(entries[7]) ]
  mmw = [ float(entries[4]) ]
  for i in range(nd-1):
    line = f.readline()
    entries = line.split()
    rho.append( float(entries[3]))
    dm.append( float(entries[7]))
    mmw.append( float(entries[4]))
  atmos = np.zeros(nd, dtype={'names':('dm', 't', 'rho','mmw','ne'),
                          'formats':('f', 'f', 'f','f','f')})
  atmos['dm'] = dm
  atmos['t'] = t
  atmos['rho'] = rho
  atmos['mmw'] = mmw
  atmos['ne'] = ne
  return (teff,logg,vmicro,abu,nd,atmos)
def read_phoenix_model(modelfile):
  """Read a PHOENIX (Husser et al.) FITS model atmosphere.

  Header keywords provide Teff, logg, vmicro, [M/H] and [alpha/M]; the
  structure is taken from the first table extension. The mass column dm
  is approximated as Pgas/g (hydrostatic equilibrium).
  """
  from astropy.io import fits
  h = fits.open(modelfile)[0].header
  f = fits.open(modelfile)[1].data
  nd = len(f['temp'])
  teff = float(h['PHXTEFF'])
  logg = float(h['PHXLOGG'])
  vmicro = float(h['PHXXI_L'])
  m_h = float(h['PHXM_H'])
  alpha = float(h['PHXALPHA'])
  symbol, mass,sol = elements(husser=True)
  abu = sol
  # apply [M/H] to Z>=3 and [alpha/M] to the alpha elements.
  # NOTE(review): the scalings are ADDED to the linear solar abundances
  # returned by elements(), although m_h/alpha are logarithmic offsets --
  # confirm this matches the intended convention downstream.
  z_metals = np.arange(97,dtype=int) + 3
  z_alphas = np.array([8,10,12,14,16,20,22],dtype=int)
  for i in range(len(z_metals)): abu[z_metals[i] - 1] = abu[z_metals[i] - 1] + m_h
  for i in range(len(z_alphas)): abu[z_alphas[i] - 1] = abu[z_alphas[i] - 1] + alpha
  atmos = np.zeros(nd, dtype={'names':('dm', 't', 'p','ne'),
                          'formats':('f', 'f', 'f','f')})
  atmos['dm'] = f['pgas'] / 10.**logg
  atmos['t'] = f['temp']
  atmos['p'] = f['pgas']
  atmos['ne'] = f['pe']/ bolk / f['temp']
  return (teff,logg,vmicro,abu,nd,atmos)
def read_phoenix_text_model(modelfile):
  """Read a PHOENIX model atmosphere in the plain-text (.20) format.

  Scans the free-form header for nd, Teff, logg and the elemental
  abundances, then reads the structure table (Fortran 'D' exponents are
  converted to 'E'). Returns (teff, logg, vmicro, abu, nd, atmos).
  """
  f = open(modelfile,'r')
  line = f.readline()
  while line[0:4] != " no.":
    line = f.readline()
  entries = line.split()
  nd = int(entries[5])
  print('nd=',nd)
  while line[0:14] != " model: teff":
    line = f.readline()
  entries = line.split()
  teff = float(entries[3])
  print('teff=',teff)
  line = f.readline()
  line = f.readline()
  entries = line.split()
  assert (entries[0] == 'log(g):' and entries[2] == '[cm/s**2]'), 'Cannot find logg in the file header'
  logg = float(entries[1])
  print('logg=',logg)
  line = f.readline()
  while line[0:22] != " Element abundances :":
    line = f.readline()
  # collect parallel element-symbol / abundance rows from the header
  symbol,mass,sol = elements()
  sy = []
  ab = []
  while line[0:29] != " Element abundances relative":
    line = f.readline()
    if line[0:9] == ' element:':
      entries = line.split()
      for word in entries[1:]: sy.append(word)
    if line[0:11] == ' abundance:':
      entries = line.split()
      for word in entries[1:]: ab.append(word)
  assert (len(sy) == len(ab)), 'different elements in arrays sy (elemental symbols) and ab (abundances)'
  # map symbols onto the standard 99-element abundance array (linear wrt H)
  abu = np.ones(99)*1e-99
  i = 0
  for item in sy:
    try:
      index = symbol.index(item)
      abu[index] = 10.**(float(ab[i])-12.)
    except ValueError:
      print("the symbol ",item," is not recognized as a valid element")
    i = i + 1
  print('abu=',abu)
  while line[0:72] != " l tstd temperature pgas pe density mu":
    line = f.readline()
  line = f.readline()
  entries = line.split()
  # structure table: T, Pgas, Pe; ne = Pe/(k*T), dm ~ Pgas/g
  t = [ float(entries[2].replace('D','E')) ]
  p = [ float(entries[3].replace('D','E')) ]
  ne = [ float(entries[4].replace('D','E')) / bolk / float(entries[2].replace('D','E')) ]
  dm = [ float(entries[3].replace('D','E')) / 10.**logg ]
  for i in range(nd-1):
    line = f.readline()
    entries = line.split()
    t.append( float(entries[2].replace('D','E')))
    p.append( float(entries[3].replace('D','E')))
    ne.append( float(entries[4].replace('D','E')) / bolk / float(entries[2]))
    dm.append ( float(entries[3].replace('D','E')) / 10.**logg )
  # vmicro (vturb) is optional; default to 0 when the 'greli' line is absent
  vmicro = 0.0
  while (line[0:6] != " greli"):
    line = f.readline()
    if line == '':
      print('Cannot find a value for vmicro (vturb) in the model atmosphere file ',modelfile)
      break
  if line != '':
    entries = line.split()
    vmicro = float(entries[5])
  atmos = np.zeros(nd, dtype={'names':('dm', 't', 'p','ne'),
                          'formats':('f', 'f', 'f','f')})
  atmos['dm'] = dm
  atmos['t'] = t
  atmos['p'] = p
  atmos['ne'] = ne
  return (teff,logg,vmicro,abu,nd,atmos)
def interp_spl(xout, x, y):
  """Cubic-spline interpolate the samples (x, y) onto the abscissae *xout*."""
  representation = interpolate.splrep(x, y, s=0)   # s=0 -> exact interpolation
  return interpolate.splev(xout, representation, der=0)
def elements(husser=False):
  """Return (symbol, mass, sol): element symbols, atomic masses and solar
  abundances for elements 1..99.

  The solar abundance scale is the default set unless *husser* is True,
  which selects the values used for the Husser PHOENIX grid. Abundances
  are given as A(X) = log N(X)/N(H) + 12 in the tables and converted to
  linear ratios relative to hydrogen before returning (sol[0] is set to 1;
  -9.99 entries mark elements with no measured value).
  """
  symbol = [
    'H' ,'He','Li','Be','B' ,'C' ,'N' ,'O' ,'F' ,'Ne',
    'Na','Mg','Al','Si','P' ,'S' ,'Cl','Ar','K' ,'Ca',
    'Sc','Ti','V' ,'Cr','Mn','Fe','Co','Ni','Cu','Zn',
    'Ga','Ge','As','Se','Br','Kr','Rb','Sr','Y' ,'Zr',
    'Nb','Mo','Tc','Ru','Rh','Pd','Ag','Cd','In','Sn',
    'Sb','Te','I' ,'Xe','Cs','Ba','La','Ce','Pr','Nd',
    'Pm','Sm','Eu','Gd','Tb','Dy','Ho','Er','Tm','Yb',
    'Lu','Hf','Ta','W' ,'Re','Os','Ir','Pt','Au','Hg',
    'Tl','Pb','Bi','Po','At','Rn','Fr','Ra','Ac','Th',
    'Pa','U' ,'Np','Pu','Am','Cm','Bk','Cf','Es' ]
  mass = [ 1.00794, 4.00260, 6.941, 9.01218, 10.811, 12.0107, 14.00674, 15.9994,
    18.99840, 20.1797, 22.98977, 24.3050, 26.98154, 28.0855, 30.97376,
    32.066, 35.4527, 39.948, 39.0983, 40.078, 44.95591, 47.867, 50.9415,
    51.9961, 54.93805, 55.845, 58.93320, 58.6934, 63.546, 65.39, 69.723,
    72.61, 74.92160, 78.96, 79.904, 83.80, 85.4678, 87.62, 88.90585,
    91.224, 92.90638, 95.94, 98., 101.07, 102.90550, 106.42, 107.8682,
    112.411, 114.818, 118.710, 121.760, 127.60, 126.90447, 131.29,
    132.90545, 137.327, 138.9055, 140.116, 140.90765, 144.24, 145, 150.36,
    151.964, 157.25, 158.92534, 162.50, 164.93032, 167.26, 168.93421,
    173.04, 174.967, 178.49, 180.9479, 183.84, 186.207, 190.23, 192.217,
    195.078, 196.96655, 200.59, 204.3833, 207.2, 208.98038, 209., 210.,
    222., 223., 226., 227., 232.0381, 231.03588, 238.0289, 237., 244.,
    243., 247., 247., 251., 252. ]
  if not husser:
    sol = [ 0.911, 10.93, 1.05, 1.38, 2.70, 8.39, 7.78, 8.66, 4.56, 7.84,
    6.17, 7.53, 6.37, 7.51, 5.36, 7.14, 5.50, 6.18, 5.08, 6.31,
    3.05, 4.90, 4.00, 5.64, 5.39, 7.45, 4.92, 6.23, 4.21, 4.60,
    2.88, 3.58, 2.29, 3.33, 2.56, 3.28, 2.60, 2.92, 2.21, 2.59,
    1.42, 1.92, -9.99, 1.84, 1.12, 1.69, 0.94, 1.77, 1.60, 2.00,
    1.00, 2.19, 1.51, 2.27, 1.07, 2.17, 1.13, 1.58, 0.71, 1.45,
    -9.99, 1.01, 0.52, 1.12, 0.28, 1.14, 0.51, 0.93, 0.00, 1.08,
    0.06, 0.88, -0.17, 1.11, 0.23, 1.45, 1.38, 1.64, 1.01, 1.13,
    0.90, 2.00, 0.65, -9.99, -9.99, -9.99, -9.99, -9.99, -9.99, 0.06,
    -9.99, -0.52, -9.99, -9.99, -9.99, -9.99, -9.99, -9.99, -9.99 ]
    sol[0] = 1.
  else:
    sol = [ 12.00, 10.93, 3.26, 1.38, 2.79, 8.43, 7.83, 8.69, 4.56, 7.93,
    6.24, 7.60, 6.45, 7.51, 5.41, 7.12, 5.50, 6.40, 5.08, 6.34,
    3.15, 4.95, 3.93, 5.64, 5.43, 7.50, 4.99, 6.22, 4.19, 4.56,
    3.04, 3.65, 2.30, 3.34, 2.54, 3.25, 2.36, 2.87, 2.21, 2.58,
    1.46, 1.88, -9.99, 1.75, 1.06, 1.65, 1.20, 1.71, 0.76, 2.04,
    1.01, 2.18, 1.55, 2.24, 1.08, 2.18, 1.10, 1.58, 0.72, 1.42,
    -9.99, 0.96, 0.52, 1.07, 0.30, 1.10, 0.48, 0.92, 0.10, 0.92,
    0.10, 0.85, -0.12, 0.65, 0.26, 1.40, 1.38, 1.62, 0.80, 1.17,
    0.77, 2.04, 0.65, -9.99, -9.99, -9.99, -9.99, -9.99, -9.99, 0.06,
    -9.99, -0.54, -9.99, -9.99, -9.99, -9.99, -9.99, -9.99, -9.99 ]
    sol[0] = 1.
  # convert A(X) to linear N(X)/N(H); sol[0] (hydrogen) is already 1
  for i in range(len(sol)-1): sol[i+1] = 10.**(sol[i+1]-12.0)
  return (symbol,mass,sol)
def lgconv(xinput, yinput, fwhm, ppr=None):
  """Convolve a spectrum with a Gaussian of constant-wavelength FWHM.

  Parameters
  ----------
  xinput, yinput : wavelength and flux arrays. If the wavelength sampling
    is not uniform the spectrum is first resampled onto a uniform grid.
  fwhm : Gaussian full width at half maximum, in the units of xinput.
  ppr : when given, the output is decimated to roughly *ppr* points per
    resolution element (per FWHM).

  Returns the trimmed (x, y) arrays; the convolution uses mode 'valid',
  so the edges affected by the kernel are dropped.
  """
  dx = np.diff(xinput)
  if max(dx) - min(dx) > 1.e-7:
    # non-uniform sampling: resample onto an equidistant grid
    nel = len(xinput)
    x = np.linspace(np.min(xinput), np.max(xinput), nel)
    y = interp_spl(x, xinput, yinput)
  else:
    x = xinput
    y = yinput
  step = x[1] - x[0]
  sigma = fwhm/2.0/np.sqrt(-2.0*np.log(0.5))   # FWHM -> Gaussian sigma
  npoints = 2*int(3*fwhm/2./step)+1            # odd kernel spanning ~3 FWHM
  half = npoints * step / 2.
  xx = np.linspace(-half, half, npoints)
  kernel = np.exp(-(xx-np.mean(xx))**2/2./sigma**2)
  kernel = kernel/np.sum(kernel)
  y = np.convolve(y, kernel, 'valid')
  # trim x to the 'valid' convolution span; slicing with x.size-edge also
  # handles npoints == 1 (edge == 0), where x[0:-0] would wrongly be empty
  edge = npoints // 2
  x = x[edge:x.size-edge]
  if ppr is not None:
    # decimate to about ppr points per resolution element; arange with an
    # explicit integer stop avoids the deprecated float np.arange usage
    fac = int(fwhm / step / ppr)
    subset = np.arange(0, x.size, fac, dtype=int)
    x = x[subset]
    y = y[subset]
  return (x, y)
def vgconv(xinput,yinput,fwhm, ppr=None):
  """Convolve a spectrum with a Gaussian of constant velocity FWHM.

  The spectrum is resampled onto a grid equidistant in log(wavelength)
  when necessary, so a single kernel in log-lambda corresponds to a fixed
  velocity width (*fwhm* is divided by the module constant clight).
  *ppr*, when given, decimates the output to roughly ppr points per
  resolution element. Returns the trimmed (x, y) arrays.
  """
  dlogx = np.diff(np.log(xinput))
  if max(dlogx) - min(dlogx) > 1.e-7:
    # resample onto a log-uniform grid
    nel = len(xinput)
    grid = np.linspace(np.log(xinput[0]), np.log(xinput[-1]), nel)
    step = grid[1] - grid[0]
    x = np.exp(grid)
    y = interp_spl(x, xinput, yinput)
  else:
    x = xinput
    y = yinput
    step = np.log(xinput[1])-np.log(xinput[0])
  fwhm = fwhm/clight                           # velocity FWHM -> delta(log-lambda)
  sigma = fwhm/2.0/np.sqrt(-2.0*np.log(0.5))   # FWHM -> Gaussian sigma
  npoints = 2*int(3*fwhm/2./step)+1            # odd kernel spanning ~3 FWHM
  half = npoints * step / 2.
  xx = np.linspace(-half, half, npoints)
  kernel = np.exp(-(xx-np.mean(xx))**2/2./sigma**2)
  kernel = kernel/np.sum(kernel)
  y = np.convolve(y, kernel, 'valid')
  # trim x to the 'valid' convolution span; slicing with x.size-edge also
  # handles npoints == 1 (edge == 0), where x[0:-0] would wrongly be empty
  edge = npoints // 2
  x = x[edge:x.size-edge]
  if ppr is not None:
    # decimate; explicit integer arange replaces the deprecated float form,
    # and the leftover debug print has been removed
    fac = int(fwhm / step / ppr)
    subset = np.arange(0, x.size, fac, dtype=int)
    x = x[subset]
    y = y[subset]
  return (x, y)
def rotconv(xinput,yinput,vsini, ppr=None):
  """Convolve a spectrum with a rotation profile of projected velocity vsini.

  Uses the classical rotation kernel with linear limb-darkening
  coefficient *epsilon* (module-level constant) on a grid equidistant in
  log(wavelength). *ppr*, when given, decimates the output. Returns the
  trimmed (x, y) arrays.
  """
  dlogx = np.diff(np.log(xinput))
  if max(dlogx) - min(dlogx) > 1.e-7:
    nel = len(xinput)
    grid = np.linspace(np.min(np.log(xinput)), np.max(np.log(xinput)), nel)
    step = grid[1] - grid[0]
    x = np.exp(grid)
    y = interp_spl(x, xinput, yinput)
  else:
    x = xinput
    y = yinput
    # bug fix: the original left *step* undefined on this branch, raising
    # NameError for input already equidistant in log(wavelength)
    step = np.log(xinput[1])-np.log(xinput[0])
  deltamax = vsini/clight            # kernel half-width in delta(log-lambda)
  npoints = 2*int(deltamax/step)+1
  xx = np.linspace(-deltamax, deltamax, npoints)
  c1 = 2.0*(1.0-epsilon)/np.pi/(1.0-epsilon/3.0)/deltamax
  c2 = 0.5*epsilon/(1.0-epsilon/3.0)/deltamax
  r2 = (xx/deltamax)**2
  kernel = c1*np.sqrt(1.0-r2)+c2*(1.0-r2)
  kernel = kernel/np.sum(kernel)
  y = np.convolve(y, kernel, 'valid')
  # trim x to the 'valid' span; x.size-edge also handles the edge == 0 case
  edge = npoints // 2
  x = x[edge:x.size-edge]
  if ppr is not None:
    fac = int(deltamax / step / ppr)
    subset = np.arange(0, x.size, fac, dtype=int)
    x = x[subset]
    y = y[subset]
  return (x, y)
def gsynth(synthfile,fwhm=0.0,outsynthfile=None,ppr=5,wrange=None,freeze=None):
  """Smooth/resample a synthetic-spectrum grid file onto a new grid file.

  Reads the keyword header of *synthfile* (N_P, STEPS, LLIMITS, LABEL,
  NPIX, N_OF_DIM, WAVE, LOGW, RESOLUTION), optionally Gaussian-smooths
  every row to a velocity FWHM *fwhm* (km/s, via vgconv) with ~*ppr*
  points per resolution element, trims to *wrange*, and can drop ("freeze")
  grid dimensions listed in the dict *freeze* (label -> frozen value).
  Writes the updated header and data rows to *outsynthfile*
  (default: 'n' + synthfile[1:]).
  """
  if outsynthfile is None: outsynthfile='n'+synthfile[1:]
  logw=0
  fin = open(synthfile,'r')
  fout = open(outsynthfile,'w')
  hd = []
  labels = []
  # read the header up to the closing "/" line, capturing the keywords
  line = fin.readline()
  hd.append(line)
  while line[1] != "/":
    line = fin.readline()
    if "N_P" in line: n_p = np.array(line.split()[2:],dtype=int)
    if "STEPS" in line: steps = np.array(line.split()[2:],dtype=float)
    if "LLIMITS" in line: llimits = np.array(line.split()[2:],dtype=float)
    if "LABEL" in line: labels.append(line.split()[-1][1:-1])
    if "NPIX" in line: npix = int(line.split()[2])
    if "N_OF_DIM" in line: ndim = int(line.split()[2])
    if "WAVE" in line: wave = np.array(line.split()[2:],dtype=float)
    if "LOGW" in line: logw = int(line.split()[2])
    if "RESOLUTION" in line: resolution = float(line.split()[2])
    hd.append(line)
  assert (len(n_p) == len(steps) & len(n_p) == len(llimits) & len(n_p) == len(labels) & len(n_p) == ndim), 'The dimension of the parameters from the header are inconsistent'
  # reconstruct the wavelength scale (linear, log10 or ln per LOGW)
  x = np.arange(npix)*wave[1]+wave[0]
  if logw == 1: x=10.**x
  if logw == 2: x=np.exp(x)
  # enumerate the full cartesian parameter grid; ind_n_p keeps the indices
  # of the dimensions that are NOT frozen
  ll = []
  ind_n_p = []
  i = 0
  for entry in labels:
    if freeze is not None:
      lfkeys = list(freeze.keys())
      if entry not in lfkeys: ind_n_p.append(i)
    else:
      ind_n_p.append(i)
    ll.append(np.arange(n_p[i]))
    i = i + 1
  ind = list(product(*ll))
  if wrange is not None:
    assert (len(wrange) == 2), 'Error: wrange must have two elements'
    # keep a margin of 10 FWHM around wrange so the convolution is clean
    section1 = np.where( (x >= wrange[0]*(1.-10.*fwhm/clight)) & (x <= wrange[1]*(1.+10.*fwhm/clight)) )
    x = x[section1]
    npix = len(x)
  # convolve a flat spectrum once to learn the output wavelength grid
  if fwhm > 1.e-7:
    y = np.ones(npix)
    xx,yy = vgconv(x,y,fwhm,ppr=ppr)
  else:
    print('Warning -- fwhm <= 1.e-7, no convolution will be performed, ppr will be ignored')
    xx = x
  print(len(x),len(xx))
  if wrange is not None:
    section2 = np.where( (xx >= wrange[0]) & (xx <= wrange[1]) )
    xx = xx [section2]
  # rewrite the header, updating the keywords affected by freezing/smoothing
  jlabel = 0
  for line in hd:
    if "N_OF_DIM" in line: line = " N_OF_DIM = "+str(len(ind_n_p))+"\n"
    if "N_P" in line: line = " N_P = "+' '.join(map(str,n_p[ind_n_p]))+"\n"
    if "STEPS" in line: line = " STEPS = "+' '.join(map(str,steps[ind_n_p]))+"\n"
    if "LLIMITS" in line: line = " LLIMITS = "+' '.join(map(str,llimits[ind_n_p]))+"\n"
    if freeze is not None:
      if "LABEL" in line:
        ilabel = line.split()[-1][1:-1]
        if ilabel in lfkeys:
          continue
        else:
          jlabel = jlabel + 1
          line = " LABEL("+str(jlabel)+") = "+ilabel+"\n"
    if "NPIX" in line: line = " NPIX = "+str(len(xx))+"\n"
    if "WAVE" in line: line = " WAVE = "+str(np.log10(xx[0]))+" "+str(np.log10(xx[1])-np.log10(xx[0]))+"\n"
    if "LOGW" in line: line = " LOGW = 1 \n"
    if "RESOLUTION" in line: line = " RESOLUTION = "+str(clight/np.sqrt(clight**2/resolution**2 + fwhm**2))+"\n"
    fout.write(line)
  # process every grid row: skip rows whose frozen dimensions do not match
  # the requested frozen values, smooth/trim the rest
  k = 0
  j = 0
  ntot = np.prod(n_p)
  for i in ind:
    j = j + 1
    print('line ',j,' of ',ntot)
    par = i*steps+llimits
    line = fin.readline()
    if freeze is not None:
      skip = True
      for entry in lfkeys:
        if (abs(freeze[entry] - par[labels.index(entry)]) < 1e-6): skip = False
      if skip: continue
    y = np.array(line.split(),dtype=float)
    if wrange is not None: y = y [section1]
    if fwhm > 1.e-7:
      xx,yy = vgconv(x,y,fwhm,ppr=ppr)
    else:
      xx,yy = x, y
    if wrange is not None: yy = yy[section2]
    yy.tofile(fout,sep=" ",format="%0.4e")
    fout.write("\n")
    k = k + 1
  fin.close()
  fout.close()
if __name__ == "__main__":
  # Command-line interface:
  #   synple.py modelfile wstart wend [vmicro [fwhm [vrot]]]
  npar = len(sys.argv)
  assert (npar >= 4), 'Synple requires at least 3 input parameters (modelfile wstart wend)'
  # fix: the original message listed the optional arguments as
  # 'vmicro vrot fwhm', contradicting the parsing order below
  assert (npar <= 7), 'Synple requires at maximum 6 input parameters (modelfile wstart wend vmicro fwhm vrot)'
  vmicro = None
  vrot = 0.0
  fwhm = 0.0
  modelfile = sys.argv[1]
  wstart = float(sys.argv[2])
  wend = float(sys.argv[3])
  # optional arguments are positional and cumulative: vmicro, then fwhm,
  # then vrot
  if (npar > 4):
    vmicro = float(sys.argv[4])
    if (npar > 5):
      fwhm = float(sys.argv[5])
      if (npar > 6):
        vrot = float(sys.argv[6])
  # compute the synthetic spectrum and save it to disk
  x, y, z = syn(modelfile, (wstart,wend), save=True, vmicro=vmicro, vrot=vrot, fwhm=fwhm)
| true | true |
f71d4338552d68cb454ffab368d1505a64bd44e3 | 267 | py | Python | datacrawler/datacrawler/items.py | NgocSon15/tmdt | a66456ff9e9b16d2b4d5f48014e14daf1c4bbdcb | [
"Apache-2.0"
] | null | null | null | datacrawler/datacrawler/items.py | NgocSon15/tmdt | a66456ff9e9b16d2b4d5f48014e14daf1c4bbdcb | [
"Apache-2.0"
] | null | null | null | datacrawler/datacrawler/items.py | NgocSon15/tmdt | a66456ff9e9b16d2b4d5f48014e14daf1c4bbdcb | [
"Apache-2.0"
] | null | null | null | # Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class DatacrawlerItem(scrapy.Item):
    # One scraped record produced by the crawler's spiders.
    # define the fields for your item here like:
    # name = scrapy.Field()
    pass
| 20.538462 | 53 | 0.719101 |
import scrapy
class DatacrawlerItem(scrapy.Item):
pass
| true | true |
f71d43880aab3dfc9d94437f8da31184d6a449f4 | 915 | py | Python | TIL/bs_mog2.py | FLY-CODE77/opencv | 5644e6c1ef43d81efb54ccde6c06f1adf000fb96 | [
"MIT"
] | 1 | 2020-10-23T14:29:24.000Z | 2020-10-23T14:29:24.000Z | TIL/bs_mog2.py | FLY-CODE77/opencv | 5644e6c1ef43d81efb54ccde6c06f1adf000fb96 | [
"MIT"
] | null | null | null | TIL/bs_mog2.py | FLY-CODE77/opencv | 5644e6c1ef43d81efb54ccde6c06f1adf000fb96 | [
"MIT"
] | null | null | null | import sys
import numpy as np
import cv2
# Background-subtraction demo: fit a MOG2 background model to a video and
# draw red boxes around connected foreground components larger than 80 px.
cap = cv2.VideoCapture('PETS2000.avi')
if not cap.isOpened():
    print('video wrong')
    sys.exit()
bs = cv2.createBackgroundSubtractorMOG2()
# knn method backgroundSubtractor
# bs = cv2.createBackgroundSubtractorKNN()
# don't care about shadows
# bs.setDetectShadows(False)
while True:
    ret , frame = cap.read()
    if not ret:
        break
    gray = cv2.cvtColor(frame , cv2.COLOR_BGR2GRAY)
    fgmask = bs.apply(gray)            # per-frame foreground mask
    back = bs.getBackgroundImage()     # current background estimate
    cnt, _, stats, _ = cv2.connectedComponentsWithStats(fgmask)
    for i in range(1, cnt):            # label 0 is the background
        x, y, w, h, s = stats[i]
        if s <80:                      # ignore small blobs (noise)
            continue
        cv2.rectangle(frame, (x, y, w, h), (0, 0, 255), 2)
    cv2.imshow('frame', frame)
    cv2.imshow('back', back)
    cv2.imshow('fgmask', fgmask)
    if cv2.waitKey(30) == 27:          # ESC quits
        break
cap.release()
cv2.destroyAllWindows() | 20.333333 | 63 | 0.630601 | import sys
import numpy as np
import cv2
# Background-subtraction demo: fit a MOG2 background model to a video and
# draw red boxes around connected foreground components larger than 80 px.
cap = cv2.VideoCapture('PETS2000.avi')
if not cap.isOpened():
    print('video wrong')
    sys.exit()
bs = cv2.createBackgroundSubtractorMOG2()
    # bs.setDetectShadows(False)
while True:
    ret , frame = cap.read()
    if not ret:
        break
    gray = cv2.cvtColor(frame , cv2.COLOR_BGR2GRAY)
    fgmask = bs.apply(gray)            # per-frame foreground mask
    back = bs.getBackgroundImage()     # current background estimate
    cnt, _, stats, _ = cv2.connectedComponentsWithStats(fgmask)
    for i in range(1, cnt):            # label 0 is the background
        x, y, w, h, s = stats[i]
        if s <80:                      # ignore small blobs (noise)
            continue
        cv2.rectangle(frame, (x, y, w, h), (0, 0, 255), 2)
    cv2.imshow('frame', frame)
    cv2.imshow('back', back)
    cv2.imshow('fgmask', fgmask)
    if cv2.waitKey(30) == 27:          # ESC quits
        break
cap.release()
cv2.destroyAllWindows() | true | true |
f71d44232caa9293b8d304ae7d59130d934d6f87 | 3,223 | py | Python | pyportlib/services/cash_manager.py | phil-lo/pyportlib | 96f8531c0c18c58d3476832de9d4c9b2c9285f62 | [
"MIT"
] | 2 | 2021-11-18T21:40:02.000Z | 2021-12-13T21:01:18.000Z | pyportlib/services/cash_manager.py | phil-lo/PortfolioCore | 3fbe7460c809a80e48615e934990dcd2d1f5003b | [
"CC0-1.0"
] | 1 | 2022-01-24T21:10:09.000Z | 2022-01-24T21:10:09.000Z | pyportlib/services/cash_manager.py | phil-lo/PortfolioCore | 3fbe7460c809a80e48615e934990dcd2d1f5003b | [
"CC0-1.0"
] | null | null | null | from datetime import datetime
from typing import List, Union
import pandas as pd
from pyportlib.services.cash_change import CashChange
from pyportlib.utils import df_utils, files_utils
from pyportlib.utils import logger
class CashManager:
    """Persists and queries the cash movements (deposits/withdrawals) of a
    single account, backed by a per-account ``cash.csv`` file."""
    NAME = "Cash Account"
    # root folder that holds one sub-directory per account
    ACCOUNTS_DIRECTORY = files_utils.get_accounts_dir()
    # required columns of the cash file
    CASH_INFO = ['Date', 'Direction', 'Amount']
    CASH_FILENAME = "cash.csv"
    def __init__(self, account):
        """Load (or create) the cash file for *account*."""
        self.account = account
        self.directory = f"{self.ACCOUNTS_DIRECTORY}{self.account}"
        self._cash_changes = pd.DataFrame()
        self.load()
    def __repr__(self):
        return self.NAME
    def load(self) -> None:
        """
        Loads account cash changes from .csv or creates an empty one if it is a new account
        :return:
        """
        if files_utils.check_file(self.directory, self.CASH_FILENAME):
            cash = pd.read_csv(f"{self.directory}/{self.CASH_FILENAME}")
            try:
                # drop the stray index column pandas writes on to_csv
                cash.drop(columns='Unnamed: 0', inplace=True)
            except KeyError:
                pass
            finally:
                if df_utils.check_df_columns(df=cash, columns=self.CASH_INFO):
                    cash.set_index('Date', inplace=True)
                    cash.index.name = 'Date'
                    cash.index = pd.to_datetime(cash.index)
                    self._cash_changes = cash
                else:
                    logger.logging.info(f'cash file does not match requirements: {self.account}')
        else:
            # if new ptf, create required files to use it
            if not files_utils.check_dir(self.directory):
                files_utils.make_dir(self.directory)
            # create empty transaction file in new directory
            empty_cash = self._empty_cash()
            empty_cash.to_csv(f"{self.directory}/{self.CASH_FILENAME}")
            self._cash_changes = empty_cash
    @property
    def cash_changes(self):
        """DataFrame of all cash changes, indexed by Date."""
        return self._cash_changes
    def get_cash_change(self, date: datetime):
        """Sum of all cash amounts up to and including *date*."""
        c_ch = self.cash_changes
        return c_ch.loc[self.cash_changes.index <= date, 'Amount'].sum()
    def _write(self, date: datetime, direction: str, amount: float):
        """Record a single 'Deposit'/'Withdrawal' row, persist and reload."""
        direction = direction.title()
        if direction not in ['Deposit', 'Withdrawal']:
            raise Exception(f'cash direction type not supported {direction}')
        self.cash_changes.loc[date, "Direction"] = direction
        self.cash_changes.loc[date, "Amount"] = amount
        self.cash_changes.to_csv(f"{self.directory}/{self.CASH_FILENAME}")
        self.load()
    def add(self, cash_changes: Union[List[CashChange], CashChange]):
        """Persist one CashChange or a list of them.

        Each entry's ``info`` is expected to provide the Date, Direction
        and Amount keys consumed by _write.
        """
        if cash_changes:
            if not hasattr(cash_changes, '__iter__'):
                cash_changes = [cash_changes]
            for cc in cash_changes:
                cc = cc.info
                self._write(date=cc["Date"], direction=cc['Direction'], amount=cc['Amount'])
    def reset(self):
        """Overwrite the cash file with an empty table."""
        empty_cash = self._empty_cash()
        empty_cash.to_csv(f"{self.directory}/{self.CASH_FILENAME}")
        self._cash_changes = empty_cash
    def _empty_cash(self):
        """Empty cash-changes DataFrame with the expected columns."""
        return pd.DataFrame(columns=self.CASH_INFO).set_index('Date')
| 36.213483 | 97 | 0.619299 | from datetime import datetime
from typing import List, Union
import pandas as pd
from pyportlib.services.cash_change import CashChange
from pyportlib.utils import df_utils, files_utils
from pyportlib.utils import logger
class CashManager:
    """Persists and queries the cash movements (deposits/withdrawals) of a
    single account, backed by a per-account ``cash.csv`` file."""
    NAME = "Cash Account"
    # root folder that holds one sub-directory per account
    ACCOUNTS_DIRECTORY = files_utils.get_accounts_dir()
    # required columns of the cash file
    CASH_INFO = ['Date', 'Direction', 'Amount']
    CASH_FILENAME = "cash.csv"
    def __init__(self, account):
        """Load (or create) the cash file for *account*."""
        self.account = account
        self.directory = f"{self.ACCOUNTS_DIRECTORY}{self.account}"
        self._cash_changes = pd.DataFrame()
        self.load()
    def __repr__(self):
        return self.NAME
    def load(self) -> None:
        """Load the account's cash changes from csv, or create an empty
        file for a new account."""
        if files_utils.check_file(self.directory, self.CASH_FILENAME):
            cash = pd.read_csv(f"{self.directory}/{self.CASH_FILENAME}")
            try:
                # drop the stray index column pandas writes on to_csv
                cash.drop(columns='Unnamed: 0', inplace=True)
            except KeyError:
                pass
            finally:
                if df_utils.check_df_columns(df=cash, columns=self.CASH_INFO):
                    cash.set_index('Date', inplace=True)
                    cash.index.name = 'Date'
                    cash.index = pd.to_datetime(cash.index)
                    self._cash_changes = cash
                else:
                    logger.logging.info(f'cash file does not match requirements: {self.account}')
        else:
            # new account: create the directory and an empty cash file
            if not files_utils.check_dir(self.directory):
                files_utils.make_dir(self.directory)
            empty_cash = self._empty_cash()
            empty_cash.to_csv(f"{self.directory}/{self.CASH_FILENAME}")
            self._cash_changes = empty_cash
    @property
    def cash_changes(self):
        """DataFrame of all cash changes, indexed by Date."""
        return self._cash_changes
    def get_cash_change(self, date: datetime):
        """Sum of all cash amounts up to and including *date*."""
        c_ch = self.cash_changes
        return c_ch.loc[self.cash_changes.index <= date, 'Amount'].sum()
    def _write(self, date: datetime, direction: str, amount: float):
        """Record a single 'Deposit'/'Withdrawal' row, persist and reload."""
        direction = direction.title()
        if direction not in ['Deposit', 'Withdrawal']:
            raise Exception(f'cash direction type not supported {direction}')
        self.cash_changes.loc[date, "Direction"] = direction
        self.cash_changes.loc[date, "Amount"] = amount
        self.cash_changes.to_csv(f"{self.directory}/{self.CASH_FILENAME}")
        self.load()
    def add(self, cash_changes: Union[List[CashChange], CashChange]):
        """Persist one CashChange or a list of them; each entry's ``info``
        must provide the Date, Direction and Amount keys."""
        if cash_changes:
            if not hasattr(cash_changes, '__iter__'):
                cash_changes = [cash_changes]
            for cc in cash_changes:
                cc = cc.info
                self._write(date=cc["Date"], direction=cc['Direction'], amount=cc['Amount'])
    def reset(self):
        """Overwrite the cash file with an empty table."""
        empty_cash = self._empty_cash()
        empty_cash.to_csv(f"{self.directory}/{self.CASH_FILENAME}")
        self._cash_changes = empty_cash
    def _empty_cash(self):
        """Empty cash-changes DataFrame with the expected columns."""
        return pd.DataFrame(columns=self.CASH_INFO).set_index('Date')
| true | true |
f71d4483c26638ff4b3ecc30a2d816789e000b26 | 15,403 | py | Python | quests/forms.py | donvvo/questr-master | 6363ffb4c11ef61f3b6976e75c86a5cbc7f38590 | [
"MIT"
] | null | null | null | quests/forms.py | donvvo/questr-master | 6363ffb4c11ef61f3b6976e75c86a5cbc7f38590 | [
"MIT"
] | null | null | null | quests/forms.py | donvvo/questr-master | 6363ffb4c11ef61f3b6976e75c86a5cbc7f38590 | [
"MIT"
] | null | null | null |
from django import forms
from .models import Quests, PACKAGE_SELECTION
class QuestCreationForm(forms.ModelForm):
    """
    Form for creating a new shipment quest: model-backed title/size/description
    plus extra (non-model) pickup and drop-off address/contact fields.
    """
    # Cities the service currently operates in, as (value, label) pairs.
    CITY_SELECTION = (
        ('Toronto', 'Toronto'),
        ('Brampton', 'Brampton'),
        ('Markham', 'Markham'),
        ('Mississauga', 'Mississauga'),
        ('Richmond Hill', 'Richmond Hill'),
        ('Vaughan', 'Vaughan'),
        ('Oakville', 'Oakville')
    )
    # Package size; choices come from PACKAGE_SELECTION imported from .models.
    size = forms.ChoiceField(
        choices=PACKAGE_SELECTION,
        error_messages={
            'required': 'We need to know how you like your item to be shipped!',
            'invalid_choice': 'Please select one of the options available !',
        },
        widget=forms.Select(
            attrs={
                'class': 'form-control m-b',
                'required': 'true'
            }
        )
    )
    # --- Pickup (source) details -------------------------------------------
    srccity = forms.ChoiceField(
        choices=CITY_SELECTION,
        error_messages={
            'required': 'Name of the city is required !',
            'invalid_choice': 'Please select one of the options available !'
        },
        widget=forms.Select(
            attrs={
                'class': 'form-control input-lg',
                'data-size': '5',
                'required': 'true'
            }
        )
    )
    srcaddress = forms.CharField(
        error_messages={
            'required': 'Street/Apt. Address where the \
            shipment is to be picked up from is required !',
        },
        widget=forms.TextInput(
            attrs={
                'class': 'form-control input-lg',
                'placeholder': 'Street Address, P.O box, company name, etc',
                'required': 'true'
            }
        )
    )
    srcaddress_2 = forms.CharField(
        required=False,
        widget=forms.TextInput(
            attrs={
                'class': 'form-control input-lg',
                'placeholder': 'Apartment, suite, unit, building, floor, etc.',
                'required': 'false'
            }
        )
    )
    srcname = forms.CharField(
        error_messages={
            'required': 'Name of the sender is required!',
        },
        widget=forms.TextInput(
            attrs={
                'class': 'form-control input-lg',
                'placeholder': 'John Doe',
                'required': 'true',
            }
        )
    )
    # NOTE(review): srcphone is optional server-side (required=False) but the
    # widget still renders required='true' client-side -- confirm this is
    # intentional.
    srcphone = forms.CharField(
        required=False,
        widget=forms.TextInput(
            attrs={
                'class': 'form-control input-lg',
                'data-mask': '(999) 999-9999',
                'placeholder': '(123) 456 - 7890',
                'required': 'true'
            }
        )
    )
    srcpostalcode = forms.CharField(
        error_messages={
            'required': 'Your postcode is required !',
        },
        widget=forms.TextInput(
            attrs={
                'class': 'form-control input-lg',
                'placeholder': 'M8V 0A5',
                'required': 'true',
            }
        )
    )
    # --- Drop-off (destination) details ------------------------------------
    dstcity = forms.ChoiceField(
        choices=CITY_SELECTION,
        error_messages={
            'required': 'Name of the city is required !',
            'invalid_choice': 'Please select one of the options available !'
        },
        widget=forms.Select(
            attrs={
                'class': 'form-control input-lg',
                'data-size': '5',
                'required': 'true',
            }
        )
    )
    dstaddress = forms.CharField(
        error_messages={
            'required': 'Street/Apt. Address where the \
            shipment is to be dropped off from is required !',
        },
        widget=forms.TextInput(
            attrs={
                'class': 'form-control input-lg',
                'placeholder': 'Street Address, P.O box, company name, etc',
                'required': 'true',
            }
        )
    )
    dstaddress_2 = forms.CharField(
        required=False,
        widget=forms.TextInput(
            attrs={
                'class': 'form-control input-lg',
                'placeholder': 'Apartment, suite, unit, building, floor, etc.',
            }
        )
    )
    dstname = forms.CharField(
        error_messages={
            'required': 'Name of the receiver is required!',
        },
        widget=forms.TextInput(
            attrs={
                'class': 'form-control input-lg',
                'placeholder': 'John Doe',
                'required': 'true',
            }
        )
    )
    # NOTE(review): same required=False vs required='true' mismatch as srcphone.
    dstphone = forms.CharField(
        required=False,
        widget=forms.TextInput(
            attrs={
                'class': 'form-control input-lg',
                'data-mask': '(999) 999-9999',
                'placeholder': '(123) 456 - 7890',
                'required': 'true',
            }
        )
    )
    dstpostalcode = forms.CharField(
        error_messages={
            'required': 'Your postcode is required !',
        },
        widget=forms.TextInput(
            attrs={
                'class': 'form-control input-lg',
                'placeholder': 'M8V 0A5',
                'required': 'true',
            }
        )
    )
    class Meta:
        # Only these model-backed fields are edited here; the address/contact
        # fields above are extra form-only fields.
        model = Quests
        fields = ['title', 'size', 'description']
    def __init__(self, *args, **kwargs):
        """Attach Bootstrap classes and placeholders to the model-backed widgets."""
        super(QuestCreationForm, self).__init__(*args, **kwargs)
        self.fields['title'].widget.attrs = {
            'class': 'form-control input-lg',
            'required': 'true',
            'placeholder': 'Name of your shipment.'
        }
        self.fields['description'].widget.attrs = {
            'class': 'form-control',
            'rows': '5',
            'required': 'true',
            'placeholder': 'Tell us about your shipment.'
        }
class QuestConfirmForm(forms.ModelForm):
    """
    Confirmation form for a quest: the same pickup/drop-off address and
    contact fields as QuestCreationForm (with default widgets) plus every
    Quests model field not listed in Meta.exclude.
    """
    # Cities the service currently operates in, as (value, label) pairs.
    CITY_SELECTION = (
        ('Toronto', 'Toronto'),
        ('Brampton', 'Brampton'),
        ('Markham', 'Markham'),
        ('Mississauga', 'Mississauga'),
        ('Richmond Hill', 'Richmond Hill'),
        ('Vaughan', 'Vaughan'),
        ('Oakville', 'Oakville')
    )
    # Package size; choices come from PACKAGE_SELECTION imported from .models.
    size = forms.ChoiceField(
        choices=PACKAGE_SELECTION,
        error_messages={
            'required': 'We need to know how you like your item to be shipped!',
            'invalid_choice': 'Please select one of the options available !',
        },
    )
    # --- Pickup (source) details -------------------------------------------
    srccity = forms.ChoiceField(
        choices=CITY_SELECTION,
        error_messages={
            'required': 'Name of the city is required !',
            'invalid_choice': 'Please select one of the options available !'
        },
    )
    srcaddress = forms.CharField(
        error_messages={
            'required': 'Street/Apt. Address where the \
            shipment is to be picked up from is required !',
        },
    )
    srcaddress_2 = forms.CharField(
        required=False,
    )
    srcname = forms.CharField(
        error_messages={
            'required': 'Name of the sender is required!',
        },
    )
    srcphone = forms.CharField(
        required=False,
    )
    srcpostalcode = forms.CharField(
        error_messages={
            'required': 'Your postcode is required !',
        },
    )
    # --- Drop-off (destination) details ------------------------------------
    dstcity = forms.ChoiceField(
        choices=CITY_SELECTION,
        error_messages={
            'required': 'Name of the city is required !',
            'invalid_choice': 'Please select one of the options available !'
        },
    )
    dstaddress = forms.CharField(
        error_messages={
            'required': 'Street/Apt. Address where the \
            shipment is to be dropped off from is required !',
        },
    )
    dstaddress_2 = forms.CharField(
        required=False,
    )
    dstname = forms.CharField(
        error_messages={
            'required': 'Name of the receiver is required!',
        },
    )
    dstphone = forms.CharField(
        required=False,
    )
    dstpostalcode = forms.CharField(
        error_messages={
            'required': 'Your postcode is required !',
        },
    )
    # PICKUP_TIME_SELECTION = (('now','Now'),('not_now','Not_now'))
    # NOT_NOW_SELECTION = (('Today','Today'),('Tomorrow','Tomorrow'))
    # pickup_time = forms.ChoiceField(choices=PICKUP_TIME_SELECTION, widget=forms.RadioSelect())
    # pickup_when = forms.ChoiceField(required=False,
    #                                choices=NOT_NOW_SELECTION,
    #                                error_messages={
    #                                     'invalid_choice' : 'Please select one of the options available !'
    #                                })
    # not_now_pickup_time = forms.CharField(required=False)
    class Meta:
        model = Quests
        # Bookkeeping/derived fields are managed by the application, not the user.
        exclude = ['questrs','status','creation_date','isaccepted', 'shipper', 'delivery_code', 'pickup', \
        'dropoff', 'delivery_date', 'map_image','available_couriers','tracking_number', 'pickup_time','considered_couriers']
        error_messages = {
            'size' : {
                'required' : 'We need to know how you like your item to be shipped!',
                'invalid_choice' : 'Please select one of the options available !',
            },
            'title' : {
                'required' : 'A title is required !',
            },
            'reward' : {
                'required' : 'Every shipment has a price!',
                'invalid' : 'There is a limit to how much one pays for a shipment!',
            },
        }
# class QuestChangeForm(forms.ModelForm):
# """
# A form to edit a quest that has been created already
# """
# CITY_SELECTION = (('Toronto','Toronto'),('Brampton','Brampton'),('Markham','Markham'),
# ('Mississauga','Mississauga'),('Richmond Hill','Richmond Hill'),('Vaughan','Vaughan'),
# ('Oakville','Oakville'))
# srccity = forms.ChoiceField(
# choices=CITY_SELECTION,
# error_messages={
# 'required' : 'Name of the city is required !',
# 'invalid_choice' : 'Please select one of the options available !'
# }
# )
# srcaddress = forms.CharField(
# error_messages={'required' : 'Street/Apt. Address where the shipment is to be picked up from is required !',}
# )
# srcname = forms.CharField(
# error_messages={'required' : 'Name of the sender is required!',}
# )
# srcphone = forms.CharField(required=False)
# srcpostalcode = forms.CharField(
# error_messages={'required' : 'Your postcode is required !',}
# )
# dstcity = forms.ChoiceField(
# choices=CITY_SELECTION,
# error_messages={'required' : 'Name of the city is required !',
# 'invalid_choice' : 'Please select one of the options available !',
# }
# )
# dstaddress = forms.CharField(
# error_messages={'required' : 'Street/Apt. Address where the shipment is to be picked up from is required !',}
# )
# dstname = forms.CharField(
# error_messages={'required' : 'Name of the sender is required!',}
# )
# dstphone = forms.CharField(required=False)
# dstpostalcode = forms.CharField(
# error_messages={'required' : 'Your postcode is required !',}
# )
# class Meta:
# model = Quests
# exclude = ['questrs','reward','status','creation_date','isaccepted', 'shipper', 'distance', 'delivery_code', \
# 'item_images', 'pickup', 'dropoff', 'delivery_date', 'map_image','available_couriers','tracking_number']
# widget = {
# 'description' : forms.TextInput(attrs = { 'placeholder': "Description"}),
# 'title' : forms.TextInput(attrs = { 'placeholder': 'Title'}),
# 'size' : forms.RadioSelect(attrs = { 'default': "backpack"}),
# 'srccity' : forms.Select(attrs = { 'placeholder': "toronto"}),
# 'srcaddress' : forms.TextInput(attrs = { 'placeholder': "Departure Address"}),
# 'srcname' : forms.TextInput(attrs = { 'placeholder': "John Doe"}),
# 'srcphone' : forms.TextInput(attrs = { 'placeholder': "+111-222-333"}),
# 'srcpostal' : forms.TextInput(attrs = { 'placeholder': "+111-222-333"}),
# 'dstcity' : forms.Select(attrs = { 'placeholder': "toronto"}),
# 'dstaddress' : forms.TextInput(attrs = { 'placeholder': "Delivery Address"}),
# 'dstname' : forms.TextInput(attrs = { 'placeholder': "John Doe"}),
# 'dstphone' : forms.TextInput(attrs = { 'placeholder': "+111-222-333"}),
# 'dstpostalcode' : forms.TextInput(attrs = { 'placeholder': "+111-222-333"}),
# }
# error_messages = {
# 'size' : {
# 'required' : 'We need to know how you like your item to be shipped!',
# 'invalid_choice' : 'Please select one of the options available !',
# },
# 'title' : {
# 'required' : 'A title is required !',
# },
# }
class DistancePriceForm(forms.Form):
    """
    Collects the pickup/drop-off locations and the package size needed to
    compute a shipment's distance and price quote.

    Plain (non-model) form: nothing is saved; the cleaned data is only used
    for the distance/price lookup.
    """
    # Fixes applied: removed a no-op __init__ that only delegated to
    # forms.Form.__init__ with the same signature (dead code), and corrected
    # the dstaddress error message, which wrongly said "picked up" (copy-paste
    # from srcaddress; compare QuestConfirmForm.dstaddress).
    CITY_SELECTION = (('Toronto','Toronto'),('Brampton','Brampton'),('Markham','Markham'),
        ('Mississauga','Mississauga'),('Richmond Hill','Richmond Hill'),('Vaughan','Vaughan'),
        ('Oakville','Oakville'))
    # NOTE: this class-level PACKAGE_SELECTION deliberately shadows the
    # PACKAGE_SELECTION imported from .models (lower-case values here).
    PACKAGE_SELECTION = (('car','Car'),('backpack','Backpack'),('minivan','Minivan'))
    # --- Pickup (source) details -------------------------------------------
    srccity = forms.ChoiceField(
        choices=CITY_SELECTION,
        error_messages={
            'required' : 'Name of the city is required !',
            'invalid_choice' : 'Please select one of the options available !'
        }
    )
    size = forms.ChoiceField(
        choices=PACKAGE_SELECTION,
        error_messages={'required' : 'We need to know how you like your item to be shipped!',
            'invalid_choice' : 'Please select one of the options available !',
        }
    )
    srcaddress = forms.CharField(
        error_messages={'required' : 'Street/Apt. Address where the shipment is to be picked up from is required !',}
    )
    srcpostalcode = forms.CharField(
        error_messages={'required' : 'Your postcode is required !',}
    )
    # --- Drop-off (destination) details ------------------------------------
    dstcity = forms.ChoiceField(
        choices=CITY_SELECTION,
        error_messages={'required' : 'Name of the city is required !',
            'invalid_choice' : 'Please select one of the options available !',
        }
    )
    dstaddress = forms.CharField(
        error_messages={'required' : 'Street/Apt. Address where the shipment is to be dropped off at is required !',}
    )
    dstpostalcode = forms.CharField(
        error_messages={'required' : 'Your postcode is required !',}
    )
class TrackingNumberSearchForm(forms.Form):
    """Single-field form used to look up a shipment by its tracking number.

    Fix applied: removed a no-op __init__ that only delegated to
    forms.Form.__init__ with the same signature (dead code).
    """
    tracking_number = forms.CharField(
        error_messages = {'required':'Please provide with a tracking number'}
    )
| 34.002208 | 128 | 0.52704 |
from django import forms
from .models import Quests, PACKAGE_SELECTION
class QuestCreationForm(forms.ModelForm):
CITY_SELECTION = (
('Toronto', 'Toronto'),
('Brampton', 'Brampton'),
('Markham', 'Markham'),
('Mississauga', 'Mississauga'),
('Richmond Hill', 'Richmond Hill'),
('Vaughan', 'Vaughan'),
('Oakville', 'Oakville')
)
size = forms.ChoiceField(
choices=PACKAGE_SELECTION,
error_messages={
'required': 'We need to know how you like your item to be shipped!',
'invalid_choice': 'Please select one of the options available !',
},
widget=forms.Select(
attrs={
'class': 'form-control m-b',
'required': 'true'
}
)
)
srccity = forms.ChoiceField(
choices=CITY_SELECTION,
error_messages={
'required': 'Name of the city is required !',
'invalid_choice': 'Please select one of the options available !'
},
widget=forms.Select(
attrs={
'class': 'form-control input-lg',
'data-size': '5',
'required': 'true'
}
)
)
srcaddress = forms.CharField(
error_messages={
'required': 'Street/Apt. Address where the \
shipment is to be picked up from is required !',
},
widget=forms.TextInput(
attrs={
'class': 'form-control input-lg',
'placeholder': 'Street Address, P.O box, company name, etc',
'required': 'true'
}
)
)
srcaddress_2 = forms.CharField(
required=False,
widget=forms.TextInput(
attrs={
'class': 'form-control input-lg',
'placeholder': 'Apartment, suite, unit, building, floor, etc.',
'required': 'false'
}
)
)
srcname = forms.CharField(
error_messages={
'required': 'Name of the sender is required!',
},
widget=forms.TextInput(
attrs={
'class': 'form-control input-lg',
'placeholder': 'John Doe',
'required': 'true'
}
)
)
srcphone = forms.CharField(
required=False,
widget=forms.TextInput(
attrs={
'class': 'form-control input-lg',
'data-mask': '(999) 999-9999',
'placeholder': '(123) 456 - 7890',
'required': 'true'
}
)
)
srcpostalcode = forms.CharField(
error_messages={
'required': 'Your postcode is required !',
},
widget=forms.TextInput(
attrs={
'class': 'form-control input-lg',
'placeholder': 'M8V 0A5',
'required': 'true',
}
)
)
dstcity = forms.ChoiceField(
choices=CITY_SELECTION,
error_messages={
'required': 'Name of the city is required !',
'invalid_choice': 'Please select one of the options available !'
},
widget=forms.Select(
attrs={
'class': 'form-control input-lg',
'data-size': '5',
'required': 'true',
}
)
)
dstaddress = forms.CharField(
error_messages={
'required': 'Street/Apt. Address where the \
shipment is to be dropped off from is required !',
},
widget=forms.TextInput(
attrs={
'class': 'form-control input-lg',
'placeholder': 'Street Address, P.O box, company name, etc',
'required': 'true',
}
)
)
dstaddress_2 = forms.CharField(
required=False,
widget=forms.TextInput(
attrs={
'class': 'form-control input-lg',
'placeholder': 'Apartment, suite, unit, building, floor, etc.',
}
)
)
dstname = forms.CharField(
error_messages={
'required': 'Name of the receiver is required!',
},
widget=forms.TextInput(
attrs={
'class': 'form-control input-lg',
'placeholder': 'John Doe',
'required': 'true',
}
)
)
dstphone = forms.CharField(
required=False,
widget=forms.TextInput(
attrs={
'class': 'form-control input-lg',
'data-mask': '(999) 999-9999',
'placeholder': '(123) 456 - 7890',
'required': 'true',
}
)
)
dstpostalcode = forms.CharField(
error_messages={
'required': 'Your postcode is required !',
},
widget=forms.TextInput(
attrs={
'class': 'form-control input-lg',
'placeholder': 'M8V 0A5',
'required': 'true',
}
)
)
class Meta:
model = Quests
fields = ['title', 'size', 'description']
def __init__(self, *args, **kwargs):
super(QuestCreationForm, self).__init__(*args, **kwargs)
self.fields['title'].widget.attrs = {
'class': 'form-control input-lg',
'required': 'true',
'placeholder': 'Name of your shipment.'
}
self.fields['description'].widget.attrs = {
'class': 'form-control',
'rows': '5',
'required': 'true',
'placeholder': 'Tell us about your shipment.'
}
class QuestConfirmForm(forms.ModelForm):
CITY_SELECTION = (
('Toronto', 'Toronto'),
('Brampton', 'Brampton'),
('Markham', 'Markham'),
('Mississauga', 'Mississauga'),
('Richmond Hill', 'Richmond Hill'),
('Vaughan', 'Vaughan'),
('Oakville', 'Oakville')
)
size = forms.ChoiceField(
choices=PACKAGE_SELECTION,
error_messages={
'required': 'We need to know how you like your item to be shipped!',
'invalid_choice': 'Please select one of the options available !',
},
)
srccity = forms.ChoiceField(
choices=CITY_SELECTION,
error_messages={
'required': 'Name of the city is required !',
'invalid_choice': 'Please select one of the options available !'
},
)
srcaddress = forms.CharField(
error_messages={
'required': 'Street/Apt. Address where the \
shipment is to be picked up from is required !',
},
)
srcaddress_2 = forms.CharField(
required=False,
)
srcname = forms.CharField(
error_messages={
'required': 'Name of the sender is required!',
},
)
srcphone = forms.CharField(
required=False,
)
srcpostalcode = forms.CharField(
error_messages={
'required': 'Your postcode is required !',
},
)
dstcity = forms.ChoiceField(
choices=CITY_SELECTION,
error_messages={
'required': 'Name of the city is required !',
'invalid_choice': 'Please select one of the options available !'
},
)
dstaddress = forms.CharField(
error_messages={
'required': 'Street/Apt. Address where the \
shipment is to be dropped off from is required !',
},
)
dstaddress_2 = forms.CharField(
required=False,
)
dstname = forms.CharField(
error_messages={
'required': 'Name of the receiver is required!',
},
)
dstphone = forms.CharField(
required=False,
)
dstpostalcode = forms.CharField(
error_messages={
'required': 'Your postcode is required !',
},
)
class Meta:
model = Quests
exclude = ['questrs','status','creation_date','isaccepted', 'shipper', 'delivery_code', 'pickup', \
'dropoff', 'delivery_date', 'map_image','available_couriers','tracking_number', 'pickup_time','considered_couriers']
error_messages = {
'size' : {
'required' : 'We need to know how you like your item to be shipped!',
'invalid_choice' : 'Please select one of the options available !',
},
'title' : {
'required' : 'A title is required !',
},
'reward' : {
'required' : 'Every shipment has a price!',
'invalid' : 'There is a limit to how much one pays for a shipment!',
},
}
# A form to edit a quest that has been created already
# """
class DistancePriceForm(forms.Form):
def __init__(self, *args, **kwargs):
super(DistancePriceForm, self).__init__(*args, **kwargs)
CITY_SELECTION = (('Toronto','Toronto'),('Brampton','Brampton'),('Markham','Markham'),
('Mississauga','Mississauga'),('Richmond Hill','Richmond Hill'),('Vaughan','Vaughan'),
('Oakville','Oakville'))
PACKAGE_SELECTION = (('car','Car'),('backpack','Backpack'),('minivan','Minivan'))
srccity = forms.ChoiceField(
choices=CITY_SELECTION,
error_messages={
'required' : 'Name of the city is required !',
'invalid_choice' : 'Please select one of the options available !'
}
)
size = forms.ChoiceField(
choices=PACKAGE_SELECTION,
error_messages={'required' : 'We need to know how you like your item to be shipped!',
'invalid_choice' : 'Please select one of the options available !',
}
)
srcaddress = forms.CharField(
error_messages={'required' : 'Street/Apt. Address where the shipment is to be picked up from is required !',}
)
srcpostalcode = forms.CharField(
error_messages={'required' : 'Your postcode is required !',}
)
dstcity = forms.ChoiceField(
choices=CITY_SELECTION,
error_messages={'required' : 'Name of the city is required !',
'invalid_choice' : 'Please select one of the options available !',
}
)
dstaddress = forms.CharField(
error_messages={'required' : 'Street/Apt. Address where the shipment is to be picked up from is required !',}
)
dstpostalcode = forms.CharField(
error_messages={'required' : 'Your postcode is required !',}
)
class TrackingNumberSearchForm(forms.Form):
def __init__(self, *args, **kwargs):
super(TrackingNumberSearchForm, self).__init__(*args, **kwargs)
tracking_number = forms.CharField(
error_messages = {'required':'Please provide with a tracking number'}
)
| true | true |
f71d44a480e0696a5e3de4133e6459faae7aca1e | 6,235 | py | Python | tests/test_parse_model.py | covid-19-impact-lab/sid | d867f55d4d005b01c672bd2edd0e1dc974cb182b | [
"MIT"
] | 18 | 2020-04-18T09:18:52.000Z | 2021-10-19T02:42:39.000Z | tests/test_parse_model.py | covid-19-impact-lab/sid | d867f55d4d005b01c672bd2edd0e1dc974cb182b | [
"MIT"
] | 143 | 2020-04-18T16:58:20.000Z | 2022-03-07T22:16:03.000Z | tests/test_parse_model.py | covid-19-impact-lab/sid | d867f55d4d005b01c672bd2edd0e1dc974cb182b | [
"MIT"
] | 1 | 2021-01-07T07:38:53.000Z | 2021-01-07T07:38:53.000Z | from contextlib import ExitStack as does_not_raise # noqa: N813
import numpy as np
import pandas as pd
import pytest
from sid.config import DEFAULT_VIRUS_STRAINS
from sid.config import INITIAL_CONDITIONS
from sid.parse_model import parse_duration
from sid.parse_model import parse_initial_conditions
from sid.parse_model import parse_virus_strains
@pytest.mark.unit
@pytest.mark.parametrize(
"duration, expectation, expected",
[
(
{"start": "2020-01-01", "end": "2020-01-02"},
does_not_raise(),
{
"start": pd.Timestamp("2020-01-01"),
"end": pd.Timestamp("2020-01-02"),
"dates": pd.DatetimeIndex(pd.to_datetime(["2020-01-01", "2020-01-02"])),
},
),
(
{"start": "2020-01-01", "periods": 2},
does_not_raise(),
{
"start": pd.Timestamp("2020-01-01"),
"end": pd.Timestamp("2020-01-02"),
"dates": pd.DatetimeIndex(pd.to_datetime(["2020-01-01", "2020-01-02"])),
},
),
(
{"start": "2020-01-01", "periods": 2, "freq": "s"},
pytest.warns(UserWarning, match="Only 'start', 'end', and 'periods'"),
{
"start": pd.Timestamp("2020-01-01"),
"end": pd.Timestamp("2020-01-02"),
"dates": pd.DatetimeIndex(pd.to_datetime(["2020-01-01", "2020-01-02"])),
},
),
({"periods": 2}, pytest.raises(ValueError, match="Of the four"), None),
],
)
def test_parse_duration(duration, expectation, expected):
    """Parsed duration must contain the expected start/end/dates entries."""
    with expectation:
        for key, value in parse_duration(duration).items():
            if key == "dates":
                # DatetimeIndex comparison yields an array; collapse it.
                assert np.all(value == expected[key])
            else:
                assert value == expected[key]
@pytest.mark.unit
@pytest.mark.parametrize(
(
"initial_conditions",
"start_date_simulation",
"virus_strains",
"expectation",
"expected",
),
[
(
None,
pd.Timestamp("2020-01-02"),
{"names": ["base_strain"], "factors": np.ones(1)},
does_not_raise(),
{**INITIAL_CONDITIONS, "virus_shares": {"base_strain": 1.0}},
),
(
{"assort_by": ["region"]},
pd.Timestamp("2020-01-02"),
{"names": ["base_strain"], "factors": np.ones(1)},
does_not_raise(),
{
**INITIAL_CONDITIONS,
"assort_by": ["region"],
"virus_shares": {"base_strain": 1.0},
},
),
(
{"assort_by": "region"},
pd.Timestamp("2020-01-02"),
{"names": ["base_strain"], "factors": np.ones(1)},
does_not_raise(),
{
**INITIAL_CONDITIONS,
"assort_by": ["region"],
"virus_shares": {"base_strain": 1.0},
},
),
(
{"growth_rate": 0},
pd.Timestamp("2020-01-02"),
{"names": ["base_strain"], "factors": np.ones(1)},
pytest.raises(ValueError, match="'growth_rate' must be greater than or"),
None,
),
(
{"burn_in_periods": 0},
pd.Timestamp("2020-01-02"),
{"names": ["base_strain"], "factors": np.ones(1)},
pytest.raises(ValueError, match="'burn_in_periods' must be greater or"),
None,
),
(
{"burn_in_periods": 2.0},
pd.Timestamp("2020-01-02"),
{"names": ["base_strain"], "factors": np.ones(1)},
pytest.raises(ValueError, match="'burn_in_periods' must be an integer"),
None,
),
(
{"initial_infections": None},
pd.Timestamp("2020-01-02"),
{"names": ["base_strain"], "factors": np.ones(1)},
pytest.raises(ValueError, match="'initial_infections' must be a"),
None,
),
],
)
def test_parse_initial_conditions(
    initial_conditions, start_date_simulation, virus_strains, expectation, expected
):
    """Parsed initial conditions must equal the expected dict (with burn-in dates)."""
    with expectation:
        parsed = parse_initial_conditions(
            initial_conditions, start_date_simulation, virus_strains
        )
        # One burn-in period before the simulation start (2020-01-02).
        expected["burn_in_periods"] = pd.DatetimeIndex([pd.Timestamp("2020-01-01")])
        assert parsed == expected
@pytest.mark.unit
@pytest.mark.parametrize(
"virus_strains, params, expectation, expected",
[
pytest.param(None, None, does_not_raise(), DEFAULT_VIRUS_STRAINS, id="default"),
pytest.param(
[],
None,
pytest.raises(ValueError, match="The list of"),
None,
id="empty list",
),
pytest.param(
["b117"],
pd.DataFrame(
index=pd.MultiIndex.from_tuples(
[], names=["category", "subcategory", "value"]
)
),
pytest.raises(ValueError, match="Some factors for the infectiousness"),
None,
id="missing param",
),
pytest.param(
["wild_strain", "b117"],
pd.DataFrame(
index=pd.MultiIndex.from_tuples(
[
("virus_strains", "wild_strain", "factor"),
("virus_strains", "b117", "factor"),
],
names=["category", "subcategory", "value"],
),
),
pytest.raises(ValueError, match="Some factors for the infectiousness"),
{"names": ["b117", "wild_strain"]},
id="usual parsing",
),
pytest.param(
set(),
None,
pytest.raises(ValueError, match="'virus_strains' is not 'None'"),
None,
id="wrong input",
),
],
)
def test_parse_virus_strains(virus_strains, params, expectation, expected):
    """Parsed strains expose sorted names and no eagerly-computed factors."""
    with expectation:
        parsed = parse_virus_strains(virus_strains, params)
        assert parsed["names"] == expected["names"]
        assert "factors" not in parsed
| 32.815789 | 88 | 0.503288 | from contextlib import ExitStack as does_not_raise
import numpy as np
import pandas as pd
import pytest
from sid.config import DEFAULT_VIRUS_STRAINS
from sid.config import INITIAL_CONDITIONS
from sid.parse_model import parse_duration
from sid.parse_model import parse_initial_conditions
from sid.parse_model import parse_virus_strains
@pytest.mark.unit
@pytest.mark.parametrize(
"duration, expectation, expected",
[
(
{"start": "2020-01-01", "end": "2020-01-02"},
does_not_raise(),
{
"start": pd.Timestamp("2020-01-01"),
"end": pd.Timestamp("2020-01-02"),
"dates": pd.DatetimeIndex(pd.to_datetime(["2020-01-01", "2020-01-02"])),
},
),
(
{"start": "2020-01-01", "periods": 2},
does_not_raise(),
{
"start": pd.Timestamp("2020-01-01"),
"end": pd.Timestamp("2020-01-02"),
"dates": pd.DatetimeIndex(pd.to_datetime(["2020-01-01", "2020-01-02"])),
},
),
(
{"start": "2020-01-01", "periods": 2, "freq": "s"},
pytest.warns(UserWarning, match="Only 'start', 'end', and 'periods'"),
{
"start": pd.Timestamp("2020-01-01"),
"end": pd.Timestamp("2020-01-02"),
"dates": pd.DatetimeIndex(pd.to_datetime(["2020-01-01", "2020-01-02"])),
},
),
({"periods": 2}, pytest.raises(ValueError, match="Of the four"), None),
],
)
def test_parse_duration(duration, expectation, expected):
with expectation:
result = parse_duration(duration)
for k in result:
if k == "dates":
assert np.all(result[k] == expected[k])
else:
assert result[k] == expected[k]
@pytest.mark.unit
@pytest.mark.parametrize(
(
"initial_conditions",
"start_date_simulation",
"virus_strains",
"expectation",
"expected",
),
[
(
None,
pd.Timestamp("2020-01-02"),
{"names": ["base_strain"], "factors": np.ones(1)},
does_not_raise(),
{**INITIAL_CONDITIONS, "virus_shares": {"base_strain": 1.0}},
),
(
{"assort_by": ["region"]},
pd.Timestamp("2020-01-02"),
{"names": ["base_strain"], "factors": np.ones(1)},
does_not_raise(),
{
**INITIAL_CONDITIONS,
"assort_by": ["region"],
"virus_shares": {"base_strain": 1.0},
},
),
(
{"assort_by": "region"},
pd.Timestamp("2020-01-02"),
{"names": ["base_strain"], "factors": np.ones(1)},
does_not_raise(),
{
**INITIAL_CONDITIONS,
"assort_by": ["region"],
"virus_shares": {"base_strain": 1.0},
},
),
(
{"growth_rate": 0},
pd.Timestamp("2020-01-02"),
{"names": ["base_strain"], "factors": np.ones(1)},
pytest.raises(ValueError, match="'growth_rate' must be greater than or"),
None,
),
(
{"burn_in_periods": 0},
pd.Timestamp("2020-01-02"),
{"names": ["base_strain"], "factors": np.ones(1)},
pytest.raises(ValueError, match="'burn_in_periods' must be greater or"),
None,
),
(
{"burn_in_periods": 2.0},
pd.Timestamp("2020-01-02"),
{"names": ["base_strain"], "factors": np.ones(1)},
pytest.raises(ValueError, match="'burn_in_periods' must be an integer"),
None,
),
(
{"initial_infections": None},
pd.Timestamp("2020-01-02"),
{"names": ["base_strain"], "factors": np.ones(1)},
pytest.raises(ValueError, match="'initial_infections' must be a"),
None,
),
],
)
def test_parse_initial_conditions(
initial_conditions, start_date_simulation, virus_strains, expectation, expected
):
with expectation:
result = parse_initial_conditions(
initial_conditions, start_date_simulation, virus_strains
)
expected["burn_in_periods"] = pd.DatetimeIndex([pd.Timestamp("2020-01-01")])
assert result == expected
@pytest.mark.unit
@pytest.mark.parametrize(
"virus_strains, params, expectation, expected",
[
pytest.param(None, None, does_not_raise(), DEFAULT_VIRUS_STRAINS, id="default"),
pytest.param(
[],
None,
pytest.raises(ValueError, match="The list of"),
None,
id="empty list",
),
pytest.param(
["b117"],
pd.DataFrame(
index=pd.MultiIndex.from_tuples(
[], names=["category", "subcategory", "value"]
)
),
pytest.raises(ValueError, match="Some factors for the infectiousness"),
None,
id="missing param",
),
pytest.param(
["wild_strain", "b117"],
pd.DataFrame(
index=pd.MultiIndex.from_tuples(
[
("virus_strains", "wild_strain", "factor"),
("virus_strains", "b117", "factor"),
],
names=["category", "subcategory", "value"],
),
),
pytest.raises(ValueError, match="Some factors for the infectiousness"),
{"names": ["b117", "wild_strain"]},
id="usual parsing",
),
pytest.param(
set(),
None,
pytest.raises(ValueError, match="'virus_strains' is not 'None'"),
None,
id="wrong input",
),
],
)
def test_parse_virus_strains(virus_strains, params, expectation, expected):
with expectation:
result = parse_virus_strains(virus_strains, params)
assert result["names"] == expected["names"]
assert "factors" not in result
| true | true |
f71d462f369c09cf273f8dcf82d84fce5362259e | 488 | py | Python | cpc/asm/Assembly.py | U-Ar/Cpresto | f723458fb237c9e3e8bc8a6afdf7c81858a65363 | [
"BSD-3-Clause"
] | 1 | 2021-05-09T07:10:19.000Z | 2021-05-09T07:10:19.000Z | cpc/asm/Assembly.py | U-Ar/Cpresto | f723458fb237c9e3e8bc8a6afdf7c81858a65363 | [
"BSD-3-Clause"
] | null | null | null | cpc/asm/Assembly.py | U-Ar/Cpresto | f723458fb237c9e3e8bc8a6afdf7c81858a65363 | [
"BSD-3-Clause"
] | null | null | null | from abc import ABCMeta, abstractmethod
class Assembly(metaclass=ABCMeta):
    """Abstract base class for one element of generated assembly output.

    Concrete subclasses implement ``to_source`` and ``dump`` and override the
    ``is_*`` predicate that matches their kind.
    """
    @abstractmethod
    def to_source(self,table):
        # Must render this element as assembly source text; `table` is
        # presumably a symbol table -- confirm in concrete subclasses.
        pass
    @abstractmethod
    def dump(self):
        # Must render a debug representation of this element.
        pass
    # Type predicates: each defaults to False and is overridden by the
    # matching subclass (instruction / label / directive / comment).
    def is_instruction(self):
        return False
    def is_label(self):
        return False
    def is_directive(self):
        return False
    def is_comment(self):
        return False
    def collect_statistics(self,stats):
        # Does nothing by default; subclasses may fold data into `stats`.
        pass | 19.52 | 39 | 0.616803 | from abc import ABCMeta, abstractmethod
class Assembly(metaclass=ABCMeta):
    """Abstract base class for a single element of assembly output.

    Concrete subclasses must implement ``to_source`` and ``dump``, and
    override whichever ``is_*`` predicate matches their kind; all predicates
    default to False here.
    """
    @abstractmethod
    def to_source(self, table):
        """Render this element as assembly source text (``table`` is presumably a symbol table)."""
    @abstractmethod
    def dump(self):
        """Render a debug representation of this element."""
    def is_instruction(self):
        """True only for instruction elements; overridden in that subclass."""
        return False
    def is_label(self):
        """True only for label elements; overridden in that subclass."""
        return False
    def is_directive(self):
        """True only for directive elements; overridden in that subclass."""
        return False
    def is_comment(self):
        """True only for comment elements; overridden in that subclass."""
        return False
    def collect_statistics(self, stats):
        """Fold statistics about this element into ``stats``; no-op by default."""
pass | true | true |
f71d46d8cffb81704bfaaac606134000be1c25b8 | 1,201 | py | Python | kelbyapp/public/forms.py | metatroid/kelbyapp | 6d575f6aac38832e52f520d3a7f072f43c746670 | [
"BSD-3-Clause"
] | null | null | null | kelbyapp/public/forms.py | metatroid/kelbyapp | 6d575f6aac38832e52f520d3a7f072f43c746670 | [
"BSD-3-Clause"
] | null | null | null | kelbyapp/public/forms.py | metatroid/kelbyapp | 6d575f6aac38832e52f520d3a7f072f43c746670 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""Public forms."""
from flask_wtf import Form
from wtforms import PasswordField, StringField
from wtforms.validators import DataRequired
from kelbyapp.user.models import User
class LoginForm(Form):
    """Login form: resolves and authenticates a user from the posted credentials."""

    username = StringField('Username', validators=[DataRequired()])
    password = PasswordField('Password', validators=[DataRequired()])

    def __init__(self, *args, **kwargs):
        """Create the form; ``self.user`` is populated by ``validate`` on success."""
        super(LoginForm, self).__init__(*args, **kwargs)
        self.user = None

    def validate(self):
        """Run the field validators, then check username, password and activation.

        Attaches a message to the offending field and returns ``False`` on
        the first failed check; returns ``True`` only for an active user
        with a matching password.
        """
        if not super(LoginForm, self).validate():
            return False
        self.user = User.query.filter_by(username=self.username.data).first()
        if not self.user:
            self.username.errors.append('Unknown username')
            return False
        if not self.user.check_password(self.password.data):
            self.password.errors.append('Invalid password')
            return False
        if not self.user.active:
            self.username.errors.append('User not activated')
            return False
        return True
| 30.025 | 77 | 0.6403 |
from flask_wtf import Form
from wtforms import PasswordField, StringField
from wtforms.validators import DataRequired
from kelbyapp.user.models import User
class LoginForm(Form):
username = StringField('Username', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
def __init__(self, *args, **kwargs):
super(LoginForm, self).__init__(*args, **kwargs)
self.user = None
def validate(self):
initial_validation = super(LoginForm, self).validate()
if not initial_validation:
return False
self.user = User.query.filter_by(username=self.username.data).first()
if not self.user:
self.username.errors.append('Unknown username')
return False
if not self.user.check_password(self.password.data):
self.password.errors.append('Invalid password')
return False
if not self.user.active:
self.username.errors.append('User not activated')
return False
return True
| true | true |
f71d49b5ea3af6b2cf6a8645afbe5f8a1d710275 | 5,786 | py | Python | main.py | hamhochoisg/moneydetection | 32a02f54a4a0c1a6f41a232fa30a3f0f15bdab13 | [
"CC0-1.0"
] | 1 | 2021-09-25T05:53:08.000Z | 2021-09-25T05:53:08.000Z | main.py | hamhochoisg/moneydetection | 32a02f54a4a0c1a6f41a232fa30a3f0f15bdab13 | [
"CC0-1.0"
] | null | null | null | main.py | hamhochoisg/moneydetection | 32a02f54a4a0c1a6f41a232fa30a3f0f15bdab13 | [
"CC0-1.0"
] | null | null | null | import streamlit as st
import pandas as pd
import numpy as np
import cv2
import matplotlib.pyplot as plt
import tensorflow as tf
import os
from tensorflow.keras.preprocessing import image
st.title('Banknotes Classification')

# Sidebar navigation entries.
menu = ['Home','Up Load & Predict', 'Capture From Webcam']

#========================#
#==== Configuration =====#
# Raw string: the original 'model\my_...' only worked because '\m' is
# not a recognised escape sequence; r'...' keeps the identical path
# while avoiding the invalid-escape DeprecationWarning.
Model_Path = r'model\my_model_checkpoint.h5'

# Class labels in the order the classifier emits them
# (looked up via class_names[prediction.argmax()] below).
class_names = ['1000', '10000', '100000', '2000', '20000', '200000', '5000', '50000', '500000']
def get_saved_model(Model_Path):
    """Load and return the trained Keras model saved at *Model_Path*.

    NOTE(review): the model is re-read from disk on every call and the
    predict helpers below call this per prediction -- caching the loaded
    model may be worthwhile; confirm before changing.
    """
    return tf.keras.models.load_model(Model_Path)
def predict_image(image_path):
    """Load the image at *image_path*, classify it and show the result.

    Writes the predicted denomination and the model's score (as a
    percentage) to the Streamlit page and returns the predicted label.

    Fix: the original header comment promised a string return value but
    the function returned None; it now returns ``pred`` (backward
    compatible -- existing callers ignored the return value).
    """
    model = get_saved_model(Model_Path)
    # Preprocess: resize to the network's 224x224 input and add the
    # batch axis -- model.predict expects shape (1, 224, 224, 3).
    img = image.load_img(image_path, target_size=(224, 224))
    img_array = image.img_to_array(img)
    img_array = np.expand_dims(img_array, axis=0)
    # Predict and pick the best-scoring class.
    prediction = model.predict(img_array)
    index = prediction.argmax()
    tmp_percent = prediction[0][index] * 100
    pred = class_names[index]
    st.write('model prediction:')
    st.write(pred)
    # NOTE(review): 'Propotion' typo kept byte-identical (user-facing
    # string); fix deliberately if desired.
    st.write('Model Propotion:')
    st.write(tmp_percent)
    return pred
def predict_image_array(img_array):
    """Classify a preprocessed image batch of shape (1, 224, 224, 3).

    Writes the predicted denomination and the model's score (as a
    percentage) to the Streamlit page and returns ``(scores, index)``,
    where ``scores`` is the prediction rows as a list and ``index`` the
    argmax class position.
    """
    model = get_saved_model(Model_Path)
    scores = model.predict(img_array)
    best = scores.argmax()
    as_rows = list(scores)
    confidence = as_rows[0][best] * 100
    label = class_names[best]
    st.write('model prediction:')
    st.write(label)
    st.write('Model Propotion:')
    st.write(confidence)
    print(as_rows)  # console debug trace, kept for behavioural parity
    return as_rows, best
#========================#
# Sidebar navigation; 'Danh mục' is Vietnamese for "category"/"menu".
choice = st.sidebar.selectbox('Danh mục', menu)
if choice == 'Home':
    st.title('This is Home Page')
    st.write('Xin chào, đây là ứng dụng phân loại tiền')
    # Show the current working directory (debug aid for path issues).
    current_path = os.getcwd()
    st.write('current path:')
    st.write(current_path)
    # Model demo section.
    st.write('This is our model:')
    # model = get_saved_model(Model_Path)
    test_image_path = "media\\test\\500000\\Sự-thật-về-cách-đoán-3-số-Seri-tiền-500k-200k-100k-50k-20k-10k.jpg"
    # Display a sample banknote image.
    st.write('For Example Below Image')
    st.image(test_image_path,use_column_width='auto')
    st.write("Model Can Understand This Value")
    # Prediction demo (disabled: requires the model file on disk).
    # predict_image(test_image_path)
elif choice == 'Up Load & Predict':
st.title('Please Upload Your Banknotes Image, I Can Understand it:')
photo_uploaded = st.file_uploader('Choose your banknotes photo', ['png', 'jpg', 'jpeg'])
if photo_uploaded != None:
image_np = np.asarray(bytearray(photo_uploaded.read()), dtype=np.uint8)
# print(image_np)
# print(image_np.shape)
img = cv2.imdecode(image_np, 1)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
print(img.shape)
st.image(img)
st.write(photo_uploaded.size)
st.write(photo_uploaded.type)
#Then Predict it
img = cv2.resize(img, (224,224), interpolation = cv2.INTER_AREA)
img_array = np.expand_dims(img, axis=0)
# print(img_array.shape)
print(type(img))
predict_image_array(img_array)
elif choice == 'Capture From Webcam':
    cap = cv2.VideoCapture(0) # device 0 (default webcam)
    run = st.checkbox('Show Webcam')
    capture_button = st.checkbox('Campture')
    quit_button = st.checkbox('Quit')
    # Fail fast if the webcam could not be opened.
    if not cap.isOpened():
        raise IOError("Cannot open webcam")
    FRAME_WINDOW = st.image([])
    # Live preview loop: runs while the 'Show Webcam' checkbox is ticked.
    while run:
        ret, frame = cap.read()
        # OpenCV delivers BGR; convert to RGB for correct colours.
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB )
        FRAME_WINDOW.image(frame)
        if capture_button: # 'Campture' checkbox ticked: keep current frame
            # Release the camera immediately so exactly this frame is kept.
            cap.release()
            cv2.destroyAllWindows()
            print('Frame shape',frame.shape)
            captured_image = frame
            st.image(captured_image)
            st.write('Model is predicting it:')
            # Resize to the model input, add the batch axis, classify.
            captured_image = cv2.resize(captured_image, (224,224))
            img_array = np.expand_dims(captured_image, axis=0)
            predict_image_array(img_array)
            # NOTE(review): these local resets do not untick the Streamlit
            # checkboxes -- presumably relies on the script re-running on
            # the next interaction; confirm.
            run = False
            capture_button = False
        if quit_button: # 'Quit' checkbox ticked: stop the preview loop
            run = False
            capture_button = False
            quit_button = False
    cap.release()
    cv2.destroyAllWindows()
| 32.144444 | 142 | 0.635154 | import streamlit as st
import pandas as pd
import numpy as np
import cv2
import matplotlib.pyplot as plt
import tensorflow as tf
import os
from tensorflow.keras.preprocessing import image
st.title('Banknotes Classification')
menu = ['Home','Up Load & Predict', 'Capture From Webcam']
= 'model\my_model_checkpoint.h5'
class_names = ['1000', '10000', '100000', '2000', '20000', '200000', '5000', '50000', '500000']
def get_saved_model(Model_Path):
restored_model = tf.keras.models.load_model(Model_Path)
ed_model
def predict_image(image_path):
model = get_saved_model(Model_Path)
img = image.load_img(image_path, target_size=(224, 224))
img_array = image.img_to_array(img)
img_array = np.expand_dims(img_array, axis=0)
prediction = model.predict(img_array)
index = prediction.argmax()
l = list(prediction)
tmp_percent = l[0][index]*100
pred = class_names[index]
st.write('model prediction:')
st.write(pred)
st.write('Model Propotion:')
st.write(tmp_percent)
def predict_image_array(img_array):
model = get_saved_model(Model_Path)
prediction = model.predict(img_array)
index = prediction.argmax()
l = list(prediction)
tmp_percent = l[0][index]*100
pred = class_names[index]
st.write('model prediction:')
st.write(pred)
st.write('Model Propotion:')
st.write(tmp_percent)
print(l)
return l,index
choice = st.sidebar.selectbox('Danh mục', menu)
if choice == 'Home':
st.title('This is Home Page')
st.write('Xin chào, đây là ứng dụng phân loại tiền')
current_path = os.getcwd()
st.write('current path:')
st.write(current_path)
st.write('This is our model:')
test_image_path = "media\\test\\500000\\Sự-thật-về-cách-đoán-3-số-Seri-tiền-500k-200k-100k-50k-20k-10k.jpg"
st.write('For Example Below Image')
st.image(test_image_path,use_column_width='auto')
st.write("Model Can Understand This Value")
elif choice == 'Up Load & Predict':
st.title('Please Upload Your Banknotes Image, I Can Understand it:')
photo_uploaded = st.file_uploader('Choose your banknotes photo', ['png', 'jpg', 'jpeg'])
if photo_uploaded != None:
image_np = np.asarray(bytearray(photo_uploaded.read()), dtype=np.uint8)
img = cv2.imdecode(image_np, 1)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
print(img.shape)
st.image(img)
st.write(photo_uploaded.size)
st.write(photo_uploaded.type)
img = cv2.resize(img, (224,224), interpolation = cv2.INTER_AREA)
img_array = np.expand_dims(img, axis=0)
print(type(img))
predict_image_array(img_array)
elif choice == 'Capture From Webcam':
cap = cv2.VideoCapture(0)
run = st.checkbox('Show Webcam')
capture_button = st.checkbox('Campture')
quit_button = st.checkbox('Quit')
if not cap.isOpened():
raise IOError("Cannot open webcam")
FRAME_WINDOW = st.image([])
while run:
ret, frame = cap.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB )
FRAME_WINDOW.image(frame)
if capture_button:
cap.release()
cv2.destroyAllWindows()
print('Frame shape',frame.shape)
captured_image = frame
t.image(captured_image)
st.write('Model is predicting it:')
captured_image = cv2.resize(captured_image, (224,224))
img_array = np.expand_dims(captured_image, axis=0)
predict_image_array(img_array)
run = False
capture_button = False
if quit_button:
run = False
capture_button = False
quit_button = False
cap.release()
cv2.destroyAllWindows()
| true | true |
f71d49f08968408a9f9195a8e3883b923d68c5b2 | 15,027 | py | Python | assets/code/taiga/conf/local.py | tcarrio/cit348-devops | e22f1c40c07608fcf9b4634207d37087194e35c5 | [
"MIT"
] | null | null | null | assets/code/taiga/conf/local.py | tcarrio/cit348-devops | e22f1c40c07608fcf9b4634207d37087194e35c5 | [
"MIT"
] | null | null | null | assets/code/taiga/conf/local.py | tcarrio/cit348-devops | e22f1c40c07608fcf9b4634207d37087194e35c5 | [
"MIT"
] | null | null | null | # Copyright (C) 2014 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os.path, sys, os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
APPEND_SLASH = False
ALLOWED_HOSTS = ["*"]
ADMINS = (
("Admin", "example@example.com"),
)
#DATABASES = {
# "default": {
# "ENGINE": "django.db.backends.postgresql_psycopg2",
# "NAME": "taiga",
# "HOST": "postgres",
# "USER": "taiga",
# "PASSWORD": "thisisthetaigapassword",
# }
#}
# Database connection.  Each credential can be overridden through the
# environment so secrets need not live in this file; the defaults keep
# the previous behaviour for existing deployments.
# SECURITY NOTE(review): the fallback password below is committed to the
# repository -- rotate it and set TAIGA_DB_* in production.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': os.getenv('TAIGA_DB_NAME', 'taiga'),
        'USER': os.getenv('TAIGA_DB_USER', 'taiga'),
        'PASSWORD': os.getenv('TAIGA_DB_PASSWORD', 'r7OUeKlQ'),
        'HOST': os.getenv('TAIGA_DB_HOST', 'tom.ou.carrio.me'),
        'PORT': os.getenv('TAIGA_DB_PORT', '5432'),
    }
}
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "unique-snowflake"
}
}
PASSWORD_HASHERS = [
"django.contrib.auth.hashers.PBKDF2PasswordHasher",
]
# Default configuration for reverse proxy
USE_X_FORWARDED_HOST = True
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTOCOL", "https")
# Errors report configuration
SEND_BROKEN_LINK_EMAILS = True
IGNORABLE_404_ENDS = (".php", ".cgi")
IGNORABLE_404_STARTS = ("/phpmyadmin/",)
ATOMIC_REQUESTS = True
TIME_ZONE = "UTC"
LOGIN_URL="/auth/login/"
USE_TZ = True
USE_I18N = True
USE_L10N = True
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# Languages we provide translations for, out of the box.
LANGUAGES = [
#("af", "Afrikaans"), # Afrikaans
#("ar", "العربية"), # Arabic
#("ast", "Asturiano"), # Asturian
#("az", "Azərbaycan dili"), # Azerbaijani
#("bg", "Български"), # Bulgarian
#("be", "Беларуская"), # Belarusian
#("bn", "বাংলা"), # Bengali
#("br", "Bretón"), # Breton
#("bs", "Bosanski"), # Bosnian
("ca", "Català"), # Catalan
#("cs", "Čeština"), # Czech
#("cy", "Cymraeg"), # Welsh
#("da", "Dansk"), # Danish
("de", "Deutsch"), # German
#("el", "Ελληνικά"), # Greek
("en", "English (US)"), # English
#("en-au", "English (Australia)"), # Australian English
#("en-gb", "English (UK)"), # British English
#("eo", "esperanta"), # Esperanto
("es", "Español"), # Spanish
#("es-ar", "Español (Argentina)"), # Argentinian Spanish
#("es-mx", "Español (México)"), # Mexican Spanish
#("es-ni", "Español (Nicaragua)"), # Nicaraguan Spanish
#("es-ve", "Español (Venezuela)"), # Venezuelan Spanish
#("et", "Eesti"), # Estonian
#("eu", "Euskara"), # Basque
#("fa", "فارسی"), # Persian
("fi", "Suomi"), # Finnish
("fr", "Français"), # French
#("fy", "Frysk"), # Frisian
#("ga", "Irish"), # Irish
#("gl", "Galego"), # Galician
#("he", "עברית"), # Hebrew
#("hi", "हिन्दी"), # Hindi
#("hr", "Hrvatski"), # Croatian
#("hu", "Magyar"), # Hungarian
#("ia", "Interlingua"), # Interlingua
#("id", "Bahasa Indonesia"), # Indonesian
#("io", "IDO"), # Ido
#("is", "Íslenska"), # Icelandic
#("it", "Italiano"), # Italian
#("ja", "日本語"), # Japanese
#("ka", "ქართული"), # Georgian
#("kk", "Қазақша"), # Kazakh
#("km", "ភាសាខ្មែរ"), # Khmer
#("kn", "ಕನ್ನಡ"), # Kannada
#("ko", "한국어"), # Korean
#("lb", "Lëtzebuergesch"), # Luxembourgish
#("lt", "Lietuvių"), # Lithuanian
#("lv", "Latviešu"), # Latvian
#("mk", "Македонски"), # Macedonian
#("ml", "മലയാളം"), # Malayalam
#("mn", "Монгол"), # Mongolian
#("mr", "मराठी"), # Marathi
#("my", "မြန်မာ"), # Burmese
#("nb", "Norsk (bokmål)"), # Norwegian Bokmal
#("ne", "नेपाली"), # Nepali
("nl", "Nederlands"), # Dutch
#("nn", "Norsk (nynorsk)"), # Norwegian Nynorsk
#("os", "Ирон æвзаг"), # Ossetic
#("pa", "ਪੰਜਾਬੀ"), # Punjabi
#("pl", "Polski"), # Polish
#("pt", "Português (Portugal)"), # Portuguese
#("pt-br", "Português (Brasil)"), # Brazilian Portuguese
#("ro", "Română"), # Romanian
#("ru", "Русский"), # Russian
#("sk", "Slovenčina"), # Slovak
#("sl", "Slovenščina"), # Slovenian
#("sq", "Shqip"), # Albanian
#("sr", "Српски"), # Serbian
#("sr-latn", "srpski"), # Serbian Latin
#("sv", "Svenska"), # Swedish
#("sw", "Kiswahili"), # Swahili
#("ta", "தமிழ்"), # Tamil
#("te", "తెలుగు"), # Telugu
#("th", "ภาษาไทย"), # Thai
#("tr", "Türkçe"), # Turkish
#("tt", "татар теле"), # Tatar
#("udm", "удмурт кыл"), # Udmurt
#("uk", "Українська"), # Ukrainian
#("ur", "اردو"), # Urdu
#("vi", "Tiếng Việt"), # Vietnamese
#("zh-hans", "中文(简体)"), # Simplified Chinese
("zh-hant", "中文(香港)"), # Traditional Chinese
]
# Languages using BiDi (right-to-left) layout
LANGUAGES_BIDI = ["he", "ar", "fa", "ur"]
LOCALE_PATHS = (
os.path.join(BASE_DIR, "locale"),
os.path.join(BASE_DIR, "taiga", "locale"),
)
SITES = {
"api": {"domain": "localhost:8000", "scheme": "http", "name": "api"},
"front": {"domain": "localhost:9001", "scheme": "http", "name": "front"},
}
SITE_ID = "api"
# Session configuration (only used for admin)
SESSION_ENGINE = "django.contrib.sessions.backends.db"
SESSION_COOKIE_AGE = 1209600 # (2 weeks)
# MAIL OPTIONS
DEFAULT_FROM_EMAIL = "john@doe.com"
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
DJMAIL_REAL_BACKEND = "django.core.mail.backends.console.EmailBackend"
DJMAIL_SEND_ASYNC = True
DJMAIL_MAX_RETRY_NUMBER = 3
DJMAIL_TEMPLATE_EXTENSION = "jinja"
# Events backend
EVENTS_PUSH_BACKEND = "taiga.events.backends.postgresql.EventsPushBackend"
# EVENTS_PUSH_BACKEND = "taiga.events.backends.rabbitmq.EventsPushBackend"
# EVENTS_PUSH_BACKEND_OPTIONS = {"url": "//guest:guest@127.0.0.1/"}
# Message System
MESSAGE_STORAGE = "django.contrib.messages.storage.session.SessionStorage"
# The absolute URL is mandatory because attachment URLs depend on it.
# In production it should be something like https://media.taiga.io/.
# Robustness fix: fall back to "localhost" when API_NAME is unset --
# os.getenv("API_NAME") would otherwise return None and the string
# concatenation would raise TypeError at import time.
MEDIA_URL = "http://" + os.getenv("API_NAME", "localhost") + "/media/"
# Static URL is not widely used by taiga (only if the admin is enabled).
STATIC_URL = "http://" + os.getenv("API_NAME", "localhost") + "/static/"
# Static configuration.
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
STATIC_ROOT = os.path.join(BASE_DIR, "static")
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Don't forget to use absolute paths, not relative paths.
)
# Default file storage backend.
DEFAULT_FILE_STORAGE = "taiga.base.storage.FileSystemStorage"
# Django signing key.  Overridable via the environment; the committed
# fallback keeps existing deployments working unchanged.
# SECURITY NOTE(review): the default below is public in the repository --
# set TAIGA_SECRET_KEY (and rotate) in production.
SECRET_KEY = os.getenv("TAIGA_SECRET_KEY", "aw3+t2r(8(0kkrhg8)gx6i96v5^kv%6cfep9wxfom0%7dy0m9e")
TEMPLATE_LOADERS = [
"django_jinja.loaders.AppLoader",
"django_jinja.loaders.FileSystemLoader",
]
MIDDLEWARE_CLASSES = [
"taiga.base.middleware.cors.CoorsMiddleware",
"taiga.events.middleware.SessionIDMiddleware",
# Common middlewares
"django.middleware.common.CommonMiddleware",
"django.middleware.locale.LocaleMiddleware",
# Only needed by django admin
"django.contrib.sessions.middleware.SessionMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
]
TEMPLATE_CONTEXT_PROCESSORS = [
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.request",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.contrib.messages.context_processors.messages",
]
ROOT_URLCONF = "taiga.urls"
TEMPLATE_DIRS = [
os.path.join(BASE_DIR, "templates"),
]
INSTALLED_APPS = [
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.admin",
"django.contrib.staticfiles",
"django.contrib.sitemaps",
"taiga.base",
"taiga.base.api",
"taiga.locale",
"taiga.events",
"taiga.front",
"taiga.users",
"taiga.userstorage",
"taiga.projects",
"taiga.projects.references",
"taiga.projects.custom_attributes",
"taiga.projects.history",
"taiga.projects.notifications",
"taiga.projects.attachments",
"taiga.projects.votes",
"taiga.projects.milestones",
"taiga.projects.userstories",
"taiga.projects.tasks",
"taiga.projects.issues",
"taiga.projects.wiki",
"taiga.searches",
"taiga.timeline",
"taiga.mdrender",
"taiga.export_import",
"taiga.feedback",
"taiga.stats",
"taiga.hooks.github",
"taiga.hooks.gitlab",
"taiga.hooks.bitbucket",
"taiga.webhooks",
"djmail",
"django_jinja",
"django_jinja.contrib._humanize",
"sr",
"easy_thumbnails",
"raven.contrib.django.raven_compat",
"django_transactional_cleanup",
]
WSGI_APPLICATION = "taiga.wsgi.application"
LOGGING = {
"version": 1,
"disable_existing_loggers": True,
"filters": {
"require_debug_false": {
"()": "django.utils.log.RequireDebugFalse"
}
},
"formatters": {
"complete": {
"format": "%(levelname)s:%(asctime)s:%(module)s %(message)s"
},
"simple": {
"format": "%(levelname)s:%(asctime)s: %(message)s"
},
"null": {
"format": "%(message)s",
},
},
"handlers": {
"null": {
"level":"DEBUG",
"class":"django.utils.log.NullHandler",
},
"console":{
"level":"DEBUG",
"class":"logging.StreamHandler",
"formatter": "simple",
},
"mail_admins": {
"level": "ERROR",
"filters": ["require_debug_false"],
"class": "django.utils.log.AdminEmailHandler",
}
},
"loggers": {
"django": {
"handlers":["null"],
"propagate": True,
"level":"INFO",
},
"django.request": {
"handlers": ["mail_admins", "console"],
"level": "ERROR",
"propagate": False,
},
"taiga": {
"handlers": ["console"],
"level": "DEBUG",
"propagate": False,
}
}
}
AUTH_USER_MODEL = "users.User"
FORMAT_MODULE_PATH = "taiga.base.formats"
DATE_INPUT_FORMATS = (
"%Y-%m-%d", "%m/%d/%Y", "%d/%m/%Y", "%b %d %Y",
"%b %d, %Y", "%d %b %Y", "%d %b, %Y", "%B %d %Y",
"%B %d, %Y", "%d %B %Y", "%d %B, %Y"
)
# Authentication settings (only for django admin)
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend", # default
)
MAX_AGE_AUTH_TOKEN = None
MAX_AGE_CANCEL_ACCOUNT = 30 * 24 * 60 * 60 # 30 days in seconds
REST_FRAMEWORK = {
"DEFAULT_AUTHENTICATION_CLASSES": (
# Mainly used by taiga-front
"taiga.auth.backends.Token",
# Mainly used for api debug.
"taiga.auth.backends.Session",
),
"DEFAULT_THROTTLE_CLASSES": (
"taiga.base.throttling.AnonRateThrottle",
"taiga.base.throttling.UserRateThrottle"
),
"DEFAULT_THROTTLE_RATES": {
"anon": None,
"user": None,
"import-mode": None,
"import-dump-mode": "1/minute",
},
"FILTER_BACKEND": "taiga.base.filters.FilterBackend",
"EXCEPTION_HANDLER": "taiga.base.exceptions.exception_handler",
"PAGINATE_BY": 30,
"PAGINATE_BY_PARAM": "page_size",
"MAX_PAGINATE_BY": 1000,
"DATETIME_FORMAT": "%Y-%m-%dT%H:%M:%S%z"
}
DEFAULT_PROJECT_TEMPLATE = "scrum"
PUBLIC_REGISTER_ENABLED = False
SEARCHES_MAX_RESULTS = 150
SOUTH_MIGRATION_MODULES = {
'easy_thumbnails': 'easy_thumbnails.south_migrations',
}
DEFAULT_AVATAR_SIZE = 80 # 80x80 pixels
DEFAULT_BIG_AVATAR_SIZE = 300 # 300x300 pixels
DEFAULT_TIMELINE_IMAGE_SIZE = 640 # 640x??? pixels
THUMBNAIL_ALIASES = {
'': {
'avatar': {'size': (DEFAULT_AVATAR_SIZE, DEFAULT_AVATAR_SIZE), 'crop': True},
'big-avatar': {'size': (DEFAULT_BIG_AVATAR_SIZE, DEFAULT_BIG_AVATAR_SIZE), 'crop': True},
'timeline-image': {'size': (DEFAULT_TIMELINE_IMAGE_SIZE, 0), 'crop': True},
},
}
# GRAVATAR_DEFAULT_AVATAR = "img/user-noimage.png"
GRAVATAR_DEFAULT_AVATAR = ""
GRAVATAR_AVATAR_SIZE = DEFAULT_AVATAR_SIZE
TAGS_PREDEFINED_COLORS = ["#fce94f", "#edd400", "#c4a000", "#8ae234",
"#73d216", "#4e9a06", "#d3d7cf", "#fcaf3e",
"#f57900", "#ce5c00", "#729fcf", "#3465a4",
"#204a87", "#888a85", "#ad7fa8", "#75507b",
"#5c3566", "#ef2929", "#cc0000", "#a40000",
"#2e3436",]
# Feedback module settings
FEEDBACK_ENABLED = True
FEEDBACK_EMAIL = "support@taiga.io"
# Stats module settings
STATS_ENABLED = False
# 0 notifications will work in a synchronous way
# >0 an external process will check the pending notifications and will send them
# collapsed during that interval
CHANGE_NOTIFICATIONS_MIN_INTERVAL = 0 #seconds
# List of functions called for filling correctly the ProjectModulesConfig associated to a project
# This functions should receive a Project parameter and return a dict with the desired configuration
PROJECT_MODULES_CONFIGURATORS = {
"github": "taiga.hooks.github.services.get_or_generate_config",
"gitlab": "taiga.hooks.gitlab.services.get_or_generate_config",
"bitbucket": "taiga.hooks.bitbucket.services.get_or_generate_config",
}
BITBUCKET_VALID_ORIGIN_IPS = ["131.103.20.165", "131.103.20.166"]
GITLAB_VALID_ORIGIN_IPS = []
EXPORTS_TTL = 60 * 60 * 24 # 24 hours
CELERY_ENABLED = False
WEBHOOKS_ENABLED = False
# If is True /front/sitemap.xml show a valid sitemap of taiga-front client
FRONT_SITEMAP_ENABLED = False
FRONT_SITEMAP_CACHE_TIMEOUT = 24*60*60 # In second
from .sr import *
# NOTE: DON'T INSERT MORE SETTINGS AFTER THIS LINE
TEST_RUNNER = "django.test.runner.DiscoverRunner"

# ``manage.py test`` is not supported for this project -- the suite runs
# under py.test.  Print a coloured hint (ANSI bright red / bright
# yellow) and bail out before Django starts its own runner.
if "test" in sys.argv:
    print("\033[1;91mNo django tests.\033[0m")
    print("Try: \033[1;33mpy.test\033[0m")
    sys.exit(0)
| 30.419028 | 100 | 0.618886 |
import os.path, sys, os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
APPEND_SLASH = False
ALLOWED_HOSTS = ["*"]
ADMINS = (
("Admin", "example@example.com"),
)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'taiga',
'USER': 'taiga',
'PASSWORD': 'r7OUeKlQ',
'HOST': 'tom.ou.carrio.me',
'PORT': '5432',
}
}
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "unique-snowflake"
}
}
PASSWORD_HASHERS = [
"django.contrib.auth.hashers.PBKDF2PasswordHasher",
]
USE_X_FORWARDED_HOST = True
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTOCOL", "https")
SEND_BROKEN_LINK_EMAILS = True
IGNORABLE_404_ENDS = (".php", ".cgi")
IGNORABLE_404_STARTS = ("/phpmyadmin/",)
ATOMIC_REQUESTS = True
TIME_ZONE = "UTC"
LOGIN_URL="/auth/login/"
USE_TZ = True
USE_I18N = True
USE_L10N = True
LANGUAGE_CODE = 'en-us'
LANGUAGES = [
"English (US)"),
omain": "localhost:9001", "scheme": "http", "name": "front"},
}
SITE_ID = "api"
SESSION_ENGINE = "django.contrib.sessions.backends.db"
SESSION_COOKIE_AGE = 1209600
DEFAULT_FROM_EMAIL = "john@doe.com"
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
DJMAIL_REAL_BACKEND = "django.core.mail.backends.console.EmailBackend"
DJMAIL_SEND_ASYNC = True
DJMAIL_MAX_RETRY_NUMBER = 3
DJMAIL_TEMPLATE_EXTENSION = "jinja"
EVENTS_PUSH_BACKEND = "taiga.events.backends.postgresql.EventsPushBackend"
MESSAGE_STORAGE = "django.contrib.messages.storage.session.SessionStorage"
MEDIA_URL = "http://" + os.getenv("API_NAME") + "/media/"
STATIC_URL = "http://" + os.getenv("API_NAME") + "/static/"
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
STATIC_ROOT = os.path.join(BASE_DIR, "static")
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
STATICFILES_DIRS = (
)
# Defautl storage
DEFAULT_FILE_STORAGE = "taiga.base.storage.FileSystemStorage"
SECRET_KEY = "aw3+t2r(8(0kkrhg8)gx6i96v5^kv%6cfep9wxfom0%7dy0m9e"
TEMPLATE_LOADERS = [
"django_jinja.loaders.AppLoader",
"django_jinja.loaders.FileSystemLoader",
]
MIDDLEWARE_CLASSES = [
"taiga.base.middleware.cors.CoorsMiddleware",
"taiga.events.middleware.SessionIDMiddleware",
# Common middlewares
"django.middleware.common.CommonMiddleware",
"django.middleware.locale.LocaleMiddleware",
# Only needed by django admin
"django.contrib.sessions.middleware.SessionMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
]
TEMPLATE_CONTEXT_PROCESSORS = [
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.request",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.contrib.messages.context_processors.messages",
]
ROOT_URLCONF = "taiga.urls"
TEMPLATE_DIRS = [
os.path.join(BASE_DIR, "templates"),
]
INSTALLED_APPS = [
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.admin",
"django.contrib.staticfiles",
"django.contrib.sitemaps",
"taiga.base",
"taiga.base.api",
"taiga.locale",
"taiga.events",
"taiga.front",
"taiga.users",
"taiga.userstorage",
"taiga.projects",
"taiga.projects.references",
"taiga.projects.custom_attributes",
"taiga.projects.history",
"taiga.projects.notifications",
"taiga.projects.attachments",
"taiga.projects.votes",
"taiga.projects.milestones",
"taiga.projects.userstories",
"taiga.projects.tasks",
"taiga.projects.issues",
"taiga.projects.wiki",
"taiga.searches",
"taiga.timeline",
"taiga.mdrender",
"taiga.export_import",
"taiga.feedback",
"taiga.stats",
"taiga.hooks.github",
"taiga.hooks.gitlab",
"taiga.hooks.bitbucket",
"taiga.webhooks",
"djmail",
"django_jinja",
"django_jinja.contrib._humanize",
"sr",
"easy_thumbnails",
"raven.contrib.django.raven_compat",
"django_transactional_cleanup",
]
WSGI_APPLICATION = "taiga.wsgi.application"
LOGGING = {
"version": 1,
"disable_existing_loggers": True,
"filters": {
"require_debug_false": {
"()": "django.utils.log.RequireDebugFalse"
}
},
"formatters": {
"complete": {
"format": "%(levelname)s:%(asctime)s:%(module)s %(message)s"
},
"simple": {
"format": "%(levelname)s:%(asctime)s: %(message)s"
},
"null": {
"format": "%(message)s",
},
},
"handlers": {
"null": {
"level":"DEBUG",
"class":"django.utils.log.NullHandler",
},
"console":{
"level":"DEBUG",
"class":"logging.StreamHandler",
"formatter": "simple",
},
"mail_admins": {
"level": "ERROR",
"filters": ["require_debug_false"],
"class": "django.utils.log.AdminEmailHandler",
}
},
"loggers": {
"django": {
"handlers":["null"],
"propagate": True,
"level":"INFO",
},
"django.request": {
"handlers": ["mail_admins", "console"],
"level": "ERROR",
"propagate": False,
},
"taiga": {
"handlers": ["console"],
"level": "DEBUG",
"propagate": False,
}
}
}
AUTH_USER_MODEL = "users.User"
FORMAT_MODULE_PATH = "taiga.base.formats"
DATE_INPUT_FORMATS = (
"%Y-%m-%d", "%m/%d/%Y", "%d/%m/%Y", "%b %d %Y",
"%b %d, %Y", "%d %b %Y", "%d %b, %Y", "%B %d %Y",
"%B %d, %Y", "%d %B %Y", "%d %B, %Y"
)
# Authentication settings (only for django admin)
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend", # default
)
MAX_AGE_AUTH_TOKEN = None
MAX_AGE_CANCEL_ACCOUNT = 30 * 24 * 60 * 60 # 30 days in seconds
REST_FRAMEWORK = {
"DEFAULT_AUTHENTICATION_CLASSES": (
# Mainly used by taiga-front
"taiga.auth.backends.Token",
# Mainly used for api debug.
"taiga.auth.backends.Session",
),
"DEFAULT_THROTTLE_CLASSES": (
"taiga.base.throttling.AnonRateThrottle",
"taiga.base.throttling.UserRateThrottle"
),
"DEFAULT_THROTTLE_RATES": {
"anon": None,
"user": None,
"import-mode": None,
"import-dump-mode": "1/minute",
},
"FILTER_BACKEND": "taiga.base.filters.FilterBackend",
"EXCEPTION_HANDLER": "taiga.base.exceptions.exception_handler",
"PAGINATE_BY": 30,
"PAGINATE_BY_PARAM": "page_size",
"MAX_PAGINATE_BY": 1000,
"DATETIME_FORMAT": "%Y-%m-%dT%H:%M:%S%z"
}
DEFAULT_PROJECT_TEMPLATE = "scrum"
PUBLIC_REGISTER_ENABLED = False
# --- Search / thumbnails ----------------------------------------------------
SEARCHES_MAX_RESULTS = 150
SOUTH_MIGRATION_MODULES = {
    'easy_thumbnails': 'easy_thumbnails.south_migrations',
}
DEFAULT_AVATAR_SIZE = 80 # 80x80 pixels
DEFAULT_BIG_AVATAR_SIZE = 300 # 300x300 pixels
DEFAULT_TIMELINE_IMAGE_SIZE = 640 # 640 px wide, height scaled (0 = keep aspect)
THUMBNAIL_ALIASES = {
    '': {
        'avatar': {'size': (DEFAULT_AVATAR_SIZE, DEFAULT_AVATAR_SIZE), 'crop': True},
        'big-avatar': {'size': (DEFAULT_BIG_AVATAR_SIZE, DEFAULT_BIG_AVATAR_SIZE), 'crop': True},
        'timeline-image': {'size': (DEFAULT_TIMELINE_IMAGE_SIZE, 0), 'crop': True},
    },
}
# Empty string: let Gravatar serve its own default image.
# GRAVATAR_DEFAULT_AVATAR = "img/user-noimage.png"
GRAVATAR_DEFAULT_AVATAR = ""
GRAVATAR_AVATAR_SIZE = DEFAULT_AVATAR_SIZE
# Palette offered in the UI for project tags (Tango color scheme hex values).
TAGS_PREDEFINED_COLORS = ["#fce94f", "#edd400", "#c4a000", "#8ae234",
                          "#73d216", "#4e9a06", "#d3d7cf", "#fcaf3e",
                          "#f57900", "#ce5c00", "#729fcf", "#3465a4",
                          "#204a87", "#888a85", "#ad7fa8", "#75507b",
                          "#5c3566", "#ef2929", "#cc0000", "#a40000",
                          "#2e3436",]
# Feedback module settings
FEEDBACK_ENABLED = True
FEEDBACK_EMAIL = "support@taiga.io"
# Stats module settings
STATS_ENABLED = False
# 0  -> notifications are delivered synchronously
# >0 -> an external process checks pending notifications and sends them,
#       collapsed (batched) over that interval
CHANGE_NOTIFICATIONS_MIN_INTERVAL = 0 # seconds
# Functions that fill in the ProjectModulesConfig associated to a project.
# Each receives a Project instance and returns a dict with the desired
# configuration for that hook.
PROJECT_MODULES_CONFIGURATORS = {
    "github": "taiga.hooks.github.services.get_or_generate_config",
    "gitlab": "taiga.hooks.gitlab.services.get_or_generate_config",
    "bitbucket": "taiga.hooks.bitbucket.services.get_or_generate_config",
}
# IP allow-lists for incoming webhook calls.
BITBUCKET_VALID_ORIGIN_IPS = ["131.103.20.165", "131.103.20.166"]
GITLAB_VALID_ORIGIN_IPS = []
EXPORTS_TTL = 60 * 60 * 24 # 24 hours
CELERY_ENABLED = False
WEBHOOKS_ENABLED = False
# If True, /front/sitemap.xml serves a valid sitemap of the taiga-front client
FRONT_SITEMAP_ENABLED = False
FRONT_SITEMAP_CACHE_TIMEOUT = 24*60*60 # in seconds
# NOTE(review): star-import presumably pulls site/locale overrides from .sr;
# it is deliberately placed last so it can override anything defined above.
from .sr import *
# NOTE: DON'T INSERT MORE SETTINGS AFTER THIS LINE
TEST_RUNNER="django.test.runner.DiscoverRunner"
# Abort Django's test runner: this project is tested with py.test instead.
if "test" in sys.argv:
    print ("\033[1;91mNo django tests.\033[0m")
    print ("Try: \033[1;33mpy.test\033[0m")
    sys.exit(0)
| true | true |
f71d4c72d1f7a82364f42009fa23310a38a6270a | 3,806 | py | Python | python3.4Smartforest/lib/python3.4/site-packages/django/core/cache/__init__.py | letouriste001/SmartForest_2.0 | 109b78bf1e8c8404800f377ab969395ccbb617be | [
"MIT"
] | null | null | null | python3.4Smartforest/lib/python3.4/site-packages/django/core/cache/__init__.py | letouriste001/SmartForest_2.0 | 109b78bf1e8c8404800f377ab969395ccbb617be | [
"MIT"
] | null | null | null | python3.4Smartforest/lib/python3.4/site-packages/django/core/cache/__init__.py | letouriste001/SmartForest_2.0 | 109b78bf1e8c8404800f377ab969395ccbb617be | [
"MIT"
] | null | null | null | """
Caching framework.
This package defines set of cache backends that all conform to a simple API.
In a nutshell, a cache is a set of values -- which can be any object that
may be pickled -- identified by string keys. For the complete API, see
the abstract BaseCache class in django.core.cache.backends.base.
Client code should use the `cache` variable defined here to access the default
cache backend and look up non-default cache backends in the `caches` dict-like
object.
See docs/topics/cache.txt for information on the public API.
"""
from threading import local
from django.conf import settings
from django.core import signals
from django.core.cache.backends.base import (
BaseCache, CacheKeyWarning, InvalidCacheBackendError,
)
from django.utils.module_loading import import_string
__all__ = [
'cache', 'DEFAULT_CACHE_ALIAS', 'InvalidCacheBackendError',
'CacheKeyWarning', 'BaseCache',
]
DEFAULT_CACHE_ALIAS = 'default'
def _create_cache(backend, **kwargs):
    """Instantiate the cache backend for *backend*.

    *backend* is normally an alias into ``settings.CACHES``; a dotted
    import path to a backend class is accepted as well.  Extra keyword
    arguments override the configured parameters.

    Raises InvalidCacheBackendError when the backend class cannot be
    imported.
    """
    try:
        try:
            conf = settings.CACHES[backend]
        except KeyError:
            # Not a configured alias -- treat it as a dotted path and
            # probe that it is importable before going any further.
            try:
                import_string(backend)
            except ImportError as e:
                raise InvalidCacheBackendError("Could not find backend '%s': %s" % (
                    backend, e))
            location = kwargs.pop('LOCATION', '')
            params = kwargs
        else:
            # Configured alias: start from the settings entry, let the
            # caller's kwargs win on conflicts.
            params = dict(conf, **kwargs)
            backend = params.pop('BACKEND')
            location = params.pop('LOCATION', '')
        backend_cls = import_string(backend)
    except ImportError as e:
        raise InvalidCacheBackendError(
            "Could not find backend '%s': %s" % (backend, e))
    return backend_cls(location, params)
class CacheHandler(object):
    """Lazily build and hand out Cache instances by alias.

    Instances live in a thread-local mapping, so each thread gets at
    most one cache object per alias.
    """
    def __init__(self):
        self._caches = local()

    def __getitem__(self, alias):
        # The thread-local starts out without a 'caches' attribute; lazily
        # create the per-thread mapping on first access.
        cache_map = getattr(self._caches, 'caches', None)
        if cache_map is None:
            cache_map = self._caches.caches = {}
        if alias in cache_map:
            return cache_map[alias]
        if alias not in settings.CACHES:
            raise InvalidCacheBackendError(
                "Could not find config for '%s' in settings.CACHES" % alias
            )
        cache_map[alias] = cache = _create_cache(alias)
        return cache

    def all(self):
        """Return every cache instantiated so far in this thread."""
        return getattr(self._caches, 'caches', {}).values()


caches = CacheHandler()
class DefaultCacheProxy(object):
    """
    Proxy access to the default Cache object's attributes.
    This allows the legacy `cache` object to be thread-safe using the new
    ``caches`` API.
    """
    # Every access re-resolves the default alias through `caches`, so each
    # thread transparently talks to its own backend instance.
    def __getattr__(self, name):
        return getattr(caches[DEFAULT_CACHE_ALIAS], name)
    def __setattr__(self, name, value):
        return setattr(caches[DEFAULT_CACHE_ALIAS], name, value)
    def __delattr__(self, name):
        return delattr(caches[DEFAULT_CACHE_ALIAS], name)
    # Forward the container/equality protocol as well, so `key in cache`
    # and comparisons behave like the underlying backend.
    def __contains__(self, key):
        return key in caches[DEFAULT_CACHE_ALIAS]
    def __eq__(self, other):
        return caches[DEFAULT_CACHE_ALIAS] == other
    def __ne__(self, other):
        return caches[DEFAULT_CACHE_ALIAS] != other
cache = DefaultCacheProxy()
def close_caches(**kwargs):
    """Signal handler: close every per-thread cache instance."""
    # Some caches -- python-memcached in particular -- need to do a cleanup at the
    # end of a request cycle. If not implemented in a particular backend
    # cache.close is a no-op
    for cache in caches.all():
        cache.close()
# Run the cleanup after every request.
signals.request_finished.connect(close_caches)
| 3,806 | 3,806 | 0.656332 | true | true | |
f71d4ceea4d304b20a6df92edf9fa956bd825f05 | 1,877 | py | Python | api_mihai/management/commands/london_data_importer.py | MihaiVisu/hons-backend | 95e9afc32289bc753d689c8465c991d1bcc164e0 | [
"MIT"
] | null | null | null | api_mihai/management/commands/london_data_importer.py | MihaiVisu/hons-backend | 95e9afc32289bc753d689c8465c991d1bcc164e0 | [
"MIT"
] | 15 | 2020-01-28T22:20:34.000Z | 2022-03-11T23:20:39.000Z | api_mihai/management/commands/london_data_importer.py | MihaiVisu/hons-backend | 95e9afc32289bc753d689c8465c991d1bcc164e0 | [
"MIT"
] | null | null | null | import csv
import calendar
import datetime
from django.core.management.base import BaseCommand, CommandError
from api_mihai.models import CollectedData
class Command(BaseCommand):
    help = 'Imports the CSV file from the collected data to the database'
    def add_arguments(self, parser):
        """Register CLI arguments: the CSV path and the target dataset id."""
        parser.add_argument('file_name', type=str)
        parser.add_argument('dataset', type=int)
    # method that converts date in date format from the london data into phoneTimestamp
    # for database records
    @staticmethod
    def __convert_date_to_timestamp(date):
        """Convert a 'YYYY-MM-DD HH:MM:SS.ffffff' string to a UTC epoch int."""
        d = datetime.datetime.strptime(date, "%Y-%m-%d %H:%M:%S.%f").utctimetuple()
        return calendar.timegm(d)
    # method that gets time as a string from the date in the format
    # of the london data, for database records
    @staticmethod
    def __get_time_string(date):
        """Extract the wall-clock time ('HH:MM:SS') from the same date format."""
        return datetime.datetime.strptime(date, "%Y-%m-%d %H:%M:%S.%f").time().strftime("%H:%M:%S")
    def handle(self, *args, **options):
        """Stream the CSV and create one CollectedData row per usable line.

        Rows with an empty temperature or humidity value are skipped.
        """
        # bin0..bin15 -- per-row histogram columns copied over verbatim
        # (presumably particle-size bins; confirm against sensor docs).
        bin_vals = ['bin'+str(num) for num in range(0,16)]
        with open(options['file_name'], 'rt') as csvfile:
            reader = csv.DictReader(csvfile, delimiter=',', quotechar='\"')
            for row in reader:
                if row['temperature'] == "" or row['humidity'] == "":
                    continue
                timestamp = self.__convert_date_to_timestamp(row['phoneTimestamp'])
                time = self.__get_time_string(row['phoneTimestamp'])
                feature = CollectedData(
                    phone_timestamp=timestamp,
                    pm1=float(row['pm1']),
                    pm2_5=float(row['pm2_5']),
                    pm10=float(row['pm10']),
                    temperature=float(row['temperature']),
                    humidity=float(row['humidity']),
                    latitude=float(row['gpsLatitude']),
                    longitude=float(row['gpsLongitude']),
                    time=time,
                    dataset_id=options['dataset'],
                    transport_label_id=row['environment_index'],
                )
                for val in bin_vals:
                    setattr(feature, val, row[val])
                # save newly created feature
                feature.save()
| 31.283333 | 96 | 0.696857 | import csv
import calendar
import datetime
from django.core.management.base import BaseCommand, CommandError
from api_mihai.models import CollectedData
class Command(BaseCommand):
help = 'Imports the CSV file from the collected data to the database'
def add_arguments(self, parser):
parser.add_argument('file_name', type=str)
parser.add_argument('dataset', type=int)
@staticmethod
def __convert_date_to_timestamp(date):
d = datetime.datetime.strptime(date, "%Y-%m-%d %H:%M:%S.%f").utctimetuple()
return calendar.timegm(d)
@staticmethod
def __get_time_string(date):
return datetime.datetime.strptime(date, "%Y-%m-%d %H:%M:%S.%f").time().strftime("%H:%M:%S")
def handle(self, *args, **options):
bin_vals = ['bin'+str(num) for num in range(0,16)]
with open(options['file_name'], 'rt') as csvfile:
reader = csv.DictReader(csvfile, delimiter=',', quotechar='\"')
for row in reader:
if row['temperature'] == "" or row['humidity'] == "":
continue
timestamp = self.__convert_date_to_timestamp(row['phoneTimestamp'])
time = self.__get_time_string(row['phoneTimestamp'])
feature = CollectedData(
phone_timestamp=timestamp,
pm1=float(row['pm1']),
pm2_5=float(row['pm2_5']),
pm10=float(row['pm10']),
temperature=float(row['temperature']),
humidity=float(row['humidity']),
latitude=float(row['gpsLatitude']),
longitude=float(row['gpsLongitude']),
time=time,
dataset_id=options['dataset'],
transport_label_id=row['environment_index'],
)
for val in bin_vals:
setattr(feature, val, row[val])
# save newly created feature
feature.save()
| true | true |
f71d4d2cd8852f1b8e62185fadfb4d3d807e03b4 | 7,873 | py | Python | rules/rule_functions.py | propelwise/sarle-labeler | 8cdb3d494b46df2bc820592e14c9c8e23d08fa07 | [
"MIT"
] | 2 | 2020-11-24T00:53:28.000Z | 2020-11-24T02:05:39.000Z | rules/rule_functions.py | propelwise/sarle-labeler | 8cdb3d494b46df2bc820592e14c9c8e23d08fa07 | [
"MIT"
] | null | null | null | rules/rule_functions.py | propelwise/sarle-labeler | 8cdb3d494b46df2bc820592e14c9c8e23d08fa07 | [
"MIT"
] | 2 | 2021-03-17T16:36:35.000Z | 2022-01-10T08:20:52.000Z | #rule_functions.py
#Copyright (c) 2020 Rachel Lea Ballantyne Draelos
#MIT License
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE
def delete_mainword(sentence, mainword, **kwargs):
    """Remove every occurrence of <mainword> from <sentence>.

    Returns (changed, new_sentence); changed is False when <mainword>
    is absent and the sentence is returned untouched.
    """
    if mainword in sentence:
        return True, sentence.replace(mainword, '')
    return False, sentence
def delete_part(sentence, delete_part, mainword, **kwargs):
    """Drop everything on one side of <mainword> (the mainword included).

    delete_part='after'  -> keep only the text before the first occurrence.
    delete_part='before' -> keep only the text after the last occurrence.
    Returns (changed, new_sentence).
    """
    if mainword not in sentence:
        return False, sentence
    if delete_part == 'after':
        before, _, _ = sentence.partition(mainword)
        return True, before
    if delete_part == 'before':
        # rpartition splits at the LAST occurrence, so everything before
        # the final mainword is discarded.
        _, _, after = sentence.rpartition(mainword)
        return True, after
def delete_part_until(sentence, delete_part, mainword, until_hit, **kwargs):
    """Delete words on one side of <mainword> until a stop word is hit.

    delete_part='after'  -> remove <mainword> and the words following it,
                            up to (not including) the earliest string from
                            <until_hit> found after it.
    delete_part='before' -> remove the words between <mainword> and the
                            latest string from <until_hit> found before it
                            (the stop word itself is kept).
    Returns (changed, new_sentence); changed is False when <mainword>
    is absent.
    """
    if mainword not in sentence:
        return False, sentence
    senthalved = sentence.split(mainword)
    if delete_part == 'after':
        keep = senthalved[0]   # text before the mainword is always kept
        dregs = senthalved[1]  # part of the 'after' text may survive
        idx = len(dregs)       # default: drop everything after the mainword
        for u in until_hit:
            d = dregs.find(u)
            if d != -1 and d < idx:
                idx = d        # earliest stop word wins
        return True, keep + ' ' + dregs[idx:]
    if delete_part == 'before':
        keep = senthalved[1]   # text after the mainword is always kept
        dregs = senthalved[0]
        idx = 0                # default: drop everything before the mainword
        for u in until_hit:
            d = dregs.find(u)
            # BUG FIX: previously len(u) was added *before* the -1 check, so a
            # missing stop word produced a bogus positive cut position and the
            # guard `d != -1` could never fire.
            if d != -1 and d + len(u) > idx:
                idx = d + len(u)   # keep through the latest stop word
        # no separator needed: one is already included in the kept text
        return True, dregs[0:idx] + keep
def delete_entire_unless_immediate(sentence, mainword, position, wrange, unless_in, **kwargs):
    """Wipe the whole sentence when <mainword> appears, unless rescued.

    The sentence is kept intact when any string in <unless_in> occurs
    within the <wrange> words immediately <position>='before' or
    <position>='after' the mainword.  Returns (changed, new_sentence).
    """
    if mainword not in sentence:
        return False, sentence
    stripped = mainword.strip()
    tokens = sentence.split()
    if position == 'after':
        if tokens[-1] == stripped:
            # mainword ends the sentence: nothing after it can rescue it
            return True, ''
        window = ' '.join(sentence.split(mainword)[1].split()[:wrange])
    elif position == 'before':
        if tokens[0] == stripped:
            # mainword starts the sentence: nothing before it can rescue it
            return True, ''
        window = ' '.join(sentence.split(mainword)[0].split()[-1 * wrange:])
    if any(u in window for u in unless_in):
        return False, sentence
    return True, ''
def delete(sentence, mainword, **kwargs):
    """Wipe the whole sentence whenever <mainword> occurs in it.

    Returns (changed, new_sentence).
    """
    if mainword in sentence:
        return True, ''
    return False, sentence
def delete_if_first_word(sentence, mainword, **kwargs):
    """Wipe the sentence when its first token is exactly <mainword>.

    The containment pre-check also protects against an empty sentence
    (left over from earlier processing steps).
    """
    if mainword not in sentence:
        return False, sentence
    first_token = sentence.split()[0]
    return (True, '') if first_token == mainword else (False, sentence)
def delete_one_before_mainword(sentence, mainword, **kwargs):
    """Drop <mainword>, everything after it, and the single word before it.

    Used for ambiguity phrases, e.g. 'there is scarring vs atelectasis'
    with mainword ' vs' becomes 'there is' (both candidates removed).
    Returns (changed, new_sentence).
    """
    if mainword not in sentence:
        return False, sentence
    words_before = sentence.split(mainword)[0].split()
    return True, ' '.join(words_before[:-1])
def non_handling(sentence, mainword, **kwargs):
    """Neutralize the 'non' prefix so the term search is not fooled.

    If ' non ' appears as a standalone word, drop it together with the
    word that follows it; otherwise drop the first word that contains
    'non' (e.g. noncalcified, nontuberculous, noninfectious).  Returns
    (changed, new_sentence); the rewritten sentence is re-padded with a
    leading and trailing space.
    """
    if 'non' not in sentence:
        return False, sentence
    tokens = sentence.split()
    if ' non ' in sentence:  # standalone word 'non'
        i = tokens.index('non')
        kept = tokens[:i] + tokens[i + 2:]  # drop 'non' and its successor
        return True, ' ' + ' '.join(kept) + ' '
    # 'non' is prefixing (or embedded in) another word: drop that word
    for tok in tokens:
        if 'non' in tok:
            tokens.remove(tok)
            return True, ' ' + ' '.join(tokens) + ' '
def patent_handling(sentence, mainword, **kwargs):
    """Handle the word 'patent' (a normal finding).

    When 'patent' opens the sentence, strip it and the following words
    up to a 'status'/'with' qualifier; otherwise drop everything before
    it.  Returns (changed, new_sentence).
    """
    assert mainword == ' patent'
    if 'patent' not in sentence:
        return False, sentence
    if sentence.split()[0] == 'patent':
        # leading 'patent': keep only the qualifier tail, if any
        return delete_part_until(sentence, delete_part='after', mainword='patent', until_hit=['status', 'with'])
    # 'patent' mid/end of sentence: the text before it is a normal finding
    return delete_part(sentence, delete_part='before', mainword='patent')
def clear_handling(sentence, mainword, **kwargs):
    """Handle the word 'clear' (a normal finding).

    Drops everything before ' clear', then everything after it up to a
    'status' qualifier (so 'status post ...' details survive).  Returns
    (changed, new_sentence).
    """
    assert mainword == ' clear'
    if ' clear' not in sentence:
        return False, sentence
    trimmed_front, sentence = delete_part(sentence, delete_part='before', mainword=mainword)
    # re-anchor 'clear' at the front so the 'after' trim below can find it
    sentence = ' clear ' + sentence
    trimmed_back, sentence = delete_part_until(sentence, delete_part='after', mainword=mainword, until_hit=['status'])
    return (trimmed_front or trimmed_back), sentence
def subcentimeter_handling(sentence, mainword, **kwargs):
    """Remove a 'subcentimeter ... node' span (normal-sized lymph nodes).

    Cuts from the last ' subcentimeter' through the last 'node' stem;
    note any trailing characters of that word (e.g. the 's' of 'nodes')
    are left in place, e.g.
    ' scattered subcentimeter lymph nodes seen' -> ' scattereds seen'.
    Returns (changed, new_sentence); nothing changes when no 'node'
    follows 'subcentimeter'.
    """
    assert mainword == ' subcentimeter'
    if mainword not in sentence:
        return False, sentence
    after_main = ' '.join(sentence.split(mainword)[1:])
    if 'node' not in after_main:
        return False, sentence
    cut_start = sentence.rfind(' subcentimeter')
    cut_end = sentence.rfind('node') + len('node')
    return True, sentence[:cut_start] + sentence[cut_end:]
def delete_mainword(sentence, mainword, **kwargs):
if mainword not in sentence:
return False, sentence
return True, sentence.replace(mainword,'')
def delete_part(sentence, delete_part, mainword, **kwargs):
if mainword not in sentence:
return False, sentence
senthalved = sentence.split(mainword)
if delete_part == 'after':
return True, senthalved[0]
if delete_part == 'before':
return True, senthalved[-1]
def delete_part_until(sentence, delete_part, mainword, until_hit, **kwargs):
if mainword not in sentence:
return False, sentence
senthalved = sentence.split(mainword)
if delete_part == 'after':
keep = senthalved[0]
dregs = senthalved[1]
idx = len(dregs)
for u in until_hit:
d = dregs.find(u)
if d < idx and d!=-1:
idx = d
keep2 = dregs[idx:]
return True, keep+' '+keep2
if delete_part == 'before':
keep = senthalved[1]
dregs = senthalved[0]
idx = 0
for u in until_hit:
d = dregs.find(u)+len(u)
if d > idx and d!=-1:
idx = d
keep2 = dregs[0:idx]
return True, keep2+keep #don't need a space because one will already be included
def delete_entire_unless_immediate(sentence, mainword, position, wrange, unless_in, **kwargs):
if mainword not in sentence:
return False, sentence
if position == 'after':
if sentence.split()[-1]==mainword.strip():
return True, ''
possible_save_words = ' '.join(sentence.split(mainword)[1].split()[0:wrange])
elif position == 'before':
if sentence.split()[0]==mainword.strip(): #mainword is the first word so sentence can't be saved (no words before)
return True, ''
possible_save_words = ' '.join(sentence.split(mainword)[0].split()[-1*wrange:])
saved = False
for u in unless_in:
if u in possible_save_words:
saved = True
if saved:
return False, sentence
else:
return True, ''
def delete(sentence, mainword, **kwargs):
if mainword not in sentence:
return False, sentence
else:
return True, ''
def delete_if_first_word(sentence, mainword, **kwargs):
if mainword not in sentence:
return False, sentence
if mainword == sentence.split()[0]:
return True, ''
else:
return False, sentence
def delete_one_before_mainword(sentence, mainword, **kwargs):
if mainword in sentence:
s = sentence.split(mainword)[0].split()
return True, (' ').join(s[0:-1])
else:
return False, sentence
def non_handling(sentence, mainword, **kwargs):
if 'non' not in sentence:
return False, sentence
else:
sentlist = sentence.split()
if ' non ' in sentence:
idx = sentlist.index('non')
return True, ' '+' '.join(sentlist[0:idx]+sentlist[idx+2:])+' '
else:
for word in sentlist:
if 'non' in word:
sentlist.remove(word)
return True, ' '+' '.join(sentlist)+' '
def patent_handling(sentence, mainword, **kwargs):
assert mainword==' patent'
if 'patent' not in sentence:
return False, sentence
sentlist = sentence.split()
if sentlist[0]=='patent':
return delete_part_until(sentence, delete_part = 'after',mainword = 'patent', until_hit = ['status','with'])
else:
return delete_part(sentence, delete_part = 'before',mainword = 'patent')
def clear_handling(sentence, mainword, **kwargs):
assert mainword==' clear'
if ' clear' not in sentence:
return False, sentence
changed1, sentence = delete_part(sentence, delete_part='before',mainword=mainword)
sentence = ' clear '+sentence
changed2, sentence = delete_part_until(sentence, delete_part='after',mainword=mainword,until_hit=['status'])
return (changed1 or changed2), sentence
def subcentimeter_handling(sentence, mainword, **kwargs):
assert mainword==' subcentimeter'
if mainword not in sentence:
return False, sentence
if 'node' in ' '.join(sentence.split(mainword)[1:]):
pre_idx = sentence.rfind(' subcentimeter')
pre = sentence[0:pre_idx]
post_idx = sentence.rfind('node')+len('node')
post = sentence[post_idx:]
sentence = pre+post
return True, sentence
else:
return False, sentence | true | true |
f71d4d46c8227bfceeb5fb3080123438f7335d8f | 230 | py | Python | post-processors/CSDN/main.py | kingking888/SearchEngine | 83729fcc4e872277c7eaeb6d26ce2c3e425ef6a2 | [
"MIT"
] | 6 | 2019-07-05T02:47:54.000Z | 2021-05-03T08:33:28.000Z | post-processors/CSDN/main.py | SpanockLau/SearchEngine | 83729fcc4e872277c7eaeb6d26ce2c3e425ef6a2 | [
"MIT"
] | 8 | 2020-08-06T03:34:38.000Z | 2022-02-26T15:22:28.000Z | post-processors/CSDN/main.py | SpanockLau/SearchEngine | 83729fcc4e872277c7eaeb6d26ce2c3e425ef6a2 | [
"MIT"
] | 4 | 2019-07-05T08:03:51.000Z | 2019-10-05T06:48:24.000Z | from common.DataFilterHandler import DataFilterHandler
if __name__ == '__main__':
    # First filter the scraped CSDN data in the TechHub database.
    # Renamed from `filter`, which shadowed the Python builtin.
    data_filter = DataFilterHandler(database_name='TechHub', collection_name='CSDN', use_localhost=False)
    data_filter.start()
if __name__ == '__main__':
filter = DataFilterHandler(database_name='TechHub', collection_name='CSDN', use_localhost=False)
filter.start() | true | true |
f71d4f324eb82f6fada708be05c43ff1d35f6fbe | 1,562 | py | Python | aliyun-python-sdk-baas/aliyunsdkbaas/request/v20180731/DescribeSmartContractJobsRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 1,001 | 2015-07-24T01:32:41.000Z | 2022-03-25T01:28:18.000Z | aliyun-python-sdk-baas/aliyunsdkbaas/request/v20180731/DescribeSmartContractJobsRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 363 | 2015-10-20T03:15:00.000Z | 2022-03-08T12:26:19.000Z | aliyun-python-sdk-baas/aliyunsdkbaas/request/v20180731/DescribeSmartContractJobsRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 682 | 2015-09-22T07:19:02.000Z | 2022-03-22T09:51:46.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkbaas.endpoint import endpoint_data
class DescribeSmartContractJobsRequest(RpcRequest):
    """POST request for the Baas 'DescribeSmartContractJobs' API (2018-07-31)."""

    def __init__(self):
        RpcRequest.__init__(self, 'Baas', '2018-07-31', 'DescribeSmartContractJobs')
        self.set_method('POST')
        # Newer SDK cores resolve endpoints through the shared endpoint table.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_Start(self):
        """Return the 'Start' body parameter (None when unset)."""
        return self.get_body_params().get('Start')

    def set_Start(self, Start):
        """Set the 'Start' body parameter."""
        self.add_body_params('Start', Start)

    def get_Size(self):
        """Return the 'Size' body parameter (None when unset)."""
        return self.get_body_params().get('Size')

    def set_Size(self, Size):
        """Set the 'Size' body parameter."""
        self.add_body_params('Size', Size)
from aliyunsdkcore.request import RpcRequest
from aliyunsdkbaas.endpoint import endpoint_data
class DescribeSmartContractJobsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Baas', '2018-07-31', 'DescribeSmartContractJobs')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Start(self):
return self.get_body_params().get('Start')
def set_Start(self,Start):
self.add_body_params('Start', Start)
def get_Size(self):
return self.get_body_params().get('Size')
def set_Size(self,Size):
self.add_body_params('Size', Size) | true | true |
f71d500bd9d2ec3c7c795af9f44cae78b7f74e64 | 1,718 | py | Python | Pi code/final.py | k-shenbagaraj/GoCHART_manual_drive | 524594fdd10f0a3d07020985f27727076cbbd01a | [
"MIT"
] | null | null | null | Pi code/final.py | k-shenbagaraj/GoCHART_manual_drive | 524594fdd10f0a3d07020985f27727076cbbd01a | [
"MIT"
] | null | null | null | Pi code/final.py | k-shenbagaraj/GoCHART_manual_drive | 524594fdd10f0a3d07020985f27727076cbbd01a | [
"MIT"
] | null | null | null | import re
from client import *
import serial
import os
if os.path.exists ('/dev/ttyACM0') == True:
port = "/dev/ttyACM0"
print("ACM0")
elif os.path.exists ('/dev/ttyACM1') == True:
port = "/dev/ttyACM1"
print("ACM1")
elif os.path.exists ('/dev/ttyACM2') == True:
port = "/dev/ttyACM2"
print("ACM2")
baudrate = 9600
ser =serial.Serial(port, baudrate)
msg = 0
received = 0
def send(msg):
    """Transmit *msg* to the microcontroller as one ASCII line."""
    line = '{}\n'.format(msg)
    ser.write(line.encode('ascii'))
def receive():
    """Read one terminated line from the serial port.

    The line is also cached in the module-level `received` variable.
    """
    global received
    received = ser.read_until()
    return received
def _clean_token(token, strip_brace=False):
    """Strip joystick-packet punctuation (commas, spaces, quotes) from *token*.

    With strip_brace=True the closing '}' of the packet is removed too.
    """
    token = token.replace(',', '').replace(' ', '').replace("'", "")
    return token.replace('}', '') if strip_brace else token


# Main loop: poll the joystick over the socket, repack the axes into a
# single decimal-coded integer and forward it to the microcontroller.
while True:
    joystick_val = str(socket_value())
    joystick_split = re.split(r'\s', joystick_val)
    # Steering axis: clamp to [-3000, 3000], then rescale to 0..100
    # (50 = centred).
    joy = float(_clean_token(joystick_split[1]))
    print(joy, "joy")
    if joy > 3000:
        joy = 3000
    elif joy < -3000:
        joy = -3000
    joy = int(float(joy / 60) + 50)
    print(joy, "joy")
    # Throttle axis: scale by 10, then remap into 0..200 with the
    # direction inverted (200 - (acc + 100)).
    acc = int(float(_clean_token(joystick_split[3])) * 10)
    acc = (200 - (acc + 100))
    print(acc, "acc")
    # Button field is the last packet token and carries the closing brace.
    button_tok = _clean_token(joystick_split[7], strip_brace=True)
    print(button_tok)
    button = int(float(button_tok))
    # Pack as 1BAAAJJJ: leading '1' marker, B=button, AAA=acc, JJJ=joy.
    send_val = 10000000 + button * 1000000 + acc * 1000 + joy
    print(send_val)
    send(send_val)
    print("sent")
| 19.976744 | 56 | 0.558789 | import re
from client import *
import serial
import os
if os.path.exists ('/dev/ttyACM0') == True:
port = "/dev/ttyACM0"
print("ACM0")
elif os.path.exists ('/dev/ttyACM1') == True:
port = "/dev/ttyACM1"
print("ACM1")
elif os.path.exists ('/dev/ttyACM2') == True:
port = "/dev/ttyACM2"
print("ACM2")
baudrate = 9600
ser =serial.Serial(port, baudrate)
msg = 0
received = 0
def send(msg):
msg = str(msg)
msg = msg + '\n'
x = msg.encode('ascii')
ser.write(x)
def receive():
global received
received = ser.read_until()
return received
while True:
joystick_val = socket_value()
joystick_val = str(joystick_val)
joystick_split = re.split(r'\s',joystick_val)
joy = joystick_split[1]
joy =joy.replace(',','')
joy =joy.replace(' ','')
joy =joy.replace("'","")
joy = (float(joy))
print(joy,"joy")
if (joy>3000):
joy=3000
elif (joy<-3000):
joy=-3000
joy = int(float(joy/60)+50)
print(joy,"joy")
acc = joystick_split[3]
acc =acc.replace(',','')
acc =acc.replace(' ','')
acc =acc.replace("'","")
acc = int(float(acc)*10)
acc = (200-(acc+100))
print(acc,"acc")
button = joystick_split[7]
button =button.replace(',','')
button =button.replace(' ','')
button =button.replace("'","")
button =button.replace('}',"")
print(button)
button = int(float(button))
send_val = 10000000 + button*1000000+ acc*1000+ joy
print(send_val)
send(send_val)
print("sent")
#send(joystick_split[2])
#rec = receive()
| true | true |
f71d52ac319dee155d13be3f21121985b6a7e93f | 3,813 | py | Python | handler/__init__.py | glibin/binder-proxy | 6ceac0cacba3e5ba8e88f020bc411fac6ab50313 | [
"MIT"
] | null | null | null | handler/__init__.py | glibin/binder-proxy | 6ceac0cacba3e5ba8e88f020bc411fac6ab50313 | [
"MIT"
] | null | null | null | handler/__init__.py | glibin/binder-proxy | 6ceac0cacba3e5ba8e88f020bc411fac6ab50313 | [
"MIT"
] | null | null | null | from tornado.options import options
from tortik.page import RequestHandler
try:
import urlparse # py2
except ImportError:
import urllib.parse as urlparse # py3
class PageHandler(RequestHandler):
    """Base handler: all outgoing requests skip TLS certificate validation."""
    preprocessors = []
    postprocessors = []
    def make_request(self, *args, **kwargs):
        """Delegate to tortik's make_request with validate_cert forced off.

        NOTE(review): this disables TLS certificate verification for every
        outgoing request -- only acceptable for trusted upstreams.
        """
        kwargs['validate_cert'] = False
        return RequestHandler.make_request(self, *args, **kwargs)
class ProxyHandler(PageHandler):
    """Proxy handler: forwards the incoming request upstream and relays
    the response (headers, cookies, redirects) back to the client."""
    postprocessors = []
    # Name under which the upstream request/response is tracked in tortik.
    proxy_request_name = 'proxy-request'
    # Response headers that must not be copied back verbatim: hop-by-hop
    # headers plus length/encoding headers that Tornado recomputes itself.
    exclude_headers = {'Connection', 'Keep-Alive', 'Content-Length', 'Content-Encoding', 'Proxy-Authenticate',
                       'Proxy-Authorization', 'TE', 'Trailers', 'Transfer-Encoding', 'Upgrade'}
    def check_xsrf_cookie(self):
        """Disable Tornado's XSRF check: proxied POST/PUT carry no token."""
        return
    def _get_proxy_headers(self):
        """Getting headers to pass to proxy
        By default, proxies all headers from request, rewriting Host to the
        upstream's host.
        NOTE(review): mutates the incoming request's header object in place.
        """
        headers = self.request.headers
        parsed_url = urlparse.urlsplit(self._get_proxy_url())
        headers['Host'] = parsed_url.netloc
        return headers
    def _get_proxy_url(self):
        """Getting base url part (``http://example.com``) for proxy request.
        By default takes value from ``options.proxy_url``
        """
        return options.proxy_url
    def _get_proxy_uri(self):
        """Get request uri and parameters (``/some/path?a=1``) for proxy request.
        By default, takes data from ``request.uri``
        """
        return self.request.uri
    def proxy(self, method='GET', callback=None, data=''):
        """Method for request proxy
        :param method: HTTP-method of proxied request
        :param callback: function to be called after proxy request would be finished
        :param data: request parameters or body; when empty and the method has
            a body (not GET/DELETE), the original request body is forwarded
        """
        self.fetch_requests(self.make_request(
            name=self.proxy_request_name,
            method=method,
            full_url=self._get_proxy_url() + self._get_proxy_uri(),
            headers=self._get_proxy_headers(),
            follow_redirects=False,
            data=data if (data or method in ['GET', 'DELETE']) else self.request.body,
            connect_timeout=options.proxy_timeout,
            request_timeout=options.proxy_timeout
        ), callback=callback if callback is not None else self.handle_response)
    def handle_response(self):
        """Default callback: relay status and body unless a redirect or
        special status was already fully handled."""
        response = self.responses.get(self.proxy_request_name)
        redirected = self.handle_redirects(response)
        if not redirected:
            self.set_status(response.code)
            self.complete(response.body)
    def handle_redirects(self, response):
        """Copy upstream headers/cookies and handle special statuses.

        Returns True when the response was fully handled here:
        301/302/303/307 become a client redirect, 304 is passed through
        empty, and 504/599 (upstream/network timeout) are masked as 500
        outside debug mode.
        """
        for header in response.headers:
            if header == 'Set-Cookie':  # cookies could come in multiple headers
                values = response.headers.get_list(header)
                for value in values:
                    self.add_header(header, value)
                continue
            if header not in self.exclude_headers:
                self.set_header(header, response.headers.get(header))
        if response.code in (301, 302, 303, 307):
            self.redirect(response.headers.get('Location') or '/')
            return True
        elif response.code == 304:
            self.set_status(response.code)
            self.complete()
            return True
        elif response.code in (504, 599) and not options.debug:
            self.set_status(500)
            self.complete()
            return True
        return False
    # All verbs funnel into proxy(); bodies are forwarded for POST/PUT.
    def get(self, *args, **kwargs):
        self.proxy()
    def post(self, *args, **kwargs):
        self.proxy(method='POST')
    def put(self, *args, **kwargs):
        self.proxy(method='PUT')
    def delete(self, *args, **kwargs):
        self.proxy(method='DELETE')
| 32.87069 | 110 | 0.617886 | from tornado.options import options
from tortik.page import RequestHandler
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
class PageHandler(RequestHandler):
preprocessors = []
postprocessors = []
def make_request(self, *args, **kwargs):
kwargs['validate_cert'] = False
return RequestHandler.make_request(self, *args, **kwargs)
class ProxyHandler(PageHandler):
postprocessors = []
proxy_request_name = 'proxy-request'
exclude_headers = {'Connection', 'Keep-Alive', 'Content-Length', 'Content-Encoding', 'Proxy-Authenticate',
'Proxy-Authorization', 'TE', 'Trailers', 'Transfer-Encoding', 'Upgrade'}
def check_xsrf_cookie(self):
return
def _get_proxy_headers(self):
headers = self.request.headers
parsed_url = urlparse.urlsplit(self._get_proxy_url())
headers['Host'] = parsed_url.netloc
return headers
def _get_proxy_url(self):
return options.proxy_url
def _get_proxy_uri(self):
return self.request.uri
def proxy(self, method='GET', callback=None, data=''):
self.fetch_requests(self.make_request(
name=self.proxy_request_name,
method=method,
full_url=self._get_proxy_url() + self._get_proxy_uri(),
headers=self._get_proxy_headers(),
follow_redirects=False,
data=data if (data or method in ['GET', 'DELETE']) else self.request.body,
connect_timeout=options.proxy_timeout,
request_timeout=options.proxy_timeout
), callback=callback if callback is not None else self.handle_response)
def handle_response(self):
response = self.responses.get(self.proxy_request_name)
redirected = self.handle_redirects(response)
if not redirected:
self.set_status(response.code)
self.complete(response.body)
def handle_redirects(self, response):
for header in response.headers:
if header == 'Set-Cookie':
values = response.headers.get_list(header)
for value in values:
self.add_header(header, value)
continue
if header not in self.exclude_headers:
self.set_header(header, response.headers.get(header))
if response.code in (301, 302, 303, 307):
self.redirect(response.headers.get('Location') or '/')
return True
elif response.code == 304:
self.set_status(response.code)
self.complete()
return True
elif response.code in (504, 599) and not options.debug:
self.set_status(500)
self.complete()
return True
return False
def get(self, *args, **kwargs):
self.proxy()
def post(self, *args, **kwargs):
self.proxy(method='POST')
def put(self, *args, **kwargs):
self.proxy(method='PUT')
def delete(self, *args, **kwargs):
self.proxy(method='DELETE')
| true | true |
f71d52f51e8aedf2a36a0f268bdcc342349d4ecf | 2,681 | py | Python | trail/upload/process_uploads/upload_wrapper.py | DinoBektesevic/trailblazer | 31aeb2b2e3ab0cd97c4e4d2c0e26043f559abe06 | [
"MIT"
] | null | null | null | trail/upload/process_uploads/upload_wrapper.py | DinoBektesevic/trailblazer | 31aeb2b2e3ab0cd97c4e4d2c0e26043f559abe06 | [
"MIT"
] | null | null | null | trail/upload/process_uploads/upload_wrapper.py | DinoBektesevic/trailblazer | 31aeb2b2e3ab0cd97c4e4d2c0e26043f559abe06 | [
"MIT"
] | null | null | null | import os.path
from pathlib import Path
from django.conf import settings
"""
Wrapper to Django's TemporaryUploadedFile that adds additional path
manipulation and file saving functionality.
"""
__all__ = ["TemporaryUploadedFileWrapper", ]
class TemporaryUploadedFileWrapper:
    """Wrapper of TemporaryUploadedFile class.

    Adds path manipulation (extension and basename handling that understands
    compound archive extensions such as ``.fits.tar.bz2``) and permanent-save
    functionality on top of Django's ``TemporaryUploadedFile``.

    Parameters
    ----------
    upload : `django.core.files.uploadedfile.TemporaryUploadedFile`
        Uploaded file object.
    """

    save_root = os.path.join(settings.STATIC_ROOT, "upload/fits/")
    """Root of the location where upload will be permanently saved."""

    special_extensions = {".gz", ".bz2", ".xz", ".fz"}
    """File extensions recognized as processable archives."""

    def __init__(self, upload):
        self.tmpfile = upload
        self.filename = upload.name

    def __repr__(self):
        # Reuse the default "<module.Class object at 0x...>" prefix, but show
        # the wrapped filename instead of the memory address.
        repr = super().__repr__()
        clsPath = repr.split(self.__class__.__name__)[0]
        return f"{clsPath}{self.__class__.__name__}({self.filename})>"

    @property
    def extension(self):
        """File extension that respects most popularly used archive and
        compressed archive extensions.

        Returns
        -------
        ext : `str`
            File extension of the uploaded file. If the name of the uploaded
            file has no extension, returns an empty string.

        Example
        -------
        If the names of uploaded files are `image.fits.tar.bz2`, `image.fits`
        and `image``returns `.fits.tar.bz2`, `.fits` and ``.
        """
        extensions = Path(self.filename).suffixes
        if not extensions:
            return ""
        # If the final suffix is a recognized archive/compression suffix
        # (tar, fz etc.) the whole compound extension is returned...
        if extensions[-1] in self.special_extensions:
            return "".join(extensions)
        # ...otherwise just the last one.
        return extensions[-1]

    @property
    def basename(self):
        """Name of the uploaded file without extensions.

        Returns
        -------
        name : `str`
            Name of the uploaded file without extensions.
        """
        ext = self.extension
        if not ext:
            # ``str.split("")`` raises ValueError, so the extension-less
            # case must be handled explicitly.
            return self.filename
        # Strip the extension from the end. Splitting on the extension
        # would truncate names like ``fits.fits`` at the first occurrence.
        return self.filename[: -len(ext)]

    def save(self):
        """Saves uploaded file to desired destination.

        Returns
        -------
        tgtPath : `str`
            Path where the file was saved.
        """
        # TODO: fix os.path when transitioning to S3
        # make the destination configurable
        tgtPath = os.path.join(self.save_root, self.filename)
        with open(tgtPath, "wb") as f:
            f.write(self.tmpfile.read())
        return tgtPath
| 28.221053 | 77 | 0.613204 | import os.path
from pathlib import Path
from django.conf import settings
__all__ = ["TemporaryUploadedFileWrapper", ]
class TemporaryUploadedFileWrapper:
save_root = os.path.join(settings.STATIC_ROOT, "upload/fits/")
special_extensions = {".gz", ".bz2", ".xz", ".fz"}
def __init__(self, upload):
self.tmpfile = upload
self.filename = upload.name
def __repr__(self):
repr = super().__repr__()
clsPath = repr.split(self.__class__.__name__)[0]
return f"{clsPath}{self.__class__.__name__}({self.filename})>"
@property
def extension(self):
fname = Path(self.filename)
extensions = fname.suffixes
if not extensions:
return ""
if extensions[-1] in self.special_extensions:
return "".join(extensions)
return extensions.pop()
@property
def basename(self):
return self.filename.split(self.extension)[0]
def save(self):
tgtPath = os.path.join(self.save_root, self.filename)
with open(tgtPath, "wb") as f:
f.write(self.tmpfile.read())
return tgtPath
| true | true |
f71d54b96a3930c6779dc4acaf02a9055a09e4e1 | 1,374 | py | Python | Library/Application Support/iTerm2/Scripts/AutoLaunch/change_color_preset_on_theme_change.py | timriley/dotfiles | 60ec6fabd5007a4e916e7a91bb831206dd9ede92 | [
"MIT"
] | 4 | 2015-08-26T02:39:23.000Z | 2022-02-04T02:31:54.000Z | Library/Application Support/iTerm2/Scripts/AutoLaunch/change_color_preset_on_theme_change.py | timriley/dotfiles | 60ec6fabd5007a4e916e7a91bb831206dd9ede92 | [
"MIT"
] | 3 | 2019-06-05T02:00:14.000Z | 2020-04-06T23:14:52.000Z | Library/Application Support/iTerm2/Scripts/AutoLaunch/change_color_preset_on_theme_change.py | timriley/dotfiles | 60ec6fabd5007a4e916e7a91bb831206dd9ede92 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# This script was created with the "basic" environment which does not support
# adding dependencies with pip.
# Taken from https://iterm2.com/python-api/examples/theme.html
import asyncio
import iterm2
async def update(connection, theme):
    """Apply the color preset matching the current light/dark theme.

    The effective theme string is space-delimited; one of its tokens is
    either "light" or "dark".
    """
    if "dark" in theme.split(" "):
        preset_name = "base16-summerfruit-dark-256"
    else:
        preset_name = "base16-summerfruit-light-256"
    preset = await iterm2.ColorPreset.async_get(connection, preset_name)

    # Apply the preset to every profile known to iTerm2.
    for partial in await iterm2.PartialProfile.async_query(connection):
        full_profile = await partial.async_get_full_profile()
        await full_profile.async_set_color_preset(preset)
async def main(connection):
    """Sync colors once at startup, then react to every theme change."""
    app = await iterm2.async_get_app(connection)
    current = await app.async_get_variable("effectiveTheme")
    await update(connection, current)
    async with iterm2.VariableMonitor(
        connection, iterm2.VariableScopes.APP, "effectiveTheme", None
    ) as monitor:
        while True:
            # Block until the theme changes, then re-apply the preset.
            await update(connection, await monitor.async_get())
iterm2.run_forever(main)
| 38.166667 | 108 | 0.723435 |
import asyncio
import iterm2
async def update(connection, theme):
parts = theme.split(" ")
if "dark" in parts:
preset = await iterm2.ColorPreset.async_get(connection, "base16-summerfruit-dark-256")
else:
preset = await iterm2.ColorPreset.async_get(connection, "base16-summerfruit-light-256")
profiles=await iterm2.PartialProfile.async_query(connection)
for partial in profiles:
profile = await partial.async_get_full_profile()
await profile.async_set_color_preset(preset)
async def main(connection):
app = await iterm2.async_get_app(connection)
await update(connection, await app.async_get_variable("effectiveTheme"))
async with iterm2.VariableMonitor(connection, iterm2.VariableScopes.APP, "effectiveTheme", None) as mon:
while True:
theme = await mon.async_get()
await update(connection, theme)
iterm2.run_forever(main)
| true | true |
f71d56c8acadb5d48d5ef1022bcaeb86b7ee9287 | 301 | py | Python | data/multilingual/Latn.SHP/Mono_8/pdf_to_json_test_Latn.SHP_Mono_8.py | antoinecarme/pdf_to_json_tests | d57a024fde862e698d916a1178f285883d7a3b2f | [
"BSD-3-Clause"
] | 1 | 2021-09-19T19:47:35.000Z | 2021-09-19T19:47:35.000Z | data/multilingual/Latn.SHP/Mono_8/pdf_to_json_test_Latn.SHP_Mono_8.py | antoinecarme/pdf_to_json_tests | d57a024fde862e698d916a1178f285883d7a3b2f | [
"BSD-3-Clause"
] | null | null | null | data/multilingual/Latn.SHP/Mono_8/pdf_to_json_test_Latn.SHP_Mono_8.py | antoinecarme/pdf_to_json_tests | d57a024fde862e698d916a1178f285883d7a3b2f | [
"BSD-3-Clause"
] | null | null | null | import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.SHP/Mono_8/udhr_Latn.SHP_Mono_8.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
| 30.1 | 71 | 0.810631 | import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.SHP/Mono_8/udhr_Latn.SHP_Mono_8.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
| true | true |
f71d56d8bf4219dc71815c78eb65fcea2e48444a | 11,552 | py | Python | distributed/diagnostics/progress.py | bachsh/distributed | 18d7b24ba315f6a5c0c892509e72d539f02babfb | [
"BSD-3-Clause"
] | 3 | 2021-05-27T07:40:11.000Z | 2021-05-27T07:40:16.000Z | distributed/diagnostics/progress.py | happyLeecz/DaskServerless | c1ddfe678fedc7f7ecb6eabb784b967f71a2054a | [
"BSD-3-Clause"
] | null | null | null | distributed/diagnostics/progress.py | happyLeecz/DaskServerless | c1ddfe678fedc7f7ecb6eabb784b967f71a2054a | [
"BSD-3-Clause"
] | null | null | null | import asyncio
import logging
from collections import defaultdict
from timeit import default_timer
from tlz import groupby, valmap
from dask.utils import stringify
from ..utils import key_split, key_split_group, log_errors
from .plugin import SchedulerPlugin
logger = logging.getLogger(__name__)
def dependent_keys(tasks, complete=False):
    """
    All keys that need to compute for these keys to finish.

    If *complete* is false, omit tasks that are busy processing or
    have finished executing.
    """
    pending = set()
    errors = set()
    remaining = list(tasks)
    while remaining:
        ts = remaining.pop()
        if ts.key in pending:
            continue  # already visited via another dependency path
        if not complete and ts.who_has:
            continue  # task result already lives on some worker
        if ts.exception is not None:
            errors.add(ts.key)
            if not complete:
                continue
        pending.add(ts.key)
        remaining.extend(ts.dependencies)
    return pending, errors
class Progress(SchedulerPlugin):
    """Tracks progress of a set of keys or futures

    On creation we provide a set of keys or futures that interest us as well as
    a scheduler. We traverse through the scheduler's dependencies to find all
    relevant keys on which our keys depend. We then plug into the scheduler to
    learn when our keys become available in memory at which point we record
    their completion.

    State
    -----
    keys: set
        Set of keys that are not yet computed
    all_keys: set
        Set of all keys that we track

    This class performs no visualization. However it is used by other classes,
    notably TextProgressBar and ProgressWidget, which do perform visualization.
    """

    def __init__(self, keys, scheduler, minimum=0, dt=0.1, complete=False):
        # Accept both Future-like objects (with a .key attribute) and raw keys,
        # normalizing everything to stringified keys.
        self.keys = {k.key if hasattr(k, "key") else k for k in keys}
        self.keys = {stringify(k) for k in self.keys}
        self.scheduler = scheduler
        self.complete = complete
        self._minimum = minimum
        self._dt = dt
        self.last_duration = 0
        self._start_time = default_timer()
        self._running = False
        self.status = None
        self.extra = {}

    async def setup(self):
        """Resolve tracked keys to scheduler tasks and register the plugin.

        Expands the requested keys to all dependency keys, then seeds
        ``self.keys`` (outstanding) and ``self.all_keys`` (tracked).
        """
        keys = self.keys

        # Poll until the scheduler knows about every requested key.
        while not keys.issubset(self.scheduler.tasks):
            await asyncio.sleep(0.05)

        tasks = [self.scheduler.tasks[k] for k in keys]

        self.keys = None
        self.scheduler.add_plugin(self)  # subtle race condition here:
        # transitions may arrive before self.keys is repopulated below
        self.all_keys, errors = dependent_keys(tasks, complete=self.complete)
        if not self.complete:
            self.keys = self.all_keys.copy()
        else:
            self.keys, _ = dependent_keys(tasks, complete=False)
        self.all_keys.update(keys)
        # Keep erred keys in the outstanding set so transition() sees them.
        self.keys |= errors & self.all_keys

        if not self.keys:
            self.stop(exception=None, key=None)

        logger.debug("Set up Progress keys")

        # Replay already-erred tasks through the normal transition path.
        for k in errors:
            self.transition(k, None, "erred", exception=True)

    def transition(self, key, start, finish, *args, **kwargs):
        """Scheduler hook: record completions, errors and cancellations."""
        if key in self.keys and start == "processing" and finish == "memory":
            logger.debug("Progress sees key %s", key)
            self.keys.remove(key)
            if not self.keys:
                self.stop()

        if key in self.all_keys and finish == "erred":
            logger.debug("Progress sees task erred")
            self.stop(exception=kwargs["exception"], key=key)

        if key in self.keys and finish == "forgotten":
            logger.debug("A task was cancelled (%s), stopping progress", key)
            self.stop(exception=True, key=key)

    def restart(self, scheduler):
        # A scheduler restart invalidates all tracked state; tear down.
        self.stop()

    def stop(self, exception=None, key=None):
        """Detach from the scheduler and record the final status."""
        if self in self.scheduler.plugins:
            self.scheduler.plugins.remove(self)
        if exception:
            self.status = "error"
            self.extra.update({"exception": self.scheduler.exceptions[key], "key": key})
        else:
            self.status = "finished"
        logger.debug("Remove Progress plugin")
class MultiProgress(Progress):
    """Progress variant that keeps track of different groups of keys

    See Progress for most details. This only adds a function ``func=``
    that splits keys. This defaults to ``key_split`` which aligns with naming
    conventions chosen in the dask project (tuples, hyphens, etc..)

    State
    -----
    keys: dict
        Maps group name to set of not-yet-complete keys for that group
    all_keys: dict
        Maps group name to set of all keys for that group

    Examples
    --------
    >>> split = lambda s: s.split('-')[0]
    >>> p = MultiProgress(['y-2'], func=split)  # doctest: +SKIP
    >>> p.keys   # doctest: +SKIP
    {'x': {'x-1', 'x-2', 'x-3'},
     'y': {'y-1', 'y-2'}}
    """

    def __init__(
        self, keys, scheduler=None, func=key_split, minimum=0, dt=0.1, complete=False
    ):
        # ``func`` maps a key to its group name; everything else is Progress.
        self.func = func
        Progress.__init__(
            self, keys, scheduler, minimum=minimum, dt=dt, complete=complete
        )

    async def setup(self):
        """Like Progress.setup, but regroups key sets into per-group dicts."""
        keys = self.keys

        # Poll until the scheduler knows about every requested key.
        while not keys.issubset(self.scheduler.tasks):
            await asyncio.sleep(0.05)

        tasks = [self.scheduler.tasks[k] for k in keys]

        self.keys = None

        self.scheduler.add_plugin(self)  # subtle race condition here:
        # transitions may arrive before self.keys is repopulated below
        self.all_keys, errors = dependent_keys(tasks, complete=self.complete)
        if not self.complete:
            self.keys = self.all_keys.copy()
        else:
            self.keys, _ = dependent_keys(tasks, complete=False)
        self.all_keys.update(keys)
        self.keys |= errors & self.all_keys

        if not self.keys:
            self.stop(exception=None, key=None)

        # Group keys by func name
        self.keys = valmap(set, groupby(self.func, self.keys))
        self.all_keys = valmap(set, groupby(self.func, self.all_keys))
        # Groups whose keys are all complete still need an (empty) entry.
        for k in self.all_keys:
            if k not in self.keys:
                self.keys[k] = set()

        # Replay already-erred tasks through the normal transition path.
        for k in errors:
            self.transition(k, None, "erred", exception=True)
        logger.debug("Set up Progress keys")

    def transition(self, key, start, finish, *args, **kwargs):
        """Scheduler hook: update the per-group outstanding key sets."""
        if start == "processing" and finish == "memory":
            s = self.keys.get(self.func(key), None)
            if s and key in s:
                s.remove(key)
            # Finished only when every group's outstanding set is empty.
            if not self.keys or not any(self.keys.values()):
                self.stop()

        if finish == "erred":
            logger.debug("Progress sees task erred")
            k = self.func(key)
            if k in self.all_keys and key in self.all_keys[k]:
                self.stop(exception=kwargs.get("exception"), key=key)

        if finish == "forgotten":
            k = self.func(key)
            if k in self.all_keys and key in self.all_keys[k]:
                logger.debug("A task was cancelled (%s), stopping progress", key)
                self.stop(exception=True)
def format_time(t):
    """Format seconds into a human readable form.

    >>> format_time(10.4)
    '10.4s'
    >>> format_time(1000.4)
    '16min 40.4s'
    >>> format_time(100000.4)
    '27hr 46min 40.4s'
    """
    minutes, seconds = divmod(t, 60)
    hours, minutes = divmod(minutes, 60)
    if hours:
        return f"{hours:2.0f}hr {minutes:2.0f}min {seconds:4.1f}s"
    if minutes:
        return f"{minutes:2.0f}min {seconds:4.1f}s"
    return f"{seconds:4.1f}s"
class AllProgress(SchedulerPlugin):
    """ Keep track of all keys, grouped by key_split """

    def __init__(self, scheduler):
        # all:    prefix -> set of keys seen with that prefix
        # nbytes: prefix -> total bytes of that prefix's in-memory results
        # state:  task state -> prefix -> set of keys in that state
        self.all = defaultdict(set)
        self.nbytes = defaultdict(lambda: 0)
        self.state = defaultdict(lambda: defaultdict(set))
        self.scheduler = scheduler

        # Seed the bookkeeping from tasks the scheduler already knows about.
        for ts in self.scheduler.tasks.values():
            key = ts.key
            prefix = ts.prefix.name
            self.all[prefix].add(key)
            self.state[ts.state][prefix].add(key)
            if ts.nbytes >= 0:  # negative nbytes means size is unknown
                self.nbytes[prefix] += ts.nbytes

        scheduler.add_plugin(self)

    def transition(self, key, start, finish, *args, **kwargs):
        """Scheduler hook: move ``key`` between per-prefix state sets."""
        ts = self.scheduler.tasks[key]
        prefix = ts.prefix.name
        self.all[prefix].add(key)
        try:
            self.state[start][prefix].remove(key)
        except KeyError:  # TODO: remove me once we have a new or clean state
            pass

        # Byte totals only count results resident in memory.
        if start == "memory" and ts.nbytes >= 0:
            # XXX why not respect DEFAULT_DATA_SIZE?
            self.nbytes[prefix] -= ts.nbytes
        if finish == "memory" and ts.nbytes >= 0:
            self.nbytes[prefix] += ts.nbytes

        if finish != "forgotten":
            self.state[finish][prefix].add(key)
        else:
            # Forgotten keys are dropped entirely; when the last key of a
            # prefix goes, purge the prefix from every bookkeeping dict.
            s = self.all[prefix]
            s.remove(key)
            if not s:
                del self.all[prefix]
                self.nbytes.pop(prefix, None)
                for v in self.state.values():
                    v.pop(prefix, None)

    def restart(self, scheduler):
        # A scheduler restart invalidates everything we tracked.
        self.all.clear()
        self.state.clear()
class GroupProgress(SchedulerPlugin):
    """ Keep track of all keys, grouped by key_split """

    def __init__(self, scheduler):
        self.scheduler = scheduler
        # keys:         group -> set of keys in that group
        # groups:       group -> {state: count} tallies
        # nbytes:       group -> bytes of in-memory results
        # dependencies: group -> groups it depends on
        # dependents:   group -> groups that depend on it
        self.keys = dict()
        self.groups = dict()
        self.nbytes = dict()
        self.durations = dict()
        self.dependencies = defaultdict(set)
        self.dependents = defaultdict(set)

        # Seed from tasks the scheduler already knows about.
        for key, ts in self.scheduler.tasks.items():
            k = key_split_group(key)
            if k not in self.groups:
                self.create(key, k)
            self.keys[k].add(key)
            self.groups[k][ts.state] += 1
            if ts.state == "memory" and ts.nbytes >= 0:
                self.nbytes[k] += ts.nbytes

        scheduler.add_plugin(self)

    def create(self, key, k):
        """Initialize bookkeeping for group ``k`` using ``key`` as exemplar."""
        with log_errors():
            ts = self.scheduler.tasks[key]
            g = {"memory": 0, "erred": 0, "waiting": 0, "released": 0, "processing": 0}
            self.keys[k] = set()
            self.groups[k] = g
            self.nbytes[k] = 0
            self.durations[k] = 0
            # Wire up the inter-group dependency graph from this task's edges.
            self.dependents[k] = {key_split_group(dts.key) for dts in ts.dependents}
            for dts in ts.dependencies:
                d = key_split_group(dts.key)
                self.dependents[d].add(k)
                self.dependencies[k].add(d)

    def transition(self, key, start, finish, *args, **kwargs):
        """Scheduler hook: update the group's state tallies and byte totals."""
        with log_errors():
            ts = self.scheduler.tasks[key]
            k = key_split_group(key)
            if k not in self.groups:
                self.create(key, k)

            g = self.groups[k]

            # First sighting of the key: add it without decrementing any
            # tally; otherwise move it out of its previous state's count.
            if key not in self.keys[k]:
                self.keys[k].add(key)
            else:
                g[start] -= 1

            if finish != "forgotten":
                g[finish] += 1
            else:
                # Forgotten keys are dropped; the group is purged (including
                # its dependency-graph edges) once its last key goes.
                self.keys[k].remove(key)
                if not self.keys[k]:
                    del self.groups[k]
                    del self.nbytes[k]
                    for dep in self.dependencies.pop(k):
                        self.dependents[key_split_group(dep)].remove(k)

            # Byte totals only count results resident in memory.
            if start == "memory" and ts.nbytes >= 0:
                self.nbytes[k] -= ts.nbytes
            if finish == "memory" and ts.nbytes >= 0:
                self.nbytes[k] += ts.nbytes

    def restart(self, scheduler):
        # A scheduler restart invalidates everything we tracked.
        self.keys.clear()
        self.groups.clear()
        self.nbytes.clear()
        self.durations.clear()
        self.dependencies.clear()
        self.dependents.clear()
| 31.911602 | 88 | 0.569598 | import asyncio
import logging
from collections import defaultdict
from timeit import default_timer
from tlz import groupby, valmap
from dask.utils import stringify
from ..utils import key_split, key_split_group, log_errors
from .plugin import SchedulerPlugin
logger = logging.getLogger(__name__)
def dependent_keys(tasks, complete=False):
out = set()
errors = set()
stack = list(tasks)
while stack:
ts = stack.pop()
key = ts.key
if key in out:
continue
if not complete and ts.who_has:
continue
if ts.exception is not None:
errors.add(key)
if not complete:
continue
out.add(key)
stack.extend(ts.dependencies)
return out, errors
class Progress(SchedulerPlugin):
def __init__(self, keys, scheduler, minimum=0, dt=0.1, complete=False):
self.keys = {k.key if hasattr(k, "key") else k for k in keys}
self.keys = {stringify(k) for k in self.keys}
self.scheduler = scheduler
self.complete = complete
self._minimum = minimum
self._dt = dt
self.last_duration = 0
self._start_time = default_timer()
self._running = False
self.status = None
self.extra = {}
async def setup(self):
keys = self.keys
while not keys.issubset(self.scheduler.tasks):
await asyncio.sleep(0.05)
tasks = [self.scheduler.tasks[k] for k in keys]
self.keys = None
self.scheduler.add_plugin(self)
self.all_keys, errors = dependent_keys(tasks, complete=self.complete)
if not self.complete:
self.keys = self.all_keys.copy()
else:
self.keys, _ = dependent_keys(tasks, complete=False)
self.all_keys.update(keys)
self.keys |= errors & self.all_keys
if not self.keys:
self.stop(exception=None, key=None)
logger.debug("Set up Progress keys")
for k in errors:
self.transition(k, None, "erred", exception=True)
def transition(self, key, start, finish, *args, **kwargs):
if key in self.keys and start == "processing" and finish == "memory":
logger.debug("Progress sees key %s", key)
self.keys.remove(key)
if not self.keys:
self.stop()
if key in self.all_keys and finish == "erred":
logger.debug("Progress sees task erred")
self.stop(exception=kwargs["exception"], key=key)
if key in self.keys and finish == "forgotten":
logger.debug("A task was cancelled (%s), stopping progress", key)
self.stop(exception=True, key=key)
def restart(self, scheduler):
self.stop()
def stop(self, exception=None, key=None):
if self in self.scheduler.plugins:
self.scheduler.plugins.remove(self)
if exception:
self.status = "error"
self.extra.update({"exception": self.scheduler.exceptions[key], "key": key})
else:
self.status = "finished"
logger.debug("Remove Progress plugin")
class MultiProgress(Progress):
def __init__(
self, keys, scheduler=None, func=key_split, minimum=0, dt=0.1, complete=False
):
self.func = func
Progress.__init__(
self, keys, scheduler, minimum=minimum, dt=dt, complete=complete
)
async def setup(self):
keys = self.keys
while not keys.issubset(self.scheduler.tasks):
await asyncio.sleep(0.05)
tasks = [self.scheduler.tasks[k] for k in keys]
self.keys = None
self.scheduler.add_plugin(self)
self.all_keys, errors = dependent_keys(tasks, complete=self.complete)
if not self.complete:
self.keys = self.all_keys.copy()
else:
self.keys, _ = dependent_keys(tasks, complete=False)
self.all_keys.update(keys)
self.keys |= errors & self.all_keys
if not self.keys:
self.stop(exception=None, key=None)
self.keys = valmap(set, groupby(self.func, self.keys))
self.all_keys = valmap(set, groupby(self.func, self.all_keys))
for k in self.all_keys:
if k not in self.keys:
self.keys[k] = set()
for k in errors:
self.transition(k, None, "erred", exception=True)
logger.debug("Set up Progress keys")
def transition(self, key, start, finish, *args, **kwargs):
if start == "processing" and finish == "memory":
s = self.keys.get(self.func(key), None)
if s and key in s:
s.remove(key)
if not self.keys or not any(self.keys.values()):
self.stop()
if finish == "erred":
logger.debug("Progress sees task erred")
k = self.func(key)
if k in self.all_keys and key in self.all_keys[k]:
self.stop(exception=kwargs.get("exception"), key=key)
if finish == "forgotten":
k = self.func(key)
if k in self.all_keys and key in self.all_keys[k]:
logger.debug("A task was cancelled (%s), stopping progress", key)
self.stop(exception=True)
def format_time(t):
m, s = divmod(t, 60)
h, m = divmod(m, 60)
if h:
return "{0:2.0f}hr {1:2.0f}min {2:4.1f}s".format(h, m, s)
elif m:
return "{0:2.0f}min {1:4.1f}s".format(m, s)
else:
return "{0:4.1f}s".format(s)
class AllProgress(SchedulerPlugin):
def __init__(self, scheduler):
self.all = defaultdict(set)
self.nbytes = defaultdict(lambda: 0)
self.state = defaultdict(lambda: defaultdict(set))
self.scheduler = scheduler
for ts in self.scheduler.tasks.values():
key = ts.key
prefix = ts.prefix.name
self.all[prefix].add(key)
self.state[ts.state][prefix].add(key)
if ts.nbytes >= 0:
self.nbytes[prefix] += ts.nbytes
scheduler.add_plugin(self)
def transition(self, key, start, finish, *args, **kwargs):
ts = self.scheduler.tasks[key]
prefix = ts.prefix.name
self.all[prefix].add(key)
try:
self.state[start][prefix].remove(key)
except KeyError:
pass
if start == "memory" and ts.nbytes >= 0:
self.nbytes[prefix] -= ts.nbytes
if finish == "memory" and ts.nbytes >= 0:
self.nbytes[prefix] += ts.nbytes
if finish != "forgotten":
self.state[finish][prefix].add(key)
else:
s = self.all[prefix]
s.remove(key)
if not s:
del self.all[prefix]
self.nbytes.pop(prefix, None)
for v in self.state.values():
v.pop(prefix, None)
def restart(self, scheduler):
self.all.clear()
self.state.clear()
class GroupProgress(SchedulerPlugin):
def __init__(self, scheduler):
self.scheduler = scheduler
self.keys = dict()
self.groups = dict()
self.nbytes = dict()
self.durations = dict()
self.dependencies = defaultdict(set)
self.dependents = defaultdict(set)
for key, ts in self.scheduler.tasks.items():
k = key_split_group(key)
if k not in self.groups:
self.create(key, k)
self.keys[k].add(key)
self.groups[k][ts.state] += 1
if ts.state == "memory" and ts.nbytes >= 0:
self.nbytes[k] += ts.nbytes
scheduler.add_plugin(self)
def create(self, key, k):
with log_errors():
ts = self.scheduler.tasks[key]
g = {"memory": 0, "erred": 0, "waiting": 0, "released": 0, "processing": 0}
self.keys[k] = set()
self.groups[k] = g
self.nbytes[k] = 0
self.durations[k] = 0
self.dependents[k] = {key_split_group(dts.key) for dts in ts.dependents}
for dts in ts.dependencies:
d = key_split_group(dts.key)
self.dependents[d].add(k)
self.dependencies[k].add(d)
def transition(self, key, start, finish, *args, **kwargs):
with log_errors():
ts = self.scheduler.tasks[key]
k = key_split_group(key)
if k not in self.groups:
self.create(key, k)
g = self.groups[k]
if key not in self.keys[k]:
self.keys[k].add(key)
else:
g[start] -= 1
if finish != "forgotten":
g[finish] += 1
else:
self.keys[k].remove(key)
if not self.keys[k]:
del self.groups[k]
del self.nbytes[k]
for dep in self.dependencies.pop(k):
self.dependents[key_split_group(dep)].remove(k)
if start == "memory" and ts.nbytes >= 0:
self.nbytes[k] -= ts.nbytes
if finish == "memory" and ts.nbytes >= 0:
self.nbytes[k] += ts.nbytes
def restart(self, scheduler):
self.keys.clear()
self.groups.clear()
self.nbytes.clear()
self.durations.clear()
self.dependencies.clear()
self.dependents.clear()
| true | true |
f71d57efae634d6940aee542cd1c41b728f66202 | 7,879 | py | Python | openff/toolkit/tests/test_utils_callback.py | ijpulidos/openff-toolkit | 24953a407c853411bee9584d29fa0fb953e59151 | [
"MIT"
] | null | null | null | openff/toolkit/tests/test_utils_callback.py | ijpulidos/openff-toolkit | 24953a407c853411bee9584d29fa0fb953e59151 | [
"MIT"
] | 10 | 2021-05-06T16:02:44.000Z | 2022-03-02T02:08:45.000Z | openff/toolkit/tests/test_utils_callback.py | justinGilmer/openforcefield | 1bb07cfa4ceffee4a1df760e44fdbdb1d281d1c7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# =====================================================================
# MODULE DOCSTRING
# =====================================================================
"""
Tests for callback utility classes and functions.
"""
# =====================================================================
# GLOBAL IMPORTS
# =====================================================================
import pytest
from openff.toolkit.utils.callback import (
Callbackable,
CallbackRegistrationError,
callback_method,
)
# =====================================================================
# UTILITY CLASSES AND FUNCTIONS
# =====================================================================
class CallHistory:
    """Records the order in which Callbackable invokes methods and callbacks."""

    # Shared class-level log; reset_history() must run before recording.
    history = None

    def reset_history(self):
        CallHistory.history = []

    @classmethod
    def add_history_entry(cls, name, *args, **kwargs):
        # Attach positional/keyword arguments only when they were supplied;
        # a bare name is stored as a plain string rather than a list.
        extras = [part for part in (args, kwargs) if part]
        if extras:
            cls.history.append([name] + extras)
        else:
            cls.history.append(name)

    def instance_callback(self, callbackable, event_name, *args, **kwargs):
        assert isinstance(self, object)
        CallHistory.add_history_entry(f"callback_{event_name}", *args, **kwargs)

    @classmethod
    def class_callback(cls, callbackable, event_name, *args, **kwargs):
        assert isinstance(cls, type)
        CallHistory.add_history_entry(f"callback_{event_name}", *args, **kwargs)

    @staticmethod
    def static_callback(callbackable, event_name, *args, **kwargs):
        CallHistory.add_history_entry(f"callback_{event_name}", *args, **kwargs)
call_history = CallHistory()
# =====================================================================
# Test Callbackable class
# =====================================================================
class TestCallbackable:
    """Test suite for the Callbackable base class."""

    # ----------------------------- #
    # Utility classes and functions #
    # ----------------------------- #

    class MyCallbackable(Callbackable):
        """Minimal Callbackable exposing method- and event-based callbacks."""

        @callback_method
        def instance_method(self, *args, **kwargs):
            CallHistory.add_history_entry("instance_method", *args, **kwargs)

        @callback_method
        def __iadd__(self, other):
            CallHistory.add_history_entry("__iadd__", other)

        @callback_method(events=["event1"])
        def event_method1(self, *args, **kwargs):
            CallHistory.add_history_entry("event_method1", *args, **kwargs)

        @callback_method(events=["event1", "event2"])
        def event_method2(self, *args, **kwargs):
            CallHistory.add_history_entry("event_method2", *args, **kwargs)

    def check_method_call_order(
        self, callbackable, event_name, event_sequence, *args, **kwargs
    ):
        """Check that callback and methods/attributes are invoked in the correct order.

        This also formats the history correctly if args and kwargs are given.
        """
        # Modify the expected history if args and kwargs are given.
        if len(args) == 0 and len(kwargs) == 0:
            expected_history = event_sequence
        else:
            expected_history = [[event_name] for event_name in event_sequence]
            if len(args) != 0:
                for event in expected_history:
                    event.append(args)
            if len(kwargs) != 0:
                for event in expected_history:
                    event.append(kwargs)

        # Reset history and verify that the calls are in the correct order.
        call_history.reset_history()
        getattr(callbackable, event_name)(*args, **kwargs)
        assert call_history.history == expected_history

    # ----- #
    # Tests #
    # ----- #

    @pytest.mark.parametrize("event_name", ["instance_method"])
    @pytest.mark.parametrize(
        "callback",
        [
            call_history.instance_callback,
            CallHistory.class_callback,
            CallHistory.static_callback,
        ],
    )
    @pytest.mark.parametrize(
        "args,kwargs", [([], {}), ([1, 2.0], {"kwarg1": 0, "kwarg2": None})]
    )
    def test_register_method_callback(self, event_name, callback, args, kwargs):
        """Methods' callbacks are invoked in the correct order and with the correct arguments."""
        callbackable = TestCallbackable.MyCallbackable()

        # No callback is called before registration.
        event_sequence = [event_name]
        self.check_method_call_order(
            callbackable, event_name, event_sequence, *args, **kwargs
        )

        # Register the callback.
        callbackable.register_callback(event_name, callback)

        # After the registration, the callback is invoked correctly.
        event_sequence = [event_name, "callback_" + event_name]
        self.check_method_call_order(
            callbackable, event_name, event_sequence, *args, **kwargs
        )

    def test_register_magic_method_callback(self):
        """Callbacks registered to magic methods are invoked correctly."""
        callbackable = TestCallbackable.MyCallbackable()
        callbackable.register_callback("__iadd__", call_history.instance_callback)

        extension = [1, 2]
        call_history.reset_history()
        callbackable += extension
        assert call_history.history == [
            ["__iadd__", (extension,)],
            ["callback___iadd__", (extension,)],
        ]

    def test_register_event_callback(self):
        """Callbacks registered to an event are handled correctly."""
        callbackable = TestCallbackable.MyCallbackable()

        # Register the callbacks to event1 (event_method1 and event_method2).
        callbackable.register_callback("event1", call_history.instance_callback)
        callbackable.register_callback("event1", CallHistory.class_callback)
        # Register one callback to event2 (only event_method2).
        callbackable.register_callback("event2", CallHistory.static_callback)

        # Check the event sequence for both methods belong to the two events.
        event_sequence = [
            "event_method1",
            "callback_event_method1",
            "callback_event_method1",
        ]
        self.check_method_call_order(callbackable, "event_method1", event_sequence)

        # event_method2 belongs to both events, so all three callbacks fire.
        event_sequence = [
            "event_method2",
            "callback_event_method2",
            "callback_event_method2",
            "callback_event_method2",
        ]
        self.check_method_call_order(callbackable, "event_method2", event_sequence)

    def test_not_callback_method_raise_exception(self):
        """An exception is raised if a callback is registered for a method not tagged with callback_method."""

        class TempCallbackable(Callbackable):
            def not_callback_method(self):
                pass

        callbackable = TempCallbackable()
        with pytest.raises(
            CallbackRegistrationError,
            match="is not tagged with the @callback_method decorator",
        ):
            callbackable.register_callback(
                "not_callback_method", call_history.instance_callback
            )

    def test_unknown_event_raise_exception(self):
        """An exception is raised if a callback is registered for an unknown callback event."""
        callbackable = TestCallbackable.MyCallbackable()
        with pytest.raises(
            CallbackRegistrationError,
            match='is associated to the callback event "unknown"',
        ):
            callbackable.register_callback("unknown", call_history.instance_callback)
| 35.977169 | 110 | 0.596395 |
import pytest
from openff.toolkit.utils.callback import (
Callbackable,
CallbackRegistrationError,
callback_method,
)
class CallHistory:
    """Shared recorder for callback invocations used by the tests below."""

    # Class-level storage; reset_history() must run before recording starts.
    history = None

    def reset_history(self):
        """Start a fresh, empty shared history."""
        CallHistory.history = []

    @classmethod
    def add_history_entry(cls, name, *args, **kwargs):
        """Record `name`, bundling positional/keyword arguments when present."""
        entry = [name]
        if args:
            entry.append(args)
        if kwargs:
            entry.append(kwargs)
        # A bare event is stored as a plain string, not a one-element list.
        cls.history.append(name if len(entry) == 1 else entry)

    def instance_callback(self, callbackable, event_name, *args, **kwargs):
        """Callback implemented as an instance method."""
        assert isinstance(self, object)
        CallHistory.add_history_entry("callback_" + event_name, *args, **kwargs)

    @classmethod
    def class_callback(cls, callbackable, event_name, *args, **kwargs):
        """Callback implemented as a class method."""
        assert isinstance(cls, type)
        CallHistory.add_history_entry("callback_" + event_name, *args, **kwargs)

    @staticmethod
    def static_callback(callbackable, event_name, *args, **kwargs):
        """Callback implemented as a static method."""
        CallHistory.add_history_entry("callback_" + event_name, *args, **kwargs)
call_history = CallHistory()
class TestCallbackable:
    """Test suite for the Callbackable callback-registration machinery."""
    class MyCallbackable(Callbackable):
        """Minimal Callbackable exposing plain, magic, and event-tagged methods."""
        @callback_method
        def instance_method(self, *args, **kwargs):
            CallHistory.add_history_entry("instance_method", *args, **kwargs)
        @callback_method
        def __iadd__(self, other):
            CallHistory.add_history_entry("__iadd__", other)
        @callback_method(events=["event1"])
        def event_method1(self, *args, **kwargs):
            CallHistory.add_history_entry("event_method1", *args, **kwargs)
        @callback_method(events=["event1", "event2"])
        def event_method2(self, *args, **kwargs):
            CallHistory.add_history_entry("event_method2", *args, **kwargs)
    def check_method_call_order(
        self, callbackable, event_name, event_sequence, *args, **kwargs
    ):
        """Invoke `event_name` on `callbackable` and assert the recorded history.

        `event_sequence` is the expected ordered list of recorded names; any
        args/kwargs passed here are expected to be appended to each entry.
        """
        if len(args) == 0 and len(kwargs) == 0:
            expected_history = event_sequence
        else:
            # With arguments, each history entry becomes [name, args, kwargs].
            expected_history = [[event_name] for event_name in event_sequence]
            if len(args) != 0:
                for event in expected_history:
                    event.append(args)
            if len(kwargs) != 0:
                for event in expected_history:
                    event.append(kwargs)
        call_history.reset_history()
        getattr(callbackable, event_name)(*args, **kwargs)
        assert call_history.history == expected_history
    @pytest.mark.parametrize("event_name", ["instance_method"])
    @pytest.mark.parametrize(
        "callback",
        [
            call_history.instance_callback,
            CallHistory.class_callback,
            CallHistory.static_callback,
        ],
    )
    @pytest.mark.parametrize(
        "args,kwargs", [([], {}), ([1, 2.0], {"kwarg1": 0, "kwarg2": None})]
    )
    def test_register_method_callback(self, event_name, callback, args, kwargs):
        """Callbacks registered to a normal method are invoked correctly."""
        callbackable = TestCallbackable.MyCallbackable()
        # No callback is called before registration.
        event_sequence = [event_name]
        self.check_method_call_order(
            callbackable, event_name, event_sequence, *args, **kwargs
        )
        # After registration, the callback is invoked after the method.
        callbackable.register_callback(event_name, callback)
        event_sequence = [event_name, "callback_" + event_name]
        self.check_method_call_order(
            callbackable, event_name, event_sequence, *args, **kwargs
        )
    def test_register_magic_method_callback(self):
        """Callbacks registered to magic methods are invoked correctly."""
        callbackable = TestCallbackable.MyCallbackable()
        callbackable.register_callback("__iadd__", call_history.instance_callback)
        extension = [1, 2]
        call_history.reset_history()
        callbackable += extension
        assert call_history.history == [
            ["__iadd__", (extension,)],
            ["callback___iadd__", (extension,)],
        ]
    def test_register_event_callback(self):
        """Callbacks registered to an event are handled correctly."""
        callbackable = TestCallbackable.MyCallbackable()
        # Two callbacks on event1, one on event2.
        callbackable.register_callback("event1", call_history.instance_callback)
        callbackable.register_callback("event1", CallHistory.class_callback)
        callbackable.register_callback("event2", CallHistory.static_callback)
        # event_method1 belongs only to event1: two callback invocations.
        event_sequence = [
            "event_method1",
            "callback_event_method1",
            "callback_event_method1",
        ]
        self.check_method_call_order(callbackable, "event_method1", event_sequence)
        # event_method2 belongs to both events: three callback invocations.
        event_sequence = [
            "event_method2",
            "callback_event_method2",
            "callback_event_method2",
            "callback_event_method2",
        ]
        self.check_method_call_order(callbackable, "event_method2", event_sequence)
    def test_not_callback_method_raise_exception(self):
        """An exception is raised when registering on an untagged method."""
        class TempCallbackable(Callbackable):
            def not_callback_method(self):
                pass
        callbackable = TempCallbackable()
        with pytest.raises(
            CallbackRegistrationError,
            match="is not tagged with the @callback_method decorator",
        ):
            callbackable.register_callback(
                "not_callback_method", call_history.instance_callback
            )
    def test_unknown_event_raise_exception(self):
        """An exception is raised when registering for an unknown event."""
        callbackable = TestCallbackable.MyCallbackable()
        with pytest.raises(
            CallbackRegistrationError,
            match='is associated to the callback event "unknown"',
        ):
            callbackable.register_callback("unknown", call_history.instance_callback)
| true | true |
f71d581144f52f714353d28b59cb7d3475bcf930 | 5,279 | py | Python | ikfs_anomaly_detector/core/config.py | DSPLab-IC6/ikfs_anomaly_detector | e0a36e185be6e9dcd75451c956a2aaf6a6fec677 | [
"MIT"
] | null | null | null | ikfs_anomaly_detector/core/config.py | DSPLab-IC6/ikfs_anomaly_detector | e0a36e185be6e9dcd75451c956a2aaf6a6fec677 | [
"MIT"
] | 4 | 2020-01-28T22:45:39.000Z | 2022-02-10T00:21:51.000Z | ikfs_anomaly_detector/core/config.py | DSPLab-IC6/ikfs_anomaly_detector | e0a36e185be6e9dcd75451c956a2aaf6a6fec677 | [
"MIT"
] | null | null | null | import os
from dataclasses import dataclass
from typing import List
import yaml
from ikfs_anomaly_detector.core.format.telemetry import TelemetryAttrs, Counters
from ikfs_anomaly_detector.intellectual.autoencoder import SignalsGroup
DEFAULT_CONFIG_PATH = os.path.join(os.getcwd(), 'default_config.yml')
DEFAULT_CONFIG = {
'models_dir': '',
'tensorboard_dir': '',
'analysis_result_dir': '/tmp/ikfs_anomaly_detector/results',
'predictor_for': [
TelemetryAttrs.ppt_ripple,
TelemetryAttrs.ppt_sample_count,
TelemetryAttrs.scanner_angle,
TelemetryAttrs.str_power,
TelemetryAttrs.tu1_temperature,
TelemetryAttrs.tu2_temperature,
],
'autoencoder_for': {
'bfk': [
# TelemetryAttrs.channel_bfk,
# TelemetryAttrs.state_bfk,
Counters.bfk_cnt_err_crc,
Counters.bfk_cnt_err_rx_buf_alloc,
Counters.bfk_cnt_err_rx_packet,
Counters.bfk_cnt_err_too_big_can_tx,
Counters.bfk_cnt_lost_interf,
Counters.bfk_cnt_marker_bpop,
Counters.bfk_cnt_marker_bud,
Counters.bfk_cnt_timeout_marker_bpop,
Counters.bfk_cnt_timeout_marker_bud,
],
'bpop': [
# TelemetryAttrs.channel_bpop,
# TelemetryAttrs.power_bpop15v,
# TelemetryAttrs.power_bpop5v,
# TelemetryAttrs.state_bpop,
Counters.bpop_cnt_err_adc_spi_overrun,
Counters.bpop_cnt_err_crc,
Counters.bpop_cnt_err_marker_access,
Counters.bpop_cnt_err_rx_pkt,
Counters.bpop_cnt_marker,
Counters.bpop_cnt_marker_other,
],
'bud': [
# TelemetryAttrs.channel_bud,
# TelemetryAttrs.power_bud10v,
# TelemetryAttrs.power_bud27vi,
# TelemetryAttrs.power_bud27vo,
# TelemetryAttrs.state_bud,
Counters.bud_cnt_err_crc,
Counters.bud_cnt_err_kachalka_brake,
Counters.bud_cnt_err_kachalka_timeout,
Counters.bud_cnt_err_marker_access,
Counters.bud_cnt_err_ref_missed_impulses,
Counters.bud_cnt_err_rx_overflow,
Counters.bud_cnt_err_rx_packet,
Counters.bud_cnt_err_sp_tx_alloc,
Counters.bud_cnt_marker,
Counters.bud_cnt_marker_other,
Counters.bud_cnt_mbx_cmd_busy,
],
'bud_board': [
TelemetryAttrs.power_bpop15v,
TelemetryAttrs.power_bpop5v,
TelemetryAttrs.power_bud10v,
TelemetryAttrs.power_bud27vo,
TelemetryAttrs.power_bud27vi,
],
'fp': [
TelemetryAttrs.tu2_temperature,
TelemetryAttrs.fp_temperature,
],
'mi': [
TelemetryAttrs.mi1_temperature,
TelemetryAttrs.mi2_temperature,
TelemetryAttrs.mi1_heater_state,
TelemetryAttrs.mi2_heater_state,
],
'mk': [
TelemetryAttrs.mk1_temperature,
TelemetryAttrs.mk2_temperature,
TelemetryAttrs.mk_heater_state,
],
'ppt': [
TelemetryAttrs.ppt_zone,
TelemetryAttrs.ppt_ref,
TelemetryAttrs.ppt_ripple,
TelemetryAttrs.ppt_in_zone,
TelemetryAttrs.scanner_angle,
],
'ppt_direction': [
TelemetryAttrs.ppt_direction,
TelemetryAttrs.ifg_max_index,
],
'str': [
TelemetryAttrs.str_power,
TelemetryAttrs.tu1_temperature
],
},
'thresholds': {
'default': {
'rules': 0.55,
'bfk': 0.2,
'bpop': 0.4,
'bud': 6.,
'bud_board': 15.,
'fp': 0.7,
'mi': 0.4,
'mk': 0.09,
'ppt': 0.27,
'ppt_direction': 0.1,
'str': 0.05,
'PptRiple': 100,
'PptSampleCount': 100,
'ScannerAngle': 610,
'Str27V': 210,
'StrSensorTu1': 100,
'StrSensorTu2': 100,
},
},
}
@dataclass
class Config:
    """Typed accessor over the raw YAML configuration mapping."""

    data: dict

    @property
    def models_dir(self) -> str:
        """Directory holding trained model files."""
        return self.data['models_dir']

    @property
    def tensorboard_dir(self) -> str:
        """Directory for TensorBoard logs."""
        return self.data['tensorboard_dir']

    @property
    def analysis_result_dir(self) -> str:
        """Directory where analysis results are written."""
        return self.data['analysis_result_dir']

    @property
    def signals_for_predictor(self) -> List[str]:
        """Signals handled by the predictor; empty list when unset."""
        return self.data['predictor_for'] or []

    @property
    def signals_groups(self) -> List[SignalsGroup]:
        """Autoencoder signal groups, one per `autoencoder_for` entry."""
        groups = self.data['autoencoder_for'] or {}
        return [
            SignalsGroup(name=group_name, signals=signals)
            for group_name, signals in groups.items()
        ]

    @property
    def thresholds(self) -> dict:
        """Per-model anomaly thresholds; empty dict when unset."""
        return self.data['thresholds'] or {}
def dump_default_config() -> str:
    """Serialize DEFAULT_CONFIG to DEFAULT_CONFIG_PATH as YAML.

    Returns the path of the written file. `explicit_start` emits the
    leading '---'; `sort_keys=False` preserves the declaration order.
    """
    with open(DEFAULT_CONFIG_PATH, 'w') as f:
        yaml.dump(DEFAULT_CONFIG, stream=f, indent=2, explicit_start=True, sort_keys=False)
    return DEFAULT_CONFIG_PATH
def load_config(path: str) -> Config:
    """Read a YAML configuration file and wrap it in a Config accessor."""
    with open(path, 'r') as f:
        # FullLoader avoids arbitrary Python object construction, unlike
        # a bare yaml.load without an explicit Loader.
        return Config(data=yaml.load(f, Loader=yaml.FullLoader))
| 29.005495 | 91 | 0.596325 | import os
from dataclasses import dataclass
from typing import List
import yaml
from ikfs_anomaly_detector.core.format.telemetry import TelemetryAttrs, Counters
from ikfs_anomaly_detector.intellectual.autoencoder import SignalsGroup
DEFAULT_CONFIG_PATH = os.path.join(os.getcwd(), 'default_config.yml')
DEFAULT_CONFIG = {
'models_dir': '',
'tensorboard_dir': '',
'analysis_result_dir': '/tmp/ikfs_anomaly_detector/results',
'predictor_for': [
TelemetryAttrs.ppt_ripple,
TelemetryAttrs.ppt_sample_count,
TelemetryAttrs.scanner_angle,
TelemetryAttrs.str_power,
TelemetryAttrs.tu1_temperature,
TelemetryAttrs.tu2_temperature,
],
'autoencoder_for': {
'bfk': [
Counters.bfk_cnt_err_crc,
Counters.bfk_cnt_err_rx_buf_alloc,
Counters.bfk_cnt_err_rx_packet,
Counters.bfk_cnt_err_too_big_can_tx,
Counters.bfk_cnt_lost_interf,
Counters.bfk_cnt_marker_bpop,
Counters.bfk_cnt_marker_bud,
Counters.bfk_cnt_timeout_marker_bpop,
Counters.bfk_cnt_timeout_marker_bud,
],
'bpop': [
Counters.bpop_cnt_err_adc_spi_overrun,
Counters.bpop_cnt_err_crc,
Counters.bpop_cnt_err_marker_access,
Counters.bpop_cnt_err_rx_pkt,
Counters.bpop_cnt_marker,
Counters.bpop_cnt_marker_other,
],
'bud': [
Counters.bud_cnt_err_crc,
Counters.bud_cnt_err_kachalka_brake,
Counters.bud_cnt_err_kachalka_timeout,
Counters.bud_cnt_err_marker_access,
Counters.bud_cnt_err_ref_missed_impulses,
Counters.bud_cnt_err_rx_overflow,
Counters.bud_cnt_err_rx_packet,
Counters.bud_cnt_err_sp_tx_alloc,
Counters.bud_cnt_marker,
Counters.bud_cnt_marker_other,
Counters.bud_cnt_mbx_cmd_busy,
],
'bud_board': [
TelemetryAttrs.power_bpop15v,
TelemetryAttrs.power_bpop5v,
TelemetryAttrs.power_bud10v,
TelemetryAttrs.power_bud27vo,
TelemetryAttrs.power_bud27vi,
],
'fp': [
TelemetryAttrs.tu2_temperature,
TelemetryAttrs.fp_temperature,
],
'mi': [
TelemetryAttrs.mi1_temperature,
TelemetryAttrs.mi2_temperature,
TelemetryAttrs.mi1_heater_state,
TelemetryAttrs.mi2_heater_state,
],
'mk': [
TelemetryAttrs.mk1_temperature,
TelemetryAttrs.mk2_temperature,
TelemetryAttrs.mk_heater_state,
],
'ppt': [
TelemetryAttrs.ppt_zone,
TelemetryAttrs.ppt_ref,
TelemetryAttrs.ppt_ripple,
TelemetryAttrs.ppt_in_zone,
TelemetryAttrs.scanner_angle,
],
'ppt_direction': [
TelemetryAttrs.ppt_direction,
TelemetryAttrs.ifg_max_index,
],
'str': [
TelemetryAttrs.str_power,
TelemetryAttrs.tu1_temperature
],
},
'thresholds': {
'default': {
'rules': 0.55,
'bfk': 0.2,
'bpop': 0.4,
'bud': 6.,
'bud_board': 15.,
'fp': 0.7,
'mi': 0.4,
'mk': 0.09,
'ppt': 0.27,
'ppt_direction': 0.1,
'str': 0.05,
'PptRiple': 100,
'PptSampleCount': 100,
'ScannerAngle': 610,
'Str27V': 210,
'StrSensorTu1': 100,
'StrSensorTu2': 100,
},
},
}
@dataclass
class Config:
data: dict
@property
def models_dir(self) -> str:
return self.data['models_dir']
@property
def tensorboard_dir(self) -> str:
return self.data['tensorboard_dir']
@property
def analysis_result_dir(self) -> str:
return self.data['analysis_result_dir']
@property
def signals_for_predictor(self) -> List[str]:
return self.data['predictor_for'] or []
@property
def signals_groups(self) -> List[SignalsGroup]:
return [
SignalsGroup(name=group_name, signals=signals)
for group_name, signals in (self.data['autoencoder_for'] or {}).items()
]
@property
def thresholds(self) -> dict:
return self.data['thresholds'] or {}
def dump_default_config() -> str:
with open(DEFAULT_CONFIG_PATH, 'w') as f:
yaml.dump(DEFAULT_CONFIG, stream=f, indent=2, explicit_start=True, sort_keys=False)
return DEFAULT_CONFIG_PATH
def load_config(path: str) -> Config:
with open(path, 'r') as f:
return Config(data=yaml.load(f, Loader=yaml.FullLoader))
| true | true |
f71d58cc0b0e4753056bd8d988469378f2edf90f | 1,851 | py | Python | setup.py | strongio/snorkel | 0282a4f2323fc7bd4b0eb2a950b52d2a5d88b0c1 | [
"Apache-2.0"
] | 1 | 2021-04-22T05:10:25.000Z | 2021-04-22T05:10:25.000Z | setup.py | strongio/strong-snorkel | 0282a4f2323fc7bd4b0eb2a950b52d2a5d88b0c1 | [
"Apache-2.0"
] | null | null | null | setup.py | strongio/strong-snorkel | 0282a4f2323fc7bd4b0eb2a950b52d2a5d88b0c1 | [
"Apache-2.0"
] | null | null | null | from typing import Dict
from setuptools import find_packages, setup
# version.py defines the VERSION and VERSION_SHORT variables.
# We use exec here so we don't import snorkel (and its heavy dependency
# chain) just to read the version string.
VERSION: Dict[str, str] = {}
with open("snorkel/version.py", "r") as version_file:
    exec(version_file.read(), VERSION)
# Use README.md as the long_description for the package
with open("README.md", "r") as readme_file:
    long_description = readme_file.read()
setup(
    name="strong_snorkel",
    version=VERSION["VERSION"],
    url="https://github.com/strongio/strong-snorkel",
    description="A system for quickly generating training data with weak supervision, modified by strong.io",
    author="Snorkel team, Strong Analytics",
    author_email="contact@strong.io",
    long_description_content_type="text/markdown",
    long_description=long_description,
    license="Apache License 2.0",
    classifiers=[
        "Intended Audience :: Science/Research",
        "Topic :: Scientific/Engineering :: Information Analysis",
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python :: 3",
    ],
    # Project metadata still points at the upstream snorkel-team repo.
    project_urls={
        "Homepage": "https://snorkel.org",
        "Source": "https://github.com/snorkel-team/snorkel/",
        "Bug Reports": "https://github.com/snorkel-team/snorkel/issues",
        "Citation": "https://doi.org/10.14778/3157794.3157797",
    },
    packages=find_packages(),
    include_package_data=True,
    # Runtime dependencies with conservative version bounds.
    install_requires=[
        "munkres>=1.0.6",
        "numpy>=1.16.0,<2.0.0",
        "scipy>=1.2.0,<2.0.0",
        "pandas>=0.25.0,<2.0.0",
        "tqdm>=4.33.0,<5.0.0",
        "scikit-learn==0.22.*",
        "torch>=1.2.0,<2.0.0",
        "tensorboard==2.2.0",
        "networkx>=2.2,<2.4",
    ],
    python_requires=">=3.6",
    keywords="machine-learning ai weak-supervision",
)
| 34.924528 | 109 | 0.645057 | from typing import Dict
from setuptools import find_packages, setup
VERSION: Dict[str, str] = {}
with open("snorkel/version.py", "r") as version_file:
exec(version_file.read(), VERSION)
# Use README.md as the long_description for the package
with open("README.md", "r") as readme_file:
long_description = readme_file.read()
setup(
name="strong_snorkel",
version=VERSION["VERSION"],
url="https://github.com/strongio/strong-snorkel",
description="A system for quickly generating training data with weak supervision, modified by strong.io",
author="Snorkel team, Strong Analytics",
author_email="contact@strong.io",
long_description_content_type="text/markdown",
long_description=long_description,
license="Apache License 2.0",
classifiers=[
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Information Analysis",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3",
],
project_urls={
"Homepage": "https://snorkel.org",
"Source": "https://github.com/snorkel-team/snorkel/",
"Bug Reports": "https://github.com/snorkel-team/snorkel/issues",
"Citation": "https://doi.org/10.14778/3157794.3157797",
},
packages=find_packages(),
include_package_data=True,
install_requires=[
"munkres>=1.0.6",
"numpy>=1.16.0,<2.0.0",
"scipy>=1.2.0,<2.0.0",
"pandas>=0.25.0,<2.0.0",
"tqdm>=4.33.0,<5.0.0",
"scikit-learn==0.22.*",
"torch>=1.2.0,<2.0.0",
"tensorboard==2.2.0",
"networkx>=2.2,<2.4",
],
python_requires=">=3.6",
keywords="machine-learning ai weak-supervision",
)
| true | true |
f71d59aafb808ab77a0ad8aad5cbe47cf5905790 | 907 | py | Python | team_9/cocos/test/test_repeat.py | Donnyvdm/dojo19 | 3cf043a84e3ad6d3c4d59cd9c50b160e1ff03400 | [
"BSD-3-Clause"
] | 1 | 2019-09-15T18:59:49.000Z | 2019-09-15T18:59:49.000Z | team_9/cocos/test/test_repeat.py | Donnyvdm/dojo19 | 3cf043a84e3ad6d3c4d59cd9c50b160e1ff03400 | [
"BSD-3-Clause"
] | null | null | null | team_9/cocos/test/test_repeat.py | Donnyvdm/dojo19 | 3cf043a84e3ad6d3c4d59cd9c50b160e1ff03400 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import division, print_function, unicode_literals
# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
testinfo = "t 0.1, s, t 1, s, t 3, s, t 4, s, q"
tags = "Repeat"
import cocos
from cocos.director import director
from cocos.sprite import Sprite
from cocos.actions import Repeat, Rotate
import pyglet
class TestLayer(cocos.layer.Layer):
    """Layer showing one sprite centered on screen, rotating forever."""
    def __init__(self):
        super( TestLayer, self ).__init__()
        # Center the sprite in the current window.
        x,y = director.get_window_size()
        self.sprite = Sprite( 'grossini.png', (x//2, y//2) )
        self.add( self.sprite )
        # Repeat never terminates: one 360-degree rotation every 3 seconds.
        self.sprite.do( Repeat ( Rotate( 360, 3 ) ) )
def main():
    """Initialize the director and run the test scene."""
    director.init()
    scene = cocos.scene.Scene(TestLayer())
    director.run(scene)
if __name__ == '__main__':
main()
| 25.194444 | 72 | 0.674752 | from __future__ import division, print_function, unicode_literals
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
testinfo = "t 0.1, s, t 1, s, t 3, s, t 4, s, q"
tags = "Repeat"
import cocos
from cocos.director import director
from cocos.sprite import Sprite
from cocos.actions import Repeat, Rotate
import pyglet
class TestLayer(cocos.layer.Layer):
def __init__(self):
super( TestLayer, self ).__init__()
x,y = director.get_window_size()
self.sprite = Sprite( 'grossini.png', (x//2, y//2) )
self.add( self.sprite )
self.sprite.do( Repeat ( Rotate( 360, 3 ) ) )
def main():
director.init()
test_layer = TestLayer ()
main_scene = cocos.scene.Scene (test_layer)
director.run (main_scene)
if __name__ == '__main__':
main()
| true | true |
f71d59c9f4b83ffa1095a7b021868296f0f4e159 | 2,890 | py | Python | vnpy/app/cta_strategy/strategies/double_ma_strategy.py | wenhaoLong/vnpyTrader | ff37e288042b9d9f9350a1c528a53c77d56ae849 | [
"MIT"
] | null | null | null | vnpy/app/cta_strategy/strategies/double_ma_strategy.py | wenhaoLong/vnpyTrader | ff37e288042b9d9f9350a1c528a53c77d56ae849 | [
"MIT"
] | null | null | null | vnpy/app/cta_strategy/strategies/double_ma_strategy.py | wenhaoLong/vnpyTrader | ff37e288042b9d9f9350a1c528a53c77d56ae849 | [
"MIT"
] | 2 | 2021-03-07T18:13:21.000Z | 2021-12-13T10:20:10.000Z | from vnpy.app.cta_strategy import (
CtaTemplate,
StopOrder,
)
from vnpy.trader.object import (
TickData,
BarData,
TradeData,
OrderData,
)
from vnpy.trader.utility import (
BarGenerator,
ArrayManager,
)
class DoubleMaStrategy(CtaTemplate):
    """Double moving-average crossover strategy.

    Goes long when the fast SMA crosses above the slow SMA and short on
    the opposite cross, reversing any open position on the way.
    """
    author = "中科云集"
    # Tunable parameters: SMA lookback windows (in bars).
    fast_window = 10
    slow_window = 20
    # State: latest (…0) and previous (…1) SMA values, updated per bar.
    fast_ma0 = 0.0
    fast_ma1 = 0.0
    slow_ma0 = 0.0
    slow_ma1 = 0.0
    parameters = ["fast_window", "slow_window"]
    variables = ["fast_ma0", "fast_ma1", "slow_ma0", "slow_ma1"]
    def __init__(self, cta_engine, strategy_name, vt_symbol, setting):
        """Set up the bar generator and the indicator array manager."""
        super(DoubleMaStrategy, self).__init__(
            cta_engine, strategy_name, vt_symbol, setting
        )
        self.bg = BarGenerator(self.on_bar)
        self.am = ArrayManager()
    def on_init(self):
        """
        Callback when strategy is inited; preloads 10 days of bars.
        """
        self.write_log("策略初始化")
        self.load_bar(10)
    def on_start(self):
        """
        Callback when strategy is started.
        """
        self.write_log("策略启动")
        self.put_event()
    def on_stop(self):
        """
        Callback when strategy is stopped.
        """
        self.write_log("策略停止")
        self.put_event()
    def on_tick(self, tick: TickData):
        """
        Callback of new tick data update.
        """
        # Aggregate ticks into bars; on_bar fires per completed bar.
        self.bg.update_tick(tick)
    def on_bar(self, bar: BarData):
        """
        Callback of new bar data update: compute SMAs and trade crossovers.
        """
        am = self.am
        am.update_bar(bar)
        if not am.inited:
            # Not enough history buffered yet to compute the SMAs.
            return
        fast_ma = am.sma("c", self.fast_window, array=True)
        self.fast_ma0 = fast_ma[-1]
        self.fast_ma1 = fast_ma[-2]
        slow_ma = am.sma("c", self.slow_window, array=True)
        self.slow_ma0 = slow_ma[-1]
        self.slow_ma1 = slow_ma[-2]
        # Golden cross: fast SMA moved above the slow SMA on this bar.
        cross_over = self.fast_ma0 > self.slow_ma0 and self.fast_ma1 < self.slow_ma1
        # Death cross: fast SMA moved below the slow SMA on this bar.
        cross_below = self.fast_ma0 < self.slow_ma0 and self.fast_ma1 > self.slow_ma1
        if cross_over:
            if self.pos == 0:
                self.buy(bar.close_price, 1)
            elif self.pos < 0:
                # Reverse: close the short, then open a long.
                self.cover(bar.close_price, 1)
                self.buy(bar.close_price, 1)
        elif cross_below:
            if self.pos == 0:
                self.short(bar.close_price, 1)
            elif self.pos > 0:
                # Reverse: close the long, then open a short.
                self.sell(bar.close_price, 1)
                self.short(bar.close_price, 1)
        self.put_event()
    def on_order(self, order: OrderData):
        """
        Callback of new order data update.
        """
        pass
    def on_trade(self, trade: TradeData):
        """
        Callback of new trade data update.
        """
        self.put_event()
    def on_stop_order(self, stop_order: StopOrder):
        """
        Callback of stop order update.
        """
        pass
| 23.495935 | 85 | 0.552595 | from vnpy.app.cta_strategy import (
CtaTemplate,
StopOrder,
)
from vnpy.trader.object import (
TickData,
BarData,
TradeData,
OrderData,
)
from vnpy.trader.utility import (
BarGenerator,
ArrayManager,
)
class DoubleMaStrategy(CtaTemplate):
author = "中科云集"
fast_window = 10
slow_window = 20
fast_ma0 = 0.0
fast_ma1 = 0.0
slow_ma0 = 0.0
slow_ma1 = 0.0
parameters = ["fast_window", "slow_window"]
variables = ["fast_ma0", "fast_ma1", "slow_ma0", "slow_ma1"]
def __init__(self, cta_engine, strategy_name, vt_symbol, setting):
super(DoubleMaStrategy, self).__init__(
cta_engine, strategy_name, vt_symbol, setting
)
self.bg = BarGenerator(self.on_bar)
self.am = ArrayManager()
def on_init(self):
self.write_log("策略初始化")
self.load_bar(10)
def on_start(self):
self.write_log("策略启动")
self.put_event()
def on_stop(self):
self.write_log("策略停止")
self.put_event()
def on_tick(self, tick: TickData):
self.bg.update_tick(tick)
def on_bar(self, bar: BarData):
am = self.am
am.update_bar(bar)
if not am.inited:
return
fast_ma = am.sma("c", self.fast_window, array=True)
self.fast_ma0 = fast_ma[-1]
self.fast_ma1 = fast_ma[-2]
slow_ma = am.sma("c", self.slow_window, array=True)
self.slow_ma0 = slow_ma[-1]
self.slow_ma1 = slow_ma[-2]
cross_over = self.fast_ma0 > self.slow_ma0 and self.fast_ma1 < self.slow_ma1
cross_below = self.fast_ma0 < self.slow_ma0 and self.fast_ma1 > self.slow_ma1
if cross_over:
if self.pos == 0:
self.buy(bar.close_price, 1)
elif self.pos < 0:
self.cover(bar.close_price, 1)
self.buy(bar.close_price, 1)
elif cross_below:
if self.pos == 0:
self.short(bar.close_price, 1)
elif self.pos > 0:
self.sell(bar.close_price, 1)
self.short(bar.close_price, 1)
self.put_event()
def on_order(self, order: OrderData):
pass
def on_trade(self, trade: TradeData):
self.put_event()
def on_stop_order(self, stop_order: StopOrder):
pass
| true | true |
f71d59ed2467f45cf5c7f7741efd4096b5a794bd | 1,573 | py | Python | test_individual.py | pallamidessi/mvrptv | 6d4844108a4c008522b8bfb39b59f420b9484496 | [
"MIT"
] | 1 | 2019-09-03T09:18:03.000Z | 2019-09-03T09:18:03.000Z | test_individual.py | pallamidessi/mvrptv | 6d4844108a4c008522b8bfb39b59f420b9484496 | [
"MIT"
] | null | null | null | test_individual.py | pallamidessi/mvrptv | 6d4844108a4c008522b8bfb39b59f420b9484496 | [
"MIT"
] | 2 | 2015-06-15T07:21:39.000Z | 2021-11-23T09:48:01.000Z | #-*- coding:utf8 -*-
"""
This code contains tests for the functions of the class representing
an individual.
"""
#import copy
import random
import genome
import model
LIST_ORDER = []
random.seed(666)
for i in range(0, 12):
LIST_ORDER.append([])
for j in range(0, 2):
LIST_ORDER[i].append(random.randrange(1, 6))
LIST_ORDER[i].append(10-sum(LIST_ORDER[i]))
LIST_APPOINTMENTS = model.generate_route(
12,
10,
1000,
1000,
model.Point(500, 500))
LIST_INDIVIDUALS = []
# print("Dataset:\n")
for i in range(0, 12):
# print("Appointments: ")
# print(LIST_APPOINTMENTS)
offset = 0
list_routes = []
vehicle_list = LIST_ORDER[i]
for vehicleCount in vehicle_list:
for j in range(offset, offset+vehicleCount):
list_routes.append(j)
offset += vehicleCount
LIST_INDIVIDUALS.append(
genome.MvrpIndividual(
[list_routes,
LIST_ORDER[i]]
)
)
print str(LIST_INDIVIDUALS[i].decode(LIST_APPOINTMENTS)) + " " + \
str(LIST_INDIVIDUALS[i].is_time_constraint_respected(
LIST_APPOINTMENTS))
#print("Time respected: ")
#print(LIST_INDIVIDUALS[i].is_time_constraint_respected(LIST_APPOINTMENTS))
#print("Load respected: ")
#print(LIST_INDIVIDUALS[i].is_load_respected())
#print(LIST_INDIVIDUALS[i].decode(LIST_APPOINTMENTS))
RANDOM_DATA = [[1, 2, 3], [4, 5, 6, 7, 8], [9, 10, 12, 11]]
TEST_VALUE = genome.MvrpIndividual([0, 1])
TEST_VALUE.encode(RANDOM_DATA)
print RANDOM_DATA
print TEST_VALUE
| 25.370968 | 79 | 0.651621 |
"""
This code contains tests for the functions of the class representing
an individual.
"""
import random
import genome
import model
LIST_ORDER = []
random.seed(666)
for i in range(0, 12):
LIST_ORDER.append([])
for j in range(0, 2):
LIST_ORDER[i].append(random.randrange(1, 6))
LIST_ORDER[i].append(10-sum(LIST_ORDER[i]))
LIST_APPOINTMENTS = model.generate_route(
12,
10,
1000,
1000,
model.Point(500, 500))
LIST_INDIVIDUALS = []
for i in range(0, 12):
offset = 0
list_routes = []
vehicle_list = LIST_ORDER[i]
for vehicleCount in vehicle_list:
for j in range(offset, offset+vehicleCount):
list_routes.append(j)
offset += vehicleCount
LIST_INDIVIDUALS.append(
genome.MvrpIndividual(
[list_routes,
LIST_ORDER[i]]
)
)
print str(LIST_INDIVIDUALS[i].decode(LIST_APPOINTMENTS)) + " " + \
str(LIST_INDIVIDUALS[i].is_time_constraint_respected(
LIST_APPOINTMENTS))
RANDOM_DATA = [[1, 2, 3], [4, 5, 6, 7, 8], [9, 10, 12, 11]]
TEST_VALUE = genome.MvrpIndividual([0, 1])
TEST_VALUE.encode(RANDOM_DATA)
print RANDOM_DATA
print TEST_VALUE
| false | true |
f71d5a0785d3f6fa6ad4add5d7311030717d4550 | 21,178 | py | Python | code_cr.py | ssawwqdf/-project-stock_info_dashboard | f14a462d915d2207db1da12307aefdef4b6921e1 | [
"MIT"
] | 1 | 2022-02-19T20:22:05.000Z | 2022-02-19T20:22:05.000Z | code_cr.py | ssawwqdf/-project-stock_info_dashboard | f14a462d915d2207db1da12307aefdef4b6921e1 | [
"MIT"
] | null | null | null | code_cr.py | ssawwqdf/-project-stock_info_dashboard | f14a462d915d2207db1da12307aefdef4b6921e1 | [
"MIT"
] | null | null | null | import re
import numpy as np
import pandas as pd
import requests #웹통신
import json
from pmdarima.arima import ndiffs
import pmdarima as pm
from pykrx import stock
from bs4 import BeautifulSoup
import html5lib
# ==============
# 업종 분류
# ==============
# -------- 동일 업종 기업 출력
# TODO(미완성) 동일 업종 선택
def select_same_industry(corp_name):
    """Return company names sharing `corp_name`'s industry classification.

    NOTE(review): marked incomplete (TODO) upstream. Relies on the
    module-level `com_df` company table — assumes columns 'nm',
    'industry' and 'corp_name'; confirm against the loader.
    """
    indus=com_df[com_df['nm']==corp_name]['industry'].values[0] # TODO(verify df)
    # print(com_df.groupby(by='industry')['nm'].nunique().max()) # up to 151 peers per industry -> crawl 151 statements?
    list_com=com_df[com_df['industry']==indus]['corp_name'].values.tolist()
    return list_com
# -------- 네이버증권 연관기업 코드(hjh)
def relate_code_crawl(co):
    """Return 6-digit codes of related tickers from the Naver Finance page of `co`."""
    # Fetch the page that lists related tickers for this stock code.
    page_url = 'https://finance.naver.com/item/main.naver?code=' + str(co)
    tables = pd.read_html(page_url, encoding='CP949')
    # Table 4 carries related names/codes in its header row; drop the
    # first column (the '종목명' label column).
    headers = tables[4].columns.tolist()[1:]
    # Each remaining header ends with the 6-digit stock code.
    return [header[-6:] for header in headers]
#relate_code_crawl('000660')
# ==============
# 기업 이름 코드 변환
# ==============
# -------- 네이버 재무제표 크롤링 용 gicode로 변환
def nm_to_bs_gicode(corp_name):
    """Look up the FnGuide gicode (column 'cd') for a company name."""
    matched = com_df.loc[com_df['nm'] == corp_name, 'cd']
    return matched.values[0]
def stc_code_to_bs_gicode(stock_code):
    """Look up the FnGuide gicode (column 'cd') for a 6-digit stock code."""
    matched = com_df.loc[com_df['stock_code'] == stock_code, 'cd']
    return matched.values[0]
def yh_code_to_bs_gicode(yh_code):
    """Look up the FnGuide gicode (column 'cd') for a Yahoo Finance ticker.

    Bug fix: the filter previously compared against the undefined name
    `yhcode`, which raised NameError on every call; it now uses the
    `yh_code` parameter, matching the sibling lookup helpers.
    """
    gi = com_df[com_df['yh_code'] == yh_code]['cd']
    gi = gi.values[0]
    return gi
# -------- 네이버 금융 크롤링 용 gicode로 변환
def nm_to_fn_gicode(corp_name):
    """Look up the 6-digit stock code (Naver Finance id) for a company name."""
    matched = com_df.loc[com_df['nm'] == corp_name, 'stock_code']
    return matched.values[0]
def yh_code_to_fn_gicode(yh_code):
    """Look up the 6-digit stock code for a Yahoo Finance ticker code."""
    matched = com_df.loc[com_df['yh_code'] == yh_code, 'stock_code']
    return matched.values[0]
# -------- 코드를 기업이름으로 변환
def stc_code_to_nm(stock_code):
    """Look up the company name for a 6-digit stock code."""
    matched = com_df.loc[com_df['stock_code'] == stock_code, 'nm']
    return matched.values[0]
def yh_code_to_nm(yh_code):
    """Look up the company name for a Yahoo Finance ticker code."""
    matched = com_df.loc[com_df['yh_code'] == yh_code, 'nm']
    return matched.values[0]
# ==============
# 데이터 수집
# ==============
# -------- Balance Sheets API call
# def bs_api(corp_name=None, yh_code=None, stock_code=None):
# print('haha')
# -------- Balance Sheets Crawling(재무제표 크롤링)
# 220220 수정
# 1) 매개변수 stock_code로 축약
# 2) kind로 특정 테이블 지정하는 대신 데이터프레임 리스트 전체 반환
# 3) '~계산에 참여한 계정 펼치기' 제거는 선택사항으로 둠
def bs_craw(stock_code, clear_name=False):
    """Crawl FnGuide financial statements for `stock_code`.

    Returns the list of tables from the FnGuide finance page. Index guide
    (per upstream notes): 0/1 annual/quarterly income statements,
    2/3 annual/quarterly balance sheets, 4/5 annual/quarterly cash-flow
    statements.

    Args:
        stock_code: 6-digit KRX stock code, converted to an FnGuide gicode.
        clear_name: when True, strip the boilerplate suffix
            '계산에 참여한 계정 펼치기' ("expand accounts used in the
            calculation") from the account-name column of every table.

    Fixes the stale docstring that documented a nonexistent `kind`
    parameter, and replaces the non-idiomatic `clear_name == False` test.
    """
    gcode = stc_code_to_bs_gicode(stock_code)
    url = f"http://comp.fnguide.com/SVO2/ASP/SVD_Finance.asp?NewMenuID=103&gicode={gcode}"
    table_list = pd.read_html(url, encoding='UTF-8')
    if not clear_name:
        return table_list
    # Remove the boilerplate suffix from the first (account-name) column.
    new_table_list = []
    for tbl in table_list:
        for i, idx in enumerate(tbl.iloc[:, 0]):
            tbl.iloc[i, 0] = idx.replace('계산에 참여한 계정 펼치기', '')
        new_table_list.append(tbl)
    return new_table_list
# ------- 네이버 금융
# 220220 수정
# 1) 매개변수 stock_code로 축약
# 2) kind로 특정 테이블 지정하는 대신 데이터프레임 리스트 전체 반환
def fn_craw(stock_code):
    """Crawl the Naver Finance main page for `stock_code`; return all tables.

    Index guide for the returned list (several tables still need further
    processing per upstream TODOs; layout depends on the Naver page):
     0: previous/current day limits, volume
     1: per-broker buy/sell info
     2: foreign/institutional trading info
     3: company earnings summary (annual/quarterly key financials)
     4: same-industry comparison
     5: market cap, share count, par value
     6: foreign ownership limit/holdings
     7: target price info
     8: PER, PBR, dividend yield (varies with price)
     9: same-industry PER and change rates
    10: order book, 10 levels
    11: most-viewed tickers (KOSPI)
    12: most-viewed tickers (KOSDAQ)
    """
    gcode = str(stock_code)
    url = f"https://finance.naver.com/item/main.naver?code={gcode}"
    table_list = pd.read_html(url, encoding='euc-kr')
    return table_list
# ==============
# 지표 선정
# ==============
# 220222 날씨 수정 시작 ---------------------------------------------
# -------- 지표 선정
# 220220 수정
# 1) 매개변수 stock_code로 축약
# 2) 데이터프레임 하나가 아닌 리스트로 받아오기때문에 kind 제거하고 직접 선택해줌
# 3) sli_df_y, sil_df_q 에서 '-' 가공 시 if 조건에 따라 처리하는 대신 lambda와 re.sub 이용
# 4) dict 대신 array로 반환, 기업 이름(nm도 반환)
def idv_radar_weather_data(stock_code):
    """Compute radar-chart and weather-chart indicators for one company.

    Radar indicators (data_arr, in order):
        1. dividend payout ratio          2. quick ratio (liquidity)
        3. inverse debt ratio (soundness) 4. net margin (profitability)
        5. sustainable growth rate = (1 - payout) * ROE
    Weather indicators (weather_arr): PER, PBR, ROE, EPS, BPS.

    Returns (data_arr, weather_arr, name, foreign_net_buy, institution_net_buy),
    or falls through and implicitly returns None when the company lacks enough
    published figures (e.g. newly listed firms) -- callers filter out None.
    """
    gcode = stock_code
    nm = stc_code_to_nm(stock_code)
    # Fetch the Naver page once (the original fetched it three times).
    tables = fn_craw(gcode)
    sil_df = tables[3]                       # 3: key financials by year/quarter
    foreign_ms = tables[2].loc[1, '외국인']  # 2: foreigner net buying
    giguan_ms = tables[2].loc[1, '기관']     # 2: institution net buying
    if (sil_df.iloc[0:8, 3].isna().sum()) > 0:
        pass  # missing annual history (e.g. newly listed company)
    elif (sil_df.iloc[0:8, 9].isna().sum()) > 0:
        pass  # missing quarterly history
    else:
        # Dividends are disclosed yearly, so payout uses the latest annual
        # column; everything else uses the latest quarterly column.  .iloc is
        # used because disclosure dates differ between companies.
        sil_df_y = sil_df['최근 연간 실적'].iloc[:, 2]
        sil_df_q = sil_df['최근 분기 실적'].iloc[:, 4]
        sil_df_y = sil_df_y.fillna(0)
        sil_df_q = sil_df_q.fillna(0)
        if sil_df_y.dtype == 'O':
            # A bare '-' means "no figure"; coerce it to 0 before casting.
            sil_df_y = sil_df_y.apply(lambda x: re.sub('^-$', '0', '{}'.format(x)))
            sil_df_y = sil_df_y.astype('float')
        if sil_df_q.dtype == 'O':
            sil_df_q = sil_df_q.apply(lambda x: re.sub('^-$', '0', '{}'.format(x)))
            sil_df_q = sil_df_q.astype('float')
        # 1. dividend payout ratio
        bd_tend = sil_df_y[15]
        # 2. liquidity: quick ratio ((current assets - inventory) / current liabilities)
        dj_rate = sil_df_q[7]
        # 3. soundness: inverse of the debt ratio, as a percentage
        bch_rate = sil_df_q[6] / 100
        bch_rate = round((1 / bch_rate) * 100, 2)
        # 4. profitability: net margin (net income / revenue)
        dg_bene = sil_df_q[2]
        mch = sil_df_q[0]
        suyk = round((dg_bene / mch) * 100, 2)
        # 5. growth: sustainable growth rate = retention ratio * ROE
        roe = sil_df_y[5] / 100
        ubo = (100 - bd_tend) / 100
        grth = round(roe * ubo * 100, 2)
        data_arr = np.array([bd_tend, dj_rate, bch_rate, suyk, grth])
        # Weather indicators, all from the latest annual column.
        weather_per = sil_df_y[10]
        weather_pbr = sil_df_y[12]
        weather_roe = sil_df_y[5]
        weather_eps = sil_df_y[9]
        weather_bps = sil_df_y[11]
        weather_arr = np.array([weather_per, weather_pbr, weather_roe, weather_eps, weather_bps])
        return data_arr, weather_arr, nm, foreign_ms, giguan_ms
# 수정수정수정
# -------- 관련 기업 지표 선정(상대적 비율 기준)
# 220220 수정
# 1) 매개변수 stock_code로 축약
# 2) dict 대신 array로 반환, 기업 이름(nm도 반환)
# 220222 날씨
def relate_radar_weather_data(stock_code):
    """Relative radar/weather chart data for a stock and its industry peers.

    Crawls the peer list via relate_code_crawl(), computes each peer's
    indicators with idv_radar_weather_data(), and rescales every radar
    indicator to percent-of-peer-average so the five axes are comparable.

    Returns (axis_labels, [{name: [5 values]}, ...], weather_row_for_target,
    foreign_net_buy, institution_net_buy).  foreign/institution default to
    0.01 when the target company produced no data (its entry is None).
    """
    label_list = ['배당성향', '유동성', '건전성', '수익성', '성장성']
    gcode = stock_code
    relate_corp = relate_code_crawl(co=gcode)
    # One entry per peer (None for companies with insufficient data).
    per_corp = [idv_radar_weather_data(stock_code=stcd) for stcd in relate_corp]
    radar_list = [x[0] for x in per_corp if x is not None]
    weather_list = [x[1] for x in per_corp if x is not None]
    nm_list = [x[2] for x in per_corp if x is not None]
    # Net buying of the target company (first peer is the target itself);
    # indexing None raises TypeError, hence the fallback.
    try:
        foreign_ms = per_corp[0][3]
    except TypeError:
        foreign_ms = 0.01
    try:
        giguan_ms = per_corp[0][4]
    except TypeError:
        giguan_ms = 0.01
    # Rescale each radar indicator column to percent of the peer average.
    radar_list = np.array(radar_list)
    for col in range(5):
        radar_list[:, col] = (radar_list[:, col] / radar_list[:, col].mean()) * 100
    radar_dict_list = []
    for i, nm in enumerate(nm_list):
        radar_dict_list.append({nm: radar_list[i, :].tolist()})
    # Weather values become each company's ratio to the peer average.
    weather_list = np.array(weather_list)
    for col in range(5):
        weather_list[:, col] = weather_list[:, col] / weather_list[:, col].mean()
    weather_list = np.round(weather_list, 2)
    return label_list, radar_dict_list, weather_list[0], foreign_ms, giguan_ms
# 220222 날씨 수정 끝 ---------------------------------------------
# ==============
# 지표 선정
# ==============
# -------- 지표 선정
# 220220 수정
# 1) 매개변수 stock_code로 축약
# 2) 데이터프레임 하나가 아닌 리스트로 받아오기때문에 kind 제거하고 직접 선택해줌
# 3) sli_df_y, sil_df_q 에서 '-' 가공 시 if 조건에 따라 처리하는 대신 lambda와 re.sub 이용
# 4) dict 대신 array로 반환, 기업 이름(nm도 반환)
def idv_radar_data(stock_code):
    """Compute the five radar-chart indicators for a single company.

    Indicators, in order: dividend payout ratio, quick ratio (liquidity),
    inverse debt ratio (soundness), net margin (profitability), and
    sustainable growth rate ((1 - payout) * ROE).

    Returns (indicator_array, company_name), or None when the company has
    too few published figures (e.g. newly listed firms); callers filter
    out None results.
    """
    name = stc_code_to_nm(stock_code)
    fin_df = fn_craw(stock_code)[3]  # 3: key financials by year/quarter
    # Newly listed companies lack the required history; bail out early.
    if fin_df.iloc[0:8, 3].isna().sum() > 0:
        return None
    if fin_df.iloc[0:8, 9].isna().sum() > 0:
        return None
    # Dividend figures come from the latest annual column, the rest from the
    # latest quarterly column; .iloc because disclosure dates vary by company.
    yearly = fin_df['최근 연간 실적'].iloc[:, 2].fillna(0)
    quarterly = fin_df['최근 분기 실적'].iloc[:, 4].fillna(0)
    # A bare '-' means "no figure"; coerce to 0 before casting to float.
    if yearly.dtype == 'O':
        yearly = yearly.apply(lambda v: re.sub('^-$', '0', '{}'.format(v)))
        yearly = yearly.astype('float')
    if quarterly.dtype == 'O':
        quarterly = quarterly.apply(lambda v: re.sub('^-$', '0', '{}'.format(v)))
        quarterly = quarterly.astype('float')
    payout = yearly[15]                                   # dividend payout ratio
    quick_ratio = quarterly[7]                            # liquidity
    soundness = round((1 / (quarterly[6] / 100)) * 100, 2)  # inverse debt ratio
    margin = round((quarterly[2] / quarterly[0]) * 100, 2)  # net income / revenue
    retention = (100 - payout) / 100
    growth = round((yearly[5] / 100) * retention * 100, 2)  # sustainable growth
    return np.array([payout, quick_ratio, soundness, margin, growth]), name
# -------- 관련 기업 지표 선정(상대적 비율 기준)
# 220220 수정
# 1) 매개변수 stock_code로 축약
# 2) dict 대신 array로 반환, 기업 이름(nm도 반환)
def relate_radar_data(stock_code):
    """Radar-chart data for a stock and its industry peers.

    Each of the five indicators is rescaled to percent-of-peer-average so
    the axes are comparable across companies.

    Returns (axis_labels, [{company_name: [5 values]}, ...]).
    """
    label_list = ['배당성향', '유동성', '건전성', '수익성', '성장성']
    gcode = stock_code
    relate_corp = relate_code_crawl(co=gcode)
    # One entry per peer; None for companies with insufficient data.
    results = [idv_radar_data(stock_code=stcd) for stcd in relate_corp]
    nm_list = [x[1] for x in results if x is not None]
    arr = np.array([x[0] for x in results if x is not None])
    # Rescale each indicator column to percent of the peer average.
    for col in range(5):
        arr[:, col] = (arr[:, col] / arr[:, col].mean()) * 100
    dict_list = [{nm: arr[i, :].tolist()} for i, nm in enumerate(nm_list)]
    return label_list, dict_list
# -------- 관련 기업 지표 선정(원본)
# def relate_radar_data(yh_code=None, corp_name=None, stock_code=None):
# label_list=['배당성향', '유동성', '건전성', '수익성', '성장성']
# dict_list = []
#
# # 주식 코드로 변환
# gcode = 0
# if yh_code != None:
# gcode = yh_code_to_fn_gicode(yh_code)
# elif corp_name != None:
# gcode = nm_to_fn_gicode(corp_name)
# elif stock_code != None:
# gcode = stock_code
#
# relate_corp = relate_code_crawl(co=gcode)
#
# dict_list = [idv_radar_data(stock_code=stcd) for stcd in relate_corp]
#
# dict_list = [x for x in dict_list if x is not None]
#
#
# return label_list, dict_list
# ==============
# 시각화
# ==============
# -------- 매출, 당기순이익 추이 그래프
# 220220 수정
# 1) 매개변수 stock_code로 축약
# 2) 크롤링한 데이터는 list로 받아오므로 kind 없애고 직접 인덱스 처리
def mch_dg(stock_code):
    """Revenue and net-income trend for one company.

    Reads the annual income statement crawled by bs_craw() and returns
    (period_labels, revenue_values, net_income_values) for four recent
    periods plus the same period of the previous year.

    (The original also looked up the company name, which was never used;
    that wasted lookup has been removed.)
    """
    bs_df = bs_craw(stock_code=stock_code)[0]   # 0: annual income statement
    label_list = bs_df.columns[1:6].tolist()    # four periods + prior-year period
    mch_list = bs_df.loc[0, label_list].tolist()    # row 0: revenue (매출액)
    dg_list = bs_df.loc[15, label_list].tolist()    # row 15: net income (당기순이익)
    return label_list, mch_list, dg_list
def icon_selection(index_array):
    """Map relative-valuation indices to weather icon names.

    An index is the company's value divided by the peer average, so 1.0 is
    "average".  Higher ratios (expensive relative to peers) map to worse
    weather:
        > 3.0        -> "RAIN"
        (1.2, 3.0]   -> "CLOUDY"
        (0.8, 1.2]   -> "PARTLY_CLOUDY_DAY"
        (0.0, 0.8]   -> "CLEAR_DAY"
        <= 0.0       -> "SNOW"
    """
    def _icon(value):
        if value > 3:
            return "RAIN"
        if value > 1.2:
            return "CLOUDY"
        if value > 0.8:
            return "PARTLY_CLOUDY_DAY"
        if value > 0:
            return "CLEAR_DAY"
        return "SNOW"

    return [_icon(value) for value in index_array]
def foreign_giguan(index_array):
    """Map foreigner/institution net-buying figures to weather icons.

    Net buying (> 0) is "CLEAR_DAY", flat (== 0) is "CLOUDY", and net
    selling (< 0) is "RAIN".
    """
    return [
        "CLEAR_DAY" if value > 0 else "CLOUDY" if value == 0 else "RAIN"
        for value in index_array
    ]
# ====================================================
# 데이터
# ====================================================
# -------- 병합 파일 불러오기
# Company master table: one row per listed company, merged from the crawl
# sources.  Code columns are read as strings so leading zeros survive;
# listing-date columns are parsed as datetimes.
com_df=pd.read_csv('com_df.csv',
                   dtype={'stock_code': 'str', '표준코드': 'str', '단축코드': 'str', 'stock_code_ori':'str'},
                   parse_dates=['listed_date', '상장일'])
# -------- 뉴스 크롤링
def news_crawl(gi):
    """Crawl the 5 most recent news articles for a stock from the Naver
    mobile stock-news JSON API.

    gi : stock code (string or int).
    Returns a DataFrame with columns:
        offname (publisher), rdate (yyyymmdd), title, imgsrc (image URL),
        content (article body), url (article page URL).

    Cleanups vs. the original: the builtin name `list` is no longer
    shadowed; a dead first `url` assignment (immediately overwritten) and
    the no-op `.replace('"', '\"')` calls were removed.
    """
    tot_list = []
    # JSON endpoint discovered via the browser network tab; returns the 5
    # newest title-search results for this stock code.
    url = "https://m.stock.naver.com/api/news/stock/" + str(gi) + "?pageSize=5&searchMethod=title_entity_id.basic&page=1"
    res = requests.get(url)
    news_list = json.loads(res.text)
    for news in news_list:
        item = news['items'][0]
        office_id = item['officeId']    # publisher id
        article_id = item['articleId']  # article id
        row = [
            item['officeName'],      # publisher
            item['datetime'][:8],    # yyyymmdd
            item['title'],
            item['imageOriginLink'],
            item['body'],
            'https://m.stock.naver.com/domestic/stock/005930/news/view/' + str(office_id) + '/' + str(article_id),
        ]
        tot_list.append(row)
    news_df = pd.DataFrame(data=tot_list, columns=['offname','rdate','title','imgsrc','content','url'])
    # Undo the API's HTML-escaping of ampersands.  (The original called
    # .replace('&', '&'), a no-op; '&amp;' is the escaped form it meant.)
    news_df['title'] = news_df['title'].str.replace('&amp;', '&')
    news_df['content'] = news_df['content'].str.replace('&amp;', '&')
    return news_df
#co-종목코드
def relate_code_crawl(co):
#연관 종목코드 있는 페이지 불러오기
url='https://finance.naver.com/item/main.naver?code='+str(co)
page=pd.read_html(url,encoding='CP949')
#연관 종목명과 종목코드 뽑아내기(code_list[0]은 '종목명'이어서 제외)
code_list=page[4].columns.tolist()
code_list=code_list[1:]
#종목코드 리스트 반환
codes=[]
for word in (code_list):
codes.append(word[-6:])
#print(codes)
return codes
# def before_1w_kospi(date):
# before1w=date-timedelta(days=7)
# return fdr.DataReader('KS11',before1w)[['Close']]#, fdr.DataReader('KQ11',before1w)
def invest_opinion(gcode):
    """Average analyst opinion for a stock, as a percentage of the 5-point scale.

    Parses the 'x.yy' opinion score from the Naver company-info page and
    rescales it to 0-100.  Returns 0.1 when no opinion is published (the
    split/int conversion raises ValueError).
    """
    url = 'https://finance.naver.com/item/coinfo.naver?code=' + str(gcode)
    tables = pd.read_html(url, encoding='CP949')
    try:
        whole, frac = tables[3][1].tolist()[0][:4].split('.')
        return ((int(whole) + int(frac) / 100) / 5) * 100
    except ValueError:
        return 0.1
#최상현 함수
def crawl_ifrs(gcode):
    """Crawl the annual IFRS (consolidated) highlights table from FnGuide
    and return it rendered as an HTML table string.

    gcode: 6-digit stock code ('A' is prefixed to form the FnGuide gicode).
    Missing cells are temporarily filled with the sentinel 9999999999 so the
    value columns can be cast to float and comma-formatted; the sentinel is
    turned back into '-' for display at the end.
    """
    url = "http://comp.fnguide.com/SVO2/ASP/SVD_Main.asp?pGB=1&gicode=A"+gcode+"&cID=&MenuYn=Y&ReportGB=&NewMenuID=11&stkGb=701"
    table_list = pd.read_html(url, encoding='UTF-8')
    ifrs = table_list[10]  # table 10: IFRS highlights (Annual / quarterly)
    ifrs = ifrs.fillna('9999999999')  # sentinel for "no figure"
    for i in range(1, 5):  # the four 'Annual' value columns
        if ifrs.iloc[:, i].dtype == 'O':
            # Any remaining strings (e.g. '-') also become the sentinel.
            ifrs.iloc[:, i] = ifrs.iloc[:, i].apply(lambda x: '9999999999' if type(x) == str else x)
            print(ifrs.iloc[:, i])  # NOTE(review): leftover debug print
        ifrs.iloc[:, i] = ifrs.iloc[:, i].astype('float')
        # Add thousands separators: 1234567.0 -> '1,234,567.0'
        ifrs.iloc[:, i] = ifrs.iloc[:, i].apply(lambda x: format(float(x), ','))
    # Keep only the row labels plus the annual columns.
    ifrs = pd.concat([ifrs.iloc[:, 0], ifrs['Annual']], axis=1)
    ifrs = ifrs.astype(str)
    for i in range(1, 5):
        # Strip the trailing '.0' on row ranges that hold integral amounts
        # (row positions are hard-coded to the FnGuide table layout).
        ifrs.iloc[:12, i] = ifrs.iloc[:12, i].apply(lambda x: x[:-2])
        ifrs.iloc[18:21, i] = ifrs.iloc[18:21, i].apply(lambda x: x[:-2])
        ifrs.iloc[23:24, i] = ifrs.iloc[23:24, i].apply(lambda x: x[:-2])
    # Turn the (now comma-formatted) sentinel values back into a dash.
    ifrs = ifrs.replace(['9,999,999,999', '9,999,999,999.0'], ['-', '-'])
    ifrs.rename(columns={'IFRS(연결)': ''}, inplace=True)
    ifrs = ifrs.to_html(justify="right", index=False, classes="table")
    ifrs = ifrs.replace('border="1"', 'border="0"')
    pd.options.display.float_format = '{:,.0f}'.format  # NOTE(review): global pandas side effect
    # Post-process the HTML: right-align cells, center headers, fixed layout.
    ifrs = ifrs.replace('<td>', '<td align="right">')
    ifrs = ifrs.replace('<th>', '<th style="text-align: right;">')
    ifrs = ifrs.replace('halign="left"', 'style="text-align: center;"')
    ifrs = ifrs.replace('class ="dataframe table"',
                        'class ="dataframe table" style = "table-layout:fixed;word-break:break-all;"')
    return (ifrs)
def ori_code(yh_code):
    """Return the original (pre-shortened) stock code for a Yahoo code."""
    matches = com_df.loc[com_df['yh_code'] == yh_code, 'stock_code_ori']
    return matches.values[0]
# 아리마 모델
def stock_predict(code,ptype):
data = stock.get_market_ohlcv_by_date(fromdate="20220101", todate="20220222", ticker=str(code))
print(data.head())
data=data[[ptype]]
y_train=data
y_test=data
kpss_diffs = ndiffs(y_train, alpha=0.05, test='kpss', max_d=6)
adf_diffs = ndiffs(y_train, alpha=0.05, test='adf', max_d=6)
n_diffs = max(adf_diffs, kpss_diffs)
print(f"추정된 차수 d = {n_diffs}")
model=pm.auto_arima(y_train,d=n_diffs,seasonal=False,trace=True)
model.fit(y_train)
print(model.summary())
def forecast_one_step():
fc, conf_int = model.predict(n_periods=1 # 한 스텝씩!
, return_conf_int=True) # 신뢰구간 출력
return (
fc.tolist()[0],
np.asarray(conf_int).tolist()[0]
)
forecasts = []
y_pred = []
pred_upper = []
pred_lower = []
for new_ob in y_test[ptype]:
fc, conf = forecast_one_step()
y_pred.append(int(fc))
pred_upper.append(conf[1])
pred_lower.append(conf[0])
## 모형 업데이트 !!
model.update(new_ob)
fc_last = model.predict(n_periods=1 # 한 스텝씩!
)
df=pd.DataFrame({"test": y_test[ptype], "pred": y_pred})
print(df.tail())
def MAE(y_test, y_pred):
return np.mean(np.abs((df['test']-df['pred'])/df['test']))*100
mae=np.round(MAE(y_test, y_pred).astype('float'),4)
print(f"MAE: {MAE(y_test, y_pred):.3f}")
price_list=[]
return int(fc_last),mae
| 29.53696 | 140 | 0.551044 | import re
import numpy as np
import pandas as pd
import requests
import json
from pmdarima.arima import ndiffs
import pmdarima as pm
from pykrx import stock
from bs4 import BeautifulSoup
import html5lib
def select_same_industry(corp_name):
indus=com_df[com_df['nm']==corp_name]['industry'].values[0]
dustry']==indus]['corp_name'].values.tolist()
return list_com
def relate_code_crawl(co):
url='https://finance.naver.com/item/main.naver?code='+str(co)
page=pd.read_html(url,encoding='CP949')
code_list=page[4].columns.tolist()
code_list=code_list[1:]
codes=[]
for word in (code_list):
codes.append(word[-6:])
return codes
def nm_to_bs_gicode(corp_name):
gi=com_df[com_df['nm']==corp_name]['cd']
gi=gi.values[0]
return gi
def stc_code_to_bs_gicode(stock_code):
gi = com_df[com_df['stock_code'] == stock_code]['cd']
gi = gi.values[0]
return gi
def yh_code_to_bs_gicode(yh_code):
gi = com_df[com_df['yh_code'] == yhcode]['cd']
gi = gi.values[0]
return gi
def nm_to_fn_gicode(corp_name):
gi=com_df[com_df['nm']==corp_name]['stock_code']
gi=gi.values[0]
return gi
def yh_code_to_fn_gicode(yh_code):
gi=com_df[com_df['yh_code']==yh_code]['stock_code']
gi=gi.values[0]
return gi
def stc_code_to_nm(stock_code):
gi = com_df[com_df['stock_code'] == stock_code]['nm']
gi = gi.values[0]
return gi
def yh_code_to_nm(yh_code):
gi = com_df[com_df['yh_code'] == yh_code]['nm']
gi = gi.values[0]
return gi
def bs_craw(stock_code, clear_name=False):
gcode = stc_code_to_bs_gicode(stock_code)
url = f"http://comp.fnguide.com/SVO2/ASP/SVD_Finance.asp?NewMenuID=103&gicode={gcode}"
table_list = pd.read_html(url, encoding='UTF-8')
if clear_name == False:
return table_list
else:
new_table_list = []
for tbl in table_list:
for i, idx in enumerate(tbl.iloc[:, 0]):
m = idx.replace('계산에 참여한 계정 펼치기', '')
tbl.iloc[i, 0] = m
new_table_list.append(tbl)
return new_table_list
def fn_craw(stock_code):
gcode = str(stock_code)
url = f"https://finance.naver.com/item/main.naver?code={gcode}"
table_list = pd.read_html(url, encoding='euc-kr')
return table_list
def idv_radar_weather_data(stock_code):
gcode = stock_code
nm = stc_code_to_nm(stock_code)
sil_df = fn_craw(gcode)[3]
foreign_ms = fn_craw(gcode)[2].loc[1, '외국인']
giguan_ms = fn_craw(gcode)[2].loc[1, '기관']
if (sil_df.iloc[0:8, 3].isna().sum()) > 0:
pass
elif (sil_df.iloc[0:8, 9].isna().sum()) > 0:
pass
else:
sil_df_y = sil_df['최근 연간 실적'].iloc[:, 2]
sil_df_q = sil_df['최근 분기 실적'].iloc[:, 4]
sil_df_y = sil_df_y.fillna(0)
sil_df_q = sil_df_q.fillna(0)
if sil_df_y.dtype == 'O':
sil_df_y = sil_df_y.apply(lambda x: re.sub('^-$', '0', '{}'.format(x)))
sil_df_y = sil_df_y.astype('float')
if sil_df_q.dtype == 'O':
sil_df_q = sil_df_q.apply(lambda x: re.sub('^-$', '0', '{}'.format(x)))
sil_df_q = sil_df_q.astype('float')
bd_tend = sil_df_y[15]
dj_rate = sil_df_q[7]
bch_rate = sil_df_q[6] / 100
bch_rate = round((1 / bch_rate) * 100, 2)
= sil_df_q[2]
mch = sil_df_q[0]
suyk = round((dg_bene / mch) * 100, 2)
roe = sil_df_y[5] / 100
ubo = (100 - bd_tend) / 100
grth = round(roe * ubo * 100, 2)
data_arr = np.array([bd_tend, dj_rate, bch_rate, suyk, grth])
weather_per = sil_df_y[10]
weather_pbr = sil_df_y[12]
weather_roe = sil_df_y[5]
weather_eps = sil_df_y[9]
weather_bps = sil_df_y[11]
weather_arr = np.array([weather_per, weather_pbr, weather_roe, weather_eps, weather_bps])
return data_arr, weather_arr, nm, foreign_ms, giguan_ms
def relate_radar_weather_data(stock_code):
label_list = ['배당성향', '유동성', '건전성', '수익성', '성장성']
arr_list = []
gcode = stock_code
relate_corp = relate_code_crawl(co=gcode)
arr_list = [idv_radar_weather_data(stock_code=stcd) for stcd in relate_corp]
radar_list = [x[0] for x in arr_list if x is not None]
weather_list = [x[1] for x in arr_list if x is not None]
nm_list = [x[2] for x in arr_list if x is not None]
try:
foreign_ms = arr_list[0][3]
except TypeError:
foreign_ms=0.01
try:
giguan_ms = arr_list[0][4]
except TypeError:
giguan_ms=0.01
radar_list = np.array(radar_list)
radar_list[:, 0] = (radar_list[:, 0] / radar_list[:, 0].mean()) * 100
radar_list[:, 1] = (radar_list[:, 1] / radar_list[:, 1].mean()) * 100
radar_list[:, 2] = (radar_list[:, 2] / radar_list[:, 2].mean()) * 100
radar_list[:, 3] = (radar_list[:, 3] / radar_list[:, 3].mean()) * 100
radar_list[:, 4] = (radar_list[:, 4] / radar_list[:, 4].mean()) * 100
radar_dict_list = []
for i, nm in enumerate(nm_list):
dic = {}
dic[nm] = radar_list[i, :].tolist()
radar_dict_list.append(dic)
weather_list = np.array(weather_list)
weather_list[:, 0] = (weather_list[:, 0] / weather_list[:, 0].mean())
weather_list[:, 1] = (weather_list[:, 1] / weather_list[:, 1].mean())
weather_list[:, 2] = (weather_list[:, 2] / weather_list[:, 2].mean())
weather_list[:, 3] = (weather_list[:, 3] / weather_list[:, 3].mean())
weather_list[:, 4] = (weather_list[:, 4] / weather_list[:, 4].mean())
weather_list=np.round(weather_list, 2)
return label_list, radar_dict_list, weather_list[0], foreign_ms, giguan_ms
def idv_radar_data(stock_code):
gcode = stock_code
nm = stc_code_to_nm(stock_code)
sil_df = fn_craw(gcode)[3]
if (sil_df.iloc[0:8, 3].isna().sum()) > 0:
pass
elif (sil_df.iloc[0:8, 9].isna().sum()) > 0:
pass
else:
sil_df_y = sil_df['최근 연간 실적'].iloc[:, 2]
sil_df_q = sil_df['최근 분기 실적'].iloc[:, 4]
sil_df_y = sil_df_y.fillna(0)
sil_df_q = sil_df_q.fillna(0)
if sil_df_y.dtype == 'O':
sil_df_y = sil_df_y.apply(lambda x: re.sub('^-$', '0', '{}'.format(x)))
sil_df_y = sil_df_y.astype('float')
if sil_df_q.dtype == 'O':
sil_df_q = sil_df_q.apply(lambda x: re.sub('^-$', '0', '{}'.format(x)))
sil_df_q = sil_df_q.astype('float')
bd_tend = sil_df_y[15]
dj_rate = sil_df_q[7]
bch_rate = sil_df_q[6] / 100
bch_rate = round((1 / bch_rate) * 100, 2)
= sil_df_q[2]
mch = sil_df_q[0]
suyk = round((dg_bene / mch) * 100, 2)
roe = sil_df_y[5] / 100
ubo = (100 - bd_tend) / 100
grth = round(roe * ubo * 100, 2)
data_arr = np.array([bd_tend, dj_rate, bch_rate, suyk, grth])
return data_arr, nm
def relate_radar_data(stock_code):
label_list = ['배당성향', '유동성', '건전성', '수익성', '성장성']
arr_list = []
gcode = stock_code
relate_corp = relate_code_crawl(co=gcode)
arr_list = [idv_radar_data(stock_code=stcd) for stcd in relate_corp]
nm_list = [x[1] for x in arr_list if x is not None]
arr_list = [x[0] for x in arr_list if x is not None]
arr_list = np.array(arr_list)
arr_list[:, 0] = (arr_list[:, 0] / arr_list[:, 0].mean()) * 100
arr_list[:, 1] = (arr_list[:, 1] / arr_list[:, 1].mean()) * 100
arr_list[:, 2] = (arr_list[:, 2] / arr_list[:, 2].mean()) * 100
arr_list[:, 3] = (arr_list[:, 3] / arr_list[:, 3].mean()) * 100
arr_list[:, 4] = (arr_list[:, 4] / arr_list[:, 4].mean()) * 100
dict_list = []
for i, nm in enumerate(nm_list):
dic = {}
dic[nm] = arr_list[i, :].tolist()
dict_list.append(dic)
return label_list, dict_list
def mch_dg(stock_code):
gcode = stock_code
nm = stc_code_to_nm(stock_code)
bs_df = bs_craw(stock_code=gcode)[0]
label_list = bs_df.columns[1:6].tolist()
mch_list = bs_df.loc[0, label_list].tolist()
dg_list = bs_df.loc[15, label_list].tolist()
return label_list, mch_list, dg_list
def icon_selection(index_array):
res=[]
for idx in index_array:
if 3<idx :
res.append("RAIN")
elif ( 1.2<idx and idx<=3 ):
res.append("CLOUDY")
elif ( 0.8<idx and idx<=1.2 ):
res.append("PARTLY_CLOUDY_DAY")
elif ( 0<idx and idx<=0.8 ):
res.append("CLEAR_DAY")
else:
res.append("SNOW")
return res
def foreign_giguan(index_array):
res = []
for idx in index_array:
if idx >0:
res.append("CLEAR_DAY")
elif idx==0:
res.append("CLOUDY")
else:
res.append("RAIN")
return res
com_df=pd.read_csv('com_df.csv',
dtype={'stock_code': 'str', '표준코드': 'str', '단축코드': 'str', 'stock_code_ori':'str'},
parse_dates=['listed_date', '상장일'])
def news_crawl(gi):
tot_list = []
for p in range(1):
url = 'https://m.stock.naver.com/domestic/stock/' + str(gi) + '/news/title'
url = "https://m.stock.naver.com/api/news/stock/"+str(gi)+"?pageSize=5&searchMethod=title_entity_id.basic&page=1"
res = requests.get(url)
news_list = json.loads(res.text)
for i, news in enumerate(news_list) :
a=news['items'][0]['officeId']
b=news['items'][0]['articleId']
list = []
list.append(news['items'][0]['officeName'])
list.append(news['items'][0]['datetime'][:8])
list.append(news['items'][0]['title'].replace('"','\"')) #제목
list.append(news['items'][0]['imageOriginLink']) #이미지
list.append(news['items'][0]['body'].replace('"','\"'))
list.append('https://m.stock.naver.com/domestic/stock/005930/news/view/'+str(a)+'/'+str(b))
tot_list.append(list)
news_df = pd.DataFrame(data=tot_list, columns=['offname','rdate','title','imgsrc','content','url'])
news_df['title'] = news_df['title'].str.replace('&', '&')
news_df['content'] = news_df['content'].str.replace('&', '&')
return news_df
def relate_code_crawl(co):
url='https://finance.naver.com/item/main.naver?code='+str(co)
page=pd.read_html(url,encoding='CP949')
code_list=page[4].columns.tolist()
code_list=code_list[1:]
codes=[]
for word in (code_list):
codes.append(word[-6:])
return codes
rl='https://finance.naver.com/item/coinfo.naver?code='+str(gcode)
page=pd.read_html(url,encoding='CP949')
try:
a,b=page[3][1].tolist()[0][:4].split('.')
return ((int(a)+int(b)/100)/5)*100
except ValueError:
return 0.1
def crawl_ifrs(gcode):
url = "http://comp.fnguide.com/SVO2/ASP/SVD_Main.asp?pGB=1&gicode=A"+gcode+"&cID=&MenuYn=Y&ReportGB=&NewMenuID=11&stkGb=701"
table_list = pd.read_html(url, encoding='UTF-8')
ifrs = table_list[10]
ifrs = ifrs.fillna('9999999999')
for i in range(1, 5):
if ifrs.iloc[:, i].dtype == 'O':
ifrs.iloc[:, i] = ifrs.iloc[:, i].apply(lambda x: '9999999999' if type(x) == str else x)
print(ifrs.iloc[:, i])
ifrs.iloc[:, i] = ifrs.iloc[:, i].astype('float')
ifrs.iloc[:, i] = ifrs.iloc[:, i].apply(lambda x: format(float(x), ','))
ifrs = pd.concat([ifrs.iloc[:, 0], ifrs['Annual']], axis=1)
ifrs = ifrs.astype(str)
for i in range(1, 5):
ifrs.iloc[:12, i] = ifrs.iloc[:12, i].apply(lambda x: x[:-2])
ifrs.iloc[18:21, i] = ifrs.iloc[18:21, i].apply(lambda x: x[:-2])
ifrs.iloc[23:24, i] = ifrs.iloc[23:24, i].apply(lambda x: x[:-2])
ifrs = ifrs.replace(['9,999,999,999', '9,999,999,999.0'], ['-', '-'])
ifrs.rename(columns={'IFRS(연결)': ''}, inplace=True)
ifrs = ifrs.to_html(justify="right", index=False, classes="table")
ifrs = ifrs.replace('border="1"', 'border="0"')
pd.options.display.float_format = '{:,.0f}'.format
ifrs = ifrs.replace('<td>', '<td align="right">')
ifrs = ifrs.replace('<th>', '<th style="text-align: right;">')
ifrs = ifrs.replace('halign="left"', 'style="text-align: center;"')
ifrs = ifrs.replace('class ="dataframe table"',
'class ="dataframe table" style = "table-layout:fixed;word-break:break-all;"')
return (ifrs)
def ori_code(yh_code):
origin_stock=com_df[com_df['yh_code']==yh_code]['stock_code_ori'].values[0]
return origin_stock
def stock_predict(code,ptype):
data = stock.get_market_ohlcv_by_date(fromdate="20220101", todate="20220222", ticker=str(code))
print(data.head())
data=data[[ptype]]
y_train=data
y_test=data
kpss_diffs = ndiffs(y_train, alpha=0.05, test='kpss', max_d=6)
adf_diffs = ndiffs(y_train, alpha=0.05, test='adf', max_d=6)
n_diffs = max(adf_diffs, kpss_diffs)
print(f"추정된 차수 d = {n_diffs}")
model=pm.auto_arima(y_train,d=n_diffs,seasonal=False,trace=True)
model.fit(y_train)
print(model.summary())
def forecast_one_step():
fc, conf_int = model.predict(n_periods=1
, return_conf_int=True)
return (
fc.tolist()[0],
np.asarray(conf_int).tolist()[0]
)
forecasts = []
y_pred = []
pred_upper = []
pred_lower = []
for new_ob in y_test[ptype]:
fc, conf = forecast_one_step()
y_pred.append(int(fc))
pred_upper.append(conf[1])
pred_lower.append(conf[0])
el.update(new_ob)
fc_last = model.predict(n_periods=1
)
df=pd.DataFrame({"test": y_test[ptype], "pred": y_pred})
print(df.tail())
def MAE(y_test, y_pred):
return np.mean(np.abs((df['test']-df['pred'])/df['test']))*100
mae=np.round(MAE(y_test, y_pred).astype('float'),4)
print(f"MAE: {MAE(y_test, y_pred):.3f}")
price_list=[]
return int(fc_last),mae
| true | true |
f71d5a52c25390f4eb23da08757b775befe75028 | 610 | py | Python | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/celery_app.py | coordt/cookiecutter-django-project | 5466510b62ea178bba238990b367b99bb6d46d37 | [
"BSD-3-Clause"
] | null | null | null | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/celery_app.py | coordt/cookiecutter-django-project | 5466510b62ea178bba238990b367b99bb6d46d37 | [
"BSD-3-Clause"
] | null | null | null | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/celery_app.py | coordt/cookiecutter-django-project | 5466510b62ea178bba238990b367b99bb6d46d37 | [
"BSD-3-Clause"
] | null | null | null | import os
from celery import Celery
# set the default Django settings module for the 'celery' program.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "{{cookiecutter.project_slug}}.settings")
app = Celery("{{cookiecutter.project_slug}}")
# Using a string here means the worker doesn't have to serialize
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
# should have a `CELERY_` prefix.
app.config_from_object("django.conf:settings", namespace="CELERY")
# Load task modules from all registered Django app configs.
app.autodiscover_tasks()
| 35.882353 | 89 | 0.781967 | import os
from celery import Celery
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "{{cookiecutter.project_slug}}.settings")
app = Celery("{{cookiecutter.project_slug}}")
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
# should have a `CELERY_` prefix.
app.config_from_object("django.conf:settings", namespace="CELERY")
# Load task modules from all registered Django app configs.
app.autodiscover_tasks()
| true | true |
f71d5ada4f027de23ce274adfb59d424e9964cf2 | 4,696 | py | Python | src/python/pants/option/errors.py | paiforsyth/pants | 15da8db4f25d54b30d50ca4e00c066ae642a099c | [
"Apache-2.0"
] | null | null | null | src/python/pants/option/errors.py | paiforsyth/pants | 15da8db4f25d54b30d50ca4e00c066ae642a099c | [
"Apache-2.0"
] | null | null | null | src/python/pants/option/errors.py | paiforsyth/pants | 15da8db4f25d54b30d50ca4e00c066ae642a099c | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from typing import Tuple
from pants.option.scope import GLOBAL_SCOPE
# Root of this module's exception hierarchy; catch OptionsError to handle
# any registration, parsing, or config error uniformly.
class OptionsError(Exception):
    """An options system-related error."""
# -----------------------------------------------------------------------
# Option registration errors
# -----------------------------------------------------------------------
class RegistrationError(OptionsError):
    """An error at option registration time."""

    # Subclasses encode their user-facing message in their class docstring:
    # it is str.format()ed with the keyword args passed at raise time and
    # suffixed with the option name and scope.  A subclass without a
    # docstring is therefore a programming error, caught below.
    def __init__(self, scope: str, option: str, **msg_format_args) -> None:
        scope_str = "global scope" if scope == GLOBAL_SCOPE else f"scope {scope}"
        if self.__doc__ is None:
            raise ValueError(
                "Invalid RegistrationError definition. "
                "Please specify the error message in the docstring."
            )
        docstring = self.__doc__.format(**msg_format_args)
        super().__init__(f"{docstring} [option {option} in {scope_str}].")
# Each subclass below is a concrete registration error.  Its docstring IS the
# user-facing message: RegistrationError.__init__ format()s it with the kwargs
# supplied at raise time (e.g. {value_type}, {kwarg}).  Do not reword these
# docstrings for style -- that changes runtime error messages.
class BooleanOptionNameWithNo(RegistrationError):
    """Boolean option names cannot start with --no."""


class DefaultValueType(RegistrationError):
    """Default value {value_type}({default_value!r}) does not match option type {option_type}."""


class DefaultMemberValueType(DefaultValueType):
    """Default member value type mismatch.

    Member value {value_type}({member_value!r}) does not match list option type {member_type}.
    """


class HelpType(RegistrationError):
    """The `help=` argument must be a string, but was of type `{help_type}`."""


class ImplicitValIsNone(RegistrationError):
    """Implicit value cannot be None."""


class InvalidKwarg(RegistrationError):
    """Invalid registration kwarg {kwarg}."""


class InvalidKwargNonGlobalScope(RegistrationError):
    """Invalid registration kwarg {kwarg} on non-global scope."""


class InvalidMemberType(RegistrationError):
    """member_type {member_type} not allowed."""


class MemberTypeNotAllowed(RegistrationError):
    """member_type not allowed on option with type {type_}.

    It may only be specified if type=list.
    """


class NoOptionNames(RegistrationError):
    """No option names provided."""


class OptionAlreadyRegistered(RegistrationError):
    """An option with this name was already registered on this scope."""


class OptionNameDash(RegistrationError):
    """Option name must begin with a dash."""


class OptionNameDoubleDash(RegistrationError):
    """Long option name must begin with a double-dash."""


class PassthroughType(RegistrationError):
    """Options marked passthrough must be typed as a string list."""
# -----------------------------------------------------------------------
# Flag parsing errors
# -----------------------------------------------------------------------
# Parse-time errors.  Unlike RegistrationError subclasses, these docstrings
# are ordinary documentation: messages are built at the raise site.
class ParseError(OptionsError):
    """An error at flag parsing time."""


class BooleanConversionError(ParseError):
    """Indicates a value other than 'True' or 'False' when attempting to parse a bool."""


class FromfileError(ParseError):
    """Indicates a problem reading a value @fromfile."""


class MutuallyExclusiveOptionError(ParseError):
    """Indicates that two options in the same mutually exclusive group were specified."""
"""Indicates that unknown command-line flags were encountered in some scope."""
def __init__(self, flags: Tuple[str, ...], arg_scope: str):
self.flags = flags
self.arg_scope = arg_scope
scope = f"scope {self.arg_scope}" if self.arg_scope else "global scope"
msg = f"Unknown flags {', '.join(self.flags)} on {scope}"
super().__init__(msg)
# -----------------------------------------------------------------------
# Config parsing errors
# -----------------------------------------------------------------------
# Config-file errors; messages are built at the raise site.
class ConfigError(OptionsError):
    """An error encountered while parsing a config file."""


class ConfigValidationError(ConfigError):
    """A config file is invalid."""


class NoSectionError(ConfigError):
    # Raised when a requested section is absent from the config file.
    def __init__(self, section: str):
        super().__init__(f"No section: {section}")


class NoOptionError(ConfigError):
    # Raised when a section exists but lacks the requested option.
    def __init__(self, option: str, section: str):
        super().__init__(f"No option {option} in section {section}")
class InterpolationMissingOptionError(ConfigError):
    """Raised when a config value interpolates an option name that does not exist.

    Bug fix: the original called ``super().__init__(self, msg)``, passing the
    exception instance itself as an extra positional argument. ``Exception``
    stores all positional args in ``args``, so ``str(exc)`` rendered a
    two-element tuple instead of the intended message.
    """

    def __init__(self, option, section, rawval, reference):
        super().__init__(
            f"Bad value substitution: option {option} in section {section} contains an "
            f"interpolation key {reference} which is not a valid option name. Raw value: {rawval}"
        )
| 30.102564 | 99 | 0.636286 |
from typing import Tuple
from pants.option.scope import GLOBAL_SCOPE
class OptionsError(Exception):
class RegistrationError(OptionsError):
def __init__(self, scope: str, option: str, **msg_format_args) -> None:
scope_str = "global scope" if scope == GLOBAL_SCOPE else f"scope {scope}"
if self.__doc__ is None:
raise ValueError(
"Invalid RegistrationError definition. "
"Please specify the error message in the docstring."
)
docstring = self.__doc__.format(**msg_format_args)
super().__init__(f"{docstring} [option {option} in {scope_str}].")
class BooleanOptionNameWithNo(RegistrationError):
class DefaultValueType(RegistrationError):
class DefaultMemberValueType(DefaultValueType):
class HelpType(RegistrationError):
class ImplicitValIsNone(RegistrationError):
class InvalidKwarg(RegistrationError):
class InvalidKwargNonGlobalScope(RegistrationError):
class InvalidMemberType(RegistrationError):
class MemberTypeNotAllowed(RegistrationError):
class NoOptionNames(RegistrationError):
class OptionAlreadyRegistered(RegistrationError):
class OptionNameDash(RegistrationError):
class OptionNameDoubleDash(RegistrationError):
class PassthroughType(RegistrationError):
class ParseError(OptionsError):
class BooleanConversionError(ParseError):
class FromfileError(ParseError):
class MutuallyExclusiveOptionError(ParseError):
class UnknownFlagsError(ParseError):
def __init__(self, flags: Tuple[str, ...], arg_scope: str):
self.flags = flags
self.arg_scope = arg_scope
scope = f"scope {self.arg_scope}" if self.arg_scope else "global scope"
msg = f"Unknown flags {', '.join(self.flags)} on {scope}"
super().__init__(msg)
class ConfigError(OptionsError):
class ConfigValidationError(ConfigError):
class NoSectionError(ConfigError):
def __init__(self, section: str):
super().__init__(f"No section: {section}")
class NoOptionError(ConfigError):
def __init__(self, option: str, section: str):
super().__init__(f"No option {option} in section {section}")
class InterpolationMissingOptionError(ConfigError):
def __init__(self, option, section, rawval, reference):
super().__init__(
self,
f"Bad value substitution: option {option} in section {section} contains an "
f"interpolation key {reference} which is not a valid option name. Raw value: {rawval}",
)
| true | true |
f71d5c7dea7fb36ad7b6764aa8378fb2265ca95a | 21 | py | Python | python/testData/types/CopyDotCopy/copy.py | truthiswill/intellij-community | fff88cfb0dc168eea18ecb745d3e5b93f57b0b95 | [
"Apache-2.0"
] | 2 | 2019-04-28T07:48:50.000Z | 2020-12-11T14:18:08.000Z | python/testData/types/CopyDotCopy/copy.py | truthiswill/intellij-community | fff88cfb0dc168eea18ecb745d3e5b93f57b0b95 | [
"Apache-2.0"
] | 173 | 2018-07-05T13:59:39.000Z | 2018-08-09T01:12:03.000Z | python/testData/types/CopyDotCopy/copy.py | truthiswill/intellij-community | fff88cfb0dc168eea18ecb745d3e5b93f57b0b95 | [
"Apache-2.0"
] | 2 | 2020-03-15T08:57:37.000Z | 2020-04-07T04:48:14.000Z | def copy(x):
pass | 10.5 | 12 | 0.571429 | def copy(x):
pass | true | true |
f71d5cdf640218fcea7f8ea7e9d1c86192c50c7b | 4,371 | py | Python | projects/speedup/centerX2onnx.py | donnyyou/centerX | 6e381cb669a6014d02e31a43915271237690531c | [
"Apache-2.0"
] | 350 | 2020-12-01T09:55:16.000Z | 2020-12-23T13:47:43.000Z | projects/speedup/centerX2onnx.py | powerlic/centerX | 1073753533f26483c3ab053a7d8753708fcacde7 | [
"Apache-2.0"
] | 39 | 2020-12-24T13:42:29.000Z | 2022-02-10T01:09:56.000Z | projects/speedup/centerX2onnx.py | powerlic/centerX | 1073753533f26483c3ab053a7d8753708fcacde7 | [
"Apache-2.0"
] | 49 | 2020-12-01T11:39:14.000Z | 2020-12-21T01:45:39.000Z | from types import MethodType
import onnx
import torch
from torch.onnx import OperatorExportTypes
from onnxsim import simplify
import argparse
import io
import sys
import torch.nn as nn
sys.path.insert(0, '.')
from configs import add_centernet_config
from detectron2.config import get_cfg
from inference.centernet import build_model
from detectron2.checkpoint import DetectionCheckpointer
from fvcore.common.file_io import PathManager
def centerX_forward(self, x):
    """Export-friendly forward pass, bound onto the model via MethodType.

    Scales the input from [0, 255], normalizes it, runs the network, and
    suppresses heatmap values that are not 3x3-local maxima (max-pool NMS).
    Returns the three head outputs as a list because ONNX export cannot
    handle dict outputs.
    """
    normalized = self.normalizer(x / 255.)
    heads = self._forward(normalized)
    heatmap = heads['cls']
    # A pixel survives only if it equals the max of its 3x3 neighbourhood:
    # (heatmap - local_max) is ~0 at peaks and negative elsewhere; the tiny
    # epsilon keeps peaks positive through the ReLU, and the 1e9 factor
    # rescales the surviving epsilon back to ~1.
    local_max = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)(heatmap)
    mask = nn.ReLU()((heatmap - local_max).float() + 1e-9) * 1e9
    # change dict to list
    return [heatmap * mask, heads['reg'], heads['wh']]
def load_model(config_file, model_path):
    """Build a CenterNet model from a config file and load its weights.

    The model's forward method is replaced with the export-friendly
    ``centerX_forward`` (list output instead of dict). Returns the model in
    eval mode, moved to GPU.
    """
    cfg = get_cfg()
    add_centernet_config(cfg)
    cfg.merge_from_file(config_file)
    net = build_model(cfg)
    # Swap in the ONNX-exportable forward pass.
    net.forward = MethodType(centerX_forward, net)
    DetectionCheckpointer(net).load(model_path)
    net.eval()
    net.cuda()
    return net
def get_parser():
    """Build the command-line parser for the Pytorch-to-ONNX conversion script.

    Returns:
        argparse.ArgumentParser with config/model paths, output naming, and
        input image dimensions (defaults: 640x384).
    """
    parser = argparse.ArgumentParser(description="Convert Pytorch to ONNX model")
    # Paths to the training config and the checkpoint to convert.
    parser.add_argument("--config-file", metavar="FILE", help="path to config file")
    parser.add_argument("--model-path", metavar="FILE", help="path to model")
    # Where and under what name the converted model is written.
    parser.add_argument("--name", default="baseline", help="name for converted model")
    parser.add_argument("--output", default='onnx_model', help='path to save converted onnx model')
    # Spatial size of the dummy tracing input.
    parser.add_argument("--input_w", default=640, type=int, help='image_width')
    parser.add_argument("--input_h", default=384, type=int, help='image_height')
    return parser
def remove_initializer_from_input(model):
    """Strip graph inputs that are actually initializers (stored weights).

    ONNX IR versions below 4 require initializers to also appear as graph
    inputs; newer versions do not, and some runtimes reject the duplication.

    Args:
        model: an ONNX ModelProto (duck-typed: needs ``ir_version`` and
            ``graph.input`` / ``graph.initializer`` with ``name`` fields).

    Returns:
        The same model object with initializer-backed inputs removed.
        Models with ir_version < 4 are returned unchanged — the original
        code returned None here, which made the caller save None.
    """
    if model.ir_version < 4:
        print(
            'Model with ir_version below 4 requires to include initilizer in graph input'
        )
        return model

    inputs = model.graph.input
    # Avoid shadowing the builtin `input`; map input names to their protos.
    name_to_input = {graph_input.name: graph_input for graph_input in inputs}
    for initializer in model.graph.initializer:
        if initializer.name in name_to_input:
            inputs.remove(name_to_input[initializer.name])
    return model
def export_onnx_model(model, inputs):
    """Trace *model* with *inputs* and return an optimized in-memory ONNX model.

    Args:
        model (nn.Module): network to export; every submodule must already
            be in eval mode.
        inputs (torch.Tensor): example input; the model will be called by
            `model(*inputs)`.

    Returns:
        An ONNX model with constant extraction, unused-initializer
        elimination, and BN-into-conv fusion applied.
    """
    assert isinstance(model, torch.nn.Module)

    # Exporting a training-mode module would bake in the wrong behaviour
    # (e.g. dropout active), so insist every submodule is in eval mode.
    def _assert_eval(module):
        assert not module.training

    model.apply(_assert_eval)

    # Export into an in-memory buffer, then parse it back into a ModelProto.
    with torch.no_grad(), io.BytesIO() as buffer:
        torch.onnx.export(
            model,
            inputs,
            buffer,
            operator_export_type=OperatorExportTypes.ONNX_ATEN_FALLBACK,
        )
        onnx_model = onnx.load_from_string(buffer.getvalue())

    # Apply a fixed set of ONNX optimization passes; fail loudly if this
    # onnx build does not provide one of them.
    passes = ["extract_constant_to_initializer", "eliminate_unused_initializer", "fuse_bn_into_conv"]
    assert all(p in onnx.optimizer.get_available_passes() for p in passes)
    return onnx.optimizer.optimize(onnx_model, passes)
if __name__ == '__main__':
    # Parse CLI flags, build the CenterNet model with the export-friendly
    # forward, and trace it with a dummy GPU tensor of the requested size.
    args = get_parser().parse_args()
    model = load_model(args.config_file, args.model_path)
    inputs = torch.randn(1, 3, args.input_h, args.input_w).cuda()
    onnx_model = export_onnx_model(model, inputs)
    # Simplify the graph (onnx-simplifier) and drop initializer-backed
    # graph inputs before saving.
    model_simp, check = simplify(onnx_model)
    model_simp = remove_initializer_from_input(model_simp)
    # NOTE(review): the validity check is asserted after model_simp has
    # already been post-processed — presumably safe, but worth confirming.
    assert check, "Simplified ONNX model could not be validated"
    PathManager.mkdirs(args.output)
    onnx.save_model(model_simp, f"{args.output}/{args.name}.onnx")
    print(f"Export onnx model in {args.output} successfully!")
| 28.019231 | 101 | 0.653397 | from types import MethodType
import onnx
import torch
from torch.onnx import OperatorExportTypes
from onnxsim import simplify
import argparse
import io
import sys
import torch.nn as nn
sys.path.insert(0, '.')
from configs import add_centernet_config
from detectron2.config import get_cfg
from inference.centernet import build_model
from detectron2.checkpoint import DetectionCheckpointer
from fvcore.common.file_io import PathManager
def centerX_forward(self, x):
x = self.normalizer(x / 255.)
y = self._forward(x)
fmap_max = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)(y['cls'])
keep = (y['cls'] - fmap_max).float() + 1e-9
keep = nn.ReLU()(keep)
keep = keep * 1e9
result = y['cls'] * keep
ret = [result,y['reg'],y['wh']] load_model(config_file,model_path):
cfg = get_cfg()
add_centernet_config(cfg)
cfg.merge_from_file(config_file)
forward = {'centerX': centerX_forward}
model = build_model(cfg)
model.forward = MethodType(forward['centerX'], model)
DetectionCheckpointer(model).load(model_path)
model.eval()
model.cuda()
return model
def get_parser():
parser = argparse.ArgumentParser(description="Convert Pytorch to ONNX model")
parser.add_argument(
"--config-file",
metavar="FILE",
help="path to config file",
)
parser.add_argument(
"--model-path",
metavar="FILE",
help="path to model",
)
parser.add_argument(
"--name",
default="baseline",
help="name for converted model"
)
parser.add_argument(
"--output",
default='onnx_model',
help='path to save converted onnx model'
)
parser.add_argument(
"--input_w",
default=640,
type=int,
help='image_width'
)
parser.add_argument(
"--input_h",
default=384,
type=int,
help='image_height'
)
return parser
def remove_initializer_from_input(model):
if model.ir_version < 4:
print(
'Model with ir_version below 4 requires to include initilizer in graph input'
)
return
inputs = model.graph.input
name_to_input = {}
for input in inputs:
name_to_input[input.name] = input
for initializer in model.graph.initializer:
if initializer.name in name_to_input:
inputs.remove(name_to_input[initializer.name])
return model
def export_onnx_model(model, inputs):
assert isinstance(model, torch.nn.Module)
def _check_eval(module):
assert not module.training
model.apply(_check_eval)
with torch.no_grad():
with io.BytesIO() as f:
torch.onnx.export(
model,
inputs,
f,
operator_export_type=OperatorExportTypes.ONNX_ATEN_FALLBACK,
onnx_model = onnx.load_from_string(f.getvalue())
all_passes = onnx.optimizer.get_available_passes()
passes = ["extract_constant_to_initializer", "eliminate_unused_initializer", "fuse_bn_into_conv"]
assert all(p in all_passes for p in passes)
onnx_model = onnx.optimizer.optimize(onnx_model, passes)
return onnx_model
if __name__ == '__main__':
args = get_parser().parse_args()
model = load_model(args.config_file, args.model_path)
inputs = torch.randn(1, 3, args.input_h, args.input_w).cuda()
onnx_model = export_onnx_model(model, inputs)
model_simp, check = simplify(onnx_model)
model_simp = remove_initializer_from_input(model_simp)
assert check, "Simplified ONNX model could not be validated"
PathManager.mkdirs(args.output)
onnx.save_model(model_simp, f"{args.output}/{args.name}.onnx")
print(f"Export onnx model in {args.output} successfully!")
| true | true |
f71d5cefc3da853120a2daae786970fa0e13e6a6 | 5,995 | py | Python | basis_set_exchange/cli/bsecurate_cli.py | atomse/basis_set_exchange | 7ffd64082c14d2f61eb43f1c2d44792e8b0e394e | [
"BSD-3-Clause"
] | null | null | null | basis_set_exchange/cli/bsecurate_cli.py | atomse/basis_set_exchange | 7ffd64082c14d2f61eb43f1c2d44792e8b0e394e | [
"BSD-3-Clause"
] | null | null | null | basis_set_exchange/cli/bsecurate_cli.py | atomse/basis_set_exchange | 7ffd64082c14d2f61eb43f1c2d44792e8b0e394e | [
"BSD-3-Clause"
] | null | null | null | '''
Command line interface for the basis set exchange
'''
import argparse
import argcomplete
from .. import version
from .bsecurate_handlers import bsecurate_cli_handle_subcmd
from .check import cli_check_normalize_args
from .complete import cli_case_insensitive_validator, cli_bsname_completer, cli_readerfmt_completer
def run_bsecurate_cli():
    """Entry point for the ``bsecurate`` command-line tool.

    Builds the argparse parser (global options plus one subparser per
    curation subcommand), sets up shell autocompletion, parses and
    validates the arguments, dispatches to the matching handler, and
    writes the handler's output either to the file given via
    ``-o``/``--output`` or to stdout.

    Returns:
        int: 0 on success (errors propagate as exceptions).
    """
    ################################################################################################
    # NOTE: I am deliberately not using the 'choices' argument in add_argument. I could use it
    # for formats, etc, however I wouldn't want to use it for basis set names. Therefore, I handle
    # all of that manually so that error output is consistent and clean
    ################################################################################################
    ########################################
    # Main global options
    ########################################
    parser = argparse.ArgumentParser(description='Description of your program')
    parser.add_argument('-V', action='version', version='basis_set_exchange ' + version())
    parser.add_argument('-d', '--data-dir', metavar='PATH', help='Override which data directory to use')
    parser.add_argument('-o', '--output', metavar='PATH', help='Output to given file rather than stdout')
    subparsers = parser.add_subparsers(metavar='subcommand', dest='subcmd')
    subparsers.required = True # https://bugs.python.org/issue9253#msg186387
    ########################################
    # Listing of general info and metadata
    ########################################
    # elements-in-files
    subp = subparsers.add_parser('elements-in-files', help='For a list of JSON files, output what elements are in each file')
    subp.add_argument('files', nargs='+', help='List of files to inspect')
    # component-file-refs
    subp = subparsers.add_parser('component-file-refs', help='For a list of component JSON files, output what elements/references are in each file')
    subp.add_argument('files', nargs='+', help='List of files to inspect')
    ########################################
    # Printing data
    ########################################
    # print-component-file
    subp = subparsers.add_parser('print-component-file', help='(Pretty) print the contents of a component file')
    subp.add_argument('file', help='File to print')
    subp.add_argument('--elements', help='Which elements of the basis set to output. Default is all defined in the given basis')
    ########################################
    # Manipulating basis set data
    ########################################
    # make-diff
    subp = subparsers.add_parser('make-diff', help='Find/Store the differences between two groups of files')
    subp.add_argument('-l', '--left', nargs='+', required=True, help='Base JSON files')
    subp.add_argument('-r', '--right', nargs='+', required=True, help='JSON files with data to subtract from the base files')
    ########################################
    # Comparing
    ########################################
    # compare-basis-sets
    subp = subparsers.add_parser('compare-basis-sets', help='Compare two basis sets in the data directory')
    subp.add_argument('basis1', help='First basis set to compare').completer = cli_bsname_completer
    subp.add_argument('basis2', help='Second basis set to compare').completer = cli_bsname_completer
    subp.add_argument('--version1', help='Version of the first basis set to compare with. Default is latest')
    subp.add_argument('--version2', help='Version of the second basis set to compare with. Default is latest')
    subp.add_argument('--uncontract-general', action='store_true', help='Remove general contractions before comparing')
    # compare-basis-files
    subp = subparsers.add_parser('compare-basis-files', help='Compare two formatted basis set files')
    subp.add_argument('file1', help='First basis set file to compare')
    subp.add_argument('file2', help='Second basis set file to compare')
    subp.add_argument('--readfmt1', help='Override the file format of file 1').completer = cli_readerfmt_completer
    subp.add_argument('--readfmt2', help='Override the file format of file 2').completer = cli_readerfmt_completer
    subp.add_argument('--uncontract-general', action='store_true', help='Remove general contractions before comparing')
    ########################################
    # Making graphs
    ########################################
    # view-graph
    subp = subparsers.add_parser('view-graph', help='View a file graph for a basis set')
    subp.add_argument('basis', help='Name of the basis set inspect').completer = cli_bsname_completer
    subp.add_argument('--version', help='Which version of the basis set to inspect. Default is the latest version')
    # make-graph-file
    subp = subparsers.add_parser('make-graph-file', help='Make a dot file (and png file) ofr a basis set file graph')
    subp.add_argument('basis', help='Name of the basis set inspect').completer = cli_bsname_completer
    subp.add_argument('outfile', help='Output DOT file to create')
    subp.add_argument('--render', action='store_true', help='Render the DOT file into a corresponding png file')
    subp.add_argument('--version', help='Which version of the basis set to inspect. Default is the latest version')
    #############################
    # DONE WITH SUBCOMMANDS
    #############################
    # setup autocomplete
    argcomplete.autocomplete(parser, validator=cli_case_insensitive_validator)
    # Now parse and handle the args
    args = parser.parse_args()
    # Check and make sure basis sets, roles, etc, are valid
    args = cli_check_normalize_args(args)
    # Actually generate the output
    output = bsecurate_cli_handle_subcmd(args)
    if args.output:
        with open(args.output, 'w', encoding='utf-8') as outfile:
            outfile.write(output)
    elif output:
        # Don't print if output is empty
        print(output)
    return 0
| 52.130435 | 148 | 0.629191 |
import argparse
import argcomplete
from .. import version
from .bsecurate_handlers import bsecurate_cli_handle_subcmd
from .check import cli_check_normalize_args
from .complete import cli_case_insensitive_validator, cli_bsname_completer, cli_readerfmt_completer
def run_bsecurate_cli():
ding png file')
subp.add_argument('--version', help='Which version of the basis set to inspect. Default is the latest version')
#############################
# DONE WITH SUBCOMMANDS
#############################
# setup autocomplete
argcomplete.autocomplete(parser, validator=cli_case_insensitive_validator)
# Now parse and handle the args
args = parser.parse_args()
# Check and make sure basis sets, roles, etc, are valid
args = cli_check_normalize_args(args)
# Actually generate the output
output = bsecurate_cli_handle_subcmd(args)
if args.output:
with open(args.output, 'w', encoding='utf-8') as outfile:
outfile.write(output)
elif output:
# Don't print if output is empty
print(output)
return 0
| true | true |
f71d5cf776ccdf11c1fcda67f525c795c730a062 | 1,006 | py | Python | listWmflabsdotorgRecordsets.py | Krenair/wmcs-misc-scripts | ae91b5756d73a9405634df9d6c32e98f21b97d5f | [
"Apache-2.0"
] | null | null | null | listWmflabsdotorgRecordsets.py | Krenair/wmcs-misc-scripts | ae91b5756d73a9405634df9d6c32e98f21b97d5f | [
"Apache-2.0"
] | null | null | null | listWmflabsdotorgRecordsets.py | Krenair/wmcs-misc-scripts | ae91b5756d73a9405634df9d6c32e98f21b97d5f | [
"Apache-2.0"
] | null | null | null | import yaml
from keystoneclient.session import Session as KeystoneSession
from keystoneclient.auth.identity.v3 import Password as KeystonePassword
from keystoneclient.v3 import Client as KeystoneClient
from designateclient.v2 import client as designateclient
def get_keystone_session(project):
    """Build a Keystone session for *project* using the novaobserver account.

    Credentials are read from the local ``novaobserver_password`` file.

    Bug fix: the original passed ``open('novaobserver_password').read()``
    inline, leaking the file handle; it is now closed via a context manager.

    Args:
        project (str): Keystone project name to authenticate against.

    Returns:
        keystoneclient.session.Session bound to the given project.
    """
    with open('novaobserver_password') as password_file:
        password = password_file.read()
    return KeystoneSession(auth=KeystonePassword(
        auth_url="http://cloudcontrol1003.wikimedia.org:5000/v3",
        username="novaobserver",
        password=password,
        project_name=project,
        user_domain_name='default',
        project_domain_name='default'
    ))
# List DNS recordsets in the wmflabs.org zone, printing a wiki-table row
# for every recordset that is NOT a plain A record pointing at 185.15.56.49
# (i.e. anything that deviates from the default proxy target).
client = designateclient.Client(session=get_keystone_session('wmflabsdotorg'))
zone = client.zones.get('wmflabs.org.')
for recordset in client.recordsets.list(zone['id']):
    if recordset['type'] != 'A' or recordset['records'] != ['185.15.56.49']:
        print('|' + recordset['name'] + '|' + recordset['type'] + '|' + repr(recordset['records']) + '|' + repr(recordset['description']) + '|')
| 43.73913 | 144 | 0.717694 | import yaml
from keystoneclient.session import Session as KeystoneSession
from keystoneclient.auth.identity.v3 import Password as KeystonePassword
from keystoneclient.v3 import Client as KeystoneClient
from designateclient.v2 import client as designateclient
def get_keystone_session(project):
return KeystoneSession(auth=KeystonePassword(
auth_url="http://cloudcontrol1003.wikimedia.org:5000/v3",
username="novaobserver",
password=open('novaobserver_password').read(),
project_name=project,
user_domain_name='default',
project_domain_name='default'
))
client = designateclient.Client(session=get_keystone_session('wmflabsdotorg'))
zone = client.zones.get('wmflabs.org.')
for recordset in client.recordsets.list(zone['id']):
if recordset['type'] != 'A' or recordset['records'] != ['185.15.56.49']:
print('|' + recordset['name'] + '|' + recordset['type'] + '|' + repr(recordset['records']) + '|' + repr(recordset['description']) + '|')
| true | true |
f71d5e89c15f2a96ab7cbc7f7360399c1dc32ae8 | 895 | py | Python | run_models.py | rustygentile/hornet-model | d6c7dce26de241a17fd7534b0e98b6a0112bf67f | [
"MIT"
] | null | null | null | run_models.py | rustygentile/hornet-model | d6c7dce26de241a17fd7534b0e98b6a0112bf67f | [
"MIT"
] | null | null | null | run_models.py | rustygentile/hornet-model | d6c7dce26de241a17fd7534b0e98b6a0112bf67f | [
"MIT"
] | 1 | 2022-01-19T09:28:18.000Z | 2022-01-19T09:28:18.000Z | import logging
from src.prep_data import main as prep_data
from src.run_sims import run_aggressive_sim, run_conservative_sim
from src.regression import make_and_run_model as run_model
from src.coupled import make_and_run_model as run_coupled
__author__ = 'Rusty Gentile'
logger = logging.getLogger(__name__)
if __name__ == '__main__':
    # Configure root logging once for the whole pipeline run.
    logging.basicConfig(
        format='%(asctime)s %(levelname)-8s %(message)s',
        level=logging.INFO,
        datefmt='%Y-%m-%d %H:%M:%S')
    # Stage 1: prepare the input data.
    logger.info('Prepping data...')
    prep_data()
    # Stage 2: run both simulation scenarios.
    logger.info('Starting simulations...')
    run_conservative_sim()
    run_aggressive_sim()
    # Stage 3: regression models per year, then coupled models that combine
    # a regression year with the aggressive simulation results.
    # NOTE(review): 2019/2020 appear to be data years and 2023 a projection
    # horizon — confirm against src.regression / src.coupled.
    logger.info('Starting regression models...')
    run_model(2019)
    run_model(2020)
    run_coupled(2019, './data/results/results_aggr_sim_1.csv', 2023)
    run_coupled(2020, './data/results/results_aggr_sim_1.csv', 2023)
| 28.870968 | 69 | 0.694972 | import logging
from src.prep_data import main as prep_data
from src.run_sims import run_aggressive_sim, run_conservative_sim
from src.regression import make_and_run_model as run_model
from src.coupled import make_and_run_model as run_coupled
__author__ = 'Rusty Gentile'
logger = logging.getLogger(__name__)
if __name__ == '__main__':
logging.basicConfig(
format='%(asctime)s %(levelname)-8s %(message)s',
level=logging.INFO,
datefmt='%Y-%m-%d %H:%M:%S')
logger.info('Prepping data...')
prep_data()
logger.info('Starting simulations...')
run_conservative_sim()
run_aggressive_sim()
logger.info('Starting regression models...')
run_model(2019)
run_model(2020)
run_coupled(2019, './data/results/results_aggr_sim_1.csv', 2023)
run_coupled(2020, './data/results/results_aggr_sim_1.csv', 2023)
| true | true |
f71d61aa74de71397274ca14541acb7ad4c127e6 | 4,177 | py | Python | scraper/scraper.py | SebChw/IsMusicANaturalLanguage | 9cb245f9bea6c0f93863920fceeea867efa73ded | [
"MIT"
] | null | null | null | scraper/scraper.py | SebChw/IsMusicANaturalLanguage | 9cb245f9bea6c0f93863920fceeea867efa73ded | [
"MIT"
] | null | null | null | scraper/scraper.py | SebChw/IsMusicANaturalLanguage | 9cb245f9bea6c0f93863920fceeea867efa73ded | [
"MIT"
] | null | null | null | from bs4 import BeautifulSoup
import requests
from selenium import webdriver
from selenium.webdriver.common.by import By
import os
#! the very same functions are in scraper_artist.py. I just didn't want to make any dependencies with such simple scripts
def find_sublinks(artist_link):
    """Collect every paginated song-list page reachable from an artist page.

    Artists with many songs are split across several pages, e.g. from
    freemidi/queen_1 this follows the pagination bar to find
    [freemidi/queen_1, freemidi/queen_2, ..., freemidi/queen_n].

    Args:
        artist_link (str): relative link to the artist's home page.

    Returns:
        list: the artist link itself plus every pagination sub-link.
    """
    # The base URL is hard-coded, so this only works for freemidi.org.
    page = requests.get(f"https://freemidi.org/{artist_link}")
    soup = BeautifulSoup(page.content, "html.parser")
    pagination = soup.find(class_="pagination")
    # "#" hrefs are inert placeholders in the pagination widget, not pages.
    extra_pages = [anchor["href"] for anchor in pagination.find_all("a") if anchor["href"] != "#"]
    return [artist_link] + extra_pages
def clean_dir(path):
    """Remove duplicate downloads from *path*.

    Repeated downloads get saved as "name (1).mid", "name (2).mid", etc.,
    so any filename containing "(" is treated as a duplicate and deleted.
    """
    duplicates = [name for name in os.listdir(path) if "(" in name]
    for name in duplicates:
        os.remove(os.path.join(path, name))
# Ask which freemidi.org genre to scrape, then normalize the answer.
genre_name = input(
    "type in genre name (lowercase, no space, no special characters): ")
# Just in case someone doesn't respect the rules.
genre_name = genre_name.lower()
genre_name = genre_name.strip()
genre_name = "".join(genre_name.split(" "))
URL = f"https://freemidi.org/genre-{genre_name}"
genre_page = requests.get(URL) #we get the page containing all artist that are in that specific genre
genre_soup = BeautifulSoup(genre_page.content, "html.parser")
artists = genre_soup.find_all(class_="genre-link-text") # artist block has such class
# One output directory per genre, under the current working directory.
working_path = os.path.join(os.getcwd(), genre_name)
if not os.path.isdir(working_path):
    os.mkdir(working_path)
already_in = set(os.listdir(working_path)) #If we have some artist we will not download it once again
for artist in artists:
    artist_link = artist.find("a")["href"]
    # Artist links look like "download2-<id>-<artist-name>"; keep the name part.
    artist_name = "".join(artist_link.split("-")[2:])
    if artist_name in already_in:
        print(f"Skipping {artist_name}")
        continue
    URL = f"https://freemidi.org/{artist_link}"
    artist_page = requests.get(URL)
    artist_soup = soup = BeautifulSoup(artist_page.content, "html.parser")
    #! In that script I'm also making sure that artist is associated to only one genre. To omit situatoion when same song is in rock and country.
    #! Unfortunately tagging with genres is not good when artist has more than 2 tags.
    genres = artist_soup.find(class_="col-md-12").find_all("a")
    num_of_genres = 0
    for a in genres:
        if "genre" in a['href']:
            num_of_genres += 1
    # Skip multi-genre artists so the same song never lands in two genres.
    if num_of_genres > 1:
        continue
    working_path_artist = os.path.join(working_path, artist_name)
    if not os.path.isdir(working_path_artist):
        os.mkdir(working_path_artist)
    # Configure Chrome to auto-download MIDI files into the artist folder.
    options = webdriver.ChromeOptions()
    prefs = {"download.default_directory": working_path_artist}
    options.add_experimental_option("prefs", prefs)
    driver = webdriver.Chrome(
        executable_path='../chromedriver', chrome_options=options)
    #Here we just iterate over all pages with songs of given artist and try to get all songs from there
    for a_link in find_sublinks(artist_link):
        URL = f"https://freemidi.org/{a_link}"
        artist_page = requests.get(URL)
        artist_soup = soup = BeautifulSoup(artist_page.content, "html.parser")
        songs = artist_soup.find_all(class_="artist-song-cell")
        for song in songs:
            print(song)
            link = song.find("a")["href"]
            try:
                driver.get(f"https://freemidi.org/{link}")
                gotit = driver.find_element(By.ID, 'downloadmidi')
                gotit.click()
            # NOTE(review): bare except deliberately skips any song whose
            # download page fails to load or lacks the download button.
            except:
                continue
    # Drop browser-generated "name (N).mid" duplicates for this artist.
    clean_dir(working_path_artist)
| 32.632813 | 145 | 0.681111 | from bs4 import BeautifulSoup
import requests
from selenium import webdriver
from selenium.webdriver.common.by import By
import os
def find_sublinks(artist_link):
links = [artist_link]
URL = f"https://freemidi.org/{artist_link}" # as it's written it works only for freemidi page
artist_page = requests.get(URL)
artist_soup = BeautifulSoup(artist_page.content, "html.parser")
for a in artist_soup.find(class_="pagination").find_all("a"):
link = a["href"]
if link != "#":
links.append(link)
return links
def clean_dir(path):
for file in os.listdir(path):
if "(" in file:
os.remove(os.path.join(path, file))
genre_name = input(
"type in genre name (lowercase, no space, no special characters): ")
genre_name = genre_name.lower()
genre_name = genre_name.strip()
genre_name = "".join(genre_name.split(" "))
URL = f"https://freemidi.org/genre-{genre_name}"
genre_page = requests.get(URL) #we get the page containing all artist that are in that specific genre
genre_soup = BeautifulSoup(genre_page.content, "html.parser")
artists = genre_soup.find_all(class_="genre-link-text") # artist block has such class
working_path = os.path.join(os.getcwd(), genre_name)
if not os.path.isdir(working_path):
os.mkdir(working_path)
already_in = set(os.listdir(working_path)) #If we have some artist we will not download it once again
for artist in artists:
artist_link = artist.find("a")["href"]
artist_name = "".join(artist_link.split("-")[2:])
if artist_name in already_in:
print(f"Skipping {artist_name}")
continue
URL = f"https://freemidi.org/{artist_link}"
artist_page = requests.get(URL)
artist_soup = soup = BeautifulSoup(artist_page.content, "html.parser")
#! In that script I'm also making sure that artist is associated to only one genre. To omit situatoion when same song is in rock and country.
genres = artist_soup.find(class_="col-md-12").find_all("a")
num_of_genres = 0
for a in genres:
if "genre" in a['href']:
num_of_genres += 1
if num_of_genres > 1:
continue
working_path_artist = os.path.join(working_path, artist_name)
if not os.path.isdir(working_path_artist):
os.mkdir(working_path_artist)
options = webdriver.ChromeOptions()
prefs = {"download.default_directory": working_path_artist}
options.add_experimental_option("prefs", prefs)
driver = webdriver.Chrome(
executable_path='../chromedriver', chrome_options=options)
for a_link in find_sublinks(artist_link):
URL = f"https://freemidi.org/{a_link}"
artist_page = requests.get(URL)
artist_soup = soup = BeautifulSoup(artist_page.content, "html.parser")
songs = artist_soup.find_all(class_="artist-song-cell")
for song in songs:
print(song)
link = song.find("a")["href"]
try:
driver.get(f"https://freemidi.org/{link}")
gotit = driver.find_element(By.ID, 'downloadmidi')
gotit.click()
except:
continue
clean_dir(working_path_artist)
| true | true |
f71d63616367a7865c7b5676a10efc4ac37adc93 | 1,877 | py | Python | modules/readers/vtkStructPtsRDR.py | chrisidefix/devide | 99bfe156e710fa47ba7ae88b0ce1eef592a3a439 | [
"BSD-3-Clause"
] | 25 | 2015-08-24T16:05:14.000Z | 2020-12-09T20:07:14.000Z | modules/readers/vtkStructPtsRDR.py | chrisidefix/devide | 99bfe156e710fa47ba7ae88b0ce1eef592a3a439 | [
"BSD-3-Clause"
] | 1 | 2016-02-16T21:18:10.000Z | 2016-02-16T21:18:10.000Z | modules/readers/vtkStructPtsRDR.py | chrisidefix/devide | 99bfe156e710fa47ba7ae88b0ce1eef592a3a439 | [
"BSD-3-Clause"
] | 5 | 2016-02-16T20:05:37.000Z | 2020-01-31T11:27:39.000Z | # $Id$
from module_base import ModuleBase
from module_mixins import FilenameViewModuleMixin
import module_utils
import vtk
class vtkStructPtsRDR(FilenameViewModuleMixin, ModuleBase):
    """DeVIDE reader module wrapping vtkStructuredPointsReader.

    Presents a filename-selection view and exposes the loaded
    vtkStructuredPoints dataset as its single output. As a pure source
    module it accepts no inputs.
    """

    def __init__(self, module_manager):
        # call parent constructor
        ModuleBase.__init__(self, module_manager)
        self._reader = vtk.vtkStructuredPointsReader()
        # ctor for this specific mixin: dialog title, file-type wildcard,
        # and the VTK objects to introspect in the view
        FilenameViewModuleMixin.__init__(
            self,
            'Select a filename',
            'VTK data (*.vtk)|*.vtk|All files (*)|*',
            {'vtkStructuredPointsReader': self._reader})
        # report reader progress through the module framework
        module_utils.setup_vtk_object_progress(
            self, self._reader,
            'Reading vtk structured points data')
        # set up some defaults
        self._config.filename = ''
        self.sync_module_logic_with_config()

    def close(self):
        # release the VTK reader before tearing down the mixin/view
        del self._reader
        FilenameViewModuleMixin.close(self)

    def get_input_descriptions(self):
        # source module: no inputs
        return ()

    def set_input(self, idx, input_stream):
        # a source module can never accept input
        raise Exception('vtkStructPtsRDR is a source module and accepts no input.')

    def get_output_descriptions(self):
        return ('vtkStructuredPoints',)

    def get_output(self, idx):
        # single output; idx is ignored
        return self._reader.GetOutput()

    def logic_to_config(self):
        # the reader returns None when no filename has been set yet;
        # normalize that to the empty string used by the config/view
        filename = self._reader.GetFileName()
        if filename is None:
            filename = ''
        self._config.filename = filename

    def config_to_logic(self):
        self._reader.SetFileName(self._config.filename)

    def view_to_config(self):
        self._config.filename = self._getViewFrameFilename()

    def config_to_view(self):
        self._setViewFrameFilename(self._config.filename)

    def execute_module(self):
        # get the vtkStructuredPointsReader to try and execute, but only
        # if a filename has actually been configured
        if len(self._reader.GetFileName()):
            self._reader.Update()
| 26.814286 | 60 | 0.640384 |
from module_base import ModuleBase
from module_mixins import FilenameViewModuleMixin
import module_utils
import vtk
class vtkStructPtsRDR(FilenameViewModuleMixin, ModuleBase):
def __init__(self, module_manager):
ModuleBase.__init__(self, module_manager)
self._reader = vtk.vtkStructuredPointsReader()
FilenameViewModuleMixin.__init__(
self,
'Select a filename',
'VTK data (*.vtk)|*.vtk|All files (*)|*',
{'vtkStructuredPointsReader': self._reader})
module_utils.setup_vtk_object_progress(
self, self._reader,
'Reading vtk structured points data')
self._config.filename = ''
self.sync_module_logic_with_config()
def close(self):
del self._reader
FilenameViewModuleMixin.close(self)
def get_input_descriptions(self):
return ()
def set_input(self, idx, input_stream):
raise Exception
def get_output_descriptions(self):
return ('vtkStructuredPoints',)
def get_output(self, idx):
return self._reader.GetOutput()
def logic_to_config(self):
filename = self._reader.GetFileName()
if filename == None:
filename = ''
self._config.filename = filename
def config_to_logic(self):
self._reader.SetFileName(self._config.filename)
def view_to_config(self):
self._config.filename = self._getViewFrameFilename()
def config_to_view(self):
self._setViewFrameFilename(self._config.filename)
def execute_module(self):
if len(self._reader.GetFileName()):
self._reader.Update()
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.