Columns (name: type):

hexsha: string | size: int64 | ext: string | lang: string |
max_stars_repo_path: string | max_stars_repo_name: string | max_stars_repo_head_hexsha: string | max_stars_repo_licenses: list | max_stars_count: int64 | max_stars_repo_stars_event_min_datetime: string | max_stars_repo_stars_event_max_datetime: string |
max_issues_repo_path: string | max_issues_repo_name: string | max_issues_repo_head_hexsha: string | max_issues_repo_licenses: list | max_issues_count: int64 | max_issues_repo_issues_event_min_datetime: string | max_issues_repo_issues_event_max_datetime: string |
max_forks_repo_path: string | max_forks_repo_name: string | max_forks_repo_head_hexsha: string | max_forks_repo_licenses: list | max_forks_count: int64 | max_forks_repo_forks_event_min_datetime: string | max_forks_repo_forks_event_max_datetime: string |
content: string | avg_line_length: float64 | max_line_length: int64 | alphanum_fraction: float64 |
qsc_code_num_words_quality_signal: int64 | qsc_code_num_chars_quality_signal: float64 | qsc_code_mean_word_length_quality_signal: float64 | qsc_code_frac_words_unique_quality_signal: float64 | qsc_code_frac_chars_top_2grams_quality_signal: float64 | qsc_code_frac_chars_top_3grams_quality_signal: float64 | qsc_code_frac_chars_top_4grams_quality_signal: float64 | qsc_code_frac_chars_dupe_5grams_quality_signal: float64 | qsc_code_frac_chars_dupe_6grams_quality_signal: float64 | qsc_code_frac_chars_dupe_7grams_quality_signal: float64 | qsc_code_frac_chars_dupe_8grams_quality_signal: float64 | qsc_code_frac_chars_dupe_9grams_quality_signal: float64 | qsc_code_frac_chars_dupe_10grams_quality_signal: float64 | qsc_code_frac_chars_replacement_symbols_quality_signal: float64 | qsc_code_frac_chars_digital_quality_signal: float64 | qsc_code_frac_chars_whitespace_quality_signal: float64 | qsc_code_size_file_byte_quality_signal: float64 | qsc_code_num_lines_quality_signal: float64 | qsc_code_num_chars_line_max_quality_signal: float64 | qsc_code_num_chars_line_mean_quality_signal: float64 | qsc_code_frac_chars_alphabet_quality_signal: float64 | qsc_code_frac_chars_comments_quality_signal: float64 | qsc_code_cate_xml_start_quality_signal: float64 | qsc_code_frac_lines_dupe_lines_quality_signal: float64 | qsc_code_cate_autogen_quality_signal: float64 | qsc_code_frac_lines_long_string_quality_signal: float64 | qsc_code_frac_chars_string_length_quality_signal: float64 | qsc_code_frac_chars_long_word_length_quality_signal: float64 | qsc_code_frac_lines_string_concat_quality_signal: float64 | qsc_code_cate_encoded_data_quality_signal: float64 | qsc_code_frac_chars_hex_words_quality_signal: float64 | qsc_code_frac_lines_prompt_comments_quality_signal: float64 | qsc_code_frac_lines_assert_quality_signal: float64 | qsc_codepython_cate_ast_quality_signal: float64 | qsc_codepython_frac_lines_func_ratio_quality_signal: float64 | qsc_codepython_cate_var_zero_quality_signal: bool | qsc_codepython_frac_lines_pass_quality_signal: float64 | qsc_codepython_frac_lines_import_quality_signal: float64 | qsc_codepython_frac_lines_simplefunc_quality_signal: float64 | qsc_codepython_score_lines_no_logic_quality_signal: float64 | qsc_codepython_frac_lines_print_quality_signal: float64 |
qsc_code_num_words: int64 | qsc_code_num_chars: int64 | qsc_code_mean_word_length: int64 | qsc_code_frac_words_unique: null | qsc_code_frac_chars_top_2grams: int64 | qsc_code_frac_chars_top_3grams: int64 | qsc_code_frac_chars_top_4grams: int64 | qsc_code_frac_chars_dupe_5grams: int64 | qsc_code_frac_chars_dupe_6grams: int64 | qsc_code_frac_chars_dupe_7grams: int64 | qsc_code_frac_chars_dupe_8grams: int64 | qsc_code_frac_chars_dupe_9grams: int64 | qsc_code_frac_chars_dupe_10grams: int64 | qsc_code_frac_chars_replacement_symbols: int64 | qsc_code_frac_chars_digital: int64 | qsc_code_frac_chars_whitespace: int64 | qsc_code_size_file_byte: int64 | qsc_code_num_lines: int64 | qsc_code_num_chars_line_max: int64 | qsc_code_num_chars_line_mean: int64 | qsc_code_frac_chars_alphabet: int64 | qsc_code_frac_chars_comments: int64 | qsc_code_cate_xml_start: int64 | qsc_code_frac_lines_dupe_lines: int64 | qsc_code_cate_autogen: int64 | qsc_code_frac_lines_long_string: int64 | qsc_code_frac_chars_string_length: int64 | qsc_code_frac_chars_long_word_length: int64 | qsc_code_frac_lines_string_concat: null | qsc_code_cate_encoded_data: int64 | qsc_code_frac_chars_hex_words: int64 | qsc_code_frac_lines_prompt_comments: int64 | qsc_code_frac_lines_assert: int64 | qsc_codepython_cate_ast: int64 | qsc_codepython_frac_lines_func_ratio: int64 | qsc_codepython_cate_var_zero: int64 | qsc_codepython_frac_lines_pass: int64 | qsc_codepython_frac_lines_import: int64 | qsc_codepython_frac_lines_simplefunc: int64 | qsc_codepython_score_lines_no_logic: int64 | qsc_codepython_frac_lines_print: int64 |
effective: string | hits: int64
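The rows below follow this schema. As an illustration only (the storage format here is an assumption, not something the table states), records exported as JSON Lines with these column names could be filtered like so:

```python
# Illustrative only: assumes the rows are also available as a JSON Lines file
# ("rows.jsonl" is a made-up name) with one object per line, keyed by the
# column names listed above.
import json


def iter_python_rows(path, min_alphanum=0.6):
    """Yield (repo, path, source) for rows passing a simple quality gate."""
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            row = json.loads(line)
            if row.get("lang") == "Python" and (row.get("alphanum_fraction") or 0) >= min_alphanum:
                yield row["max_stars_repo_name"], row["max_stars_repo_path"], row["content"]


# Example: count the qualifying rows.
# print(sum(1 for _ in iter_python_rows("rows.jsonl")))
```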
e6f36e3d6234b36ef09fd70fd1be755548b506ba | 37,741 | py | Python | tests/test_apis.py | hatzel/markdown-spoilers | 1964f298f0e8b99f1202d36ccc7d8cf7d613ad26 | ["BSD-3-Clause"] | 2 | 2020-06-21T12:02:58.000Z | 2020-09-02T15:21:19.000Z | tests/test_apis.py | hatzel/markdown-spoilers | 1964f298f0e8b99f1202d36ccc7d8cf7d613ad26 | ["BSD-3-Clause"] | null | null | null | tests/test_apis.py | hatzel/markdown-spoilers | 1964f298f0e8b99f1202d36ccc7d8cf7d613ad26 | ["BSD-3-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Python Markdown
A Python implementation of John Gruber's Markdown.
Documentation: https://python-markdown.github.io/
GitHub: https://github.com/Python-Markdown/markdown/
PyPI: https://pypi.org/project/Markdown/
Started by Manfred Stienstra (http://www.dwerg.net/).
Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
Currently maintained by Waylan Limberg (https://github.com/waylan),
Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later)
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
Copyright 2004 Manfred Stienstra (the original version)
License: BSD (see LICENSE.md for details).
Python-Markdown Regression Tests
================================
Tests of the various APIs with the python markdown lib.
"""
from __future__ import unicode_literals
import unittest
import sys
import os
import markdown
import warnings
from markdown.__main__ import parse_options
from logging import DEBUG, WARNING, CRITICAL
import yaml
import tempfile
from io import BytesIO
from xml.etree.ElementTree import ProcessingInstruction
PY3 = sys.version_info[0] == 3
if not PY3:
def bytes(string, encoding):
return string.encode(encoding)
class TestMarkdownBasics(unittest.TestCase):
""" Tests basics of the Markdown class. """
def setUp(self):
""" Create instance of Markdown. """
self.md = markdown.Markdown()
def testBlankInput(self):
""" Test blank input. """
self.assertEqual(self.md.convert(''), '')
def testWhitespaceOnly(self):
""" Test input of only whitespace. """
self.assertEqual(self.md.convert(' '), '')
def testSimpleInput(self):
""" Test simple input. """
self.assertEqual(self.md.convert('foo'), '<p>foo</p>')
def testInstanceExtension(self):
""" Test Extension loading with a class instance. """
from markdown.extensions.footnotes import FootnoteExtension
markdown.Markdown(extensions=[FootnoteExtension()])
def testEntryPointExtension(self):
""" Test Extension loading with an entry point. """
markdown.Markdown(extensions=['footnotes'])
def testDotNotationExtension(self):
""" Test Extension loading with Name (`path.to.module`). """
markdown.Markdown(extensions=['markdown.extensions.footnotes'])
def testDotNotationExtensionWithClass(self):
""" Test Extension loading with class name (`path.to.module:Class`). """
markdown.Markdown(extensions=['markdown.extensions.footnotes:FootnoteExtension'])
class TestConvertFile(unittest.TestCase):
""" Tests of ConvertFile. """
def setUp(self):
self.saved = sys.stdin, sys.stdout
sys.stdin = BytesIO(bytes('foo', encoding='utf-8'))
sys.stdout = BytesIO()
def tearDown(self):
sys.stdin, sys.stdout = self.saved
def getTempFiles(self, src):
""" Return the file names for two temp files. """
infd, infile = tempfile.mkstemp(suffix='.txt')
with os.fdopen(infd, 'w') as fp:
fp.write(src)
outfd, outfile = tempfile.mkstemp(suffix='.html')
return infile, outfile, outfd
def testFileNames(self):
infile, outfile, outfd = self.getTempFiles('foo')
markdown.markdownFromFile(input=infile, output=outfile)
with os.fdopen(outfd, 'r') as fp:
output = fp.read()
self.assertEqual(output, '<p>foo</p>')
def testFileObjects(self):
infile = BytesIO(bytes('foo', encoding='utf-8'))
outfile = BytesIO()
markdown.markdownFromFile(input=infile, output=outfile)
outfile.seek(0)
self.assertEqual(outfile.read().decode('utf-8'), '<p>foo</p>')
def testStdinStdout(self):
markdown.markdownFromFile()
sys.stdout.seek(0)
self.assertEqual(sys.stdout.read().decode('utf-8'), '<p>foo</p>')
class TestBlockParser(unittest.TestCase):
""" Tests of the BlockParser class. """
def setUp(self):
""" Create instance of BlockParser. """
self.parser = markdown.Markdown().parser
def testParseChunk(self):
""" Test BlockParser.parseChunk. """
root = markdown.util.etree.Element("div")
text = 'foo'
self.parser.parseChunk(root, text)
self.assertEqual(
markdown.serializers.to_xhtml_string(root),
"<div><p>foo</p></div>"
)
def testParseDocument(self):
""" Test BlockParser.parseDocument. """
lines = ['#foo', '', 'bar', '', ' baz']
tree = self.parser.parseDocument(lines)
self.assertIsInstance(tree, markdown.util.etree.ElementTree)
self.assertIs(markdown.util.etree.iselement(tree.getroot()), True)
self.assertEqual(
markdown.serializers.to_xhtml_string(tree.getroot()),
"<div><h1>foo</h1><p>bar</p><pre><code>baz\n</code></pre></div>"
)
class TestBlockParserState(unittest.TestCase):
""" Tests of the State class for BlockParser. """
def setUp(self):
self.state = markdown.blockparser.State()
def testBlankState(self):
""" Test State when empty. """
self.assertEqual(self.state, [])
def testSetSate(self):
""" Test State.set(). """
self.state.set('a_state')
self.assertEqual(self.state, ['a_state'])
self.state.set('state2')
self.assertEqual(self.state, ['a_state', 'state2'])
def testIsSate(self):
""" Test State.isstate(). """
self.assertEqual(self.state.isstate('anything'), False)
self.state.set('a_state')
self.assertEqual(self.state.isstate('a_state'), True)
self.state.set('state2')
self.assertEqual(self.state.isstate('state2'), True)
self.assertEqual(self.state.isstate('a_state'), False)
self.assertEqual(self.state.isstate('missing'), False)
def testReset(self):
""" Test State.reset(). """
self.state.set('a_state')
self.state.reset()
self.assertEqual(self.state, [])
self.state.set('state1')
self.state.set('state2')
self.state.reset()
self.assertEqual(self.state, ['state1'])
class TestHtmlStash(unittest.TestCase):
""" Test Markdown's HtmlStash. """
def setUp(self):
self.stash = markdown.util.HtmlStash()
self.placeholder = self.stash.store('foo')
def testSimpleStore(self):
""" Test HtmlStash.store. """
self.assertEqual(self.placeholder, self.stash.get_placeholder(0))
self.assertEqual(self.stash.html_counter, 1)
self.assertEqual(self.stash.rawHtmlBlocks, ['foo'])
def testStoreMore(self):
""" Test HtmlStash.store with additional blocks. """
placeholder = self.stash.store('bar')
self.assertEqual(placeholder, self.stash.get_placeholder(1))
self.assertEqual(self.stash.html_counter, 2)
self.assertEqual(
self.stash.rawHtmlBlocks,
['foo', 'bar']
)
def testReset(self):
""" Test HtmlStash.reset. """
self.stash.reset()
self.assertEqual(self.stash.html_counter, 0)
self.assertEqual(self.stash.rawHtmlBlocks, [])
class Item(object):
""" A dummy Registry item object for testing. """
def __init__(self, data):
self.data = data
def __repr__(self):
return repr(self.data)
def __eq__(self, other):
return self.data == other
class RegistryTests(unittest.TestCase):
""" Test the processor registry. """
def testCreateRegistry(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
self.assertEqual(len(r), 1)
self.assertIsInstance(r, markdown.util.Registry)
def testRegisterWithoutPriority(self):
r = markdown.util.Registry()
with self.assertRaises(TypeError):
r.register(Item('a'))
def testSortRegistry(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
r.register(Item('b'), 'b', 21)
r.register(Item('c'), 'c', 20.5)
self.assertEqual(len(r), 3)
self.assertEqual(list(r), ['b', 'c', 'a'])
def testIsSorted(self):
r = markdown.util.Registry()
self.assertIs(r._is_sorted, False)
r.register(Item('a'), 'a', 20)
list(r)
self.assertIs(r._is_sorted, True)
r.register(Item('b'), 'b', 21)
self.assertIs(r._is_sorted, False)
r['a']
self.assertIs(r._is_sorted, True)
r._is_sorted = False
r.get_index_for_name('a')
self.assertIs(r._is_sorted, True)
r._is_sorted = False
repr(r)
self.assertIs(r._is_sorted, True)
def testDeregister(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
r.register(Item('b'), 'b', 30)
r.register(Item('c'), 'c', 40)
self.assertEqual(len(r), 3)
r.deregister('b')
self.assertEqual(len(r), 2)
r.deregister('c', strict=False)
self.assertEqual(len(r), 1)
# deregister non-existent item with strict=False
r.deregister('d', strict=False)
self.assertEqual(len(r), 1)
with self.assertRaises(ValueError):
# deregister non-existent item with strict=True
r.deregister('e')
self.assertEqual(list(r), ['a'])
def testRegistryContains(self):
r = markdown.util.Registry()
item = Item('a')
r.register(item, 'a', 20)
self.assertIs('a' in r, True)
self.assertIn(item, r)
self.assertNotIn('b', r)
def testRegistryIter(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
r.register(Item('b'), 'b', 30)
self.assertEqual(list(r), ['b', 'a'])
def testRegistryGetItemByIndex(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
r.register(Item('b'), 'b', 30)
self.assertEqual(r[0], 'b')
self.assertEqual(r[1], 'a')
with self.assertRaises(IndexError):
r[3]
def testRegistryGetItemByItem(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
r.register(Item('b'), 'b', 30)
self.assertEqual(r['a'], 'a')
self.assertEqual(r['b'], 'b')
with self.assertRaises(KeyError):
r['c']
def testRegistrySetItem(self):
r = markdown.util.Registry()
with self.assertRaises(TypeError):
r[0] = 'a'
# TODO: restore this when deprecated __setitem__ is removed.
# with self.assertRaises(TypeError):
# r['a'] = 'a'
# TODO: remove this when deprecated __setitem__ is removed.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
r['a'] = Item('a')
self.assertEqual(list(r), ['a'])
r['b'] = Item('b')
self.assertEqual(list(r), ['a', 'b'])
r['a'] = Item('a1')
self.assertEqual(list(r), ['a1', 'b'])
# Check the warnings
self.assertEqual(len(w), 3)
self.assertTrue(all(issubclass(x.category, DeprecationWarning) for x in w))
def testRegistryDelItem(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
with self.assertRaises(TypeError):
del r[0]
# TODO: restore this when deprecated __del__ is removed.
# with self.assertRaises(TypeError):
# del r['a']
# TODO: remove this when deprecated __del__ is removed.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
r.register(Item('b'), 'b', 15)
r.register(Item('c'), 'c', 10)
del r['b']
self.assertEqual(list(r), ['a', 'c'])
del r['a']
self.assertEqual(list(r), ['c'])
with self.assertRaises(TypeError):
del r['badname']
del r['c']
self.assertEqual(list(r), [])
# Check the warnings
self.assertEqual(len(w), 3)
self.assertTrue(all(issubclass(x.category, DeprecationWarning) for x in w))
def testRegistrySlice(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
r.register(Item('b'), 'b', 30)
r.register(Item('c'), 'c', 40)
slc = r[1:]
self.assertEqual(len(slc), 2)
self.assertIsInstance(slc, markdown.util.Registry)
self.assertEqual(list(slc), ['b', 'a'])
def testGetIndexForName(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
r.register(Item('b'), 'b', 30)
self.assertEqual(r.get_index_for_name('a'), 1)
self.assertEqual(r.get_index_for_name('b'), 0)
with self.assertRaises(ValueError):
r.get_index_for_name('c')
def testRegisterDupplicate(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
r.register(Item('b1'), 'b', 10)
self.assertEqual(list(r), ['a', 'b1'])
self.assertEqual(len(r), 2)
r.register(Item('b2'), 'b', 30)
self.assertEqual(len(r), 2)
self.assertEqual(list(r), ['b2', 'a'])
def testRegistryDeprecatedAdd(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
r = markdown.util.Registry()
# Add first item
r.add('c', Item('c'), '_begin')
self.assertEqual(list(r), ['c'])
# Added to beginning
r.add('b', Item('b'), '_begin')
self.assertEqual(list(r), ['b', 'c'])
# Add before first item
r.add('a', Item('a'), '<b')
self.assertEqual(list(r), ['a', 'b', 'c'])
# Add before non-first item
r.add('a1', Item('a1'), '<b')
self.assertEqual(list(r), ['a', 'a1', 'b', 'c'])
# Add after non-last item
r.add('b1', Item('b1'), '>b')
self.assertEqual(list(r), ['a', 'a1', 'b', 'b1', 'c'])
# Add after last item
r.add('d', Item('d'), '>c')
self.assertEqual(list(r), ['a', 'a1', 'b', 'b1', 'c', 'd'])
# Add to end
r.add('e', Item('e'), '_end')
self.assertEqual(list(r), ['a', 'a1', 'b', 'b1', 'c', 'd', 'e'])
with self.assertRaises(ValueError):
r.add('f', Item('f'), 'badlocation')
# Check the warnings
self.assertEqual(len(w), 7)
self.assertTrue(all(issubclass(x.category, DeprecationWarning) for x in w))
class TestErrors(unittest.TestCase):
""" Test Error Reporting. """
def setUp(self):
# Set warnings to be raised as errors
warnings.simplefilter('error')
def tearDown(self):
# Reset warning behavior back to default
warnings.simplefilter('default')
def testNonUnicodeSource(self):
""" Test falure on non-unicode source text. """
if not PY3:
source = "foo".encode('utf-16')
self.assertRaises(UnicodeDecodeError, markdown.markdown, source)
def testBadOutputFormat(self):
""" Test failure on bad output_format. """
self.assertRaises(KeyError, markdown.Markdown, output_format='invalid')
def testLoadExtensionFailure(self):
""" Test failure of an extension to load. """
self.assertRaises(
ImportError,
markdown.Markdown, extensions=['non_existant_ext']
)
def testLoadBadExtension(self):
""" Test loading of an Extension with no makeExtension function. """
self.assertRaises(AttributeError, markdown.Markdown, extensions=['markdown.util'])
def testNonExtension(self):
""" Test loading a non Extension object as an extension. """
self.assertRaises(TypeError, markdown.Markdown, extensions=[object])
def testDotNotationExtensionWithBadClass(self):
""" Test Extension loading with non-existant class name (`path.to.module:Class`). """
self.assertRaises(
AttributeError,
markdown.Markdown,
extensions=['markdown.extensions.footnotes:MissingExtension']
)
def testBaseExtention(self):
""" Test that the base Extension class will raise NotImplemented. """
self.assertRaises(
NotImplementedError,
markdown.Markdown, extensions=[markdown.extensions.Extension()]
)
class testETreeComments(unittest.TestCase):
"""
Test that ElementTree Comments work.
These tests should only be a concern when using cElementTree with third
party serializers (including markdown's (x)html serializer). While markdown
doesn't use ElementTree.Comment itself, we should certainly support any
third party extensions which may. Therefore, these tests are included to
ensure such support is maintained.
"""
def setUp(self):
# Create comment node
self.comment = markdown.util.etree.Comment('foo')
if hasattr(markdown.util.etree, 'test_comment'):
self.test_comment = markdown.util.etree.test_comment
else:
self.test_comment = markdown.util.etree.Comment
def testCommentIsComment(self):
""" Test that an ElementTree Comment passes the `is Comment` test. """
self.assertIs(self.comment.tag, markdown.util.etree.test_comment)
def testCommentIsBlockLevel(self):
""" Test that an ElementTree Comment is recognized as BlockLevel. """
md = markdown.Markdown()
self.assertIs(md.is_block_level(self.comment.tag), False)
def testCommentSerialization(self):
""" Test that an ElementTree Comment serializes properly. """
self.assertEqual(
markdown.serializers.to_html_string(self.comment),
'<!--foo-->'
)
def testCommentPrettify(self):
""" Test that an ElementTree Comment is prettified properly. """
pretty = markdown.treeprocessors.PrettifyTreeprocessor(markdown.Markdown())
pretty.run(self.comment)
self.assertEqual(
markdown.serializers.to_html_string(self.comment),
'<!--foo-->\n'
)
class testElementTailTests(unittest.TestCase):
""" Element Tail Tests """
def setUp(self):
self.pretty = markdown.treeprocessors.PrettifyTreeprocessor(markdown.Markdown())
def testBrTailNoNewline(self):
""" Test that last <br> in tree has a new line tail """
root = markdown.util.etree.Element('root')
br = markdown.util.etree.SubElement(root, 'br')
self.assertEqual(br.tail, None)
self.pretty.run(root)
self.assertEqual(br.tail, "\n")
class testSerializers(unittest.TestCase):
""" Test the html and xhtml serializers. """
def testHtml(self):
""" Test HTML serialization. """
el = markdown.util.etree.Element('div')
el.set('id', 'foo<&">')
p = markdown.util.etree.SubElement(el, 'p')
p.text = 'foo <&escaped>'
p.set('hidden', 'hidden')
markdown.util.etree.SubElement(el, 'hr')
non_element = markdown.util.etree.SubElement(el, None)
non_element.text = 'non-element text'
script = markdown.util.etree.SubElement(non_element, 'script')
script.text = '<&"test\nescaping">'
el.tail = "tail text"
self.assertEqual(
markdown.serializers.to_html_string(el),
'<div id="foo<&">">'
'<p hidden>foo <&escaped></p>'
'<hr>'
'non-element text'
'<script><&"test\nescaping"></script>'
'</div>tail text'
)
def testXhtml(self):
"""" Test XHTML serialization. """
el = markdown.util.etree.Element('div')
el.set('id', 'foo<&">')
p = markdown.util.etree.SubElement(el, 'p')
p.text = 'foo<&escaped>'
p.set('hidden', 'hidden')
markdown.util.etree.SubElement(el, 'hr')
non_element = markdown.util.etree.SubElement(el, None)
non_element.text = 'non-element text'
script = markdown.util.etree.SubElement(non_element, 'script')
script.text = '<&"test\nescaping">'
el.tail = "tail text"
self.assertEqual(
markdown.serializers.to_xhtml_string(el),
'<div id="foo<&">">'
'<p hidden="hidden">foo<&escaped></p>'
'<hr />'
'non-element text'
'<script><&"test\nescaping"></script>'
'</div>tail text'
)
def testMixedCaseTags(self):
"""" Test preservation of tag case. """
el = markdown.util.etree.Element('MixedCase')
el.text = 'not valid '
em = markdown.util.etree.SubElement(el, 'EMPHASIS')
em.text = 'html'
markdown.util.etree.SubElement(el, 'HR')
self.assertEqual(
markdown.serializers.to_xhtml_string(el),
'<MixedCase>not valid <EMPHASIS>html</EMPHASIS><HR /></MixedCase>'
)
def testProsessingInstruction(self):
""" Test serialization of ProcessignInstruction. """
pi = ProcessingInstruction('foo', text='<&"test\nescaping">')
self.assertIs(pi.tag, ProcessingInstruction)
self.assertEqual(
markdown.serializers.to_xhtml_string(pi),
'<?foo <&"test\nescaping">?>'
)
def testQNameTag(self):
""" Test serialization of QName tag. """
div = markdown.util.etree.Element('div')
qname = markdown.util.etree.QName('http://www.w3.org/1998/Math/MathML', 'math')
math = markdown.util.etree.SubElement(div, qname)
math.set('display', 'block')
sem = markdown.util.etree.SubElement(math, 'semantics')
msup = markdown.util.etree.SubElement(sem, 'msup')
mi = markdown.util.etree.SubElement(msup, 'mi')
mi.text = 'x'
mn = markdown.util.etree.SubElement(msup, 'mn')
mn.text = '2'
ann = markdown.util.etree.SubElement(sem, 'annotations')
ann.text = 'x^2'
self.assertEqual(
markdown.serializers.to_xhtml_string(div),
'<div>'
'<math display="block" xmlns="http://www.w3.org/1998/Math/MathML">'
'<semantics>'
'<msup>'
'<mi>x</mi>'
'<mn>2</mn>'
'</msup>'
'<annotations>x^2</annotations>'
'</semantics>'
'</math>'
'</div>'
)
def testQNameAttribute(self):
""" Test serialization of QName attribute. """
div = markdown.util.etree.Element('div')
div.set(markdown.util.etree.QName('foo'), markdown.util.etree.QName('bar'))
self.assertEqual(
markdown.serializers.to_xhtml_string(div),
'<div foo="bar"></div>'
)
def testBadQNameTag(self):
""" Test serialization of QName with no tag. """
qname = markdown.util.etree.QName('http://www.w3.org/1998/Math/MathML')
el = markdown.util.etree.Element(qname)
self.assertRaises(ValueError, markdown.serializers.to_xhtml_string, el)
def testQNameEscaping(self):
""" Test QName escaping. """
qname = markdown.util.etree.QName('<&"test\nescaping">', 'div')
el = markdown.util.etree.Element(qname)
self.assertEqual(
markdown.serializers.to_xhtml_string(el),
'<div xmlns="<&"test escaping">"></div>'
)
def testQNamePreEscaping(self):
""" Test QName that is already partially escaped. """
qname = markdown.util.etree.QName('<&"test escaping">', 'div')
el = markdown.util.etree.Element(qname)
self.assertEqual(
markdown.serializers.to_xhtml_string(el),
'<div xmlns="<&"test escaping">"></div>'
)
def buildExtension(self):
""" Build an extension which registers fakeSerializer. """
def fakeSerializer(elem):
# Ignore input and return hardcoded output
return '<div><p>foo</p></div>'
class registerFakeSerializer(markdown.extensions.Extension):
def extendMarkdown(self, md):
md.output_formats['fake'] = fakeSerializer
return registerFakeSerializer()
def testRegisterSerializer(self):
self.assertEqual(
markdown.markdown(
'baz', extensions=[self.buildExtension()], output_format='fake'
),
'<p>foo</p>'
)
def testXHTMLOutput(self):
self.assertEqual(
markdown.markdown('foo \nbar', output_format='xhtml'),
'<p>foo<br />\nbar</p>'
)
def testHTMLOutput(self):
self.assertEqual(
markdown.markdown('foo \nbar', output_format='html'),
'<p>foo<br>\nbar</p>'
)
class testAtomicString(unittest.TestCase):
""" Test that AtomicStrings are honored (not parsed). """
def setUp(self):
md = markdown.Markdown()
self.inlineprocessor = md.treeprocessors['inline']
def testString(self):
""" Test that a regular string is parsed. """
tree = markdown.util.etree.Element('div')
p = markdown.util.etree.SubElement(tree, 'p')
p.text = 'some *text*'
new = self.inlineprocessor.run(tree)
self.assertEqual(
markdown.serializers.to_html_string(new),
'<div><p>some <em>text</em></p></div>'
)
def testSimpleAtomicString(self):
""" Test that a simple AtomicString is not parsed. """
tree = markdown.util.etree.Element('div')
p = markdown.util.etree.SubElement(tree, 'p')
p.text = markdown.util.AtomicString('some *text*')
new = self.inlineprocessor.run(tree)
self.assertEqual(
markdown.serializers.to_html_string(new),
'<div><p>some *text*</p></div>'
)
def testNestedAtomicString(self):
""" Test that a nested AtomicString is not parsed. """
tree = markdown.util.etree.Element('div')
p = markdown.util.etree.SubElement(tree, 'p')
p.text = markdown.util.AtomicString('*some* ')
span1 = markdown.util.etree.SubElement(p, 'span')
span1.text = markdown.util.AtomicString('*more* ')
span2 = markdown.util.etree.SubElement(span1, 'span')
span2.text = markdown.util.AtomicString('*text* ')
span3 = markdown.util.etree.SubElement(span2, 'span')
span3.text = markdown.util.AtomicString('*here*')
span3.tail = markdown.util.AtomicString(' *to*')
span2.tail = markdown.util.AtomicString(' *test*')
span1.tail = markdown.util.AtomicString(' *with*')
new = self.inlineprocessor.run(tree)
self.assertEqual(
markdown.serializers.to_html_string(new),
'<div><p>*some* <span>*more* <span>*text* <span>*here*</span> '
'*to*</span> *test*</span> *with*</p></div>'
)
class TestConfigParsing(unittest.TestCase):
def assertParses(self, value, result):
self.assertIs(markdown.util.parseBoolValue(value, False), result)
def testBooleansParsing(self):
self.assertParses(True, True)
self.assertParses('novalue', None)
self.assertParses('yES', True)
self.assertParses('FALSE', False)
self.assertParses(0., False)
self.assertParses('none', False)
def testPreserveNone(self):
self.assertIsNone(markdown.util.parseBoolValue('None', preserve_none=True))
self.assertIsNone(markdown.util.parseBoolValue(None, preserve_none=True))
def testInvalidBooleansParsing(self):
self.assertRaises(ValueError, markdown.util.parseBoolValue, 'novalue')
class TestCliOptionParsing(unittest.TestCase):
""" Test parsing of Command Line Interface Options. """
def setUp(self):
self.default_options = {
'input': None,
'output': None,
'encoding': None,
'output_format': 'xhtml',
'lazy_ol': True,
'extensions': [],
'extension_configs': {},
}
self.tempfile = ''
def tearDown(self):
if os.path.isfile(self.tempfile):
os.remove(self.tempfile)
def testNoOptions(self):
options, logging_level = parse_options([])
self.assertEqual(options, self.default_options)
self.assertEqual(logging_level, CRITICAL)
def testQuietOption(self):
options, logging_level = parse_options(['-q'])
self.assertGreater(logging_level, CRITICAL)
def testVerboseOption(self):
options, logging_level = parse_options(['-v'])
self.assertEqual(logging_level, WARNING)
def testNoisyOption(self):
options, logging_level = parse_options(['--noisy'])
self.assertEqual(logging_level, DEBUG)
def testInputFileOption(self):
options, logging_level = parse_options(['foo.txt'])
self.default_options['input'] = 'foo.txt'
self.assertEqual(options, self.default_options)
def testOutputFileOption(self):
options, logging_level = parse_options(['-f', 'foo.html'])
self.default_options['output'] = 'foo.html'
self.assertEqual(options, self.default_options)
def testInputAndOutputFileOptions(self):
options, logging_level = parse_options(['-f', 'foo.html', 'foo.txt'])
self.default_options['output'] = 'foo.html'
self.default_options['input'] = 'foo.txt'
self.assertEqual(options, self.default_options)
def testEncodingOption(self):
options, logging_level = parse_options(['-e', 'utf-8'])
self.default_options['encoding'] = 'utf-8'
self.assertEqual(options, self.default_options)
def testOutputFormatOption(self):
options, logging_level = parse_options(['-o', 'html'])
self.default_options['output_format'] = 'html'
self.assertEqual(options, self.default_options)
def testNoLazyOlOption(self):
options, logging_level = parse_options(['-n'])
self.default_options['lazy_ol'] = False
self.assertEqual(options, self.default_options)
def testExtensionOption(self):
options, logging_level = parse_options(['-x', 'markdown.extensions.footnotes'])
self.default_options['extensions'] = ['markdown.extensions.footnotes']
self.assertEqual(options, self.default_options)
def testMultipleExtensionOptions(self):
options, logging_level = parse_options([
'-x', 'markdown.extensions.footnotes',
'-x', 'markdown.extensions.smarty'
])
self.default_options['extensions'] = [
'markdown.extensions.footnotes',
'markdown.extensions.smarty'
]
self.assertEqual(options, self.default_options)
def create_config_file(self, config):
""" Helper to create temp config files. """
if not isinstance(config, markdown.util.string_type):
# convert to string
config = yaml.dump(config)
fd, self.tempfile = tempfile.mkstemp('.yml')
with os.fdopen(fd, 'w') as fp:
fp.write(config)
def testExtensionConfigOption(self):
config = {
'markdown.extensions.wikilinks': {
'base_url': 'http://example.com/',
'end_url': '.html',
'html_class': 'test',
},
'markdown.extensions.footnotes:FootnotesExtension': {
'PLACE_MARKER': '~~~footnotes~~~'
}
}
self.create_config_file(config)
options, logging_level = parse_options(['-c', self.tempfile])
self.default_options['extension_configs'] = config
self.assertEqual(options, self.default_options)
def textBoolExtensionConfigOption(self):
config = {
'markdown.extensions.toc': {
'title': 'Some Title',
'anchorlink': True,
'permalink': True
}
}
self.create_config_file(config)
options, logging_level = parse_options(['-c', self.tempfile])
self.default_options['extension_configs'] = config
self.assertEqual(options, self.default_options)
def testExtensionConfigOptionAsJSON(self):
config = {
'markdown.extensions.wikilinks': {
'base_url': 'http://example.com/',
'end_url': '.html',
'html_class': 'test',
},
'markdown.extensions.footnotes:FootnotesExtension': {
'PLACE_MARKER': '~~~footnotes~~~'
}
}
import json
self.create_config_file(json.dumps(config))
options, logging_level = parse_options(['-c', self.tempfile])
self.default_options['extension_configs'] = config
self.assertEqual(options, self.default_options)
def testExtensionConfigOptionMissingFile(self):
self.assertRaises(IOError, parse_options, ['-c', 'missing_file.yaml'])
def testExtensionConfigOptionBadFormat(self):
config = """
[footnotes]
PLACE_MARKER= ~~~footnotes~~~
"""
self.create_config_file(config)
self.assertRaises(yaml.YAMLError, parse_options, ['-c', self.tempfile])
class TestEscapeAppend(unittest.TestCase):
""" Tests escape character append. """
def testAppend(self):
""" Test that appended escapes are only in the current instance. """
md = markdown.Markdown()
md.ESCAPED_CHARS.append('|')
self.assertEqual('|' in md.ESCAPED_CHARS, True)
md2 = markdown.Markdown()
self.assertEqual('|' not in md2.ESCAPED_CHARS, True)
class TestBlockAppend(unittest.TestCase):
""" Tests block kHTML append. """
def testBlockAppend(self):
""" Test that appended escapes are only in the current instance. """
md = markdown.Markdown()
md.block_level_elements.append('test')
self.assertEqual('test' in md.block_level_elements, True)
md2 = markdown.Markdown()
self.assertEqual('test' not in md2.block_level_elements, True)
class TestAncestorExclusion(unittest.TestCase):
""" Tests exclusion of tags in ancestor list. """
class AncestorExample(markdown.inlinepatterns.SimpleTagInlineProcessor):
""" Ancestor Test. """
ANCESTOR_EXCLUDES = ('a',)
def handleMatch(self, m, data):
""" Handle match. """
el = markdown.util.etree.Element(self.tag)
el.text = m.group(2)
return el, m.start(0), m.end(0)
class AncestorExtension(markdown.Extension):
def __init__(self, *args, **kwargs):
"""Initialize."""
self.config = {}
def extendMarkdown(self, md):
"""Modify inline patterns."""
pattern = r'(\+)([^\+]+)\1'
md.inlinePatterns.register(TestAncestorExclusion.AncestorExample(pattern, 'strong'), 'ancestor-test', 0)
def setUp(self):
"""Setup markdown object."""
self.md = markdown.Markdown(extensions=[TestAncestorExclusion.AncestorExtension()])
def test_ancestors(self):
""" Test that an extension can exclude parent tags. """
test = """
Some +test+ and a [+link+](http://test.com)
"""
result = """<p>Some <strong>test</strong> and a <a href="http://test.com">+link+</a></p>"""
self.md.reset()
self.assertEqual(self.md.convert(test), result)
def test_ancestors_tail(self):
""" Test that an extension can exclude parent tags when dealing with a tail. """
test = """
[***+em+*+strong+**](http://test.com)
"""
result = """<p><a href="http://test.com"><strong><em>+em+</em>+strong+</strong></a></p>"""
self.md.reset()
self.assertEqual(self.md.convert(test), result)
class TestGeneralDeprecations(unittest.TestCase):
"""Test general deprecations."""
def test_version_deprecation(self):
"""Test that version is deprecated."""
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
version = markdown.version
# Verify some things
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[-1].category, DeprecationWarning))
self.assertEqual(version, markdown.__version__)
def test_version_info_deprecation(self):
"""Test that version info is deprecated."""
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
version_info = markdown.version_info
# Verify some things
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[-1].category, DeprecationWarning))
self.assertEqual(version_info, markdown.__version_info__)
def test_deprecation_wrapper_dir(self):
"""Tests the `__dir__` attribute of the class as it replaces the module's."""
dir_attr = dir(markdown)
self.assertNotIn('version', dir_attr)
self.assertIn('__version__', dir_attr)
self.assertNotIn('version_info', dir_attr)
self.assertIn('__version_info__', dir_attr)
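For orientation, a minimal sketch of the public API these regression tests exercise; it requires the `markdown` package, and the expected strings mirror the assertions above:

```python
import markdown

# Extension loading by entry-point name, as in testEntryPointExtension.
md = markdown.Markdown(extensions=["footnotes"])

# Basic conversion, matching testSimpleInput and testBlankInput above.
assert md.convert("foo") == "<p>foo</p>"
assert markdown.markdown("") == ""
```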
| 36.081262 | 116 | 0.601123 | 4,138 | 37,741 | 5.413001 | 0.150073 | 0.070985 | 0.037948 | 0.027724 | 0.487522 | 0.409393 | 0.32939 | 0.26961 | 0.253985 | 0.226037 | 0 | 0.007248 | 0.254233 | 37,741 | 1,045 | 117 | 36.115789 | 0.788567 | 0.133462 | 0 | 0.338753 | 0 | 0.009485 | 0.110431 | 0.032452 | 0 | 0 | 0 | 0.000957 | 0.219512 | 1 | 0.155827 | false | 0 | 0.020325 | 0.00542 | 0.214092 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e6f658acae15a3e9ea6e4c377ee45743db7b0897 | 6,365 | py | Python | mindspore/nn/optim/ftrl.py | XinYao1994/mindspore | 2c1a2bf752a1fde311caddba22633d2f4f63cb4e | ["Apache-2.0"] | 2 | 2020-04-28T03:49:10.000Z | 2020-04-28T03:49:13.000Z | mindspore/nn/optim/ftrl.py | XinYao1994/mindspore | 2c1a2bf752a1fde311caddba22633d2f4f63cb4e | ["Apache-2.0"] | null | null | null | mindspore/nn/optim/ftrl.py | XinYao1994/mindspore | 2c1a2bf752a1fde311caddba22633d2f4f63cb4e | ["Apache-2.0"] | null | null | null |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""FTRL"""
from mindspore.ops import functional as F, composite as C, operations as P
from mindspore.common.parameter import Parameter
from mindspore.common import Tensor
import mindspore.common.dtype as mstype
from mindspore._checkparam import Validator as validator
from mindspore._checkparam import Rel
from .optimizer import Optimizer, apply_decay, grad_scale
ftrl_opt = C.MultitypeFuncGraph("ftrl_opt")
@ftrl_opt.register("Function", "Tensor", "Number", "Number", "Number", "Tensor", "Tensor", "Tensor", "Tensor")
def _tensor_run_opt(opt, learning_rate, l1, l2, lr_power, linear, gradient, weight, moment):
"""Apply ftrl optimizer to the weight parameter."""
success = True
success = F.depend(success, opt(weight, moment, linear, gradient, learning_rate, l1, l2, lr_power))
return success
def _check_param(initial_accum, learning_rate, lr_power, l1, l2, use_locking, loss_scale=1.0, weight_decay=0.0,
prim_name=None):
"""Check param."""
validator.check_value_type("initial_accum", initial_accum, [float], prim_name)
validator.check_number("initial_accum", initial_accum, 0.0, Rel.GE, prim_name)
validator.check_value_type("learning_rate", learning_rate, [float], prim_name)
validator.check_number("learning_rate", learning_rate, 0.0, Rel.GT, prim_name)
validator.check_value_type("lr_power", lr_power, [float], prim_name)
validator.check_number("lr_power", lr_power, 0.0, Rel.LE, prim_name)
validator.check_value_type("l1", l1, [float], prim_name)
validator.check_number("l1", l1, 0.0, Rel.GE, prim_name)
validator.check_value_type("l2", l2, [float], prim_name)
validator.check_number("l2", l2, 0.0, Rel.GE, prim_name)
validator.check_value_type("use_locking", use_locking, [bool], prim_name)
validator.check_value_type("loss_scale", loss_scale, [float], prim_name)
validator.check_number("loss_scale", loss_scale, 1.0, Rel.GE, prim_name)
validator.check_value_type("weight_decay", weight_decay, [float], prim_name)
validator.check_number("weight_decay", weight_decay, 0.0, Rel.GE, prim_name)
class FTRL(Optimizer):
"""
Implement the FTRL algorithm with ApplyFtrl Operator.
FTRL is an online convex optimization algorithm that adaptively chooses its regularization function
based on the loss functions. Refer to paper `Adaptive Bound Optimization for Online Convex Optimization
<https://arxiv.org/abs/1002.4908>`_. Refer to paper `Ad Click Prediction: a View from the Trenches
<https://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf>`_ for engineering document.
Args:
params (list[Parameter]): A list of parameter, which will be updated. The element in `params`
should be Parameter.
initial_accum (float): The starting value for accumulators, must be zero or positive values. Default: 0.1.
learning_rate (float): The learning rate value, should be positive. Default: 0.001.
lr_power (float): Learning rate power controls how the learning rate decreases during training, must be less
than or equal to zero. Use fixed learning rate if lr_power is zero. Default: -0.5.
l1 (float): l1 regularization strength, must be greater than or equal to zero. Default: 0.0.
l2 (float): l2 regularization strength, must be greater than or equal to zero. Default: 0.0.
use_locking (bool): If True use locks for update operation. Default: False.
loss_scale (float): Value for the loss scale. It should be equal to or greater than 1.0. Default: 1.0.
weight_decay (float): Weight decay value to multiply weight, must be zero or a positive value. Default: 0.0.
Inputs:
- **grads** (tuple[Tensor]) - The gradients of `params` in optimizer, the shape is as same as the `params`
in optimizer.
Outputs:
tuple[Parameter], the updated parameters, the shape is the same as `params`.
Examples:
>>> net = Net()
>>> loss = nn.SoftmaxCrossEntropyWithLogits()
>>> opt = nn.FTRL(net.trainable_params())
>>> model = Model(net, loss_fn=loss, optimizer=opt, metrics=None)
"""
def __init__(self, params, initial_accum=0.1, learning_rate=0.001, lr_power=-0.5, l1=0.0, l2=0.0,
use_locking=False, loss_scale=1.0, weight_decay=0.0):
super(FTRL, self).__init__(learning_rate, params)
_check_param(initial_accum, learning_rate, lr_power, l1, l2, use_locking, loss_scale, weight_decay,
self.cls_name)
self.moments = self.parameters.clone(prefix="moments", init=initial_accum)
self.linear = self.parameters.clone(prefix="linear", init='zeros')
self.l1 = l1
self.l2 = l2
self.lr_power = lr_power
self.reciprocal_scale = 1.0 / loss_scale
self.weight_decay = weight_decay
self.decay_tf = tuple((lambda: True)() for x in self.parameters)
self.hyper_map = C.HyperMap()
self.opt = P.ApplyFtrl(use_locking=use_locking)
self.one = Tensor(1, mstype.int32)
def construct(self, grads):
params = self.parameters
moments = self.moments
linear = self.linear
if self.weight_decay > 0.0:
grads = self.hyper_map(F.partial(apply_decay, self.weight_decay), self.decay_tf, params, grads)
if self.reciprocal_scale != 1.0:
grads = self.hyper_map(F.partial(grad_scale, self.reciprocal_scale), grads)
lr = self.learning_rate
success = self.hyper_map(F.partial(ftrl_opt, self.opt, lr, self.l1, self.l2, self.lr_power),
linear, grads, params, moments)
return success
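A brief usage sketch for the optimizer above, assuming MindSpore is installed; `TinyNet` is a stand-in for any user-defined `nn.Cell` with trainable parameters:

```python
import mindspore.nn as nn


class TinyNet(nn.Cell):
    """Stand-in network; any Cell with trainable parameters works."""

    def __init__(self):
        super(TinyNet, self).__init__()
        self.fc = nn.Dense(4, 2)

    def construct(self, x):
        return self.fc(x)


net = TinyNet()
# Mirrors the docstring example: FTRL over the network's trainable parameters.
opt = nn.FTRL(net.trainable_params(), learning_rate=0.001, l1=0.0, l2=0.0)
```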
| 50.11811 | 116 | 0.692066 | 901 | 6,365 | 4.73141 | 0.261931 | 0.045039 | 0.055829 | 0.07225 | 0.243725 | 0.205724 | 0.115412 | 0.103214 | 0.094065 | 0.085386 | 0 | 0.020424 | 0.192302 | 6,365 | 126 | 117 | 50.515873 | 0.808792 | 0.410998 | 0 | 0.033898 | 0 | 0 | 0.05915 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.067797 | false | 0 | 0.118644 | 0 | 0.237288 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e6f688088bfa1088bfe7257d2cece961dd478353 | 5,106 | py | Python | aws_utils/region_selector.py | skimhub/aws-utils | 5496a7594ab90b1e658e8f9f8137e8943a39be1e | ["Apache-2.0"] | null | null | null | aws_utils/region_selector.py | skimhub/aws-utils | 5496a7594ab90b1e658e8f9f8137e8943a39be1e | ["Apache-2.0"] | 13 | 2016-01-05T14:48:38.000Z | 2017-08-14T10:17:41.000Z | aws_utils/region_selector.py | skimhub/aws-utils | 5496a7594ab90b1e658e8f9f8137e8943a39be1e | ["Apache-2.0"] | null | null | null |
import datetime
import boto3
US_EAST_REGION = {'us-east-1'}
US_EAST_AVAILABILITY_ZONES = {'us-east-1a', 'us-east-1b', 'us-east-1c', 'us-east-1e'} # note d is missing
INSTANCE_VERSION = 'Linux/UNIX (Amazon VPC)'
def fetch_spot_prices(region, start_time, end_time, instance_type, instance_version=INSTANCE_VERSION):
"""Fetches prices of EC2 spot instances from AWS.
Args:
region (str): region to look for instances in
start_time (datetime.datetime):
end_time (datetime.datetime):
instance_type (str):
instance_version (str): the types of instances that we wish to return prices for.
Returns:
yield str, float: yields tuple of availability_zone and price over the period
Raises: ValueError,
raised in the event that the boto3 response is empty.
"""
conn = boto3.client('ec2', region_name=region)
res = conn.describe_spot_price_history(StartTime=start_time,
EndTime=end_time,
InstanceTypes=[instance_type],
ProductDescriptions=[instance_version])
for item in res['SpotPriceHistory']:
yield item['AvailabilityZone'], float(item['SpotPrice'])
token = res['NextToken']
while token:
res = conn.describe_spot_price_history(StartTime=start_time,
EndTime=end_time,
InstanceTypes=[instance_type],
ProductDescriptions=[instance_version],
NextToken=token)
for item in res['SpotPriceHistory']:
yield item['AvailabilityZone'], float(item['SpotPrice'])
token = res['NextToken']
def fetch_price_stats_per_availability_zone(region, start_time, end_time, instance_type, instance_version=INSTANCE_VERSION,
filter_availability_zones=None):
"""Groups raw prices by region, returns min, max and avg price.
Args:
region (str): region to look for instances in
start_time (datetime.datetime):
end_time (datetime.datetime):
instance_type (str):
instance_version (str): the types of instances that we wish to return prices for.
filter_availability_zones ({str}): if set then we only return a price if the availability zone is in this list
Returns: dict,
{'us-east-1b': {'min': 2.01, 'max': 3.53,'avg':2.8, 'latest':3.0}}
"""
by_zone = {}
for zone, price in fetch_spot_prices(region, start_time, end_time, instance_type, instance_version):
by_zone.setdefault(zone, []).append(price)
prices_per_region = {}
for zone, prices in by_zone.iteritems():
if filter_availability_zones is None or zone in filter_availability_zones:
region_prices = {'min': min(prices),
'max': max(prices),
'avg': sum(prices) / float(len(prices)),
'latest': prices[0]}
prices_per_region[zone] = region_prices
return prices_per_region
def get_cheapest_availability_zone(instance_type, search_regions=US_EAST_REGION,
filter_availability_zones=US_EAST_AVAILABILITY_ZONES, expected_job_length=datetime.timedelta(days=1)):
"""Get the cheapest availability zone from a set of regions. Cheapest is determined by 'latest price + average price'
over the duration that the job is expected to run for
Args:
filter_availability_zones ({str}): We only return results for zones in this set
instance_type (str): Type of aws instance e.g. "m2.4xlarge"
search_regions ({str}): Set of regions we want to look for availability zones in.
expected_job_length (datetime.timedelta): The period we expect the job to run this is used as the amount of time to look back over
for the average
Returns:
(str, {}) : e.g. ('us-east-1b': {'min': 2.01, 'max': 3.53,'avg':2.8, 'latest':3.0})
"""
if isinstance(search_regions, str):
search_regions = {search_regions}
aggregated_prices = {}
for region in search_regions:
result_stats = fetch_price_stats_per_availability_zone(region,
datetime.datetime.utcnow() - expected_job_length,
datetime.datetime.utcnow(),
instance_type,
filter_availability_zones=filter_availability_zones)
if not len(result_stats):
raise Exception("No valid availability zones found for region %s" % (region,))
aggregated_prices.update(result_stats)
cheapest_availability_zone, stats = min(aggregated_prices.iteritems(), key=lambda x: x[1]['avg'] + x[1]['latest'])
return cheapest_availability_zone, stats
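A usage sketch for the helpers above; it assumes boto3 credentials and EC2 access are configured, and note the module as written targets Python 2 (it relies on `dict.iteritems`):

```python
from aws_utils.region_selector import get_cheapest_availability_zone

# Find the cheapest zone for an instance type using the default one-day lookback.
zone, stats = get_cheapest_availability_zone("m2.4xlarge")
print(zone, stats["latest"], stats["avg"])
```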
| 44.4 | 138 | 0.604387 | 597 | 5,106 | 4.971524 | 0.256281 | 0.022237 | 0.061995 | 0.018194 | 0.393194 | 0.370283 | 0.370283 | 0.34535 | 0.34535 | 0.34535 | 0 | 0.010759 | 0.308265 | 5,106 | 114 | 139 | 44.789474 | 0.829558 | 0.321582 | 0 | 0.226415 | 0 | 0 | 0.074074 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.056604 | false | 0 | 0.037736 | 0 | 0.132075 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e6f805f6f11f261c37210a559213d4def9f1debd | 952 | py | Python | app/internal/daily_quotes.py | yammesicka/calendar | 7c15a24883dbdffb563b6d3286c2d458e4a1c9c0 | ["Apache-2.0"] | null | null | null | app/internal/daily_quotes.py | yammesicka/calendar | 7c15a24883dbdffb563b6d3286c2d458e4a1c9c0 | ["Apache-2.0"] | null | null | null | app/internal/daily_quotes.py | yammesicka/calendar | 7c15a24883dbdffb563b6d3286c2d458e4a1c9c0 | ["Apache-2.0"] | null | null | null |
from datetime import date
from typing import Dict, Optional
from sqlalchemy.orm import Session
from sqlalchemy.sql.expression import func
from app.database.models import Quote
TOTAL_DAYS = 366
def create_quote_object(quotes_fields: Dict[str, Optional[str]]) -> Quote:
"""This function create a quote object from given fields dictionary.
It is used for adding the data from the json into the db"""
return Quote(
text=quotes_fields['text'],
author=quotes_fields['author']
)
def quote_per_day(
session: Session, date: date = date.today()
) -> Optional[Quote]:
"""This function provides a daily quote, relevant to the current
day of the year. The quote is randomly selected from a set
of quotes matching the given day."""
day_num = date.timetuple().tm_yday
quote = session.query(Quote).filter(
Quote.id % TOTAL_DAYS == day_num).order_by(func.random()).first()
return quote
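An illustrative way to exercise `create_quote_object` and `quote_per_day` against an in-memory SQLite database; the `Base` import is an assumption about how `app.database.models` is laid out:

```python
from datetime import date

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

from app.database.models import Base  # declarative Base is an assumption

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

session.add(create_quote_object({"text": "Stay curious.", "author": "Anonymous"}))
session.commit()

# January 1st has day number 1, which matches the first inserted quote (id == 1).
print(quote_per_day(session, date(2021, 1, 1)))
```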
| 30.709677 | 74 | 0.711134 | 138 | 952 | 4.811594 | 0.5 | 0.054217 | 0.051205 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003937 | 0.19958 | 952 | 30 | 75 | 31.733333 | 0.867454 | 0.295168 | 0 | 0 | 0 | 0 | 0.015432 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.277778 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e6fc051294ab288b08cbb146da00f8c04ac171dd | 413 | py | Python | telemetry/Truck.py | SnipsMine/ETS2-Speedrun-Tool | 5ac87e4bc88be67ff4954b2b98772ff14a65eb48 | ["MIT"] | null | null | null | telemetry/Truck.py | SnipsMine/ETS2-Speedrun-Tool | 5ac87e4bc88be67ff4954b2b98772ff14a65eb48 | ["MIT"] | null | null | null | telemetry/Truck.py | SnipsMine/ETS2-Speedrun-Tool | 5ac87e4bc88be67ff4954b2b98772ff14a65eb48 | ["MIT"] | null | null | null |
from telemetry.TruckConstants import ConstantValues
from telemetry.TruckCurrent import CurrentValues
from telemetry.TruckPositioning import Positioning
class TruckValues:
constant_values = None
current_values = None
positioning = None
def __init__(self):
self.current_values = CurrentValues()
self.constant_values = ConstantValues()
self.positioning = Positioning()
| 21.736842 | 51 | 0.750605 | 39 | 413 | 7.74359 | 0.461538 | 0.129139 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.196126 | 413 | 18 | 52 | 22.944444 | 0.909639 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.272727 | 0 | 0.727273 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e6fc89e2fb95df50b778c64242f30654175e9df4 | 566 | py | Python | peacebot/core/plugins/Miscellaneous/__init__.py | Peacebot-Development/Peacebot-v2 | 79ab87b12cd60b708631d96021ac3d3eaeee01c9 | ["MIT"] | 3 | 2021-11-13T06:26:06.000Z | 2022-01-23T13:03:30.000Z | peacebot/core/plugins/Miscellaneous/__init__.py | Peacebot-Development/Peacebot-v2 | 79ab87b12cd60b708631d96021ac3d3eaeee01c9 | ["MIT"] | 32 | 2021-11-12T15:29:04.000Z | 2022-01-23T14:44:19.000Z | peacebot/core/plugins/Miscellaneous/__init__.py | Peacebot-Development/Peacebot-v2 | 79ab87b12cd60b708631d96021ac3d3eaeee01c9 | ["MIT"] | 1 | 2021-11-13T06:34:03.000Z | 2021-11-13T06:34:03.000Z |
import lightbulb
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from peacebot.core.utils.time import TimeConverter
def fetch_scheduler(ctx: lightbulb.Context) -> AsyncIOScheduler:
return ctx.bot.d.scheduler
async def convert_time(ctx: lightbulb.Context, time: str) -> float:
seconds = await TimeConverter.convert(TimeConverter, ctx, time)
return seconds
async def send_remainder(ctx: lightbulb.Context, text: str) -> None:
await ctx.respond(
f"{ctx.author.mention} Remainder: `{text}`",
user_mentions=True,
)
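A sketch (not part of the original file) of how these helpers might be wired together inside a command callback; the lightbulb command registration is omitted and the `remind` name is hypothetical:

```python
import datetime


async def remind(ctx, time: str, text: str) -> None:
    # Convert the human-readable duration, then schedule the reminder message.
    seconds = await convert_time(ctx, time)
    run_at = datetime.datetime.now() + datetime.timedelta(seconds=seconds)
    fetch_scheduler(ctx).add_job(
        send_remainder, "date", run_date=run_at, args=[ctx, text]
    )
```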
| 25.727273 | 68 | 0.738516 | 68 | 566 | 6.088235 | 0.544118 | 0.086957 | 0.137681 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.162544 | 566 | 21 | 69 | 26.952381 | 0.873418 | 0 | 0 | 0 | 0 | 0 | 0.070671 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.230769 | 0.076923 | 0.461538 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e6fe319ea41fa303d91576c379f5911e22bf4307 | 855 | py | Python | example/android/python/msite_simple_default_browser.py | laichimirum/docker-appium-emulator | 3549c5f1fc09bbc650dd30351ad4f509a72a90fa | ["Apache-2.0"] | 8 | 2019-04-26T04:09:40.000Z | 2022-01-04T05:24:12.000Z | example/android/python/msite_simple_default_browser.py | laichimirum/docker-appium-emulator | 3549c5f1fc09bbc650dd30351ad4f509a72a90fa | ["Apache-2.0"] | null | null | null | example/android/python/msite_simple_default_browser.py | laichimirum/docker-appium-emulator | 3549c5f1fc09bbc650dd30351ad4f509a72a90fa | ["Apache-2.0"] | 2 | 2019-12-16T15:34:57.000Z | 2020-10-22T07:03:15.000Z |
import unittest
from appium import webdriver
class MSiteDefaultBrowserAndroidUITests(unittest.TestCase):
def setUp(self):
# Default browser does not exist for android >= 6.0
desired_caps = {
'platformName': 'Android',
'deviceName': 'Android Emulator',
'appPackage': 'com.android.browser',
'appActivity': 'com.android.browser.BrowserActivity',
'avd': 'samsung_galaxy_s6_6.0'
}
self.driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', desired_caps)
def test_open_url(self):
self.driver.get('http://targeturl.com')
def tearDown(self):
self.driver.quit()
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(MSiteDefaultBrowserAndroidUITests)
unittest.TextTestRunner(verbosity=2).run(suite)
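A variation sketch on the test above: the same session bootstrapped with the Appium server URL read from the environment (`APPIUM_URL` is an assumed variable name, not defined by the example itself):

```python
import os

from appium import webdriver

appium_url = os.environ.get("APPIUM_URL", "http://127.0.0.1:4723/wd/hub")
caps = {
    "platformName": "Android",
    "deviceName": "Android Emulator",
    "appPackage": "com.android.browser",
    "appActivity": "com.android.browser.BrowserActivity",
}
driver = webdriver.Remote(appium_url, caps)
try:
    driver.get("http://targeturl.com")
finally:
    driver.quit()
```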
| 29.482759 | 90 | 0.65614 | 89 | 855 | 6.134831 | 0.651685 | 0.054945 | 0.062271 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.024024 | 0.221053 | 855 | 28 | 91 | 30.535714 | 0.795796 | 0.05731 | 0 | 0 | 0 | 0 | 0.248756 | 0.069652 | 0 | 0 | 0 | 0 | 0 | 1 | 0.157895 | false | 0 | 0.105263 | 0 | 0.315789 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e6fe636ebee73df95de2568536aed7f6f3927fad | 458 | py | Python | src/nn/dataset_utils/types_processing.py | sola-st/Nalin | 3a6f95cec95d9152a65af970cfbb145179b0bd72 | ["MIT"] | null | null | null | src/nn/dataset_utils/types_processing.py | sola-st/Nalin | 3a6f95cec95d9152a65af970cfbb145179b0bd72 | ["MIT"] | null | null | null | src/nn/dataset_utils/types_processing.py | sola-st/Nalin | 3a6f95cec95d9152a65af970cfbb145179b0bd72 | ["MIT"] | null | null | null |
"""
Created on 17-June-2020
@author Jibesh Patra
The types extracted during runtime usually look something like --> <class 'numpy.ndarray'> or
<class 'seaborn.palettes._ColorPalette'> change them to --> ndarray, ColorPalette
"""
import re
remove_chars = re.compile(r'>|\'|<|(class )|_|(type)')
def process_types(tp: str) -> str:
cleaned_type = remove_chars.sub('', tp)
cleaned_type = cleaned_type.split('.')[-1].strip()
return cleaned_type
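Example inputs and outputs for `process_types`, mirroring the docstring (run in the same module as the function above):

```python
# The regex strips "<class '...'>" wrappers and underscores, then the last
# dotted component is kept.
print(process_types("<class 'numpy.ndarray'>"))                   # ndarray
print(process_types("<class 'seaborn.palettes._ColorPalette'>"))  # ColorPalette
```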
| 24.105263 | 93 | 0.696507 | 61 | 458 | 5.081967 | 0.721311 | 0.141935 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017903 | 0.146288 | 458 | 18 | 94 | 25.444444 | 0.774936 | 0.482533 | 0 | 0 | 0 | 0 | 0.017544 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.166667 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e6fee516b4253e139cd1d42c7d2077b96248a564 | 4,254 | py | Python | src/canvas.py | soootaleb/spare | b454b9a8861df55c29fe55b4b584248a2ffe79cb | ["Apache-2.0"] | 1 | 2019-05-21T16:04:08.000Z | 2019-05-21T16:04:08.000Z | src/canvas.py | soootaleb/school-spacial-relations | b454b9a8861df55c29fe55b4b584248a2ffe79cb | ["Apache-2.0"] | null | null | null | src/canvas.py | soootaleb/school-spacial-relations | b454b9a8861df55c29fe55b4b584248a2ffe79cb | ["Apache-2.0"] | null | null | null |
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from matplotlib import pyplot as plt
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
import matplotlib.ticker as ticker
import numpy as np
import random, matplotlib.pyplot as plt


class PlotCanvas(FigureCanvas):

    def __init__(self, parent=None, width=5, height=4, dpi=100):
        fig = Figure(figsize=(width, height), dpi=dpi)
        self.axes = fig.add_subplot(111)
        FigureCanvas.__init__(self, fig)
        self.setParent(parent)
        FigureCanvas.setSizePolicy(self, QSizePolicy.Expanding, QSizePolicy.Expanding)
        FigureCanvas.updateGeometry(self)
        self.plot()

    def plot(self):
        data = [random.random() for i in range(25)]
        ax = self.figure.add_subplot(111)
        ax.plot(data, 'r-')
        ax.set_title('PyQt Matplotlib Example')


class ImageCanvas(FigureCanvas):

    def __init__(self, parent = None, width = 5, height = 4, dpi=100):
        fig = Figure(figsize = (width, height), dpi = dpi, frameon = False)
        fig.subplots_adjust(bottom=0, top=1, left=0, right=1)
        FigureCanvas.__init__(self, fig)
        self.setParent(parent)
        self.axes = fig.add_subplot(111)
        FigureCanvas.setSizePolicy(self, QSizePolicy.Expanding, QSizePolicy.Expanding)
        FigureCanvas.updateGeometry(self)

    def plot(self, image):
        self.axes.axis('off')
        display = image.image
        display[display == 1] = 255
        if image.color:
            self.axes.imshow(image.image)
        else:
            self.axes.imshow(display, cmap = "gray")
        self.show()


class HistogramCanvas(FigureCanvas):
    '''
    This class is used to plot the histogram of the two objects in the main module.
    The values are computed in one of the descriptors.
    '''

    def __init__(self, parent = None, is_polar = True, width = 8, height = 5, dpi = 100):
        self.fig = Figure(figsize=(width, height), dpi=dpi)
        FigureCanvas.__init__(self, self.fig)
        self.is_polar = is_polar
        self.setParent(parent)
        if self.is_polar:
            self.axes = self.fig.add_subplot(111, projection='polar')
        else:
            self.axes = self.fig.add_subplot(111)
        self.axes.grid(True)
        # TODO: Add the names of the objects (fname - extension?)
        FigureCanvas.updateGeometry(self)

    def plot(self, histogram, color = None):
        self.axes.set_title("Spatial relations between A and B", va='bottom')
        if self.is_polar:
            self.axes.set_rlim(0, 1)
            theta = [float(k)/ 180 * np.pi for k in histogram.values.keys()]
            # TODO: refactor this ugly-but-working code
            if len(theta) > 16:
                i = 0
                theta_major_name = []
                for k in histogram.values.keys():
                    if i % 3 == 0:
                        theta_major_name.append(float(k)/ 180 * np.pi)
                    i += 1
                self.axes.xaxis.set_major_locator(ticker.FixedLocator(theta_major_name))
            else:
                self.axes.xaxis.set_major_locator(ticker.LinearLocator(len(theta)))
            self.axes.xaxis.set_minor_locator(ticker.LinearLocator(len(theta)))
            self.axes.grid(b = True, which='major', linestyle='-')
            self.axes.grid(b = True, which='minor', linestyle='--')
            self.axes.plot(theta, list(histogram.values.values()))
        else:
            self.axes.plot(list(histogram.values.keys()), list(histogram.values.values()))
            # self.axes.plot(list(histogram.values.keys()), list(histogram.gaussian), color="red", ls='--')
        self.draw()

    def clear(self):
        self.axes.clear()

    def lin_or_polar(self, new_value : bool):
        '''
        Set the type of the histogram to be polar or linear.
        '''
        self.is_polar = new_value
        self.fig.clear()
        if self.is_polar:
            self.axes = self.fig.add_subplot(111, projection='polar')
        else:
            self.axes = self.fig.add_subplot(111)
        FigureCanvas.updateGeometry(self)
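A hedged usage sketch (not part of src/canvas.py): embedding PlotCanvas in a minimal PyQt5 main window. It assumes PyQt5 and matplotlib are installed and that the module above is importable as canvas (inferred from the src/canvas.py path):
import sys

from PyQt5.QtWidgets import QApplication, QMainWindow

from canvas import PlotCanvas  # hypothetical import, based on the src/canvas.py path

if __name__ == '__main__':
    app = QApplication(sys.argv)
    window = QMainWindow()
    # PlotCanvas is a FigureCanvas, hence a QWidget, so it can serve as the central widget.
    window.setCentralWidget(PlotCanvas(parent=window, width=5, height=4, dpi=100))
    window.setWindowTitle('PlotCanvas demo')
    window.show()
    sys.exit(app.exec_())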
| 35.157025
| 107
| 0.609074
| 526
| 4,254
| 4.813688
| 0.288973
| 0.066351
| 0.03594
| 0.037915
| 0.458926
| 0.435624
| 0.372828
| 0.252765
| 0.252765
| 0.21485
| 0
| 0.020806
| 0.276916
| 4,254
| 120
| 108
| 35.45
| 0.802341
| 0.088858
| 0
| 0.317647
| 0
| 0
| 0.024537
| 0
| 0
| 0
| 0
| 0.008333
| 0
| 1
| 0.094118
| false
| 0
| 0.105882
| 0
| 0.235294
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc0054805adf6c4edaa7b274d8d98323387b2aa1
| 7,561
|
py
|
Python
|
src/cpg_scpi/test/__init__.py
|
GeorgBraun/cpg_scpi_python
|
ec74c15beaac0b002fb996a42f4e66ea369e1fc6
|
[
"MIT"
] | null | null | null |
src/cpg_scpi/test/__init__.py
|
GeorgBraun/cpg_scpi_python
|
ec74c15beaac0b002fb996a42f4e66ea369e1fc6
|
[
"MIT"
] | null | null | null |
src/cpg_scpi/test/__init__.py
|
GeorgBraun/cpg_scpi_python
|
ec74c15beaac0b002fb996a42f4e66ea369e1fc6
|
[
"MIT"
] | null | null | null |
'''Functional tests for CPG'''
from .. import CircuitPlayground
from .. import __version__ as CircuitPlaygroundVersion

import time


def funcTest(timestamps: bool = False) -> None:
    cpg = CircuitPlayground()
    if timestamps:
        _printFuncTestHeadingWithDeliLine(f'cpg_scpi v{CircuitPlaygroundVersion}\nRUNNING SOME FUNCTIONAL-TESTS WITH THE CPG with timestamps ...\n')
    else:
        _printFuncTestHeadingWithDeliLine(f'cpg_scpi v{CircuitPlaygroundVersion}\nRUNNING SOME FUNCTIONAL-TESTS WITH THE CPG without timestamps ...\n')
    # test_led(cpg)
    # test_buttonAny(cpg, timestamps)
    # test_switch(cpg, timestamps)
    test_temp(cpg, timestamps)
    test_light(cpg, timestamps)
    test_acc(cpg, timestamps)
    test_touch(cpg, timestamps)
    _printFuncTestHeadingWithDeliLine('DONE WITH FUNCTIONAL-TESTS')
    _printFuncTestDeliLine()


def _printCountdown(start: int = 3) -> None:
    for i in range(start, 0, -1):
        print(i, end=" ", flush=True)
        time.sleep(1)
    print('', flush=True)


def _printFuncTestDeliLine() -> None:
    print('XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')


def _printFuncTestHeadingWithDeliLine(heading) -> None:
    _printFuncTestDeliLine()
    print(heading)


def test_buttonAny(cpg, timestamps) -> None:
    if timestamps:
        outHeading = '| count | timestamp | any button |'
        outFormat = '| {:5} | {:12.3f} | {!s:10} |'
    else:
        outHeading = '| count | any button |'
        outFormat = '| {:5} | {!s:10} |'
    _printFuncTestHeadingWithDeliLine('Button-Test: Press left or right button...')
    print(outHeading)
    _printCountdown(3)
    count = 10
    for i in range(count):
        result = (count-i, *cpg.buttonAny_wts()) if timestamps else (count-i, cpg.buttonAny())
        print(outFormat.format(*result))
        cpg.wait(0.5)


def test_switch(cpg, timestamps) -> None:
    if timestamps:
        outHeading = '| count | timestamp | switch |'
        outFormat = '| {:5} | {:12.3f} | {!s:6} |'
    else:
        outHeading = '| count | switch |'
        outFormat = '| {:5} | {!s:6} |'
    _printFuncTestHeadingWithDeliLine('Switch-Test: Change slider switch position...')
    print(outHeading)
    _printCountdown(3)
    count = 10
    for i in range(count):
        result = (count-i, *cpg.switch_wts()) if timestamps else (count-i, cpg.switch())
        print(outFormat.format(*result))
        cpg.wait(0.5)


def test_temp(cpg, timestamps) -> None:
    if timestamps:
        outHeading = '| count | timestamp | temp °C |'
        outFormat = '| {:5} | {:12.3f} | {:7.2f} |'
    else:
        outHeading = '| count | temp °C |'
        outFormat = '| {:5} | {:7.2f} |'
    _printFuncTestHeadingWithDeliLine('Temp-Sensor-Test ...')
    print(outHeading)
    _printCountdown(3)
    count = 20
    for i in range(count):
        result = (count-i, *cpg.temp_wts()) if timestamps else (count-i, cpg.temp())
        print(outFormat.format(*result))
        cpg.wait(0.5)


def test_light(cpg, timestamps) -> None:
    if timestamps:
        outHeading = '| count | timestamp | light |'
        outFormat = '| {:5} | {:12.3f} | {:5} |'
    else:
        outHeading = '| count | light |'
        outFormat = '| {:5} | {:5} |'
    _printFuncTestHeadingWithDeliLine('Light-Sensor-Test: Move hand over light sensor...')
    print(outHeading)
    _printCountdown(3)
    count = 20
    for i in range(count):
        result = (count-i, *cpg.light_wts()) if timestamps else (count-i, cpg.light())
        print(outFormat.format(*result))
        cpg.wait(0.5)


def test_acc(cpg, timestamps) -> None:
    if timestamps:
        outHeading = '| count | timestamp | x m/s^2 | y m/s^2 | z m/s^2 |'
        outFormat = '| {:5} | {:12.3f} | {:7.2f} | {:7.2f} | {:7.2f} |'
        testFunction = cpg.acc_wts
    else:
        outHeading = '| count | x m/s^2 | y m/s^2 | z m/s^2 |'
        outFormat = '| {:5} | {:7.2f} | {:7.2f} | {:7.2f} |'
        testFunction = cpg.acc
    _printFuncTestHeadingWithDeliLine('Accelerometer-Test: Tilt the CPG board...')
    print(outHeading)
    _printCountdown(3)
    count = 60
    for i in range(count):
        print(outFormat.format(count-i, *testFunction()))
        cpg.wait(0.2)


def test_touch(cpg, timestamps) -> None:
    if timestamps:
        outHeading = '| count | timestamp | touch | binary |'
        outFormat = '| {0:5} | {1:12.3f} | {2:5} | {2:08b} |'
    else:
        outHeading = '| count | touch | binary |'
        outFormat = '| {0:5} | {1:5} | {1:08b} |'
    _printFuncTestHeadingWithDeliLine('Touch-Sensor-Test: Touch capacitive sensor pads...')
    print(outHeading)
    _printCountdown(3)
    count = 30
    for i in range(count):
        result = (count-i, *cpg.touch_wts()) if timestamps else (count-i, cpg.touch())
        print(outFormat.format(*result))
        cpg.wait(0.5)


def test_led(cpg) -> None:
    '''Flash LEDs and run a short chasing light.'''
    _printFuncTestHeadingWithDeliLine('LED-Test: Flash LEDs and run a short chasing light...')
    print('flashing LEDs...')
    test_ledDemo(cpg)
    value = 1
    # print('| val | LEDs |')
    for i in range(10):
        # print(f'| {value:4} | {value:010b} |')
        cpg.led(value)
        cpg.wait(0.2)
        value <<= 1  # shift 1 bit to the left
    for i in range(10):
        value >>= 1  # shift 1 bit to the right
        # print(f'| {value:4} | {value:010b} |')
        cpg.led(value)
        cpg.wait(0.2)
    print('flashing LEDs...')
    test_ledDemo(cpg)


def test_ledDemo(cpg) -> None:
    '''Flash LEDs three times.'''
    for i in range(3):
        cpg.ledDemo()
        cpg.wait(0.2)


def testAccSpeed(cpg, iterations: int = 100) -> None:
    '''Measure how long it takes to do an accelerometer measurement.'''
    print(f'Testing acc measurement speed with {iterations} iterations. Please wait ...')
    import timeit
    result = timeit.Timer(stmt=lambda: cpg.acc(), setup='pass').timeit(number=iterations)
    print(f'Total time: {result:.1f} seconds.')
    print(f'On average {(result*1000/iterations):.1f} ms per measurement.')


def testLightSpeed(cpg, iterations: int = 100) -> None:
    '''Measure how long it takes to do a light sensor measurement.'''
    print(f'Testing light measurement speed with {iterations} iterations. Please wait ...')
    import timeit
    result = timeit.Timer(stmt=lambda: cpg.light(), setup='pass').timeit(number=iterations)
    print(f'Total time: {result:.1f} seconds.')
    print(f'On average {(result*1000/iterations):.1f} ms per measurement.')


def _testResponseWaitTime(cpg, iterations: int = 10000) -> None:
    '''Test if the wait time for additional, unexpected responses is long enough.'''
    print(f'Testing Response-Wait-Time with {iterations} iterations ...')
    for i in range(iterations):
        if i % 100 == 0:
            print('try-count', i)
        try:
            # Request acc measurement values, but do not expect any response, even if the CPG will send one.
            cpg._query('MEAS:ACC?', 0)
            # If we are still here, we did not get a response. This is bad.
            print('XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')
            print('ERROR in testResponseWaitTime(): CPG-Response was too late.')
            print('XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')
        except Exception:
            # The normal behavior is a response, resulting in an exception.
            # This is what we expected. Therefore, just continue.
            pass
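A hedged usage sketch (not part of the module): given the package layout src/cpg_scpi/test/__init__.py, the functional tests can presumably be started like this once a Circuit Playground board is connected; the import path is inferred from the layout, not confirmed by the source:
from cpg_scpi.test import funcTest  # import path inferred from the package layout

# Runs the temperature, light, accelerometer, and touch tests with timestamp columns.
funcTest(timestamps=True)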
| 37.616915
| 151
| 0.61923
| 902
| 7,561
| 5.135255
| 0.213969
| 0.031088
| 0.014249
| 0.026123
| 0.494819
| 0.454231
| 0.426382
| 0.387522
| 0.30462
| 0.286269
| 0
| 0.027643
| 0.239254
| 7,561
| 200
| 152
| 37.805
| 0.777295
| 0.104748
| 0
| 0.43038
| 0
| 0.031646
| 0.289771
| 0.051293
| 0
| 0
| 0
| 0
| 0
| 1
| 0.094937
| false
| 0.018987
| 0.031646
| 0
| 0.126582
| 0.316456
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc01d88d24681ec66a1cf06a3a055252d072afd3
| 31,292
|
py
|
Python
|
gridfs/grid_file.py
|
naomielst/mongo-python-driver
|
e3d1d6f5b48101654a05493fd6eec7fe3fa014bd
|
[
"Apache-2.0"
] | 2
|
2022-01-19T21:00:48.000Z
|
2022-01-27T05:54:13.000Z
|
gridfs/grid_file.py
|
naomielst/mongo-python-driver
|
e3d1d6f5b48101654a05493fd6eec7fe3fa014bd
|
[
"Apache-2.0"
] | 1
|
2021-12-24T11:32:17.000Z
|
2021-12-24T11:32:17.000Z
|
gridfs/grid_file.py
|
naomielst/mongo-python-driver
|
e3d1d6f5b48101654a05493fd6eec7fe3fa014bd
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2009-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for representing files stored in GridFS."""
import datetime
import io
import math
import os
from bson.int64 import Int64
from bson.son import SON
from bson.binary import Binary
from bson.objectid import ObjectId
from pymongo import ASCENDING
from pymongo.collection import Collection
from pymongo.cursor import Cursor
from pymongo.errors import (ConfigurationError,
CursorNotFound,
DuplicateKeyError,
InvalidOperation,
OperationFailure)
from pymongo.read_preferences import ReadPreference
from gridfs.errors import CorruptGridFile, FileExists, NoFile
try:
_SEEK_SET = os.SEEK_SET
_SEEK_CUR = os.SEEK_CUR
_SEEK_END = os.SEEK_END
# before 2.5
except AttributeError:
_SEEK_SET = 0
_SEEK_CUR = 1
_SEEK_END = 2
EMPTY = b""
NEWLN = b"\n"
"""Default chunk size, in bytes."""
# Slightly under a power of 2, to work well with server's record allocations.
DEFAULT_CHUNK_SIZE = 255 * 1024
_C_INDEX = SON([("files_id", ASCENDING), ("n", ASCENDING)])
_F_INDEX = SON([("filename", ASCENDING), ("uploadDate", ASCENDING)])
def _grid_in_property(field_name, docstring, read_only=False,
closed_only=False):
"""Create a GridIn property."""
def getter(self):
if closed_only and not self._closed:
raise AttributeError("can only get %r on a closed file" %
field_name)
# Protect against PHP-237
if field_name == 'length':
return self._file.get(field_name, 0)
return self._file.get(field_name, None)
def setter(self, value):
if self._closed:
self._coll.files.update_one({"_id": self._file["_id"]},
{"$set": {field_name: value}})
self._file[field_name] = value
if read_only:
docstring += "\n\nThis attribute is read-only."
elif closed_only:
docstring = "%s\n\n%s" % (docstring, "This attribute is read-only and "
"can only be read after :meth:`close` "
"has been called.")
if not read_only and not closed_only:
return property(getter, setter, doc=docstring)
return property(getter, doc=docstring)
def _grid_out_property(field_name, docstring):
"""Create a GridOut property."""
def getter(self):
self._ensure_file()
# Protect against PHP-237
if field_name == 'length':
return self._file.get(field_name, 0)
return self._file.get(field_name, None)
docstring += "\n\nThis attribute is read-only."
return property(getter, doc=docstring)
def _clear_entity_type_registry(entity, **kwargs):
"""Clear the given database/collection object's type registry."""
codecopts = entity.codec_options.with_options(type_registry=None)
return entity.with_options(codec_options=codecopts, **kwargs)
def _disallow_transactions(session):
if session and session.in_transaction:
raise InvalidOperation(
'GridFS does not support multi-document transactions')
class GridIn(object):
"""Class to write data to GridFS.
"""
def __init__(self, root_collection, session=None, **kwargs):
"""Write a file to GridFS
Application developers should generally not need to
instantiate this class directly - instead see the methods
provided by :class:`~gridfs.GridFS`.
Raises :class:`TypeError` if `root_collection` is not an
instance of :class:`~pymongo.collection.Collection`.
Any of the file level options specified in the `GridFS Spec
<http://dochub.mongodb.org/core/gridfsspec>`_ may be passed as
keyword arguments. Any additional keyword arguments will be
set as additional fields on the file document. Valid keyword
arguments include:
- ``"_id"``: unique ID for this file (default:
:class:`~bson.objectid.ObjectId`) - this ``"_id"`` must
not have already been used for another file
- ``"filename"``: human name for the file
- ``"contentType"`` or ``"content_type"``: valid mime-type
for the file
- ``"chunkSize"`` or ``"chunk_size"``: size of each of the
chunks, in bytes (default: 255 kb)
- ``"encoding"``: encoding used for this file. Any :class:`str`
that is written to the file will be converted to :class:`bytes`.
:Parameters:
- `root_collection`: root collection to write to
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession` to use for all
commands
- `**kwargs` (optional): file level options (see above)
.. versionchanged:: 4.0
Removed the `disable_md5` parameter. See
:ref:`removed-gridfs-checksum` for details.
.. versionchanged:: 3.7
Added the `disable_md5` parameter.
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.0
`root_collection` must use an acknowledged
:attr:`~pymongo.collection.Collection.write_concern`
"""
if not isinstance(root_collection, Collection):
raise TypeError("root_collection must be an "
"instance of Collection")
if not root_collection.write_concern.acknowledged:
raise ConfigurationError('root_collection must use '
'acknowledged write_concern')
_disallow_transactions(session)
# Handle alternative naming
if "content_type" in kwargs:
kwargs["contentType"] = kwargs.pop("content_type")
if "chunk_size" in kwargs:
kwargs["chunkSize"] = kwargs.pop("chunk_size")
coll = _clear_entity_type_registry(
root_collection, read_preference=ReadPreference.PRIMARY)
# Defaults
kwargs["_id"] = kwargs.get("_id", ObjectId())
kwargs["chunkSize"] = kwargs.get("chunkSize", DEFAULT_CHUNK_SIZE)
object.__setattr__(self, "_session", session)
object.__setattr__(self, "_coll", coll)
object.__setattr__(self, "_chunks", coll.chunks)
object.__setattr__(self, "_file", kwargs)
object.__setattr__(self, "_buffer", io.BytesIO())
object.__setattr__(self, "_position", 0)
object.__setattr__(self, "_chunk_number", 0)
object.__setattr__(self, "_closed", False)
object.__setattr__(self, "_ensured_index", False)
def __create_index(self, collection, index_key, unique):
doc = collection.find_one(projection={"_id": 1}, session=self._session)
if doc is None:
try:
index_keys = [index_spec['key'] for index_spec in
collection.list_indexes(session=self._session)]
except OperationFailure:
index_keys = []
if index_key not in index_keys:
collection.create_index(
index_key.items(), unique=unique, session=self._session)
def __ensure_indexes(self):
if not object.__getattribute__(self, "_ensured_index"):
_disallow_transactions(self._session)
self.__create_index(self._coll.files, _F_INDEX, False)
self.__create_index(self._coll.chunks, _C_INDEX, True)
object.__setattr__(self, "_ensured_index", True)
def abort(self):
"""Remove all chunks/files that may have been uploaded and close.
"""
self._coll.chunks.delete_many(
{"files_id": self._file['_id']}, session=self._session)
self._coll.files.delete_one(
{"_id": self._file['_id']}, session=self._session)
object.__setattr__(self, "_closed", True)
@property
def closed(self):
"""Is this file closed?
"""
return self._closed
_id = _grid_in_property("_id", "The ``'_id'`` value for this file.",
read_only=True)
filename = _grid_in_property("filename", "Name of this file.")
name = _grid_in_property("filename", "Alias for `filename`.")
content_type = _grid_in_property("contentType", "Mime-type for this file.")
length = _grid_in_property("length", "Length (in bytes) of this file.",
closed_only=True)
chunk_size = _grid_in_property("chunkSize", "Chunk size for this file.",
read_only=True)
upload_date = _grid_in_property("uploadDate",
"Date that this file was uploaded.",
closed_only=True)
md5 = _grid_in_property("md5", "MD5 of the contents of this file "
"if an md5 sum was created.",
closed_only=True)
def __getattr__(self, name):
if name in self._file:
return self._file[name]
raise AttributeError("GridIn object has no attribute '%s'" % name)
def __setattr__(self, name, value):
# For properties of this instance like _buffer, or descriptors set on
# the class like filename, use regular __setattr__
if name in self.__dict__ or name in self.__class__.__dict__:
object.__setattr__(self, name, value)
else:
# All other attributes are part of the document in db.fs.files.
# Store them to be sent to server on close() or if closed, send
# them now.
self._file[name] = value
if self._closed:
self._coll.files.update_one({"_id": self._file["_id"]},
{"$set": {name: value}})
def __flush_data(self, data):
"""Flush `data` to a chunk.
"""
self.__ensure_indexes()
if not data:
return
assert(len(data) <= self.chunk_size)
chunk = {"files_id": self._file["_id"],
"n": self._chunk_number,
"data": Binary(data)}
try:
self._chunks.insert_one(chunk, session=self._session)
except DuplicateKeyError:
self._raise_file_exists(self._file['_id'])
self._chunk_number += 1
self._position += len(data)
def __flush_buffer(self):
"""Flush the buffer contents out to a chunk.
"""
self.__flush_data(self._buffer.getvalue())
self._buffer.close()
self._buffer = io.BytesIO()
def __flush(self):
"""Flush the file to the database.
"""
try:
self.__flush_buffer()
# The GridFS spec says length SHOULD be an Int64.
self._file["length"] = Int64(self._position)
self._file["uploadDate"] = datetime.datetime.utcnow()
return self._coll.files.insert_one(
self._file, session=self._session)
except DuplicateKeyError:
self._raise_file_exists(self._id)
def _raise_file_exists(self, file_id):
"""Raise a FileExists exception for the given file_id."""
raise FileExists("file with _id %r already exists" % file_id)
def close(self):
"""Flush the file and close it.
A closed file cannot be written any more. Calling
:meth:`close` more than once is allowed.
"""
if not self._closed:
self.__flush()
object.__setattr__(self, "_closed", True)
def read(self, size=-1):
raise io.UnsupportedOperation('read')
def readable(self):
return False
def seekable(self):
return False
def write(self, data):
"""Write data to the file. There is no return value.
`data` can be either a string of bytes or a file-like object
(implementing :meth:`read`). If the file has an
:attr:`encoding` attribute, `data` can also be a
:class:`str` instance, which will be encoded as
:attr:`encoding` before being written.
Due to buffering, the data may not actually be written to the
database until the :meth:`close` method is called. Raises
:class:`ValueError` if this file is already closed. Raises
:class:`TypeError` if `data` is not an instance of
:class:`bytes`, a file-like object, or an instance of :class:`str`.
Unicode data is only allowed if the file has an :attr:`encoding`
attribute.
:Parameters:
- `data`: string of bytes or file-like object to be written
to the file
"""
if self._closed:
raise ValueError("cannot write to a closed file")
try:
# file-like
read = data.read
except AttributeError:
# string
if not isinstance(data, (str, bytes)):
raise TypeError("can only write strings or file-like objects")
if isinstance(data, str):
try:
data = data.encode(self.encoding)
except AttributeError:
raise TypeError("must specify an encoding for file in "
"order to write str")
read = io.BytesIO(data).read
if self._buffer.tell() > 0:
# Make sure to flush only when _buffer is complete
space = self.chunk_size - self._buffer.tell()
if space:
try:
to_write = read(space)
except:
self.abort()
raise
self._buffer.write(to_write)
if len(to_write) < space:
return # EOF or incomplete
self.__flush_buffer()
to_write = read(self.chunk_size)
while to_write and len(to_write) == self.chunk_size:
self.__flush_data(to_write)
to_write = read(self.chunk_size)
self._buffer.write(to_write)
def writelines(self, sequence):
"""Write a sequence of strings to the file.
Does not add separators.
"""
for line in sequence:
self.write(line)
def writeable(self):
return True
def __enter__(self):
"""Support for the context manager protocol.
"""
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Support for the context manager protocol.
Close the file and allow exceptions to propagate.
"""
self.close()
# propagate exceptions
return False
class GridOut(io.IOBase):
"""Class to read data out of GridFS.
"""
def __init__(self, root_collection, file_id=None, file_document=None,
session=None):
"""Read a file from GridFS
Application developers should generally not need to
instantiate this class directly - instead see the methods
provided by :class:`~gridfs.GridFS`.
Either `file_id` or `file_document` must be specified,
`file_document` will be given priority if present. Raises
:class:`TypeError` if `root_collection` is not an instance of
:class:`~pymongo.collection.Collection`.
:Parameters:
- `root_collection`: root collection to read from
- `file_id` (optional): value of ``"_id"`` for the file to read
- `file_document` (optional): file document from
`root_collection.files`
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession` to use for all
commands
.. versionchanged:: 3.8
For better performance and to better follow the GridFS spec,
:class:`GridOut` now uses a single cursor to read all the chunks in
the file.
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.0
Creating a GridOut does not immediately retrieve the file metadata
from the server. Metadata is fetched when first needed.
"""
if not isinstance(root_collection, Collection):
raise TypeError("root_collection must be an "
"instance of Collection")
_disallow_transactions(session)
root_collection = _clear_entity_type_registry(root_collection)
super().__init__()
self.__chunks = root_collection.chunks
self.__files = root_collection.files
self.__file_id = file_id
self.__buffer = EMPTY
self.__chunk_iter = None
self.__position = 0
self._file = file_document
self._session = session
_id = _grid_out_property("_id", "The ``'_id'`` value for this file.")
filename = _grid_out_property("filename", "Name of this file.")
name = _grid_out_property("filename", "Alias for `filename`.")
content_type = _grid_out_property("contentType", "Mime-type for this file.")
length = _grid_out_property("length", "Length (in bytes) of this file.")
chunk_size = _grid_out_property("chunkSize", "Chunk size for this file.")
upload_date = _grid_out_property("uploadDate",
"Date that this file was first uploaded.")
aliases = _grid_out_property("aliases", "List of aliases for this file.")
metadata = _grid_out_property("metadata", "Metadata attached to this file.")
md5 = _grid_out_property("md5", "MD5 of the contents of this file "
"if an md5 sum was created.")
def _ensure_file(self):
if not self._file:
_disallow_transactions(self._session)
self._file = self.__files.find_one({"_id": self.__file_id},
session=self._session)
if not self._file:
raise NoFile("no file in gridfs collection %r with _id %r" %
(self.__files, self.__file_id))
def __getattr__(self, name):
self._ensure_file()
if name in self._file:
return self._file[name]
raise AttributeError("GridOut object has no attribute '%s'" % name)
def readable(self):
return True
def readchunk(self):
"""Reads a chunk at a time. If the current position is within a
chunk the remainder of the chunk is returned.
"""
received = len(self.__buffer)
chunk_data = EMPTY
chunk_size = int(self.chunk_size)
if received > 0:
chunk_data = self.__buffer
elif self.__position < int(self.length):
chunk_number = int((received + self.__position) / chunk_size)
if self.__chunk_iter is None:
self.__chunk_iter = _GridOutChunkIterator(
self, self.__chunks, self._session, chunk_number)
chunk = self.__chunk_iter.next()
chunk_data = chunk["data"][self.__position % chunk_size:]
if not chunk_data:
raise CorruptGridFile("truncated chunk")
self.__position += len(chunk_data)
self.__buffer = EMPTY
return chunk_data
def read(self, size=-1):
"""Read at most `size` bytes from the file (less if there
isn't enough data).
The bytes are returned as an instance of :class:`str` (:class:`bytes`
in python 3). If `size` is negative or omitted all data is read.
:Parameters:
- `size` (optional): the number of bytes to read
.. versionchanged:: 3.8
This method now only checks for extra chunks after reading the
entire file. Previously, this method would check for extra chunks
on every call.
"""
self._ensure_file()
remainder = int(self.length) - self.__position
if size < 0 or size > remainder:
size = remainder
if size == 0:
return EMPTY
received = 0
data = io.BytesIO()
while received < size:
chunk_data = self.readchunk()
received += len(chunk_data)
data.write(chunk_data)
# Detect extra chunks after reading the entire file.
if size == remainder and self.__chunk_iter:
try:
self.__chunk_iter.next()
except StopIteration:
pass
self.__position -= received - size
# Return 'size' bytes and store the rest.
data.seek(size)
self.__buffer = data.read()
data.seek(0)
return data.read(size)
def readline(self, size=-1):
"""Read one line or up to `size` bytes from the file.
:Parameters:
- `size` (optional): the maximum number of bytes to read
"""
remainder = int(self.length) - self.__position
if size < 0 or size > remainder:
size = remainder
if size == 0:
return EMPTY
received = 0
data = io.BytesIO()
while received < size:
chunk_data = self.readchunk()
pos = chunk_data.find(NEWLN, 0, size)
if pos != -1:
size = received + pos + 1
received += len(chunk_data)
data.write(chunk_data)
if pos != -1:
break
self.__position -= received - size
# Return 'size' bytes and store the rest.
data.seek(size)
self.__buffer = data.read()
data.seek(0)
return data.read(size)
def tell(self):
"""Return the current position of this file.
"""
return self.__position
def seek(self, pos, whence=_SEEK_SET):
"""Set the current position of this file.
:Parameters:
- `pos`: the position (or offset if using relative
positioning) to seek to
- `whence` (optional): where to seek
from. :attr:`os.SEEK_SET` (``0``) for absolute file
positioning, :attr:`os.SEEK_CUR` (``1``) to seek relative
to the current position, :attr:`os.SEEK_END` (``2``) to
seek relative to the file's end.
"""
if whence == _SEEK_SET:
new_pos = pos
elif whence == _SEEK_CUR:
new_pos = self.__position + pos
elif whence == _SEEK_END:
new_pos = int(self.length) + pos
else:
raise IOError(22, "Invalid value for `whence`")
if new_pos < 0:
raise IOError(22, "Invalid value for `pos` - must be positive")
# Optimization, continue using the same buffer and chunk iterator.
if new_pos == self.__position:
return
self.__position = new_pos
self.__buffer = EMPTY
if self.__chunk_iter:
self.__chunk_iter.close()
self.__chunk_iter = None
def seekable(self):
return True
def __iter__(self):
"""Return an iterator over all of this file's data.
The iterator will return lines (delimited by ``b'\\n'``) of
:class:`bytes`. This can be useful when serving files
using a webserver that handles such an iterator efficiently.
.. versionchanged:: 3.8
The iterator now raises :class:`CorruptGridFile` when encountering
any truncated, missing, or extra chunk in a file. The previous
behavior was to only raise :class:`CorruptGridFile` on a missing
chunk.
.. versionchanged:: 4.0
The iterator now iterates over *lines* in the file, instead
of chunks, to conform to the base class :py:class:`io.IOBase`.
Use :meth:`GridOut.readchunk` to read chunk by chunk instead
of line by line.
"""
return self
def close(self):
"""Make GridOut more generically file-like."""
if self.__chunk_iter:
self.__chunk_iter.close()
self.__chunk_iter = None
super().close()
def write(self, value):
raise io.UnsupportedOperation('write')
def writelines(self, lines):
raise io.UnsupportedOperation('writelines')
def writable(self):
return False
def __enter__(self):
"""Makes it possible to use :class:`GridOut` files
with the context manager protocol.
"""
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Makes it possible to use :class:`GridOut` files
with the context manager protocol.
"""
self.close()
return False
def fileno(self):
raise io.UnsupportedOperation('fileno')
def flush(self):
# GridOut is read-only, so flush does nothing.
pass
def isatty(self):
return False
def truncate(self, size=None):
# See https://docs.python.org/3/library/io.html#io.IOBase.writable
# for why truncate has to raise.
raise io.UnsupportedOperation('truncate')
# Override IOBase.__del__ otherwise it will lead to __getattr__ on
# __IOBase_closed which calls _ensure_file and potentially performs I/O.
# We cannot do I/O in __del__ since it can lead to a deadlock.
def __del__(self):
pass
class _GridOutChunkIterator(object):
"""Iterates over a file's chunks using a single cursor.
Raises CorruptGridFile when encountering any truncated, missing, or extra
chunk in a file.
"""
def __init__(self, grid_out, chunks, session, next_chunk):
self._id = grid_out._id
self._chunk_size = int(grid_out.chunk_size)
self._length = int(grid_out.length)
self._chunks = chunks
self._session = session
self._next_chunk = next_chunk
self._num_chunks = math.ceil(float(self._length) / self._chunk_size)
self._cursor = None
def expected_chunk_length(self, chunk_n):
if chunk_n < self._num_chunks - 1:
return self._chunk_size
return self._length - (self._chunk_size * (self._num_chunks - 1))
def __iter__(self):
return self
def _create_cursor(self):
filter = {"files_id": self._id}
if self._next_chunk > 0:
filter["n"] = {"$gte": self._next_chunk}
_disallow_transactions(self._session)
self._cursor = self._chunks.find(filter, sort=[("n", 1)],
session=self._session)
def _next_with_retry(self):
"""Return the next chunk and retry once on CursorNotFound.
We retry on CursorNotFound to maintain backwards compatibility in
cases where two calls to read occur more than 10 minutes apart (the
server's default cursor timeout).
"""
if self._cursor is None:
self._create_cursor()
try:
return self._cursor.next()
except CursorNotFound:
self._cursor.close()
self._create_cursor()
return self._cursor.next()
def next(self):
try:
chunk = self._next_with_retry()
except StopIteration:
if self._next_chunk >= self._num_chunks:
raise
raise CorruptGridFile("no chunk #%d" % self._next_chunk)
if chunk["n"] != self._next_chunk:
self.close()
raise CorruptGridFile(
"Missing chunk: expected chunk #%d but found "
"chunk with n=%d" % (self._next_chunk, chunk["n"]))
if chunk["n"] >= self._num_chunks:
# According to spec, ignore extra chunks if they are empty.
if len(chunk["data"]):
self.close()
raise CorruptGridFile(
"Extra chunk found: expected %d chunks but found "
"chunk with n=%d" % (self._num_chunks, chunk["n"]))
expected_length = self.expected_chunk_length(chunk["n"])
if len(chunk["data"]) != expected_length:
self.close()
raise CorruptGridFile(
"truncated chunk #%d: expected chunk length to be %d but "
"found chunk with length %d" % (
chunk["n"], expected_length, len(chunk["data"])))
self._next_chunk += 1
return chunk
__next__ = next
def close(self):
if self._cursor:
self._cursor.close()
self._cursor = None
class GridOutIterator(object):
def __init__(self, grid_out, chunks, session):
self.__chunk_iter = _GridOutChunkIterator(grid_out, chunks, session, 0)
def __iter__(self):
return self
def next(self):
chunk = self.__chunk_iter.next()
return bytes(chunk["data"])
__next__ = next
class GridOutCursor(Cursor):
"""A cursor / iterator for returning GridOut objects as the result
of an arbitrary query against the GridFS files collection.
"""
def __init__(self, collection, filter=None, skip=0, limit=0,
no_cursor_timeout=False, sort=None, batch_size=0,
session=None):
"""Create a new cursor, similar to the normal
:class:`~pymongo.cursor.Cursor`.
Should not be called directly by application developers - see
the :class:`~gridfs.GridFS` method :meth:`~gridfs.GridFS.find` instead.
.. versionadded 2.7
.. seealso:: The MongoDB documentation on `cursors <https://dochub.mongodb.org/core/cursors>`_.
"""
_disallow_transactions(session)
collection = _clear_entity_type_registry(collection)
# Hold on to the base "fs" collection to create GridOut objects later.
self.__root_collection = collection
super(GridOutCursor, self).__init__(
collection.files, filter, skip=skip, limit=limit,
no_cursor_timeout=no_cursor_timeout, sort=sort,
batch_size=batch_size, session=session)
def next(self):
"""Get next GridOut object from cursor.
"""
_disallow_transactions(self.session)
# Work around "super is not iterable" issue in Python 3.x
next_file = super(GridOutCursor, self).next()
return GridOut(self.__root_collection, file_document=next_file,
session=self.session)
__next__ = next
def add_option(self, *args, **kwargs):
raise NotImplementedError("Method does not exist for GridOutCursor")
def remove_option(self, *args, **kwargs):
raise NotImplementedError("Method does not exist for GridOutCursor")
def _clone_base(self, session):
"""Creates an empty GridOutCursor for information to be copied into.
"""
return GridOutCursor(self.__root_collection, session=session)
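A hedged usage sketch (not part of gridfs/grid_file.py): GridIn and GridOut are normally not instantiated directly; the higher-level gridfs.GridFS wrapper creates them, roughly as follows (assumes a MongoDB server reachable on localhost):
import gridfs
from pymongo import MongoClient

db = MongoClient().test_database
fs = gridfs.GridFS(db)

# put() writes through a GridIn and returns the new file's _id.
file_id = fs.put(b"hello gridfs", filename="hello.txt")

# get() returns a GridOut, which supports read(), readline(), seek(), and line iteration.
grid_out = fs.get(file_id)
print(grid_out.read())    # b'hello gridfs'
print(grid_out.filename)  # 'hello.txt'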
| 35.721461
| 103
| 0.599035
| 3,709
| 31,292
| 4.830682
| 0.149636
| 0.014567
| 0.010158
| 0.004019
| 0.291399
| 0.250544
| 0.213038
| 0.186136
| 0.154211
| 0.142937
| 0
| 0.005351
| 0.307267
| 31,292
| 875
| 104
| 35.762286
| 0.821193
| 0.293078
| 0
| 0.385892
| 0
| 0
| 0.102517
| 0
| 0
| 0
| 0
| 0
| 0.002075
| 1
| 0.13278
| false
| 0.006224
| 0.029046
| 0.018672
| 0.304979
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc02e2f67f44eb696a821c6397117531267c2ddc
| 496
|
py
|
Python
|
src/serve_files.py
|
eventh/m3u8looper
|
9c4ae166e9af4679cf64b19e3c3efc7bbdaed5a5
|
[
"MIT"
] | null | null | null |
src/serve_files.py
|
eventh/m3u8looper
|
9c4ae166e9af4679cf64b19e3c3efc7bbdaed5a5
|
[
"MIT"
] | null | null | null |
src/serve_files.py
|
eventh/m3u8looper
|
9c4ae166e9af4679cf64b19e3c3efc7bbdaed5a5
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Serve current folder files in a HTTP webserver.
"""
import socketserver
from threading import Thread
from http.server import SimpleHTTPRequestHandler

PORT = 8000


def start_http_server(port=PORT):
    httpd = socketserver.TCPServer(("", port), SimpleHTTPRequestHandler)
    thread = Thread(target = httpd.serve_forever)
    thread.start()
    return thread


if __name__ == '__main__':
    thread = start_http_server()
    thread.join()
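A hedged usage sketch (not in the original script): importing the helper and fetching the directory listing it serves. The import name is hypothetical, and note that start_http_server() returns only the serving thread, not the TCPServer, so the sketch leaves the server running because the module exposes no shutdown handle:
from urllib.request import urlopen

from serve_files import start_http_server, PORT  # hypothetical import of the script above

start_http_server()
with urlopen(f'http://127.0.0.1:{PORT}/') as response:
    print(response.status)  # 200 when the current folder's listing is served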
| 20.666667
| 76
| 0.709677
| 58
| 496
| 5.844828
| 0.603448
| 0.088496
| 0.088496
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014706
| 0.177419
| 496
| 23
| 77
| 21.565217
| 0.816176
| 0.185484
| 0
| 0
| 0
| 0
| 0.020253
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.25
| 0
| 0.416667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc03078d9d14b23c740018bcdf9069c213af00f0
| 7,393
|
py
|
Python
|
pypy/module/__builtin__/test/test_compile.py
|
yxzoro/pypy
|
6e47b3d3e5513d9639a21554963a6ace172ccfee
|
[
"Apache-2.0",
"OpenSSL"
] | null | null | null |
pypy/module/__builtin__/test/test_compile.py
|
yxzoro/pypy
|
6e47b3d3e5513d9639a21554963a6ace172ccfee
|
[
"Apache-2.0",
"OpenSSL"
] | null | null | null |
pypy/module/__builtin__/test/test_compile.py
|
yxzoro/pypy
|
6e47b3d3e5513d9639a21554963a6ace172ccfee
|
[
"Apache-2.0",
"OpenSSL"
] | null | null | null |
# coding: utf-8
class AppTestCompile:
def test_simple(self):
import sys
co = compile('1+2', '?', 'eval')
assert eval(co) == 3
co = compile(memoryview(b'1+2'), '?', 'eval')
assert eval(co) == 3
exc = raises(ValueError, compile, chr(0), '?', 'eval')
assert str(exc.value) == "source code string cannot contain null bytes"
compile("from __future__ import with_statement", "<test>", "exec")
raises(SyntaxError, compile, '-', '?', 'eval')
raises(SyntaxError, compile, '"\\xt"', '?', 'eval')
raises(ValueError, compile, '1+2', '?', 'maybenot')
raises(ValueError, compile, "\n", "<string>", "exec", 0xff)
raises(TypeError, compile, '1+2', 12, 34)
def test_error_message(self):
import re
compile('# -*- coding: iso-8859-15 -*-\n', 'dummy', 'exec')
compile(b'\xef\xbb\xbf\n', 'dummy', 'exec')
compile(b'\xef\xbb\xbf# -*- coding: utf-8 -*-\n', 'dummy', 'exec')
exc = raises(SyntaxError, compile,
b'# -*- coding: fake -*-\n', 'dummy', 'exec')
assert 'fake' in str(exc.value)
exc = raises(SyntaxError, compile,
b'\xef\xbb\xbf# -*- coding: iso-8859-15 -*-\n', 'dummy', 'exec')
assert 'iso-8859-15' in str(exc.value)
assert 'BOM' in str(exc.value)
exc = raises(SyntaxError, compile,
b'\xef\xbb\xbf# -*- coding: fake -*-\n', 'dummy', 'exec')
assert 'fake' in str(exc.value)
assert 'BOM' in str(exc.value)
def test_unicode(self):
try:
compile(u'-', '?', 'eval')
except SyntaxError as e:
assert e.lineno == 1
def test_unicode_encoding(self):
code = "# -*- coding: utf-8 -*-\npass\n"
compile(code, "tmp", "exec")
def test_bytes(self):
code = b"# -*- coding: utf-8 -*-\npass\n"
compile(code, "tmp", "exec")
c = compile(b"# coding: latin1\nfoo = 'caf\xe9'\n", "<string>", "exec")
ns = {}
exec(c, ns)
assert ns['foo'] == 'café'
assert eval(b"# coding: latin1\n'caf\xe9'\n") == 'café'
def test_memoryview(self):
m = memoryview(b'2 + 1')
co = compile(m, 'baz', 'eval')
assert eval(co) == 3
assert eval(m) == 3
ns = {}
exec(memoryview(b'r = 2 + 1'), ns)
assert ns['r'] == 3
def test_recompile_ast(self):
import _ast
# raise exception when node type doesn't match with compile mode
co1 = compile('print(1)', '<string>', 'exec', _ast.PyCF_ONLY_AST)
raises(TypeError, compile, co1, '<ast>', 'eval')
co2 = compile('1+1', '<string>', 'eval', _ast.PyCF_ONLY_AST)
tree = compile(co2, '<ast>', 'eval')
assert compile(co2, '<ast>', 'eval', _ast.PyCF_ONLY_AST) is co2
def test_leading_newlines(self):
src = """
def fn(): pass
"""
co = compile(src, 'mymod', 'exec')
firstlineno = co.co_firstlineno
assert firstlineno == 2
def test_null_bytes(self):
raises(ValueError, compile, '\x00', 'mymod', 'exec', 0)
src = "#abc\x00def\n"
raises(ValueError, compile, src, 'mymod', 'exec')
raises(ValueError, compile, src, 'mymod', 'exec', 0)
def test_null_bytes_flag(self):
try:
from _ast import PyCF_ACCEPT_NULL_BYTES
except ImportError:
skip('PyPy only (requires _ast.PyCF_ACCEPT_NULL_BYTES)')
raises(SyntaxError, compile, '\x00', 'mymod', 'exec',
PyCF_ACCEPT_NULL_BYTES)
src = "#abc\x00def\n"
compile(src, 'mymod', 'exec', PyCF_ACCEPT_NULL_BYTES) # works
def test_compile_regression(self):
"""Clone of the part of the original test that was failing."""
import ast
codestr = '''def f():
"""doc"""
try:
assert False
except AssertionError:
return (True, f.__doc__)
else:
return (False, f.__doc__)
'''
def f(): """doc"""
values = [(-1, __debug__, f.__doc__),
(0, True, 'doc'),
(1, False, 'doc'),
(2, False, None)]
for optval, debugval, docstring in values:
# test both direct compilation and compilation via AST
codeobjs = []
codeobjs.append(
compile(codestr, "<test>", "exec", optimize=optval))
tree = ast.parse(codestr)
codeobjs.append(compile(tree, "<test>", "exec", optimize=optval))
for i, code in enumerate(codeobjs):
print(optval, debugval, docstring, i)
ns = {}
exec(code, ns)
rv = ns['f']()
assert rv == (debugval, docstring)
def test_assert_remove(self):
"""Test removal of the asserts with optimize=1."""
import ast
code = """def f():
assert False
"""
tree = ast.parse(code)
for to_compile in [code, tree]:
compiled = compile(to_compile, "<test>", "exec", optimize=1)
ns = {}
exec(compiled, ns)
ns['f']()
def test_docstring_remove(self):
"""Test removal of docstrings with optimize=2."""
import ast
import marshal
code = """
'module_doc'
def f():
'func_doc'
class C:
'class_doc'
"""
tree = ast.parse(code)
for to_compile in [code, tree]:
compiled = compile(to_compile, "<test>", "exec", optimize=2)
ns = {}
exec(compiled, ns)
assert '__doc__' not in ns
assert ns['f'].__doc__ is None
assert ns['C'].__doc__ is None
# Check that the docstrings are gone from the bytecode and not just
# inaccessible.
marshalled = str(marshal.dumps(compiled))
assert 'module_doc' not in marshalled
assert 'func_doc' not in marshalled
assert 'class_doc' not in marshalled
class TestOptimizeO:
"""Test interaction of -O flag and optimize parameter of compile."""
def setup_method(self, method):
space = self.space
self._sys_debug = space.sys.debug
# imitate -O
space.sys.debug = False
def teardown_method(self, method):
self.space.sys.debug = self._sys_debug
def test_O_optmize_0(self):
"""Test that assert is not ignored if -O flag is set but optimize=0."""
space = self.space
w_res = space.appexec([], """():
assert False # check that our -O imitation hack works
try:
exec(compile('assert False', '', 'exec', optimize=0))
except AssertionError:
return True
else:
return False
""")
assert space.unwrap(w_res)
def test_O_optimize__1(self):
"""Test that assert is ignored with -O and optimize=-1."""
space = self.space
space.appexec([], """():
exec(compile('assert False', '', 'exec', optimize=-1))
""")
# TODO: Check the value of __debug__ inside of the compiled block!
# According to the documentation, it should follow the optimize flag.
# However, cpython3.5.0a0 behaves the same way as PyPy (__debug__ follows
# -O, -OO flags of the interpreter).
| 34.226852
| 79
| 0.538212
| 880
| 7,393
| 4.392045
| 0.238636
| 0.027167
| 0.035705
| 0.016818
| 0.255627
| 0.2
| 0.148254
| 0.129625
| 0.115653
| 0.09806
| 0
| 0.017388
| 0.315434
| 7,393
| 215
| 80
| 34.386047
| 0.746295
| 0.11051
| 0
| 0.27381
| 0
| 0
| 0.239333
| 0.004282
| 0
| 0
| 0.000612
| 0.004651
| 0.190476
| 1
| 0.107143
| false
| 0.017857
| 0.059524
| 0
| 0.202381
| 0.011905
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc035a3b69dad59dad81dc8e5b68a8db4a2f4aff
| 12,207
|
py
|
Python
|
tickers_graphing_module.py
|
huangbrandon432/Investing-Trading-Tool
|
370015b906b7ee90c0fb48ca69865ac7428b3917
|
[
"BSD-3-Clause"
] | null | null | null |
tickers_graphing_module.py
|
huangbrandon432/Investing-Trading-Tool
|
370015b906b7ee90c0fb48ca69865ac7428b3917
|
[
"BSD-3-Clause"
] | null | null | null |
tickers_graphing_module.py
|
huangbrandon432/Investing-Trading-Tool
|
370015b906b7ee90c0fb48ca69865ac7428b3917
|
[
"BSD-3-Clause"
] | null | null | null |
import yfinance as yf
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import pandas as pd
from IPython.display import Markdown
import numpy as np
from datetime import date, timedelta
def plot_and_get_info(ticker, start = None, end = None, ma = 'yes'):
ticker_obj = yf.Ticker(ticker)
ticker_hist = ticker_obj.history(period = 'max')
if start and end:
start_date, end_date = start, end
else:
start_date, end_date = ticker_hist.index[0], ticker_hist.index[-1]
frame = ticker_hist.loc[start_date:end_date]
closing_prices = frame['Close']
volume = frame['Volume']
fig = make_subplots(rows=2, cols=1,
shared_xaxes=True,
vertical_spacing=0.03, row_heights = [0.8, 0.2])
fig.add_trace(go.Scatter(x = closing_prices.index, y = closing_prices, mode = 'lines', name = 'Close'), row = 1, col = 1)
if ma == 'yes':
closing_prices_ma = frame['Close'].rolling(7).mean()
fig.add_trace(go.Scatter(x = closing_prices_ma.index, y = closing_prices_ma, mode = 'lines', name = '7D Close Moving Average'), row = 1, col = 1)
fig.add_trace(go.Bar(x = closing_prices.index, y = volume, name = 'Volume'), row=2, col=1)
fig.update_xaxes(rangeslider_visible = True, rangeslider_thickness = 0.1, row=2, col=1)
fig.update_yaxes(title_text="Price", row=1, col=1)
fig.update_layout(title=ticker, height = 600,
xaxis=dict(
rangeselector=dict(
buttons=list([
dict(count=7,
label="1w",
step="day",
stepmode="backward"),
dict(count=1,
label="1m",
step="month",
stepmode="backward"),
dict(count=3,
label="3m",
step="month",
stepmode="backward"),
dict(count=6,
label="6m",
step="month",
stepmode="backward"),
dict(count=1,
label="YTD",
step="year",
stepmode="todate"),
dict(count=1,
label="1y",
step="year",
stepmode="backward"),
dict(step="all")
])
),
type="date"
)
)
fig.show()
start_price, end_price = frame.iloc[0]['Close'], frame.iloc[-1]['Close']
def printmd(string):
display(Markdown(string))
printmd('Given Timeframe:')
printmd("Return: {:.2f}%".format((end_price - start_price)/start_price*100))
try:
ticker_info = ticker_obj.info
print()
printmd('Business Summary: ' + ticker_info['longBusinessSummary'])
market_cap = str(round(ticker_info['marketCap']/1000000000,2)) + 'B'
longname = ticker_info['longName']
sector = ticker_info['sector']
industry = ticker_info['industry']
country = ticker_info['country']
avg10d_vol = str(round(ticker_info['averageDailyVolume10Day']/1000000,2)) + 'M'
most_recent_vol = str(round(ticker_info['volume']/1000000,2)) + 'M'
try:
beta = round(ticker_info['beta'],2)
except:
beta = ticker_info['beta']
try:
ps_trailing_12mo = round(ticker_info['priceToSalesTrailing12Months'],2)
except:
ps_trailing_12mo = ticker_info['priceToSalesTrailing12Months']
try:
forwardpe = round(ticker_info['forwardPE'],2)
except:
forwardpe = ticker_info['forwardPE']
pegratio = ticker_info['pegRatio']
forwardeps = ticker_info['forwardEps']
trailingeps = ticker_info['trailingEps']
shares_outstanding = str(round(ticker_info['sharesOutstanding']/1000000,2)) + 'M'
shares_short = str(round(ticker_info['sharesShort']/1000000,2)) + 'M'
shares_short_perc_outstanding = str(round(ticker_info['sharesPercentSharesOut']*100,2)) + '%'
floatshares = str(round(ticker_info['floatShares']/1000000,2)) + 'M'
try:
short_perc_float = str(round(ticker_info['shortPercentOfFloat']*100,2)) + '%'
except:
short_perc_float = ticker_info['shortPercentOfFloat']
perc_institutions = str(round(ticker_info['heldPercentInstitutions']*100,2)) + '%'
perc_insiders = str(round(ticker_info['heldPercentInsiders']*100,2)) + '%'
stock_info = [market_cap, longname, sector, industry, country, beta, most_recent_vol, avg10d_vol, ps_trailing_12mo, forwardpe, pegratio, forwardeps, trailingeps,
shares_outstanding, perc_institutions, perc_insiders, shares_short, shares_short_perc_outstanding, floatshares, short_perc_float]
stock_info_df = pd.DataFrame(stock_info, index = ['Market Cap', 'Name', 'Sector', 'Industry', 'Country', 'Beta', 'Day Volume (Most recent)',
'Avg 10D Volume', 'P/S Trailing 12mo', 'Forward P/E', 'PEG Ratio', 'Forward EPS',
'Trailing EPS', 'Shares Outstanding', 'Institutions % of Oustanding',
'Insiders % of Oustanding', 'Shares Short (Prev Mo)', 'Short % of Outstanding (Prev Mo)',
'Shares Float', 'Short % of Float (Prev Mo)'], columns = ['Info'])
print()
display(stock_info_df)
except:
pass
def compare_charts(tickers = [], start = None, end = None, ma = 'yes'):
if len(tickers) <= 1:
raise Exception("Please enter at least two tickers to compare")
def normalize_data(column):
min = column.min()
max = column.max()
# time series normalization
# y will be a column in a dataframe
y = (column - min) / (max - min)
return y
def printmd(string):
display(Markdown(string))
start_end_prices = {}
closing_90_days = []
fig = go.Figure()
for ticker in tickers:
ticker_obj = yf.Ticker(ticker)
ticker_hist = ticker_obj.history(period = 'max')
if start and end:
start_date, end_date = start, end
else:
start_date, end_date = ticker_hist.index[0], ticker_hist.index[-1]
frame = ticker_hist.loc[start_date:end_date].copy()
frame['Norm Close'] = normalize_data(frame['Close'])
closing_prices = frame['Norm Close']
start_end_prices[ticker] = {'start_price': frame.iloc[0]['Close'], 'end_price': frame.iloc[-1]['Close']}
closing_90_days.append(closing_prices.iloc[-90:].to_frame().rename(columns = {'Norm Close': ticker}))
fig.add_trace(go.Scatter(x = closing_prices.index, y = closing_prices, mode = 'lines', name = ticker + ' Norm Close'))
if ma == 'yes':
closing_prices_ma = frame['Norm Close'].rolling(7).mean()
fig.add_trace(go.Scatter(x = closing_prices_ma.index, y = closing_prices_ma, mode = 'lines', name = ticker + '7D Close Moving Average'))
fig.update_layout(title = ', '.join(tickers) + ' Comparison', yaxis_title = 'Norm Price')
fig.update_layout(height = 600,
xaxis=dict(
rangeselector=dict(
buttons=list([
dict(count=7,
label="1w",
step="day",
stepmode="backward"),
dict(count=1,
label="1m",
step="month",
stepmode="backward"),
dict(count=3,
label="3m",
step="month",
stepmode="backward"),
dict(count=6,
label="6m",
step="month",
stepmode="backward"),
dict(count=1,
label="YTD",
step="year",
stepmode="todate"),
dict(count=1,
label="1y",
step="year",
stepmode="backward"),
dict(step="all")
])
),
rangeslider=dict(
visible=True, thickness = 0.1
),
type="date"
)
)
fig.show()
printmd('Given Timeframe:')
for ticker in tickers:
start_price, end_price = start_end_prices[ticker]['start_price'], start_end_prices[ticker]['end_price']
printmd(ticker + " Return: {:.2f}%".format((end_price - start_price)/start_price*100))
if len(tickers) > 2:
concat_closing_90_days = pd.concat(closing_90_days, axis = 1)
print('\n')
printmd("Last 90 Days Close Pearson Correlation Matrix: ")
display(concat_closing_90_days.corr())
fig2 = px.imshow(concat_closing_90_days.corr(), color_continuous_scale = 'blues', title = 'Last 90 Days Close Pearson Correlation Heatmap',
width = 500, height = 400)
fig2.show()
else:
fig2 = go.Figure()
fig2.add_trace(go.Scatter(x = closing_90_days[0].loc[:, tickers[0]], y = closing_90_days[1].loc[:, tickers[1]], mode = 'markers', name = 'Norm Close'))
fig2.update_layout(title = ', '.join(tickers) + ' Last 90 Days Correlation', xaxis_title = tickers[0], yaxis_title = tickers[1], width = 1000, height = 500)
fig2.show()
printmd("Pearson Correlation: " + str(round(closing_90_days[0].loc[:, tickers[0]].corr(closing_90_days[1].loc[:, tickers[1]]),3)))
print()
def plot_buysell_points(ticker, tradesdf, crypto = 'no'):
trade_history = tradesdf[tradesdf['Symbol'] == ticker].reset_index(drop=True)
if crypto == 'yes':
ticker += '-USD'
ticker_obj = yf.Ticker(ticker)
ticker_hist = ticker_obj.history(period = 'max')
if len(ticker_hist) == 0:
return
start_date = (pd.to_datetime(trade_history.loc[0, 'Date']) - timedelta(150)).strftime("%Y-%m-%d")
today_date = date.today().strftime("%Y-%m-%d")
frame = ticker_hist.loc[start_date:today_date]
closing_prices = frame['Close']
fig = go.Figure()
fig.add_trace(go.Scatter(x = closing_prices.index, y = closing_prices, mode = 'lines', name = 'Close'))
for i in range(len(trade_history)):
trade_date = trade_history.loc[i, 'Date']
price = trade_history.loc[i, 'Avg_Price']
quantity = trade_history.loc[i, 'Quantity']
total = trade_history.loc[i, 'Total']
side = trade_history.loc[i, 'Side']
gain = trade_history.loc[i, 'Gain']
perc_gain = trade_history.loc[i, '% Gain']
if side == 'buy':
fig.add_annotation(x = trade_date, y = price, text = f'BB', showarrow = True, arrowhead = 1,
ax = -0.5, ay = -30, arrowsize = 1.5, align = 'left',
hovertext = f'B, P: {price}, Q: {quantity}, T: {total}, D: {trade_date}')
if side == 'sell':
fig.add_annotation(x = trade_date, y = price, text = f'SS', showarrow = True, arrowhead = 1,
ax = 20, ay = -30, arrowsize = 1.5, align = 'right',
hovertext = f'S, P: {price}, Q: {quantity}, T: {total}, D: {trade_date}, G: {gain}, %G: {perc_gain}')
fig.update_layout(title = ticker, yaxis_title = 'Price')
fig.show()
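A hedged usage sketch (not part of the module): the plotting helpers lean on IPython's display() and Plotly's fig.show(), so they are intended for notebook use. Typical calls, with hypothetical ticker symbols and date ranges, might look like this:
from tickers_graphing_module import plot_and_get_info, compare_charts  # hypothetical import

# Single ticker with a 7-day moving-average overlay over a chosen window.
plot_and_get_info('AAPL', start='2021-01-01', end='2021-06-30', ma='yes')

# Normalized comparison of several tickers plus a last-90-days correlation view.
compare_charts(['AAPL', 'MSFT', 'GOOG'], start='2021-01-01', end='2021-06-30')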
| 35.178674
| 169
| 0.530843
| 1,332
| 12,207
| 4.693694
| 0.215465
| 0.041587
| 0.03119
| 0.028791
| 0.430902
| 0.352047
| 0.286628
| 0.261996
| 0.261996
| 0.252079
| 0
| 0.029986
| 0.341607
| 12,207
| 346
| 170
| 35.280347
| 0.747916
| 0.004833
| 0
| 0.468085
| 0
| 0.008511
| 0.135046
| 0.010211
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025532
| false
| 0.004255
| 0.038298
| 0
| 0.07234
| 0.055319
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc072ef6a205b171dfc4d3510829d73d11a5f833
| 2,360
|
py
|
Python
|
tests/test_aggregate_stats_design.py
|
bids-standard/bids-statsmodels-design-synthesizer
|
d8a1dac3891760990082c2d3aa75a1edda44ffa0
|
[
"MIT"
] | null | null | null |
tests/test_aggregate_stats_design.py
|
bids-standard/bids-statsmodels-design-synthesizer
|
d8a1dac3891760990082c2d3aa75a1edda44ffa0
|
[
"MIT"
] | 1
|
2021-05-12T21:53:53.000Z
|
2021-05-12T22:26:09.000Z
|
tests/test_aggregate_stats_design.py
|
bids-standard/bids-statsmodels-design-synthesizer
|
d8a1dac3891760990082c2d3aa75a1edda44ffa0
|
[
"MIT"
] | 3
|
2021-05-06T12:44:04.000Z
|
2021-05-12T21:42:59.000Z
|
#!/usr/bin/env python
"""Tests for `bids_statsmodels_design_synthesizer` package."""
import pytest
import subprocess as sp
from pathlib import Path
SYNTHESIZER = "aggregate_stats_design.py"
from bids_statsmodels_design_synthesizer import aggregate_stats_design as synth_mod
# from bids_statsmodels_design_synthesizer import Path(SYNTHESIZER).stem as synth_mod
EXAMPLE_USER_ARGS = {
"OUTPUT_TSV": "aggregated_design.tsv",
"MODEL": "data/ds000003/models/model-001_smdl.json",
"EVENTS_TSV": "data/ds000003/sub-01/func/sub-01_task-rhymejudgment_events.tsv",
"DURATION": 320,
}
def test_cli_help():
with pytest.raises(sp.CalledProcessError):
output = sp.check_output([SYNTHESIZER, "-h"])
with pytest.raises(sp.CalledProcessError):
output = sp.check_output([SYNTHESIZER, "--non-existent"])
def test_design_aggregation_function():
synth_mod.main(EXAMPLE_USER_ARGS)
def test_minimal_cli_functionality():
"""
We roughly want to implement the equivalent of the following:
from bids.analysis import Analysis
from bids.layout import BIDSLayout
layout = BIDSLayout("data/ds000003")
analysis = Analysis(model="data/ds000003/models/model-001_smdl.json",layout=layout)
analysis.setup()
more specifically we want to reimplement this line
https://github.com/bids-standard/pybids/blob/b6cd0f6787230ce976a374fbd5fce650865752a3/bids/analysis/analysis.py#L282
"""
bids_dir = Path(__file__).parent / "data/ds000003"
model = "model-001_smdl.json"
arg_list = " " .join([f"""--{k.lower().replace("_","-")}={v}""" for k,v in EXAMPLE_USER_ARGS.items()])
cmd = f"{SYNTHESIZER} {arg_list}"
output = sp.check_output(cmd.split())
@pytest.mark.xfail(reason="Container not setup for boutiques yet")
def test_minimal_cli_functionality_using_boutiques():
"""This might be nice to do. boutiques sets /bin/sh as the entrypoint for the contain to /bin/sh so this should be tweaked to have the conda env and the pip installed package working correctly"""
boutiques_dir = Path(__file__).parent.parent / "boutiques"
cmd = f"""
bosh
exec
launch
{boutiques_dir}/bids-app-bids-statsmodels-design-synthesizer.json
{boutiques_dir}/invocation.json
"""
output = sp.check_output(cmd.split())
| 36.875
| 199
| 0.715678
| 306
| 2,360
| 5.316993
| 0.437909
| 0.036878
| 0.051629
| 0.078672
| 0.250768
| 0.213891
| 0.129072
| 0.129072
| 0.081131
| 0.081131
| 0
| 0.038265
| 0.169492
| 2,360
| 63
| 200
| 37.460317
| 0.791837
| 0.335169
| 0
| 0.114286
| 0
| 0
| 0.339498
| 0.18362
| 0
| 0
| 0
| 0
| 0
| 1
| 0.114286
| false
| 0
| 0.114286
| 0
| 0.228571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc0e5695633a29e1789efba016b66fc96fcedf4a
| 15,518
|
py
|
Python
|
pangenome_fluidity.py
|
PlantDr430/CSU_scripts
|
8ed9e1dc014b099ce68d77ce5c8747217c230e61
|
[
"MIT"
] | 1
|
2020-03-02T04:26:21.000Z
|
2020-03-02T04:26:21.000Z
|
pangenome_fluidity.py
|
PlantDr430/CSU_scripts
|
8ed9e1dc014b099ce68d77ce5c8747217c230e61
|
[
"MIT"
] | null | null | null |
pangenome_fluidity.py
|
PlantDr430/CSU_scripts
|
8ed9e1dc014b099ce68d77ce5c8747217c230e61
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
'''
This script follows formulas put forth in Kislyuk et al. (2011) to calculate genome
fluidity of a pangenome dataset. Variance and standard error are estimated as total
variance containing both the variance due to subsampling all possible combinations
(without replacement) of N genomes from the total pool of genomes and the variance
due to the limited number of sampled genomes (variance of the pangenome) (Kislyuk et al. 2011).
However, the script has a default max number of subsamples set to 250,000 for each N genomes.
This can be altered with the -max_sub / --max_subsamples flag or turned off with the --max_off flag.
Turning the max_off will force calculations to be done on all possible subsample combinations
of N genomes. For samples of N genomes that were stopped at the max number of subsamples, the subsamples
are sampled WITH replacement and variance is calculated with a degree of freedom = 1 (i.e. n - 1).
Results are a text file of fluidity, variance, and standard error for all N genome samples
and a figure of pangenome fluidity with shaded regions showing total standard error with an
exponential regression fit.
Notes
1. This will only work if you have at least 5 isolates to make up your pangenome.
2. If you have 5 isolates your graph will probably not look pretty as it's difficult
to fit with such a low number of samples.
'''
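# For reference, the quantity estimated below is genome fluidity as defined by
# Kislyuk et al. (2011): for N genomes, fluidity = 2/(N*(N-1)) * sum over all
# genome pairs (k, l) of (U_k + U_l) / (M_k + M_l), where U_k is the number of
# gene clusters unique to genome k within the pair and M_k is its total number
# of clusters; see create_pair_dictionary() and compute_fluidity_all_genomes().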
import os, sys, re, argparse, random, itertools, scipy, warnings, subprocess
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from multiprocessing import Pool
from itertools import combinations
from collections import OrderedDict
from collections.abc import Iterable
from scipy.optimize import curve_fit, differential_evolution
rundir = os.getcwd()
class MyFormatter(argparse.RawTextHelpFormatter):
def __init__(self, prog):
super(MyFormatter, self).__init__(prog, max_help_position=48)
parser = argparse.ArgumentParser(
usage='./%(prog)s [options] -i orthogroups -o output_folder',
description = ''' Performs multiple bootstraps and calculates genome fluidity
from a pangenome dataset (orthogroups).''',
epilog = """Written by Stephen A. Wyka (2019)""",
formatter_class = MyFormatter)
parser.add_argument(
'-i',
'--input',
required = True,
help = 'Orthogroups file, see format in READ.me',
metavar=''
)
parser.add_argument(
'-o',
'--out',
required = True,
help = 'Output folder',
metavar=''
)
parser.add_argument(
'-c',
'--cpus',
type=int,
default=1,
help = 'Number of cores to use for multiprocessing [default: 1]',
metavar=''
)
parser.add_argument(
'-max_sub',
'--max_subsamples',
type=int,
default=250000,
help = 'Max number of subsamples to run on N genomes sampled. [default: 250000]',
metavar=''
)
parser.add_argument(
'--max_off',
action='store_true',
    help = 'Turn off the max subsamples. This will cause the script to sample ALL possible combinations '\
    'for N genomes',
)
parser.add_argument(
'-p',
'--prefix',
help = 'Prefix to append to the result files (such as Genus, species, etc.)',
metavar=''
)
args=parser.parse_args()
if not os.path.isdir(args.out):
os.makedirs(os.path.join(args.out))
result_dir = os.path.abspath(os.path.join(rundir, args.out))
if args.input:
input_file = os.path.abspath(args.input)
else:
    print('ERROR: No orthogroups file was provided, please provide one with -i or --input')
sys.exit()
if args.prefix:
fluid_results = os.path.abspath(os.path.join(result_dir, args.prefix+'_fluidity.txt'))
fluid_fig = os.path.abspath(os.path.join(result_dir, args.prefix+'_fluidity.png'))
else:
fluid_results = os.path.abspath(os.path.join(result_dir, 'Pangenome_fluidity.txt'))
fluid_fig = os.path.abspath(os.path.join(result_dir, 'Pangenome_fluidity.png'))
def create_ortho_dictionary(ortho_file): # create dictionary of gene clusters and isolates per cluster
    '''Generate dictionary of Orthogroups.'''
print('Creating ortholog dictionary')
ortho_isolates_dict = OrderedDict() # {Protein Cluster : list of isolates represented in cluster}
with open(ortho_file, 'r') as infile:
ortho_list = [item.strip() for item in sorted(infile)]
for line in ortho_list:
iso_list = []
if ':' in line:
cluster, genes = line.split(':')
elif '\t' in line:
cluster, genes = line.split('\t', 1)
else:
cluster, genes = line.split(' ', 1)
for match in re.finditer(r'([^\s]+)', genes):
isolate = match.group(0).split('_')[0]
iso_list.append(isolate)
ortho_isolates_dict[cluster] = list(set(iso_list))
return ortho_isolates_dict
def create_pair_dictionary(ortho_dictionary):
'''Create all possible unique pairs of isolates and get their unique
sum gene clusters.'''
print('Creating dictionary of paired ratio values')
pair_dict = {} # {(Isolate1, Isolate2) : [ratio of sum(unique clusters)/sum(all clusters)]}
for i in range(0, len(iso_list)):
for x in range(0, len(iso_list)):
if not iso_list[i] == iso_list[x]:
pair = tuple(sorted([iso_list[i], iso_list[x]]))
if not pair in pair_dict.keys():
cogs = {'Shared' : 0, 'Uk' : 0, 'Ul' : 0}
for k,v in ortho_dictionary.items():
if pair[0] in v and pair[1] in v:
cogs['Shared'] += 1
elif pair[0] in v and pair[1] not in v:
cogs['Uk'] += 1
elif pair[0] not in v and pair[1] in v:
cogs['Ul'] += 1
else:
pass # don't need to count a cluster if both isolates are not present
unique_pair = cogs['Uk'] + cogs['Ul']
all_pair = (cogs['Uk'] + cogs['Shared']) + (cogs['Ul'] + cogs['Shared'])
pair_dict[pair] = unique_pair/all_pair
return pair_dict
def compute_fluidity_all_genomes():
'''
Computes the fluidity and variance for the pangenome in question from the max number
of genomes in the pangenome.
'''
N = iso_num
fluidity_list = [ratio for ratio in pair_dict.values()] # list of ratios
pangenome_fluidity = (2/(N*(N-1)))*sum(fluidity_list) # get fluidity from average of all ratios
jack_samples = list(combinations(iso_list, N - 1)) # get list of all combos of N-1 from max num of genomes
fluidity_i_list = []
for sample in jack_samples:
jack_pairs = tuple(combinations(sample,2)) # get all pairs from current jackknife sample
jack_sample_fluidity = [pair_dict[tuple(sorted(p))] for p in jack_pairs] # get ratios from pair_dict
fluidity_i = (2/((N-1)*(N-2)))*sum(jack_sample_fluidity) # calculate fluidity_i
fluidity_i_list.append(fluidity_i)
fluidity_i_mean = np.mean(fluidity_i_list) # calculate fluidity_i_mean from all fluidity_i's
fluidity_variance = ((N-1)/N)*sum([(i-fluidity_i_mean)**2 for i in fluidity_i_list]) # calculate variance
return pangenome_fluidity, fluidity_variance
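# The variance above is the standard jackknife estimate: each fluidity_i is the
# fluidity recomputed with one genome left out, and the spread of those
# leave-one-out values around their mean is scaled by (N-1)/N.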
def subsample_multiprocess(combo_list):
'''
Takes portions of the full combo_list and runs them on separate threads for faster processing.
    Calculates fluidity for each sample and returns a list of fluidities.
'''
N = len(combo_list[0]) # get N from number of genomes present
sample_process_list = []
for sample in combo_list:
pairs = tuple(combinations(sample,2))
pair_fluidity_list = [pair_dict[tuple(sorted(p))] for p in pairs]
sample_fluidity = (2/(N*(N-1)))*sum(pair_fluidity_list)
sample_process_list.append(sample_fluidity)
return sample_process_list
def genome_subsamples_fluidities(perm_list):
'''
Compute fluidities from all possible combinations of genomes from 3 to N randomly sampled genomes
    (N is the max number of genomes in the sample, so it is only sampled once). Has a cut-off of max subsamples
    at which point variances are calculated as sample variances (n-1) instead of full population
variances.
'''
sub_fluid_dict = {} # {N genomes sampled : [list of fluidities from subsamples]}
for N in range(3, iso_num + 1):
sub_fluid_dict[N] = []
N_combos = list(combinations(iso_list, N))
if args.max_off:
combos = N_combos
else:
if len(N_combos) > args.max_subsamples:
combos = random.choices(N_combos, k=args.max_subsamples)
perm_list.append(N)
else:
combos = N_combos
print('Performing fluidity calculations on {} subsample combinations of {} genomes'.format(len(combos),N))
if not len(N_combos) == 1:
chunk = round(len(combos)/args.cpus)
split_combos = [combos[i:i + chunk] for i in range(0, len(combos), chunk)]
pool = Pool(processes=args.cpus)
results = pool.imap(subsample_multiprocess, split_combos)
pool.close()
pool.join()
sub_fluid_dict[N].append(results)
else:
last_run = subsample_multiprocess(N_combos)
sub_fluid_dict[N].append(last_run)
sub_fluid_dict[N]=list(flatten(sub_fluid_dict[N]))
print(len(sub_fluid_dict[N]))
return sub_fluid_dict
def flatten(lis):
for item in lis:
if isinstance(item, Iterable) and not isinstance(item, str):
for x in flatten(item):
yield x
else:
yield item
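# Example: list(flatten([[0.1, 0.2], [0.3, [0.4]]])) == [0.1, 0.2, 0.3, 0.4],
# which is how the nested multiprocessing results are collapsed above.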
def exponential(x, a, b, c):
return a * np.exp(b * x) + c
def neg_exponential(x, a, b, c):
return a * np.exp(-b * x) + c
def sumOfSquaredError(parameterTuple, x_values, y_curve_values, func):
warnings.filterwarnings("ignore") # do not print warnings by genetic algorithm
val = func(x_values, *parameterTuple)
return np.sum((y_curve_values - val) ** 2.0)
def generate_Initial_Parameters(x_values, y_curve_values, func):
# min and max used for bounds
maxX = max(x_values)
minX = min(x_values)
maxY = max(y_curve_values)
minY = min(y_curve_values)
maxXY = max(maxX, maxY)
parameterBounds = []
    parameterBounds.append([-maxXY, maxXY]) # search bounds for a
    parameterBounds.append([-maxXY, maxXY]) # search bounds for b
    parameterBounds.append([-maxXY, maxXY]) # search bounds for c
# "seed" the numpy random number generator for repeatable results
result = differential_evolution(sumOfSquaredError, parameterBounds, args=(x_values,y_curve_values, func), seed=3)
return result.x
def create_fluidity_results(figure_output, results_output):
total_variance = []
for i in range(3, iso_num + 1):
if i in permutation_list:
total_variance.append(np.var(sub_fluid_dict[i], ddof = 1) + pan_variance)
else:
total_variance.append(np.var(sub_fluid_dict[i]) + pan_variance)
total_variance = np.array(total_variance)
total_stderr = np.array([x**(1/2) for x in total_variance])
y_fluidity_values = np.array([pan_fluidity for i in range(3, iso_num + 1)])
x_labels = np.array([i for i in range(3, iso_num + 1)])
stderr_bottom = np.array([(pan_fluidity - v) for v in total_stderr])
stderr_top = np.array([(pan_fluidity + v) for v in total_stderr])
fig, ax = plt.subplots()
try: # Still had problems sometimes with fitting curves, this solution works best for now
geneticParameters_top = generate_Initial_Parameters(x_labels, stderr_top, exponential)
geneticParameters_bottom = generate_Initial_Parameters(x_labels, stderr_bottom, exponential)
popt_t, pcov = curve_fit(exponential, x_labels, stderr_top, geneticParameters_top, maxfev=10000)
popt_b, pcov = curve_fit(exponential, x_labels, stderr_bottom, geneticParameters_bottom, maxfev=10000)
if len(set(exponential(x_labels, *popt_t))) > 3 and len(set(exponential(x_labels, *popt_b))) > 3:
plt.fill_between(x_labels, exponential(x_labels, *popt_t), exponential(x_labels, *popt_b), facecolor='blue', alpha=0.6)
top_curve = exponential(x_labels, *popt_t)
bottom_curve = exponential(x_labels, *popt_b)
if len(set(exponential(x_labels, *popt_t))) <= 3:
geneticParameters_top = generate_Initial_Parameters(x_labels, stderr_top, neg_exponential)
popt_t, pcov = curve_fit(neg_exponential, x_labels, stderr_top, geneticParameters_top, maxfev=10000)
plt.fill_between(x_labels, neg_exponential(x_labels, *popt_t), exponential(x_labels, *popt_b), facecolor='blue', alpha=0.6)
top_curve = neg_exponential(x_labels, *popt_t)
bottom_curve = exponential(x_labels, *popt_b)
else:
pass
if len(set(exponential(x_labels, *popt_b))) <= 3:
geneticParameters_bottom = generate_Initial_Parameters(x_labels, stderr_bottom, neg_exponential)
popt_b, pcov = curve_fit(neg_exponential, x_labels, stderr_bottom, geneticParameters_bottom, maxfev=10000)
plt.fill_between(x_labels, exponential(x_labels, *popt_t), neg_exponential(x_labels, *popt_b), facecolor='blue', alpha=0.6)
top_curve = exponential(x_labels, *popt_t)
bottom_curve = neg_exponential(x_labels, *popt_b)
else:
pass
except:
pass
ax.set_axisbelow(True)
plt.minorticks_on()
plt.grid(which='minor', axis='y', color='white', linestyle='--', alpha=0.3)
ax.yaxis.grid(True, linestyle='-', linewidth='1', which='major', color='white')
ax.xaxis.grid(True, linestyle='-', linewidth='1', which='major', color='white', alpha=0.5)
ax.tick_params(axis='x', which='minor', bottom=False)
ax.set_facecolor('gainsboro')
plt.plot(x_labels, y_fluidity_values, ls='--', lw=1, color='black') # plot y-values of fluidity
plt.xticks(np.arange(x_labels[0], x_labels[len(x_labels)-1]+1, 1.0)) # make sure x interval is 1
plt.xlim(x_labels[0], x_labels[len(x_labels)-1]) # adjust x limit so it starts with 3 at 0
max_y = max(stderr_top)
min_y = min(stderr_bottom)
plt.ylim((min_y - min_y*0.15), (max_y + max_y*0.15))
plt.xlabel('Number of genomes sampled')
plt.ylabel('Fluidity, '+u'\u03C6')
plt.tight_layout()
plt.savefig(figure_output)
with open(results_output, 'w') as results: # print out fluidity results
results.write('Genomes_Sampled\tFluidity\tTotal_Variance\tTotal_Stderr\tExponential_top\tExponential_bottom\n')
r_out = []
for i in range(0, iso_num-2):
r_out.append([str(i+3), str(pan_fluidity), str(total_variance[i]), str(total_stderr[i]),
str(top_curve[i]), str(bottom_curve[i])])
for line in r_out:
results.write('\t'.join(line) + '\n')
if __name__ == "__main__":
ortho_dict = create_ortho_dictionary(input_file)
iso_num = max([len(v) for v in ortho_dict.values()])
iso_list = list(set(itertools.chain.from_iterable([v for v in ortho_dict.values() if len(v) == iso_num])))
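    # iso_num is taken as the size of the largest cluster (assumed to equal the
    # number of genomes, i.e. a core cluster present in every isolate), and
    # iso_list collects the isolate names from those core clusters.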
pair_dict = create_pair_dictionary(ortho_dict)
pan_results = compute_fluidity_all_genomes()
pan_fluidity = pan_results[0]
pan_variance = pan_results[1]
permutation_list = []
sub_fluid_dict = genome_subsamples_fluidities(permutation_list)
create_fluidity_results(fluid_fig, fluid_results)
| 46.322388
| 135
| 0.669738
| 2,205
| 15,518
| 4.533787
| 0.203628
| 0.024507
| 0.036011
| 0.035211
| 0.264179
| 0.242173
| 0.209963
| 0.181354
| 0.157847
| 0.079424
| 0
| 0.012767
| 0.222709
| 15,518
| 334
| 136
| 46.461078
| 0.816034
| 0.199704
| 0
| 0.143911
| 0
| 0
| 0.096287
| 0.011213
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04428
| false
| 0.01476
| 0.03321
| 0.00738
| 0.114391
| 0.01845
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc100b64b37cc26f7af79a394d9e388ede43f204
| 7,610
|
py
|
Python
|
osvolbackup/backup.py
|
CCSGroupInternational/osvolbackup
|
d0d93812a729acdb6c961c6bdd1cc2cb5c9c87f5
|
[
"Apache-2.0"
] | 1
|
2019-02-27T12:59:49.000Z
|
2019-02-27T12:59:49.000Z
|
osvolbackup/backup.py
|
CCSGroupInternational/osvolbackup
|
d0d93812a729acdb6c961c6bdd1cc2cb5c9c87f5
|
[
"Apache-2.0"
] | 4
|
2019-03-07T09:31:51.000Z
|
2019-03-12T15:19:40.000Z
|
osvolbackup/backup.py
|
CCSGroupInternational/osvolbackup
|
d0d93812a729acdb6c961c6bdd1cc2cb5c9c87f5
|
[
"Apache-2.0"
] | null | null | null |
#
# This module provides classes that encapsulate some complex server-instance-related backup operations
#
from __future__ import print_function
from json import loads
from neutronclient.v2_0 import client as neutron_client
from novaclient import client as nova_client
from cinderclient import client as cinder_client
from osvolbackup.server import ServerInstance, ServerNotFound
from osvolbackup.osauth import get_session, VERSION
from osvolbackup.verbose import vprint
from time import time, sleep
class BackupGroup(object):
max_secs_gbi = 300
poll_delay = 10
def __init__(self, serverName):
self.selected_metadata = None
self.selected_backups = []
self.selected_volumes = []
session = self.session = get_session()
self.neutron = neutron_client.Client(session=session)
self.nova = nova_client.Client(VERSION, session=session)
self.cinder = cinder_client.Client(VERSION, session=session)
try:
server = ServerInstance(serverName)
except ServerNotFound:
name = 'osvb_'+serverName
else:
name = 'osvb_'+server.instance.id
self.backup_list = self.cinder.backups.list(search_opts={"name": name})
self.volume_map = {}
if len(self.backup_list) == 0:
raise BackupNotFound(serverName)
# Load metadata from the backup description field
self.backup_meta_data = backup_meta_data = {}
for backup in self.backup_list:
meta_data = loads(backup.description)
backup_meta_data[backup.id] = meta_data
self.volume_map[backup.id] = {"id": backup.volume_id, "size": backup.size}
self.available_backups = sorted(set([b['backup_time'] for b in backup_meta_data.values()]))
def select_by_tag(self, tag):
if tag == 'last':
selected_backup_timestamp = self.available_backups[-1]
else:
raise BackupTooMany(tag)
# Get volumes associated with the selected backup
for backup_id, backup_meta in self.backup_meta_data.iteritems():
if backup_meta['backup_time'] == selected_backup_timestamp:
self.selected_backups.append(backup_id)
self.selected_volumes.append(self.volume_map[backup_id])
self.selected_metadata = backup_meta
def get_volumes(self):
return self.selected_volumes
def restore(self, server=None, network=None, to_project=None, skip_vm=False):
# flavor = self.nova.flavors.find(name=self.selected_metadata['flavor'])
new_volume_list = self._create_volumes(self.selected_volumes, to_project)
# Restore the volumes
block_device_mapping = {}
for i, backup_id in enumerate(self.selected_backups):
vol_index = self.backup_meta_data[backup_id]['vol_index']
new_volume_id = new_volume_list[i].id
vprint("Restoring from backup", backup_id, "to volume", new_volume_id)
dev_name = "vd" + chr(ord('a') + vol_index)
block_device_mapping[dev_name] = new_volume_id
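            # vol_index 0 maps to 'vda', 1 to 'vdb', and so on, matching the
            # device ordering recorded in the backup metadata.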
restore = self.cinder.restores.restore(backup_id=backup_id, volume_id=new_volume_id)
restored_volume = self.cinder.volumes.get(restore.volume_id)
self._wait_for(restored_volume, ('restoring-backup',), 'available')
# We need to get again to refresh the metadata
restored_volume = self.cinder.volumes.get(restore.volume_id)
if vol_index == 0:
if not skip_vm:
name = restored_volume.metadata['osvb_name']
flavor = restored_volume.metadata['osvb_flavor']
flavor = self.nova.flavors.find(name=flavor) # name to id
saved_networks = loads(restored_volume.metadata['osvb_network'])
if not skip_vm:
nics = []
if network is not None:
net_name, net_ip = network.split("=")
net_id = self.neutron.list_networks(name=net_name)['networks'][0]['id']
nic_info = {'net-id': net_id, 'v4-fixed-ip': net_ip}
nics.append(nic_info)
else:
for network_name, network_ips in saved_networks.iteritems():
nic_info = {}
nic_info['net-id'] = self.neutron.list_networks(name=network_name)['networks'][0]['id']
nic_info['v4-fixed-ip'] = network_ips[0]
nics.append(nic_info)
target_session = get_session(to_project)
target_nova = nova_client.Client(VERSION, session=target_session)
server = target_nova.servers.create(
name=name, image=None, flavor=flavor, block_device_mapping=block_device_mapping, nics=nics
)
print("Server was restored into instance", server.id)
def _create_volumes(self, volume_list, to_project):
""" Create volumes based """
vprint("Creating volumes for the instance restore")
target_session = get_session(to_project)
target_cinder = cinder_client.Client(VERSION, session=target_session)
vol_list = []
for volume in volume_list:
vprint("Creating %dG volume" % volume['size'])
new_volume = target_cinder.volumes.create(volume['size'])
self._wait_for(new_volume, ('creating',), 'available')
vol_list.append(new_volume)
return vol_list
# Borrowed from https://github.com/Akrog/cinderback/blob/master/cinderback.py
def _wait_for(self, resource, allowed_states, expected_states=None, timeout=None):
"""Waits for a resource to come to a specific state.
:param resource: Resource we want to wait for
:param allowed_states: iterator with allowed intermediary states
:param expected_states: states we expect to have at the end, if None
is supplied then anything is good.
        :param need_up: If we need the backup service to be up and running
:return: The most updated resource
"""
if timeout:
deadline = time() + timeout
else:
deadline = time() + (self.max_secs_gbi * resource.size)
while resource.status in allowed_states:
sleep(self.poll_delay)
if deadline <= time():
raise TimeoutError(what=resource)
resource = resource.manager.get(resource.id)
if expected_states and resource.status not in expected_states:
raise UnexpectedStatus(what=resource, intermediate=allowed_states, final=expected_states)
return resource
class BackupException(Exception):
def __init__(self, what, *args, **kwargs):
super(BackupException, self).__init__(*args, **kwargs)
self.what = what
def __str__(self):
return u'%s: %s' % (self.__class__.__name__, self.what)
class UnexpectedStatus(BackupException):
def __init__(self, what, intermediate='', final='', *args, **kwargs):
super(UnexpectedStatus, self).__init__(what, *args, **kwargs)
self.intermediate = intermediate
self.final = final
def __str__(self):
if self.intermediate or self.final:
steps = (' [intermediate: %s, final: %s]' % (self.intermediate, self.final))
else:
steps = ''
return (u'%s: Status is %s%s' %
(self.__class__.__name__, self.what.status, steps))
class BackupNotFound(BackupException):
pass
class BackupTooMany(BackupException):
pass
| 42.044199
| 107
| 0.642181
| 906
| 7,610
| 5.144592
| 0.233996
| 0.025746
| 0.018022
| 0.022313
| 0.14675
| 0.119288
| 0.060931
| 0.021026
| 0.021026
| 0
| 0
| 0.002676
| 0.263469
| 7,610
| 180
| 108
| 42.277778
| 0.828903
| 0.109724
| 0
| 0.128788
| 0
| 0
| 0.055547
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075758
| false
| 0.015152
| 0.068182
| 0.015152
| 0.234848
| 0.045455
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc107e595b21342f82e5161a579e155e45e95a50
| 13,314
|
py
|
Python
|
gammapy/estimators/profile.py
|
JohannesBuchner/gammapy
|
48769519f04b7df7b3e4580ebb61396445790bc3
|
[
"BSD-3-Clause"
] | 1
|
2021-02-02T21:35:27.000Z
|
2021-02-02T21:35:27.000Z
|
gammapy/estimators/profile.py
|
kabartay/gammapy
|
015206d2418b1d254f1c9d3ea819ab0c5ece99e9
|
[
"BSD-3-Clause"
] | 2
|
2018-08-09T20:49:13.000Z
|
2019-01-23T17:30:49.000Z
|
gammapy/estimators/profile.py
|
kabartay/gammapy
|
015206d2418b1d254f1c9d3ea819ab0c5ece99e9
|
[
"BSD-3-Clause"
] | null | null | null |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Tools to create profiles (i.e. 1D "slices" from 2D images)."""
import numpy as np
import scipy.ndimage
from astropy import units as u
from astropy.convolution import Box1DKernel, Gaussian1DKernel
from astropy.coordinates import Angle
from astropy.table import Table
from .core import Estimator
__all__ = ["ImageProfile", "ImageProfileEstimator"]
# TODO: implement measuring profile along arbitrary directions
# TODO: think better about error handling. e.g. MC based methods
class ImageProfileEstimator(Estimator):
"""Estimate profile from image.
Parameters
----------
x_edges : `~astropy.coordinates.Angle`
        Coordinate edges to define a custom measurement grid (optional).
method : ['sum', 'mean']
Compute sum or mean within profile bins.
axis : ['lon', 'lat', 'radial']
Along which axis to estimate the profile.
center : `~astropy.coordinates.SkyCoord`
Center coordinate for the radial profile option.
Examples
--------
This example shows how to compute a counts profile for the Fermi galactic
center region::
import matplotlib.pyplot as plt
        from gammapy.estimators import ImageProfileEstimator
from gammapy.maps import Map
from astropy import units as u
# load example data
filename = '$GAMMAPY_DATA/fermi-3fhl-gc/fermi-3fhl-gc-counts.fits.gz'
fermi_cts = Map.read(filename)
# set up profile estimator and run
p = ImageProfileEstimator(axis='lon', method='sum')
profile = p.run(fermi_cts)
# smooth profile and plot
smoothed = profile.smooth(kernel='gauss')
smoothed.peek()
plt.show()
"""
tag = "ImageProfileEstimator"
def __init__(self, x_edges=None, method="sum", axis="lon", center=None):
self._x_edges = x_edges
if method not in ["sum", "mean"]:
raise ValueError("Not a valid method, choose either 'sum' or 'mean'")
if axis not in ["lon", "lat", "radial"]:
raise ValueError("Not a valid axis, choose either 'lon' or 'lat'")
if method == "radial" and center is None:
raise ValueError("Please provide center coordinate for radial profiles")
self.parameters = {"method": method, "axis": axis, "center": center}
def _get_x_edges(self, image):
if self._x_edges is not None:
return self._x_edges
p = self.parameters
coordinates = image.geom.get_coord(mode="edges").skycoord
if p["axis"] == "lat":
x_edges = coordinates[:, 0].data.lat
elif p["axis"] == "lon":
lon = coordinates[0, :].data.lon
x_edges = lon.wrap_at("180d")
elif p["axis"] == "radial":
rad_step = image.geom.pixel_scales.mean()
corners = [0, 0, -1, -1], [0, -1, 0, -1]
rad_max = coordinates[corners].separation(p["center"]).max()
x_edges = Angle(np.arange(0, rad_max.deg, rad_step.deg), unit="deg")
return x_edges
def _estimate_profile(self, image, image_err, mask):
p = self.parameters
labels = self._label_image(image, mask)
profile_err = None
index = np.arange(1, len(self._get_x_edges(image)))
if p["method"] == "sum":
profile = scipy.ndimage.sum(image.data, labels.data, index)
if image.unit.is_equivalent("counts"):
profile_err = np.sqrt(profile)
elif image_err:
# gaussian error propagation
err_sum = scipy.ndimage.sum(image_err.data ** 2, labels.data, index)
profile_err = np.sqrt(err_sum)
elif p["method"] == "mean":
# gaussian error propagation
profile = scipy.ndimage.mean(image.data, labels.data, index)
if image_err:
N = scipy.ndimage.sum(~np.isnan(image_err.data), labels.data, index)
err_sum = scipy.ndimage.sum(image_err.data ** 2, labels.data, index)
profile_err = np.sqrt(err_sum) / N
return profile, profile_err
def _label_image(self, image, mask=None):
p = self.parameters
coordinates = image.geom.get_coord().skycoord
x_edges = self._get_x_edges(image)
if p["axis"] == "lon":
lon = coordinates.data.lon.wrap_at("180d")
data = np.digitize(lon.degree, x_edges.deg)
elif p["axis"] == "lat":
lat = coordinates.data.lat
data = np.digitize(lat.degree, x_edges.deg)
elif p["axis"] == "radial":
separation = coordinates.separation(p["center"])
data = np.digitize(separation.degree, x_edges.deg)
if mask is not None:
# assign masked values to background
data[mask.data] = 0
return image.copy(data=data)
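    # _label_image() returns a map whose pixel values are profile-bin labels
    # (1..n_bins from np.digitize); masked pixels are set to 0 so that the
    # scipy.ndimage.sum/mean calls above skip them when aggregating per label.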
def run(self, image, image_err=None, mask=None):
"""Run image profile estimator.
Parameters
----------
image : `~gammapy.maps.Map`
Input image to run profile estimator on.
image_err : `~gammapy.maps.Map`
Input error image to run profile estimator on.
mask : `~gammapy.maps.Map`
Optional mask to exclude regions from the measurement.
Returns
-------
profile : `ImageProfile`
Result image profile object.
"""
p = self.parameters
if image.unit.is_equivalent("count"):
image_err = image.copy(data=np.sqrt(image.data))
profile, profile_err = self._estimate_profile(image, image_err, mask)
result = Table()
x_edges = self._get_x_edges(image)
result["x_min"] = x_edges[:-1]
result["x_max"] = x_edges[1:]
result["x_ref"] = (x_edges[:-1] + x_edges[1:]) / 2
result["profile"] = profile * image.unit
if profile_err is not None:
result["profile_err"] = profile_err * image.unit
result.meta["PROFILE_TYPE"] = p["axis"]
return ImageProfile(result)
class ImageProfile:
"""Image profile class.
The image profile data is stored in `~astropy.table.Table` object, with the
following columns:
* `x_ref` Coordinate bin center (required).
* `x_min` Coordinate bin minimum (optional).
* `x_max` Coordinate bin maximum (optional).
* `profile` Image profile data (required).
* `profile_err` Image profile data error (optional).
Parameters
----------
table : `~astropy.table.Table`
Table instance with the columns specified as above.
"""
def __init__(self, table):
self.table = table
def smooth(self, kernel="box", radius="0.1 deg", **kwargs):
r"""Smooth profile with error propagation.
Smoothing is described by a convolution:
.. math::
x_j = \sum_i x_{(j - i)} h_i
Where :math:`h_i` are the coefficients of the convolution kernel.
The corresponding error on :math:`x_j` is then estimated using Gaussian
error propagation, neglecting correlations between the individual
:math:`x_{(j - i)}`:
.. math::
\Delta x_j = \sqrt{\sum_i \Delta x^{2}_{(j - i)} h^{2}_i}
Parameters
----------
kernel : {'gauss', 'box'}
Kernel shape
radius : `~astropy.units.Quantity`, str or float
Smoothing width given as quantity or float. If a float is given it
is interpreted as smoothing width in pixels. If an (angular) quantity
            is given it is converted to pixels using `x_ref[1] - x_ref[0]`.
kwargs : dict
Keyword arguments passed to `~scipy.ndimage.uniform_filter`
('box') and `~scipy.ndimage.gaussian_filter` ('gauss').
Returns
-------
profile : `ImageProfile`
Smoothed image profile.
"""
table = self.table.copy()
profile = table["profile"]
radius = u.Quantity(radius)
radius = np.abs(radius / np.diff(self.x_ref))[0]
width = 2 * radius.value + 1
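        # radius is converted from an angle to a number of bins using the bin
        # spacing (x_ref[1] - x_ref[0]); width = 2 * radius + 1 is then the
        # kernel size in bins, centred on each bin.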
if kernel == "box":
smoothed = scipy.ndimage.uniform_filter(
profile.astype("float"), width, **kwargs
)
# renormalize data
if table["profile"].unit.is_equivalent("count"):
smoothed *= int(width)
smoothed_err = np.sqrt(smoothed)
elif "profile_err" in table.colnames:
profile_err = table["profile_err"]
# use gaussian error propagation
box = Box1DKernel(width)
err_sum = scipy.ndimage.convolve(profile_err ** 2, box.array ** 2)
smoothed_err = np.sqrt(err_sum)
elif kernel == "gauss":
smoothed = scipy.ndimage.gaussian_filter(
profile.astype("float"), width, **kwargs
)
# use gaussian error propagation
if "profile_err" in table.colnames:
profile_err = table["profile_err"]
gauss = Gaussian1DKernel(width)
err_sum = scipy.ndimage.convolve(profile_err ** 2, gauss.array ** 2)
smoothed_err = np.sqrt(err_sum)
else:
raise ValueError("Not valid kernel choose either 'box' or 'gauss'")
table["profile"] = smoothed * self.table["profile"].unit
if "profile_err" in table.colnames:
table["profile_err"] = smoothed_err * self.table["profile"].unit
return self.__class__(table)
def plot(self, ax=None, **kwargs):
"""Plot image profile.
Parameters
----------
ax : `~matplotlib.axes.Axes`
Axes object
**kwargs : dict
Keyword arguments passed to `~matplotlib.axes.Axes.plot`
Returns
-------
ax : `~matplotlib.axes.Axes`
Axes object
"""
import matplotlib.pyplot as plt
if ax is None:
ax = plt.gca()
y = self.table["profile"].data
x = self.x_ref.value
ax.plot(x, y, **kwargs)
ax.set_xlabel("lon")
ax.set_ylabel("profile")
ax.set_xlim(x.max(), x.min())
return ax
def plot_err(self, ax=None, **kwargs):
"""Plot image profile error as band.
Parameters
----------
ax : `~matplotlib.axes.Axes`
Axes object
**kwargs : dict
Keyword arguments passed to plt.fill_between()
Returns
-------
ax : `~matplotlib.axes.Axes`
Axes object
"""
import matplotlib.pyplot as plt
if ax is None:
ax = plt.gca()
y = self.table["profile"].data
ymin = y - self.table["profile_err"].data
ymax = y + self.table["profile_err"].data
x = self.x_ref.value
# plotting defaults
kwargs.setdefault("alpha", 0.5)
ax.fill_between(x, ymin, ymax, **kwargs)
ax.set_xlabel("x (deg)")
ax.set_ylabel("profile")
return ax
@property
def x_ref(self):
"""Reference x coordinates."""
return self.table["x_ref"].quantity
@property
def x_min(self):
"""Min. x coordinates."""
return self.table["x_min"].quantity
@property
def x_max(self):
"""Max. x coordinates."""
return self.table["x_max"].quantity
@property
def profile(self):
"""Image profile quantity."""
return self.table["profile"].quantity
@property
def profile_err(self):
"""Image profile error quantity."""
try:
return self.table["profile_err"].quantity
except KeyError:
return None
def peek(self, figsize=(8, 4.5), **kwargs):
"""Show image profile and error.
Parameters
----------
**kwargs : dict
Keyword arguments passed to `ImageProfile.plot_profile()`
Returns
-------
ax : `~matplotlib.axes.Axes`
Axes object
"""
import matplotlib.pyplot as plt
fig = plt.figure(figsize=figsize)
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
ax = self.plot(ax, **kwargs)
if "profile_err" in self.table.colnames:
ax = self.plot_err(ax, color=kwargs.get("c"))
return ax
def normalize(self, mode="peak"):
"""Normalize profile to peak value or integral.
Parameters
----------
mode : ['integral', 'peak']
Normalize image profile so that it integrates to unity ('integral')
or the maximum value corresponds to one ('peak').
Returns
-------
profile : `ImageProfile`
Normalized image profile.
"""
table = self.table.copy()
profile = self.table["profile"]
if mode == "peak":
norm = np.nanmax(profile)
elif mode == "integral":
norm = np.nansum(profile)
else:
raise ValueError(f"Invalid normalization mode: {mode!r}")
table["profile"] /= norm
if "profile_err" in table.colnames:
table["profile_err"] /= norm
return self.__class__(table)
| 31.928058
| 84
| 0.570828
| 1,573
| 13,314
| 4.724094
| 0.191354
| 0.036334
| 0.019378
| 0.013457
| 0.254475
| 0.229579
| 0.173866
| 0.130803
| 0.111425
| 0.088817
| 0
| 0.006627
| 0.308623
| 13,314
| 416
| 85
| 32.004808
| 0.800652
| 0.326048
| 0
| 0.248649
| 0
| 0
| 0.096675
| 0.005192
| 0
| 0
| 0
| 0.002404
| 0
| 1
| 0.086486
| false
| 0
| 0.054054
| 0
| 0.243243
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc1121d14735ee8c8c982d686f96751beb66af86
| 7,270
|
py
|
Python
|
env/lib/python3.8/site-packages/versatileimagefield/mixins.py
|
crimergio/linux_test
|
5e688a06884ab10b4eaaad10a5d0df417a1c9b31
|
[
"CC-BY-4.0"
] | 1
|
2021-04-07T16:25:20.000Z
|
2021-04-07T16:25:20.000Z
|
env/lib/python3.8/site-packages/versatileimagefield/mixins.py
|
crimergio/linux_test
|
5e688a06884ab10b4eaaad10a5d0df417a1c9b31
|
[
"CC-BY-4.0"
] | 9
|
2021-03-19T03:06:53.000Z
|
2022-03-12T00:37:04.000Z
|
myvenv/lib/python3.6/site-packages/versatileimagefield/mixins.py
|
yog240597/saleor
|
b75a23827a4ec2ce91637f0afe6808c9d09da00a
|
[
"CC-BY-4.0"
] | 1
|
2021-04-23T15:01:05.000Z
|
2021-04-23T15:01:05.000Z
|
"""versatileimagefield Field mixins."""
import os
import re
from .datastructures import FilterLibrary
from .registry import autodiscover, versatileimagefield_registry
from .settings import (
cache,
VERSATILEIMAGEFIELD_CREATE_ON_DEMAND,
VERSATILEIMAGEFIELD_SIZED_DIRNAME,
VERSATILEIMAGEFIELD_FILTERED_DIRNAME
)
from .validators import validate_ppoi
autodiscover()
filter_regex_snippet = r'__({registered_filters})__'.format(
registered_filters='|'.join([
key
for key, filter_cls in versatileimagefield_registry._filter_registry.items()
])
)
sizer_regex_snippet = r'-({registered_sizers})-(\d+)x(\d+)(?:-\d+)?'.format(
registered_sizers='|'.join([
sizer_cls.get_filename_key_regex()
for key, sizer_cls in versatileimagefield_registry._sizedimage_registry.items()
])
)
filter_regex = re.compile(filter_regex_snippet + '$')
sizer_regex = re.compile(sizer_regex_snippet + '$')
filter_and_sizer_regex = re.compile(
filter_regex_snippet + sizer_regex_snippet + '$'
)
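# These regexes are matched against the part of a filename between its basename
# and extension (see delete_matching_files_from_storage below). For a
# hypothetical image 'bar.jpg' with a registered filter 'invert' and sizer
# 'crop', the filtered copy would carry a '__invert__' tag and a 400x400 sized
# copy a '-crop-400x400' tag; filter_and_sizer_regex matches both parts together.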
class VersatileImageMixIn(object):
"""A mix-in that provides the filtering/sizing API."""
def __init__(self, *args, **kwargs):
"""Construct PPOI and create_on_demand."""
self._create_on_demand = VERSATILEIMAGEFIELD_CREATE_ON_DEMAND
super(VersatileImageMixIn, self).__init__(*args, **kwargs)
# Setting initial ppoi
if self.field.ppoi_field:
instance_ppoi_value = getattr(
self.instance,
self.field.ppoi_field,
(0.5, 0.5)
)
self.ppoi = instance_ppoi_value
else:
self.ppoi = (0.5, 0.5)
@property
def url(self):
"""
Return the appropriate URL.
URL is constructed based on these field conditions:
* If empty (not `self.name`) and a placeholder is defined, the
URL to the placeholder is returned.
* Otherwise, defaults to vanilla ImageFieldFile behavior.
"""
if not self.name and self.field.placeholder_image_name:
return self.storage.url(self.field.placeholder_image_name)
return super(VersatileImageMixIn, self).url
@property
def create_on_demand(self):
"""create_on_demand getter."""
return self._create_on_demand
@create_on_demand.setter
def create_on_demand(self, value):
if not isinstance(value, bool):
raise ValueError(
"`create_on_demand` must be a boolean"
)
else:
self._create_on_demand = value
self.build_filters_and_sizers(self.ppoi, value)
@property
def ppoi(self):
"""Primary Point of Interest (ppoi) getter."""
return self._ppoi_value
@ppoi.setter
def ppoi(self, value):
"""Primary Point of Interest (ppoi) setter."""
ppoi = validate_ppoi(
value,
return_converted_tuple=True
)
if ppoi is not False:
self._ppoi_value = ppoi
self.build_filters_and_sizers(ppoi, self.create_on_demand)
def build_filters_and_sizers(self, ppoi_value, create_on_demand):
"""Build the filters and sizers for a field."""
name = self.name
if not name and self.field.placeholder_image_name:
name = self.field.placeholder_image_name
self.filters = FilterLibrary(
name,
self.storage,
versatileimagefield_registry,
ppoi_value,
create_on_demand
)
for (
attr_name,
sizedimage_cls
) in versatileimagefield_registry._sizedimage_registry.items():
setattr(
self,
attr_name,
sizedimage_cls(
path_to_image=name,
storage=self.storage,
create_on_demand=create_on_demand,
ppoi=ppoi_value
)
)
def get_filtered_root_folder(self):
"""Return the location where filtered images are stored."""
folder, filename = os.path.split(self.name)
return os.path.join(folder, VERSATILEIMAGEFIELD_FILTERED_DIRNAME, '')
def get_sized_root_folder(self):
"""Return the location where sized images are stored."""
folder, filename = os.path.split(self.name)
return os.path.join(VERSATILEIMAGEFIELD_SIZED_DIRNAME, folder, '')
def get_filtered_sized_root_folder(self):
"""Return the location where filtered + sized images are stored."""
sized_root_folder = self.get_sized_root_folder()
return os.path.join(
sized_root_folder,
VERSATILEIMAGEFIELD_FILTERED_DIRNAME
)
def delete_matching_files_from_storage(self, root_folder, regex):
"""
Delete files in `root_folder` which match `regex` before file ext.
Example values:
* root_folder = 'foo/'
* self.name = 'bar.jpg'
* regex = re.compile('-baz')
Result:
* foo/bar-baz.jpg <- Deleted
* foo/bar-biz.jpg <- Not deleted
"""
if not self.name: # pragma: no cover
return
try:
directory_list, file_list = self.storage.listdir(root_folder)
except OSError: # pragma: no cover
pass
else:
folder, filename = os.path.split(self.name)
basename, ext = os.path.splitext(filename)
for f in file_list:
if not f.startswith(basename) or not f.endswith(ext): # pragma: no cover
continue
tag = f[len(basename):-len(ext)]
assert f == basename + tag + ext
if regex.match(tag) is not None:
file_location = os.path.join(root_folder, f)
self.storage.delete(file_location)
cache.delete(
self.storage.url(file_location)
)
print(
"Deleted {file} (created from: {original})".format(
file=os.path.join(root_folder, f),
original=self.name
)
)
def delete_filtered_images(self):
"""Delete all filtered images created from `self.name`."""
self.delete_matching_files_from_storage(
self.get_filtered_root_folder(),
filter_regex
)
def delete_sized_images(self):
"""Delete all sized images created from `self.name`."""
self.delete_matching_files_from_storage(
self.get_sized_root_folder(),
sizer_regex
)
def delete_filtered_sized_images(self):
"""Delete all filtered sized images created from `self.name`."""
self.delete_matching_files_from_storage(
self.get_filtered_sized_root_folder(),
filter_and_sizer_regex
)
def delete_all_created_images(self):
"""Delete all images created from `self.name`."""
self.delete_filtered_images()
self.delete_sized_images()
self.delete_filtered_sized_images()
| 34.454976
| 90
| 0.599037
| 796
| 7,270
| 5.202261
| 0.207286
| 0.03091
| 0.054093
| 0.021734
| 0.38469
| 0.271674
| 0.229896
| 0.137648
| 0.085004
| 0.085004
| 0
| 0.001601
| 0.312792
| 7,270
| 210
| 91
| 34.619048
| 0.827262
| 0.169051
| 0
| 0.116883
| 0
| 0
| 0.025887
| 0.011829
| 0
| 0
| 0
| 0
| 0.006494
| 1
| 0.097403
| false
| 0.006494
| 0.038961
| 0
| 0.194805
| 0.006494
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc11ec393a7dcebc05211e5be317a56b62dc07c0
| 9,450
|
py
|
Python
|
differential_privacy/run_federated.py
|
HanGuo97/federated
|
7e64bfe86bb606fad2ea7bc2a0f8ebdb565546f9
|
[
"BSD-3-Clause"
] | 330
|
2020-09-14T23:10:16.000Z
|
2022-03-30T19:49:19.000Z
|
differential_privacy/run_federated.py
|
HanGuo97/federated
|
7e64bfe86bb606fad2ea7bc2a0f8ebdb565546f9
|
[
"BSD-3-Clause"
] | 52
|
2020-09-30T06:10:51.000Z
|
2022-03-31T19:25:16.000Z
|
differential_privacy/run_federated.py
|
HanGuo97/federated
|
7e64bfe86bb606fad2ea7bc2a0f8ebdb565546f9
|
[
"BSD-3-Clause"
] | 119
|
2020-09-24T04:54:46.000Z
|
2022-03-31T21:46:57.000Z
|
# Copyright 2020, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs federated training with differential privacy on various tasks."""
import functools
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
import tensorflow_federated as tff
from utils import task_utils
from utils import training_utils
from utils import utils_impl
from utils.optimizers import optimizer_utils
with utils_impl.record_hparam_flags() as optimizer_flags:
# Defining optimizer flags
optimizer_utils.define_optimizer_flags('client')
optimizer_utils.define_optimizer_flags('server')
with utils_impl.record_hparam_flags() as shared_flags:
# Federated training hyperparameters
flags.DEFINE_integer('client_epochs_per_round', 1,
'Number of epochs in the client to take per round.')
flags.DEFINE_integer('client_batch_size', 20, 'Batch size on the clients.')
flags.DEFINE_integer('clients_per_round', 10,
'How many clients to sample per round.')
flags.DEFINE_integer('client_datasets_random_seed', 1,
'Random seed for client sampling.')
flags.DEFINE_integer(
'max_elements_per_client', None, 'Maximum number of '
'elements for each training client. If set to None, all '
'available examples are used.')
# Training loop configuration
flags.DEFINE_integer('total_rounds', 200, 'Number of total training rounds.')
flags.DEFINE_string(
'experiment_name', None, 'The name of this experiment. Will be append to '
'--root_output_dir to separate experiment results.')
flags.DEFINE_string('root_output_dir', '/tmp/fed_opt/',
'Root directory for writing experiment output.')
flags.DEFINE_integer(
'rounds_per_eval', 1,
'How often to evaluate the global model on the validation dataset.')
flags.DEFINE_integer(
      'num_validation_examples', -1, 'The number of validation '
'examples to use. If set to -1, all available examples '
'are used.')
flags.DEFINE_integer('rounds_per_checkpoint', 50,
'How often to checkpoint the global model.')
with utils_impl.record_hparam_flags() as dp_flags:
# Differential privacy flags
flags.DEFINE_float(
'clip', None, 'Clip value for fixed clipping or initial clip for '
'adaptive clipping. If None, no clipping is used.')
flags.DEFINE_float('noise_multiplier', None,
'Noise multiplier. If None, non-DP aggregator is used.')
flags.DEFINE_float(
'adaptive_clip_learning_rate', None, 'Adaptive clip learning rate. If '
'None, clip adaptation is not used.')
flags.DEFINE_float('target_unclipped_quantile', 0.5,
'Target unclipped quantile.')
flags.DEFINE_boolean('uniform_weighting', False,
'Whether to weigh clients uniformly.')
# Task specification
with utils_impl.record_hparam_flags() as task_flags:
task_utils.define_task_flags()
FLAGS = flags.FLAGS
def _write_hparam_flags():
"""Returns an ordered dictionary of pertinent hyperparameter flags."""
hparam_dict = utils_impl.lookup_flag_values(shared_flags)
# Update with optimizer flags corresponding to the chosen optimizers.
opt_flag_dict = utils_impl.lookup_flag_values(optimizer_flags)
opt_flag_dict = optimizer_utils.remove_unused_flags('client', opt_flag_dict)
opt_flag_dict = optimizer_utils.remove_unused_flags('server', opt_flag_dict)
hparam_dict.update(opt_flag_dict)
# Update with task flags
task_flag_dict = utils_impl.lookup_flag_values(task_flags)
hparam_dict.update(task_flag_dict)
training_utils.write_hparams_to_csv(hparam_dict, FLAGS.root_output_dir,
FLAGS.experiment_name)
def main(argv):
if len(argv) > 1:
raise app.UsageError('Expected no command-line arguments, '
'got: {}'.format(argv))
client_optimizer_fn = optimizer_utils.create_optimizer_fn_from_flags('client')
server_optimizer_fn = optimizer_utils.create_optimizer_fn_from_flags('server')
train_client_spec = tff.simulation.baselines.ClientSpec(
num_epochs=FLAGS.client_epochs_per_round,
batch_size=FLAGS.client_batch_size,
max_elements=FLAGS.max_elements_per_client)
task = task_utils.create_task_from_flags(train_client_spec)
logging.info('Trainable weights:')
for weight in task.model_fn().trainable_variables:
logging.info('name: %s shape: %s', weight.name, weight.shape)
if FLAGS.uniform_weighting:
client_weighting = tff.learning.ClientWeighting.UNIFORM
elif FLAGS.task == 'shakespeare_character' or FLAGS.task == 'stackoverflow_word':
def client_weighting(local_outputs):
return tf.cast(tf.squeeze(local_outputs['num_tokens']), tf.float32)
else:
client_weighting = None
if FLAGS.noise_multiplier is None:
if FLAGS.uniform_weighting:
aggregation_factory = tff.aggregators.UnweightedMeanFactory()
else:
aggregation_factory = tff.aggregators.MeanFactory()
if FLAGS.clip is not None:
if FLAGS.clip <= 0:
raise ValueError('clip must be positive if clipping is enabled.')
if FLAGS.adaptive_clip_learning_rate is None:
clip = FLAGS.clip
else:
if FLAGS.adaptive_clip_learning_rate <= 0:
raise ValueError('adaptive_clip_learning_rate must be positive if '
'adaptive clipping is enabled.')
clip = tff.aggregators.PrivateQuantileEstimationProcess.no_noise(
initial_estimate=FLAGS.clip,
target_quantile=FLAGS.target_unclipped_quantile,
learning_rate=FLAGS.adaptive_clip_learning_rate)
aggregation_factory = tff.aggregators.clipping_factory(
clip, aggregation_factory)
else:
if not FLAGS.uniform_weighting:
raise ValueError(
'Differential privacy is only implemented for uniform weighting.')
if FLAGS.noise_multiplier <= 0:
raise ValueError('noise_multiplier must be positive if DP is enabled.')
if FLAGS.clip is None or FLAGS.clip <= 0:
raise ValueError('clip must be positive if DP is enabled.')
if FLAGS.adaptive_clip_learning_rate is None:
aggregation_factory = tff.aggregators.DifferentiallyPrivateFactory.gaussian_fixed(
noise_multiplier=FLAGS.noise_multiplier,
clients_per_round=FLAGS.clients_per_round,
clip=FLAGS.clip)
else:
if FLAGS.adaptive_clip_learning_rate <= 0:
raise ValueError('adaptive_clip_learning_rate must be positive if '
'adaptive clipping is enabled.')
aggregation_factory = tff.aggregators.DifferentiallyPrivateFactory.gaussian_adaptive(
noise_multiplier=FLAGS.noise_multiplier,
clients_per_round=FLAGS.clients_per_round,
initial_l2_norm_clip=FLAGS.clip,
target_unclipped_quantile=FLAGS.target_unclipped_quantile,
learning_rate=FLAGS.adaptive_clip_learning_rate)
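  # Summary of the branches above: without a noise multiplier the update is a
  # plain (optionally clipped) weighted or unweighted mean; with a noise
  # multiplier a Gaussian DP aggregator is used, with either a fixed clip or an
  # adaptive clip driven by target_unclipped_quantile and
  # adaptive_clip_learning_rate.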
iterative_process = tff.learning.build_federated_averaging_process(
model_fn=task.model_fn,
server_optimizer_fn=server_optimizer_fn,
client_weighting=client_weighting,
client_optimizer_fn=client_optimizer_fn,
model_update_aggregation_factory=aggregation_factory)
train_data = task.datasets.train_data.preprocess(
task.datasets.train_preprocess_fn)
training_process = (
tff.simulation.compose_dataset_computation_with_iterative_process(
train_data.dataset_computation, iterative_process))
training_selection_fn = functools.partial(
tff.simulation.build_uniform_sampling_fn(
train_data.client_ids, random_seed=FLAGS.client_datasets_random_seed),
size=FLAGS.clients_per_round)
test_data = task.datasets.get_centralized_test_data()
validation_data = test_data.take(FLAGS.num_validation_examples)
federated_eval = tff.learning.build_federated_evaluation(task.model_fn)
evaluation_selection_fn = lambda round_num: [validation_data]
def evaluation_fn(state, evaluation_data):
return federated_eval(state.model, evaluation_data)
program_state_manager, metrics_managers = training_utils.create_managers(
FLAGS.root_output_dir, FLAGS.experiment_name)
_write_hparam_flags()
state = tff.simulation.run_training_process(
training_process=training_process,
training_selection_fn=training_selection_fn,
total_rounds=FLAGS.total_rounds,
evaluation_fn=evaluation_fn,
evaluation_selection_fn=evaluation_selection_fn,
rounds_per_evaluation=FLAGS.rounds_per_eval,
program_state_manager=program_state_manager,
rounds_per_saving_program_state=FLAGS.rounds_per_checkpoint,
metrics_managers=metrics_managers)
test_metrics = federated_eval(state.model, [test_data])
for metrics_manager in metrics_managers:
metrics_manager.release(test_metrics, FLAGS.total_rounds + 1)
if __name__ == '__main__':
app.run(main)
| 42.760181
| 91
| 0.742963
| 1,211
| 9,450
| 5.500413
| 0.231214
| 0.026422
| 0.030026
| 0.036031
| 0.25912
| 0.218135
| 0.183756
| 0.143522
| 0.130911
| 0.110194
| 0
| 0.004398
| 0.181905
| 9,450
| 220
| 92
| 42.954545
| 0.857198
| 0.09619
| 0
| 0.153846
| 0
| 0
| 0.202561
| 0.028669
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023669
| false
| 0
| 0.059172
| 0.011834
| 0.094675
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc11f9759b82ea3a650e3c9261504b9c80e953f0
| 417
|
py
|
Python
|
waymo_kitti_converter/tools/visual_point_cloud.py
|
anhvth/Pseudo_Lidar_V2
|
d7a29ffc811e315df25bba2a43acf288d4ceb30e
|
[
"MIT"
] | null | null | null |
waymo_kitti_converter/tools/visual_point_cloud.py
|
anhvth/Pseudo_Lidar_V2
|
d7a29ffc811e315df25bba2a43acf288d4ceb30e
|
[
"MIT"
] | null | null | null |
waymo_kitti_converter/tools/visual_point_cloud.py
|
anhvth/Pseudo_Lidar_V2
|
d7a29ffc811e315df25bba2a43acf288d4ceb30e
|
[
"MIT"
] | null | null | null |
import open3d as o3d
import numpy as np
pc_load_pathname = '/home/caizhongang/github/waymo_kitti_converter/007283-000.bin'
pc = np.fromfile(pc_load_pathname, dtype=np.float32).reshape(-1, 3)
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(pc)
axis = o3d.geometry.TriangleMesh.create_coordinate_frame(size=1, origin=[0,0,0])
visual = [pcd, axis]
o3d.visualization.draw_geometries(visual)
| 34.75
| 82
| 0.781775
| 63
| 417
| 5.031746
| 0.666667
| 0.037855
| 0.088328
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.063158
| 0.088729
| 417
| 11
| 83
| 37.909091
| 0.771053
| 0
| 0
| 0
| 0
| 0
| 0.146283
| 0.146283
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.222222
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc12305fff510e126657094db88dd638e8718e01
| 1,042
|
py
|
Python
|
part01_basic/for_while_loop.py
|
ApprenticeOne/python_learn
|
2433726b3f164526e8a8fa18739854e052d76a2e
|
[
"MIT"
] | null | null | null |
part01_basic/for_while_loop.py
|
ApprenticeOne/python_learn
|
2433726b3f164526e8a8fa18739854e052d76a2e
|
[
"MIT"
] | null | null | null |
part01_basic/for_while_loop.py
|
ApprenticeOne/python_learn
|
2433726b3f164526e8a8fa18739854e052d76a2e
|
[
"MIT"
] | null | null | null |
import random
from math import sqrt
sum = 0
for x in range(101):
sum += x
print(sum)
'''
range(101)        0-100, 101 numbers in total
range(1,101)      1-100
range(1,101,2)    odd numbers between 1 and 100, step 2
range(100,0,-2)   even numbers between 100 and 0, step -2
'''
sum = 0
for x in range(100, 0, -2):
sum += x
print(sum)
# while loop
# pick a random number between 0 and 100
answer = random.randint(0, 100)
count = 0
while True:
count += 1
number = int(input("Please enter the number: "))
if number < answer:
print("more larger")
elif number > answer:
print("more smaller")
else:
print("right")
print('it took you %d tries to get the right answer' % count)
for i in range(1, 10):
for j in range(1, i + 1):
print('%d*%d=%d' % (i, j, i * j), end='\t')
print()
# read a positive integer and check whether it is a prime number
num = int(input('Please enter a positive integer: '))
end = int(sqrt(num))
is_prime = True
# why stop at end: if the number has a factor smaller than its square root,
# it must also have a corresponding factor larger than the square root
for x in range(2, end + 1):
if num % x == 0:
is_prime = False
break
if is_prime and num != 1:
    print('%d is a prime number' % num)
else:
    print('%d is not a prime number' % num)
| 17.366667
| 53
| 0.589251
| 168
| 1,042
| 3.636905
| 0.380952
| 0.057283
| 0.02946
| 0.05401
| 0.0491
| 0.0491
| 0
| 0
| 0
| 0
| 0
| 0.086452
| 0.256238
| 1,042
| 59
| 54
| 17.661017
| 0.701935
| 0.077735
| 0
| 0.216216
| 0
| 0
| 0.143713
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.054054
| 0
| 0.054054
| 0.27027
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc140cda2ae3ddb2fa94e33b0e36406cb6293308
| 12,340
|
py
|
Python
|
src/toil/batchSystems/htcondor.py
|
ElementGenomicsInc/toil
|
e29a07db194469afba3edf90ffeee8f981f7344b
|
[
"Apache-2.0"
] | 2
|
2019-01-16T03:55:57.000Z
|
2019-01-16T04:04:38.000Z
|
src/toil/batchSystems/htcondor.py
|
ElementGenomicsInc/toil
|
e29a07db194469afba3edf90ffeee8f981f7344b
|
[
"Apache-2.0"
] | 4
|
2018-10-02T00:39:18.000Z
|
2018-10-02T00:52:31.000Z
|
src/toil/batchSystems/htcondor.py
|
ElementGenomicsInc/toil
|
e29a07db194469afba3edf90ffeee8f981f7344b
|
[
"Apache-2.0"
] | 2
|
2018-10-09T06:31:52.000Z
|
2018-11-16T00:49:40.000Z
|
# Copyright (C) 2018, HTCondor Team, Computer Sciences Department,
# University of Wisconsin-Madison, WI.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You may
# obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from builtins import str
import sys
import os
import logging
import time
import math
from toil.batchSystems.abstractGridEngineBatchSystem import AbstractGridEngineBatchSystem
import htcondor
import classad
logger = logging.getLogger(__name__)
class HTCondorBatchSystem(AbstractGridEngineBatchSystem):
# When using HTCondor, the Schedd handles scheduling
class Worker(AbstractGridEngineBatchSystem.Worker):
# Override the createJobs method so that we can use htcondor.Submit objects
# and so that we can get disk allocation requests and ceil the CPU request.
def createJobs(self, newJob):
activity = False
if newJob is not None:
self.waitingJobs.append(newJob)
# Queue jobs as necessary:
while len(self.waitingJobs) > 0:
activity = True
jobID, cpu, memory, disk, jobName, command = self.waitingJobs.pop(0)
# Prepare the htcondor.Submit object
submitObj = self.prepareSubmission(cpu, memory, disk, jobID, jobName, command)
logger.debug("Submitting %r", submitObj)
# Submit job and get batch system ID (i.e. the ClusterId)
batchJobID = self.submitJob(submitObj)
logger.debug("Submitted job %s", str(batchJobID))
# Store dict for mapping Toil job ID to batch job ID
# TODO: Note that this currently stores a tuple of (batch system
# ID, Task), but the second value is None by default and doesn't
# seem to be used
self.batchJobIDs[jobID] = (batchJobID, None)
# Add to queue of queued ("running") jobs
self.runningJobs.add(jobID)
# Add to allocated resources
self.allocatedCpus[jobID] = int(math.ceil(cpu))
return activity
def prepareSubmission(self, cpu, memory, disk, jobID, jobName, command):
# Convert resource requests
cpu = int(math.ceil(cpu)) # integer CPUs
memory = float(memory)/1024 # memory in KB
disk = float(disk)/1024 # disk in KB
# Workaround for HTCondor Python bindings Unicode conversion bug
command = command.encode('utf-8')
# Execute the entire command as /bin/sh -c "command"
# TODO: Transfer the jobStore directory if using a local file store with a relative path.
submit_parameters = {
'executable': '/bin/sh',
'transfer_executable': 'False',
'arguments': '''"-c '{0}'"'''.format(command),
'environment': self.getEnvString(),
'request_cpus': '{0}'.format(cpu),
'request_memory': '{0:.3f}KB'.format(memory),
'request_disk': '{0:.3f}KB'.format(disk),
'leave_in_queue': '(JobStatus == 4)',
'+IsToilJob': 'True',
'+ToilJobID': '{0}'.format(jobID),
'+ToilJobName': '"{0}"'.format(jobName),
'+ToilJobKilled': 'False',
}
# Return the Submit object
return htcondor.Submit(submit_parameters)
def submitJob(self, submitObj):
# Queue the job using a Schedd transaction
schedd = self.connectSchedd()
with schedd.transaction() as txn:
batchJobID = submitObj.queue(txn)
# Return the ClusterId
return batchJobID
def getRunningJobIDs(self):
# Get all Toil jobs that are running
requirements = '(JobStatus == 2) && (IsToilJob)'
projection = ['ClusterId', 'ToilJobID', 'EnteredCurrentStatus']
schedd = self.connectSchedd()
ads = schedd.xquery(requirements = requirements,
projection = projection)
# Only consider the Toil jobs that are part of this workflow
batchJobIDs = [batchJobID for (batchJobID, task) in self.batchJobIDs.values()]
job_runtimes = {}
for ad in ads:
batchJobID = int(ad['ClusterId'])
jobID = int(ad['ToilJobID'])
if not (batchJobID in batchJobIDs):
continue
# HTCondor stores the start of the runtime as a Unix timestamp
runtime = time.time() - ad['EnteredCurrentStatus']
job_runtimes[jobID] = runtime
return job_runtimes
def killJob(self, jobID):
batchJobID = self.batchJobIDs[jobID][0]
logger.debug("Killing HTCondor job {0}".format(batchJobID))
# Set the job to be killed when its exit status is checked
schedd = self.connectSchedd()
job_spec = '(ClusterId == {0})'.format(batchJobID)
schedd.edit(job_spec, 'ToilJobKilled', 'True')
def getJobExitCode(self, batchJobID):
logger.debug("Getting exit code for HTCondor job {0}".format(batchJobID))
status = {
1: 'Idle',
2: 'Running',
3: 'Removed',
4: 'Completed',
5: 'Held',
6: 'Transferring Output',
7: 'Suspended'
}
requirements = '(ClusterId == {0})'.format(batchJobID)
projection = ['JobStatus', 'ToilJobKilled', 'ExitCode',
'HoldReason', 'HoldReasonSubCode']
schedd = self.connectSchedd()
ads = schedd.xquery(requirements = requirements, projection = projection)
# Make sure a ClassAd was returned
try:
ad = next(ads)
except StopIteration:
logger.error(
"No HTCondor ads returned using constraint: {0}".format(requirements))
raise
# Make sure only one ClassAd was returned
try:
next(ads)
except StopIteration:
pass
else:
logger.warning(
"Multiple HTCondor ads returned using constraint: {0}".format(requirements))
if ad['ToilJobKilled']:
logger.debug("HTCondor job {0} was killed by Toil".format(batchJobID))
# Remove the job from the Schedd and return 1
job_spec = 'ClusterId == {0}'.format(batchJobID)
schedd.act(htcondor.JobAction.Remove, job_spec)
return 1
elif status[ad['JobStatus']] == 'Completed':
logger.debug("HTCondor job {0} completed with exit code {1}".format(
batchJobID, ad['ExitCode']))
# Remove the job from the Schedd and return its exit code
job_spec = 'ClusterId == {0}'.format(batchJobID)
schedd.act(htcondor.JobAction.Remove, job_spec)
return int(ad['ExitCode'])
elif status[ad['JobStatus']] == 'Held':
logger.error("HTCondor job {0} was held: '{1} (sub code {2})'".format(
batchJobID, ad['HoldReason'], ad['HoldReasonSubCode']))
# Remove the job from the Schedd and return 1
job_spec = 'ClusterId == {0}'.format(batchJobID)
schedd.act(htcondor.JobAction.Remove, job_spec)
return 1
else: # Job still running or idle or doing something else
logger.debug("HTCondor job {0} has not completed (Status: {1})".format(
batchJobID, status[ad['JobStatus']]))
return None
"""
Implementation-specific helper methods
"""
def connectSchedd(self):
'''Connect to HTCondor Schedd and return a Schedd object'''
condor_host = os.getenv('TOIL_HTCONDOR_COLLECTOR')
schedd_name = os.getenv('TOIL_HTCONDOR_SCHEDD')
# If TOIL_HTCONDOR_ variables are set, use them to find the Schedd
if condor_host and schedd_name:
logger.debug(
"Connecting to HTCondor Schedd {0} using Collector at {1}".format(
schedd_name, condor_host))
try:
schedd_ad = htcondor.Collector(condor_host).locate(
htcondor.DaemonTypes.Schedd, schedd_name)
except IOError:
logger.error(
"Could not connect to HTCondor Collector at {0}".format(condor_host))
raise
except ValueError:
logger.error(
"Could not find HTCondor Schedd with name {0}".format(schedd_name))
raise
else:
schedd = htcondor.Schedd(schedd_ad)
# Otherwise assume the Schedd is on the local machine
else:
logger.debug("Connecting to HTCondor Schedd on local machine")
schedd = htcondor.Schedd()
# Ping the Schedd to make sure it's there and responding
try:
schedd.xquery(limit = 0)
except RuntimeError:
logger.error("Could not connect to HTCondor Schedd")
raise
return schedd
def getEnvString(self):
'''Build an environment string that a HTCondor Submit object can use.
For examples of valid strings, see:
http://research.cs.wisc.edu/htcondor/manual/current/condor_submit.html#man-condor-submit-environment
'''
env_items = []
if self.boss.environment:
for key, value in self.boss.environment.items():
# Each variable should be in the form of <key>='<value>'
env_string = key + "="
# The entire value should be encapsulated in single quotes
# Quote marks (single or double) that are part of the value should be duplicated
env_string += "'" + value.replace("'", "''").replace('"', '""') + "'"
env_items.append(env_string)
# The entire string should be encapsulated in double quotes
# Each variable should be separated by a single space
return '"' + ' '.join(env_items) + '"'
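# Hedged illustration (hypothetical variable, not from this codebase): for an
# environment of {'TOIL_DIR': '/tmp/toil'} the string built above would be
# "TOIL_DIR='/tmp/toil'" (outer double quotes included); any single or double
# quotes inside a value are doubled as described in the comments above.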
# Override the issueBatchJob method so HTCondor can be given the disk request
def issueBatchJob(self, jobNode):
# Avoid submitting internal jobs to the batch queue, handle locally
localID = self.handleLocalJob(jobNode)
if localID:
return localID
else:
self.checkResourceRequest(jobNode.memory, jobNode.cores, jobNode.disk)
jobID = self.getNextJobID()
self.currentJobs.add(jobID)
# Add the jobNode.disk and jobNode.jobName to the job tuple
self.newJobsQueue.put((jobID, jobNode.cores, jobNode.memory, jobNode.disk, jobNode.jobName, jobNode.command))
logger.debug("Issued the job command: %s with job id: %s ", jobNode.command, str(jobID))
return jobID
@classmethod
def obtainSystemConstants(cls):
# Since it's not always clear what the max cpus and max memory available
# in an HTCondor slot might be, use some reasonable constants for now.
# TODO: Use a htcondor.Collector().query() to determine reasonable values.
max_cpu = 4
max_mem = 4e9
return max_cpu, max_mem
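# Hedged sketch for the TODO above (assumptions: a reachable pool, the standard
# HTCondor Python bindings, and the usual Startd ad attributes; Memory is in MB):
# ads = htcondor.Collector().query(htcondor.AdTypes.Startd, projection=['Cpus', 'Memory'])
# max_cpu = max(int(ad['Cpus']) for ad in ads)
# max_mem = max(float(ad['Memory']) * 1024 * 1024 for ad in ads)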
| 40.19544
| 121
| 0.569044
| 1,296
| 12,340
| 5.374228
| 0.299383
| 0.015075
| 0.017085
| 0.018665
| 0.141278
| 0.123331
| 0.103518
| 0.087581
| 0.06748
| 0.06748
| 0
| 0.008156
| 0.344246
| 12,340
| 306
| 122
| 40.326797
| 0.85257
| 0.257293
| 0
| 0.168539
| 0
| 0
| 0.148749
| 0.002557
| 0
| 0
| 0
| 0.006536
| 0
| 1
| 0.05618
| false
| 0.005618
| 0.05618
| 0
| 0.196629
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc15adfda30a5ded3481fe570a59a41b60da2bcc
| 26,347
|
py
|
Python
|
paddlespeech/t2s/modules/tacotron2/decoder.py
|
alanlv/PaddleSpeech
|
7413c9e48ac77fdece45e0b4ffe41f7746ef0583
|
[
"Apache-2.0"
] | null | null | null |
paddlespeech/t2s/modules/tacotron2/decoder.py
|
alanlv/PaddleSpeech
|
7413c9e48ac77fdece45e0b4ffe41f7746ef0583
|
[
"Apache-2.0"
] | null | null | null |
paddlespeech/t2s/modules/tacotron2/decoder.py
|
alanlv/PaddleSpeech
|
7413c9e48ac77fdece45e0b4ffe41f7746ef0583
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modified from espnet(https://github.com/espnet/espnet)
"""Tacotron2 decoder related modules."""
import paddle
import paddle.nn.functional as F
import six
from paddle import nn
from paddlespeech.t2s.modules.tacotron2.attentions import AttForwardTA
class Prenet(nn.Layer):
"""Prenet module for decoder of Spectrogram prediction network.
This is a module of Prenet in the decoder of Spectrogram prediction network,
which is described in `Natural TTS
Synthesis by Conditioning WaveNet on Mel Spectrogram Predictions`_.
The Prenet performs nonlinear conversion
of the inputs before they are fed to the auto-regressive LSTM,
which helps to learn diagonal attentions.
Notes
----------
This module always applies dropout, even in evaluation.
See the detail in `Natural TTS Synthesis by
Conditioning WaveNet on Mel Spectrogram Predictions`_.
.. _`Natural TTS Synthesis by Conditioning WaveNet on Mel Spectrogram Predictions`:
https://arxiv.org/abs/1712.05884
"""
def __init__(self, idim, n_layers=2, n_units=256, dropout_rate=0.5):
"""Initialize prenet module.
Parameters
----------
idim : int
Dimension of the inputs.
n_layers : int, optional
The number of prenet layers.
n_units : int, optional
The number of prenet units.
dropout_rate : float, optional
Dropout rate.
"""
super().__init__()
self.dropout_rate = dropout_rate
self.prenet = nn.LayerList()
for layer in six.moves.range(n_layers):
n_inputs = idim if layer == 0 else n_units
self.prenet.append(
nn.Sequential(nn.Linear(n_inputs, n_units), nn.ReLU()))
def forward(self, x):
"""Calculate forward propagation.
Parameters
----------
x : Tensor
Batch of input tensors (B, ..., idim).
Returns
----------
Tensor
Batch of output tensors (B, ..., odim).
"""
for i in six.moves.range(len(self.prenet)):
# F.dropout introduces randomness; the dropout in Tacotron2 must not be removed (it stays active even at inference)
x = F.dropout(self.prenet[i](x))
return x
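# Hedged usage sketch (shapes are illustrative, not from the original docs): a
# Prenet(idim=80) maps a (4, 80) batch to (4, 256), and dropout stays active
# even in eval mode.
# >>> prenet = Prenet(idim=80)
# >>> ys = prenet(paddle.randn([4, 80]))   # ys.shape == [4, 256]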
class Postnet(nn.Layer):
"""Postnet module for Spectrogram prediction network.
This is a module of Postnet in Spectrogram prediction network,
which is described in `Natural TTS Synthesis by
Conditioning WaveNet on Mel Spectrogram Predictions`_.
The Postnet refines the predicted
Mel-filterbank of the decoder,
which helps to compensate for the detailed structure of the spectrogram.
.. _`Natural TTS Synthesis by Conditioning WaveNet on Mel Spectrogram Predictions`:
https://arxiv.org/abs/1712.05884
"""
def __init__(
self,
idim,
odim,
n_layers=5,
n_chans=512,
n_filts=5,
dropout_rate=0.5,
use_batch_norm=True, ):
"""Initialize postnet module.
Parameters
----------
idim : int
Dimension of the inputs.
odim : int
Dimension of the outputs.
n_layers : int, optional
The number of layers.
n_filts : int, optional
The filter size.
n_chans : int, optional
The number of filter channels.
use_batch_norm : bool, optional
Whether to use batch normalization.
dropout_rate : float, optional
Dropout rate.
"""
super().__init__()
self.postnet = nn.LayerList()
for layer in six.moves.range(n_layers - 1):
ichans = odim if layer == 0 else n_chans
ochans = odim if layer == n_layers - 1 else n_chans
if use_batch_norm:
self.postnet.append(
nn.Sequential(
nn.Conv1D(
ichans,
ochans,
n_filts,
stride=1,
padding=(n_filts - 1) // 2,
bias_attr=False, ),
nn.BatchNorm1D(ochans),
nn.Tanh(),
nn.Dropout(dropout_rate), ))
else:
self.postnet.append(
nn.Sequential(
nn.Conv1D(
ichans,
ochans,
n_filts,
stride=1,
padding=(n_filts - 1) // 2,
bias_attr=False, ),
nn.Tanh(),
nn.Dropout(dropout_rate), ))
ichans = n_chans if n_layers != 1 else odim
if use_batch_norm:
self.postnet.append(
nn.Sequential(
nn.Conv1D(
ichans,
odim,
n_filts,
stride=1,
padding=(n_filts - 1) // 2,
bias_attr=False, ),
nn.BatchNorm1D(odim),
nn.Dropout(dropout_rate), ))
else:
self.postnet.append(
nn.Sequential(
nn.Conv1D(
ichans,
odim,
n_filts,
stride=1,
padding=(n_filts - 1) // 2,
bias_attr=False, ),
nn.Dropout(dropout_rate), ))
def forward(self, xs):
"""Calculate forward propagation.
Parameters
----------
xs : Tensor
Batch of the sequences of padded input tensors (B, idim, Tmax).
Returns
----------
Tensor
Batch of padded output tensors (B, odim, Tmax).
"""
for i in six.moves.range(len(self.postnet)):
xs = self.postnet[i](xs)
return xs
class ZoneOutCell(nn.Layer):
"""ZoneOut Cell module.
This is a module of zoneout described in
`Zoneout: Regularizing RNNs by Randomly Preserving Hidden Activations`_.
This code is modified from `eladhoffer/seq2seq.pytorch`_.
Examples
----------
>>> lstm = paddle.nn.LSTMCell(16, 32)
>>> lstm = ZoneOutCell(lstm, 0.5)
.. _`Zoneout: Regularizing RNNs by Randomly Preserving Hidden Activations`:
https://arxiv.org/abs/1606.01305
.. _`eladhoffer/seq2seq.pytorch`:
https://github.com/eladhoffer/seq2seq.pytorch
"""
def __init__(self, cell, zoneout_rate=0.1):
"""Initialize zone out cell module.
Parameters
----------
cell : nn.Layer:
Paddle recurrent cell module
e.g. `paddle.nn.LSTMCell`.
zoneout_rate : float, optional
Probability of zoneout from 0.0 to 1.0.
"""
super().__init__()
self.cell = cell
self.hidden_size = cell.hidden_size
self.zoneout_rate = zoneout_rate
if zoneout_rate > 1.0 or zoneout_rate < 0.0:
raise ValueError(
"zoneout probability must be in the range from 0.0 to 1.0.")
def forward(self, inputs, hidden):
"""Calculate forward propagation.
Parameters
----------
inputs : Tensor
Batch of input tensor (B, input_size).
hidden : tuple
- Tensor: Batch of initial hidden states (B, hidden_size).
- Tensor: Batch of initial cell states (B, hidden_size).
Returns
----------
Tensor
Batch of next hidden states (B, hidden_size).
tuple:
- Tensor: Batch of next hidden states (B, hidden_size).
- Tensor: Batch of next cell states (B, hidden_size).
"""
# we only use the second output of LSTMCell in paddle
_, next_hidden = self.cell(inputs, hidden)
next_hidden = self._zoneout(hidden, next_hidden, self.zoneout_rate)
# to have the same output format with LSTMCell in paddle
return next_hidden[0], next_hidden
def _zoneout(self, h, next_h, prob):
# apply recursively
if isinstance(h, tuple):
num_h = len(h)
if not isinstance(prob, tuple):
prob = tuple([prob] * num_h)
return tuple(
[self._zoneout(h[i], next_h[i], prob[i]) for i in range(num_h)])
if self.training:
mask = paddle.bernoulli(paddle.ones([*paddle.shape(h)]) * prob)
return mask * h + (1 - mask) * next_h
else:
return prob * h + (1 - prob) * next_h
class Decoder(nn.Layer):
"""Decoder module of Spectrogram prediction network.
This is a module of decoder of Spectrogram prediction network in Tacotron2,
which is described in `Natural TTS
Synthesis by Conditioning WaveNet on Mel Spectrogram Predictions`_.
The decoder generates the sequence of
features from the sequence of the hidden states.
.. _`Natural TTS Synthesis by Conditioning WaveNet on Mel Spectrogram Predictions`:
https://arxiv.org/abs/1712.05884
"""
def __init__(
self,
idim,
odim,
att,
dlayers=2,
dunits=1024,
prenet_layers=2,
prenet_units=256,
postnet_layers=5,
postnet_chans=512,
postnet_filts=5,
output_activation_fn=None,
cumulate_att_w=True,
use_batch_norm=True,
use_concate=True,
dropout_rate=0.5,
zoneout_rate=0.1,
reduction_factor=1, ):
"""Initialize Tacotron2 decoder module.
Parameters
----------
idim : int
Dimension of the inputs.
odim : int
Dimension of the outputs.
att : nn.Layer
Instance of attention class.
dlayers : int, optional
The number of decoder lstm layers.
dunits : int, optional
The number of decoder lstm units.
prenet_layers : int, optional
The number of prenet layers.
prenet_units : int, optional
The number of prenet units.
postnet_layers : int, optional
The number of postnet layers.
postnet_filts : int, optional
The postnet filter size.
postnet_chans : int, optional
The number of postnet filter channels.
output_activation_fn : nn.Layer, optional
Activation function for outputs.
cumulate_att_w : bool, optional
Whether to cumulate previous attention weight.
use_batch_norm : bool, optional
Whether to use batch normalization.
use_concate : bool, optional
Whether to concatenate encoder embedding with decoder lstm outputs.
dropout_rate : float, optional
Dropout rate.
zoneout_rate : float, optional
Zoneout rate.
reduction_factor : int, optional
Reduction factor.
"""
super().__init__()
# store the hyperparameters
self.idim = idim
self.odim = odim
self.att = att
self.output_activation_fn = output_activation_fn
self.cumulate_att_w = cumulate_att_w
self.use_concate = use_concate
self.reduction_factor = reduction_factor
# check attention type
if isinstance(self.att, AttForwardTA):
self.use_att_extra_inputs = True
else:
self.use_att_extra_inputs = False
# define lstm network
prenet_units = prenet_units if prenet_layers != 0 else odim
self.lstm = nn.LayerList()
for layer in six.moves.range(dlayers):
iunits = idim + prenet_units if layer == 0 else dunits
lstm = nn.LSTMCell(iunits, dunits)
if zoneout_rate > 0.0:
lstm = ZoneOutCell(lstm, zoneout_rate)
self.lstm.append(lstm)
# define prenet
if prenet_layers > 0:
self.prenet = Prenet(
idim=odim,
n_layers=prenet_layers,
n_units=prenet_units,
dropout_rate=dropout_rate, )
else:
self.prenet = None
# define postnet
if postnet_layers > 0:
self.postnet = Postnet(
idim=idim,
odim=odim,
n_layers=postnet_layers,
n_chans=postnet_chans,
n_filts=postnet_filts,
use_batch_norm=use_batch_norm,
dropout_rate=dropout_rate, )
else:
self.postnet = None
# define projection layers
iunits = idim + dunits if use_concate else dunits
self.feat_out = nn.Linear(
iunits, odim * reduction_factor, bias_attr=False)
self.prob_out = nn.Linear(iunits, reduction_factor)
# initialize
# self.apply(decoder_init)
def _zero_state(self, hs):
init_hs = paddle.zeros([paddle.shape(hs)[0], self.lstm[0].hidden_size])
return init_hs
def forward(self, hs, hlens, ys):
"""Calculate forward propagation.
Parameters
----------
hs : Tensor
Batch of the sequences of padded hidden states (B, Tmax, idim).
hlens : Tensor(int64) padded
Batch of lengths of each input batch (B,).
ys : Tensor
Batch of the sequences of padded target features (B, Lmax, odim).
Returns
----------
Tensor
Batch of output tensors after postnet (B, Lmax, odim).
Tensor
Batch of output tensors before postnet (B, Lmax, odim).
Tensor
Batch of logits of stop prediction (B, Lmax).
Tensor
Batch of attention weights (B, Lmax, Tmax).
Note
----------
This computation is performed in teacher-forcing manner.
"""
# thin out frames (B, Lmax, odim) -> (B, Lmax/r, odim)
if self.reduction_factor > 1:
ys = ys[:, self.reduction_factor - 1::self.reduction_factor]
# length list should be list of int
# hlens = list(map(int, hlens))
# initialize hidden states of decoder
c_list = [self._zero_state(hs)]
z_list = [self._zero_state(hs)]
for _ in six.moves.range(1, len(self.lstm)):
c_list += [self._zero_state(hs)]
z_list += [self._zero_state(hs)]
prev_out = paddle.zeros([paddle.shape(hs)[0], self.odim])
# initialize attention
prev_att_w = None
self.att.reset()
# loop for an output sequence
outs, logits, att_ws = [], [], []
for y in ys.transpose([1, 0, 2]):
if self.use_att_extra_inputs:
att_c, att_w = self.att(hs, hlens, z_list[0], prev_att_w,
prev_out)
else:
att_c, att_w = self.att(hs, hlens, z_list[0], prev_att_w)
prenet_out = self.prenet(
prev_out) if self.prenet is not None else prev_out
xs = paddle.concat([att_c, prenet_out], axis=1)
# we only use the second output of LSTMCell in paddle
_, next_hidden = self.lstm[0](xs, (z_list[0], c_list[0]))
z_list[0], c_list[0] = next_hidden
for i in six.moves.range(1, len(self.lstm)):
# we only use the second output of LSTMCell in paddle
_, next_hidden = self.lstm[i](z_list[i - 1],
(z_list[i], c_list[i]))
z_list[i], c_list[i] = next_hidden
zcs = (paddle.concat([z_list[-1], att_c], axis=1)
if self.use_concate else z_list[-1])
outs += [
self.feat_out(zcs).reshape([paddle.shape(hs)[0], self.odim, -1])
]
logits += [self.prob_out(zcs)]
att_ws += [att_w]
# teacher forcing
prev_out = y
if self.cumulate_att_w and prev_att_w is not None:
prev_att_w = prev_att_w + att_w # Note: error when use +=
else:
prev_att_w = att_w
# (B, Lmax)
logits = paddle.concat(logits, axis=1)
# (B, odim, Lmax)
before_outs = paddle.concat(outs, axis=2)
# (B, Lmax, Tmax)
att_ws = paddle.stack(att_ws, axis=1)
if self.reduction_factor > 1:
# (B, odim, Lmax)
before_outs = before_outs.reshape(
[paddle.shape(before_outs)[0], self.odim, -1])
if self.postnet is not None:
# (B, odim, Lmax)
after_outs = before_outs + self.postnet(before_outs)
else:
after_outs = before_outs
# (B, Lmax, odim)
before_outs = before_outs.transpose([0, 2, 1])
# (B, Lmax, odim)
after_outs = after_outs.transpose([0, 2, 1])
logits = logits
# apply activation function for scaling
if self.output_activation_fn is not None:
before_outs = self.output_activation_fn(before_outs)
after_outs = self.output_activation_fn(after_outs)
return after_outs, before_outs, logits, att_ws
def inference(
self,
h,
threshold=0.5,
minlenratio=0.0,
maxlenratio=10.0,
use_att_constraint=False,
backward_window=None,
forward_window=None, ):
"""Generate the sequence of features given the sequences of characters.
Parameters
----------
h : Tensor
Input sequence of encoder hidden states (T, C).
threshold : float, optional
Threshold to stop generation.
minlenratio : float, optional
Minimum length ratio.
If set to 1.0 and the length of input is 10,
the minimum length of outputs will be 10 * 1 = 10.
maxlenratio : float, optional
Maximum length ratio.
If set to 10 and the length of input is 10,
the maximum length of outputs will be 10 * 10 = 100.
use_att_constraint : bool
Whether to apply attention constraint introduced in `Deep Voice 3`_.
backward_window : int
Backward window size in attention constraint.
forward_window : int
Forward window size in attention constraint.
Returns
----------
Tensor
Output sequence of features (L, odim).
Tensor
Output sequence of stop probabilities (L,).
Tensor
Attention weights (L, T).
Note
----------
This computation is performed in auto-regressive manner.
.. _`Deep Voice 3`: https://arxiv.org/abs/1710.07654
"""
# setup
assert len(paddle.shape(h)) == 2
hs = h.unsqueeze(0)
ilens = paddle.shape(h)[0]
maxlen = int(paddle.shape(h)[0] * maxlenratio)
minlen = int(paddle.shape(h)[0] * minlenratio)
# initialize hidden states of decoder
c_list = [self._zero_state(hs)]
z_list = [self._zero_state(hs)]
for _ in six.moves.range(1, len(self.lstm)):
c_list += [self._zero_state(hs)]
z_list += [self._zero_state(hs)]
prev_out = paddle.zeros([1, self.odim])
# initialize attention
prev_att_w = None
self.att.reset()
# setup for attention constraint
if use_att_constraint:
last_attended_idx = 0
else:
last_attended_idx = None
# loop for an output sequence
idx = 0
outs, att_ws, probs = [], [], []
while True:
# updated index
idx += self.reduction_factor
# decoder calculation
if self.use_att_extra_inputs:
att_c, att_w = self.att(
hs,
ilens,
z_list[0],
prev_att_w,
prev_out,
last_attended_idx=last_attended_idx,
backward_window=backward_window,
forward_window=forward_window, )
else:
att_c, att_w = self.att(
hs,
ilens,
z_list[0],
prev_att_w,
last_attended_idx=last_attended_idx,
backward_window=backward_window,
forward_window=forward_window, )
att_ws += [att_w]
prenet_out = self.prenet(
prev_out) if self.prenet is not None else prev_out
xs = paddle.concat([att_c, prenet_out], axis=1)
# we only use the second output of LSTMCell in paddle
_, next_hidden = self.lstm[0](xs, (z_list[0], c_list[0]))
z_list[0], c_list[0] = next_hidden
for i in six.moves.range(1, len(self.lstm)):
# we only use the second output of LSTMCell in paddle
_, next_hidden = self.lstm[i](z_list[i - 1],
(z_list[i], c_list[i]))
z_list[i], c_list[i] = next_hidden
zcs = (paddle.concat([z_list[-1], att_c], axis=1)
if self.use_concate else z_list[-1])
# [(1, odim, r), ...]
outs += [self.feat_out(zcs).reshape([1, self.odim, -1])]
# [(r), ...]
probs += [F.sigmoid(self.prob_out(zcs))[0]]
if self.output_activation_fn is not None:
prev_out = self.output_activation_fn(
outs[-1][:, :, -1]) # (1, odim)
else:
prev_out = outs[-1][:, :, -1] # (1, odim)
if self.cumulate_att_w and prev_att_w is not None:
prev_att_w = prev_att_w + att_w # Note: error when use +=
else:
prev_att_w = att_w
if use_att_constraint:
last_attended_idx = int(att_w.argmax())
# check whether to finish generation
if sum(paddle.cast(probs[-1] >= threshold,
'int64')) > 0 or idx >= maxlen:
# check mininum length
if idx < minlen:
continue
# (1, odim, L)
outs = paddle.concat(outs, axis=2)
if self.postnet is not None:
# (1, odim, L)
outs = outs + self.postnet(outs)
# (L, odim)
outs = outs.transpose([0, 2, 1]).squeeze(0)
probs = paddle.concat(probs, axis=0)
att_ws = paddle.concat(att_ws, axis=0)
break
if self.output_activation_fn is not None:
outs = self.output_activation_fn(outs)
return outs, probs, att_ws
def calculate_all_attentions(self, hs, hlens, ys):
"""Calculate all of the attention weights.
Parameters
----------
hs : Tensor
Batch of the sequences of padded hidden states (B, Tmax, idim).
hlens : Tensor(int64)
Batch of lengths of each input batch (B,).
ys : Tensor
Batch of the sequences of padded target features (B, Lmax, odim).
Returns
----------
numpy.ndarray
Batch of attention weights (B, Lmax, Tmax).
Note
----------
This computation is performed in teacher-forcing manner.
"""
# thin out frames (B, Lmax, odim) -> (B, Lmax/r, odim)
if self.reduction_factor > 1:
ys = ys[:, self.reduction_factor - 1::self.reduction_factor]
# length list should be list of int
hlens = list(map(int, hlens))
# initialize hidden states of decoder
c_list = [self._zero_state(hs)]
z_list = [self._zero_state(hs)]
for _ in six.moves.range(1, len(self.lstm)):
c_list += [self._zero_state(hs)]
z_list += [self._zero_state(hs)]
prev_out = paddle.zeros([paddle.shape(hs)[0], self.odim])
# initialize attention
prev_att_w = None
self.att.reset()
# loop for an output sequence
att_ws = []
for y in ys.transpose([1, 0, 2]):
if self.use_att_extra_inputs:
att_c, att_w = self.att(hs, hlens, z_list[0], prev_att_w,
prev_out)
else:
att_c, att_w = self.att(hs, hlens, z_list[0], prev_att_w)
att_ws += [att_w]
prenet_out = self.prenet(
prev_out) if self.prenet is not None else prev_out
xs = paddle.concat([att_c, prenet_out], axis=1)
# we only use the second output of LSTMCell in paddle
_, next_hidden = self.lstm[0](xs, (z_list[0], c_list[0]))
z_list[0], c_list[0] = next_hidden
for i in six.moves.range(1, len(self.lstm)):
z_list[i], c_list[i] = self.lstm[i](z_list[i - 1],
(z_list[i], c_list[i]))
# teacher forcing
prev_out = y
if self.cumulate_att_w and prev_att_w is not None:
# Note: error when use +=
prev_att_w = prev_att_w + att_w
else:
prev_att_w = att_w
# (B, Lmax, Tmax)
att_ws = paddle.stack(att_ws, axis=1)
return att_ws
| 36.290634
| 87
| 0.537405
| 3,109
| 26,347
| 4.391123
| 0.122869
| 0.012892
| 0.012306
| 0.01758
| 0.55274
| 0.508204
| 0.463522
| 0.437811
| 0.391737
| 0.372107
| 0
| 0.016786
| 0.371427
| 26,347
| 725
| 88
| 36.34069
| 0.80756
| 0.355866
| 0
| 0.514045
| 0
| 0
| 0.004039
| 0
| 0
| 0
| 0
| 0
| 0.002809
| 1
| 0.033708
| false
| 0
| 0.014045
| 0
| 0.087079
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc180c50e2be52fc8b9a19b64b0af4e3927de263
| 12,367
|
py
|
Python
|
dataset/scan2cad/s2c_collect_pgroup.py
|
jeonghyunkeem/PointGroup
|
fa90830259aeb37d2e0f203471552d2f43cbc60b
|
[
"Apache-2.0"
] | null | null | null |
dataset/scan2cad/s2c_collect_pgroup.py
|
jeonghyunkeem/PointGroup
|
fa90830259aeb37d2e0f203471552d2f43cbc60b
|
[
"Apache-2.0"
] | null | null | null |
dataset/scan2cad/s2c_collect_pgroup.py
|
jeonghyunkeem/PointGroup
|
fa90830259aeb37d2e0f203471552d2f43cbc60b
|
[
"Apache-2.0"
] | null | null | null |
# Jeonghyun Kim, UVR KAIST @jeonghyunct.kaist.ac.kr
import os, sys
import json
import h5py
import numpy as np
import quaternion
import torch
from torch.utils.data import Dataset
BASE_DIR_1 = os.path.dirname(os.path.abspath(__file__)) # scan2cad
BASE_DIR = os.path.dirname(BASE_DIR_1) # dataset
ROOT_DIR = os.path.dirname(BASE_DIR) # PointGroup
DATA_DIR = os.path.dirname(ROOT_DIR) # /root/
DATA_DIR = os.path.join(DATA_DIR, 'Dataset') # /root/Dataset
DUMP_DIR = os.path.join(ROOT_DIR, 'data')
sys.path.append(BASE_DIR)
sys.path.append(ROOT_DIR)
from s2c_map import CLASS_MAPPING, ID2NAME, CARED_CLASS_MASK
from s2c_config import Scan2CADDatasetConfig
import s2c_utils
sys.path.append(os.path.join(ROOT_DIR, 'models/retrieval/'))
DC = Scan2CADDatasetConfig()
MAX_NUM_POINT = 50000
MAX_NUM_OBJ = 64
INS_NUM_POINT = 2048
FEATURE_DIMENSION = 512
MAX_DATA_SIZE = 15000
CHUNK_SIZE = 1000
INF = 9999
NOT_CARED_ID = np.array([INF]) # wall, floor
# Thresholds
PADDING = 0.05
SCALE_THRASHOLD = 0.05
SEG_THRESHOLD = 1
REMAPPER = np.ones(35, dtype=np.int64) * (-1)
for i, x in enumerate(CARED_CLASS_MASK):
REMAPPER[x] = i
print(f'REMAPPER[{x:2d}] => {i:2d}')
SYM2CLASS = {"__SYM_NONE": 0, "__SYM_ROTATE_UP_2": 1, "__SYM_ROTATE_UP_4": 2, "__SYM_ROTATE_UP_INF": 3}
# functions ==============================================================================================
def from_q_to_6d(q):
q = np.quaternion(q[0], q[1], q[2], q[3])
mat = quaternion.as_rotation_matrix(q) # 3x3
rep6d = mat[:, 0:2].transpose().reshape(-1, 6) # 6
return rep6d
def nn_search(p, ps):
target = torch.from_numpy(ps.copy())
p = torch.from_numpy(p.copy())
p_diff = target - p
p_dist = torch.sum(p_diff**2, dim=-1)
dist, idx = torch.min(p_dist, dim=-1)
return dist.item(), idx.item()
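# Hedged illustration (made-up values): nn_search returns the squared distance to
# the closest row of ps and that row's index, e.g.
# nn_search(np.zeros(3), np.array([[1., 0., 0.], [0.1, 0., 0.]])) -> (~0.01, 1)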
def make_M_from_tqs(t, q, s):
q = np.quaternion(q[0], q[1], q[2], q[3])
T = np.eye(4)
T[0:3, 3] = t
R = np.eye(4)
R[0:3, 0:3] = quaternion.as_rotation_matrix(q)
S = np.eye(4)
S[0:3, 0:3] = np.diag(s)
M = T.dot(R).dot(S)
return M
def compose_mat4(t, q, s, center=None):
if not isinstance(q, np.quaternion):
q = np.quaternion(q[0], q[1], q[2], q[3])
T = np.eye(4)
T[0:3, 3] = t
R = np.eye(4)
R[0:3, 0:3] = quaternion.as_rotation_matrix(q)
S = np.eye(4)
S[0:3, 0:3] = np.diag(s)
C = np.eye(4)
if center is not None:
C[0:3, 3] = center
M = T.dot(R).dot(S).dot(C)
return M
def decompose_mat4(M):
R = M[0:3, 0:3].copy()
sx = np.linalg.norm(R[0:3, 0])
sy = np.linalg.norm(R[0:3, 1])
sz = np.linalg.norm(R[0:3, 2])
s = np.array([sx, sy, sz])
R[:,0] /= sx
R[:,1] /= sy
R[:,2] /= sz
q = quaternion.from_rotation_matrix(R[0:3, 0:3])
t = M[0:3, 3]
return t, q, s
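# Hedged round-trip check (illustrative values, not in the original): composing a
# transform and decomposing it again should recover the inputs (the quaternion up
# to sign), e.g.
# >>> t, q, s = decompose_mat4(make_M_from_tqs([1., 2., 3.], [1., 0., 0., 0.], [2., 2., 2.]))
# >>> np.allclose(t, [1., 2., 3.]) and np.allclose(s, [2., 2., 2.])
# True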
# ========================================================================================================
LOG_N = 100
def print_log(log):
print('-'*LOG_N+'\n'+log+' \n'+'-'*LOG_N)
class Scan2CADCollect(Dataset):
def __init__(self, split_set='train', distr_check=False):
self.data_path = os.path.join(DATA_DIR, 'Scan2CAD/export')
self.out_path = os.path.join(BASE_DIR_1, 'data4')
if not os.path.exists(self.out_path):
os.mkdir(self.out_path)
print("Create export directory: {}".format(self.out_path))
all_scan_names = list(set([os.path.basename(x)[0:12] \
for x in os.listdir(self.data_path) if x.startswith('scene')]))
self.scan_names = []
if split_set in ['all', 'train', 'val', 'test']:
split_filenames = os.path.join(BASE_DIR_1, 'meta_data',
'scan2cad_{}.txt'.format(split_set))
with open(split_filenames, 'r') as f:
self.scan_list = f.read().splitlines()
# remove unavailable scans
num_scans = len(self.scan_list)
self.scan_list = [sname for sname in self.scan_list \
if sname in all_scan_names]
print_log('Dataset for {}: kept {} scans out of {}'.format(split_set, len(self.scan_list), num_scans))
num_scans = len(self.scan_list)
else:
print('illegal split name')
return
filename_json = BASE_DIR_1 + "/full_annotations.json"
assert filename_json
self.dataset = {}
cat_summary = dict.fromkeys(DC.ClassToName, 0)
cat_ids = []
with open(filename_json, 'r') as f:
data = json.load(f)
d = {}
i = -1
for idx, r in enumerate(data):
i_scan = r["id_scan"]
if i_scan not in self.scan_list:
continue
self.scan_names.append(i_scan)
i += 1
d[i] = {}
d[i]['id_scan'] = i_scan
d[i]['trs'] = r["trs"]
n_model = r["n_aligned_models"]
d[i]['n_total'] = n_model
d[i]['models'] = {}
for j in range(n_model):
d[i]['models'][j] = {}
d[i]['models'][j]['trs'] = r["aligned_models"][j]['trs']
d[i]['models'][j]['center'] = r["aligned_models"][j]['center']
d[i]['models'][j]['bbox'] = r["aligned_models"][j]['bbox']
d[i]['models'][j]['sym'] = SYM2CLASS[r["aligned_models"][j]['sym']]
d[i]['models'][j]['fname'] = r["aligned_models"][j]['id_cad']
cat_id = r["aligned_models"][j]['catid_cad']
cat_ids.append(cat_id)
d[i]['models'][j]['cat_id'] = cat_id
cat_class = DC.ShapenetIDtoClass(cat_id)
d[i]['models'][j]['sem_cls'] = cat_class
# category summary
cat_summary[cat_class]+=1
self.dataset = d
self.cat_ids = np.unique(cat_ids)
if distr_check:
for k, v in sorted(cat_summary.items(), key=lambda item:item[1], reverse=True):
print(f'{k:2d}: {DC.ClassToName[k]:12s} => {v:4d}')
def __len__(self):
return len(self.dataset)
def size_check(self, scale, id_scan, sem_cls):
check = False
if scale[0] < SCALE_THRASHOLD:
scale[0] = SCALE_THRASHOLD
check = True
if scale[1] < SCALE_THRASHOLD:
scale[1] = SCALE_THRASHOLD
check = True
if scale[2] < SCALE_THRASHOLD:
scale[2] = SCALE_THRASHOLD
check = True
return scale
def collect(self, N, dump=False):
""" Return dictionary of {verts(x,y,z): cad filename}
Note:
NK = a total number of instances in dataset
V = a number of vertices
args:
N: int
a size of dataset
return:
dict: (NK, 1, V, 3)
a dictionary for verts-cad_file pairs
"""
# ======= GLOBAL LABEL VARIABLES =======
error_scan = {} # Text
# Anchor collection (for detection)
print_log(" LOADING SCENES")
collect_path = os.path.join(BASE_DIR, 'collect')
for index in range(N):
data = self.dataset[index]
id_scan = data['id_scan']
K = data['n_total']
assert(K <= MAX_NUM_OBJ)
# Point Cloud
mesh_vertices = np.load(os.path.join(self.data_path, id_scan) + '_vert.npy') # (N, 3)
semantic_labels = np.load(os.path.join(self.data_path, id_scan) + '_sem_label.npy') # (N, sem_cls(0, 1~35, 36~MAX, INF))
point_cloud = mesh_vertices[:,0:3]
colors = mesh_vertices[:,3:6] / 127.5 - 1
instance_vertices = np.ones((point_cloud.shape[0]), dtype=np.int64) * (-1)
semantic_vertices = np.ones((point_cloud.shape[0]), dtype=np.int64) * (-1)
# Sort models by scale so larger objects are cropped first, avoiding overlaps
sort_by_scale = {}
for model in range(K):
obj_scale = np.array(data['models'][model]['trs']['scale'])
sort_by_scale[model] = np.sum(obj_scale)
model_scale_order = {model: scale for model, scale in sorted(sort_by_scale.items(), key=(lambda item:item[1]), reverse=True)}
K = len(model_scale_order.keys())
# Iterate on scale_order
checked = False
k = -1
for i, model in enumerate(model_scale_order.keys()):
k += 1
# semantics ()
sem_cls = data['models'][model]['sem_cls'] # (0~num_classes-1)
# Transform
obj_center = np.array(data['models'][model]['center'])
obj_translation = np.array(data['models'][model]['trs']['translation'])
obj_rotation = np.array(data['models'][model]['trs']['rotation'])
obj_scale = np.array(data['models'][model]['trs']['scale'])
obj_scale = self.size_check(obj_scale, id_scan, sem_cls)
Mobj = compose_mat4(obj_translation, obj_rotation, obj_scale, obj_center)
# Instance vertices
# - (1) Region Crop & Axis-aligned Bounding Box
vert_choices = np.array([])
ins_bbox = np.array(data['models'][model]['bbox'])
obj_corners = s2c_utils.get_3d_box_rotated(ins_bbox, Mobj, padding=PADDING)
ex_points, obj_vert_ind = s2c_utils.extract_pc_in_box3d(point_cloud, obj_corners)
nx = ex_points.shape[0]
# - (2) Instance Segments Crop
seg_points, vert_choices = \
s2c_utils.filter_dominant_cls(point_cloud, obj_vert_ind, semantic_labels, sem_cls+1, NOT_CARED_ID)
seg_nx = seg_points.shape[0]
# ======= Semantic/Instance vertices =======
if seg_nx < SEG_THRESHOLD:
k -= 1
checked = True
continue
sem_cls = REMAPPER[sem_cls]
# if sem_cls < 0: continue # ignore non-valid class object (only preserve CARED classes)
instance_vertices[vert_choices] = k # (0~K-1) NOTE:unannotated=-1
semantic_vertices[vert_choices] = sem_cls # (0~num_classes-1) NOTE:unannotated=-1
# error check
ins_list = np.unique(instance_vertices)
if (np.max(instance_vertices)+1) != (len(ins_list)-1):
print_log(f"[{index}/{N} Error] Please check this scene --> {id_scan}")
error_scan[id_scan] = 0
continue
# DUMP COLLECT RESULTS
if dump:
scene_path = os.path.join(collect_path, f'{id_scan}')
if not os.path.exists(scene_path):
os.mkdir(scene_path)
print("Created scene directory: {}".format(scene_path))
s2c_utils.write_scene_results(points=point_cloud, ins_points=instance_vertices, num_instances=K, bboxes=None, file_path=scene_path)
point_cloud = np.ascontiguousarray(point_cloud[:, :3] - point_cloud[:, :3].mean(0))
pcoord = point_cloud.astype(np.float64)
colors = colors.astype(np.float32)
sem_labels = semantic_vertices.astype(np.float64)
ins_labels = instance_vertices.astype(np.float64)
# ============ DUMP ============
# scene data
file_path = os.path.join(self.out_path, id_scan+'_inst.pth')
torch.save((pcoord, colors, sem_labels, ins_labels), file_path)
print(f"[{index}/{N} Saved] {id_scan} >>> {file_path}")
# error scan
with open(self.out_path+'/error_scan.txt', 'w') as f:
print_log("ERROR SCAN")
for i, sname in enumerate(error_scan.keys()):
print('{:2d}: {}'.format(i, sname))
f.write(sname)
f.write('\n')
if __name__ == "__main__":
Dataset = Scan2CADCollect(split_set='all', distr_check=True)
N = len(Dataset)
Dataset.collect(N, dump=False)
| 38.052308
| 147
| 0.537479
| 1,656
| 12,367
| 3.808575
| 0.199275
| 0.006342
| 0.017441
| 0.011416
| 0.181227
| 0.143174
| 0.088315
| 0.079436
| 0.068654
| 0.056604
| 0
| 0.02756
| 0.307593
| 12,367
| 325
| 148
| 38.052308
| 0.708981
| 0.103582
| 0
| 0.112971
| 0
| 0
| 0.086501
| 0.004132
| 0
| 0
| 0
| 0
| 0.008368
| 1
| 0.041841
| false
| 0
| 0.041841
| 0.004184
| 0.121339
| 0.054393
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc18327783ac4d0615c52f0106bc59f803cb607d
| 3,590
|
py
|
Python
|
nappy/msd2diff.py
|
ryokbys/nap
|
ddd0b5a5a956f7c335a22adb4f8e00f1d38a7804
|
[
"MIT"
] | 27
|
2015-10-05T06:21:28.000Z
|
2021-10-04T17:08:23.000Z
|
nappy/msd2diff.py
|
ryokbys/nap
|
ddd0b5a5a956f7c335a22adb4f8e00f1d38a7804
|
[
"MIT"
] | 4
|
2020-11-08T12:39:38.000Z
|
2021-01-10T22:31:36.000Z
|
nappy/msd2diff.py
|
ryokbys/nap
|
ddd0b5a5a956f7c335a22adb4f8e00f1d38a7804
|
[
"MIT"
] | 4
|
2015-01-29T23:10:34.000Z
|
2022-01-08T05:20:13.000Z
|
#!/usr/bin/env python
"""
Compute diffusion coefficient from MSD data.
Time interval, DT, is obtained from in.pmd in the same directory.
Usage:
msd2diff.py [options] MSD_FILE
Options:
-h, --help Show this message and exit.
-o, --offset OFFSET
Offset of given data. [default: 0]
--plot Plot a fitted graph. [default: False]
"""
from __future__ import print_function
import os,sys
from docopt import docopt
import numpy as np
__author__ = "RYO KOBAYASHI"
__version__ = "191212"
def read_out_msd(fname='out.msd',offset=0,specorder=[],spc=None):
if specorder == [] or spc not in specorder:
index = 1
else:
index = specorder.index(spc) +1
with open(fname,'r') as f:
lines = f.readlines()
try:
dname = os.path.dirname(fname)
dt = dt_from_inpmd(fname=dname+'/in.pmd')
except Exception as e:
raise RuntimeError('Failed to read in.pmd.')
ts = []
msds = []
n0 = 0
msd0 = 0.0
for il,line in enumerate(lines):
if line[0] == '#':
continue
data = line.split()
if il < offset:
n0 = int(data[0])
msd0 = float(data[index])
continue
n = int(data[0])
msd = float(data[index])
ts.append((n-n0)*dt)
msds.append(msd-msd0)
return np.array(ts),np.array(msds)
def dt_from_inpmd(fname='in.pmd'):
with open(fname,'r') as f:
lines = f.readlines()
for line in lines:
if 'time_interval' in line:
time_interval = abs(float(line.split()[1]))
elif 'num_iteration' in line:
num_iteration = int(line.split()[1])
elif 'num_out_pos' in line or 'num_out_pmd' in line:
num_out_pos = int(line.split()[1])
return time_interval*num_iteration/num_out_pos
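# Hedged arithmetic example (hypothetical in.pmd values): with time_interval = 2.0 fs,
# num_iteration = 10000 and num_out_pos = 100, the spacing between MSD samples
# returned above is 2.0 * 10000 / 100 = 200.0 fs.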
def msd2D(ts,msds,fac,dim=3):
"""
Compute diffusion coefficient from time [fs] vs MSD [Ang^2] data
by solving least square problem using numpy.
Return diffusion coefficient multiplied by FAC.
"""
A= np.array([ts, np.ones(len(ts))])
A = A.T
xvar = np.var(A[:,0])
p,res,_,_ = np.linalg.lstsq(A,msds,rcond=None)
a = p[0]
b = p[1]
# fac = 1.0e-16 /1.e-15
a = a *fac /(2.0*dim)
b = b *fac
# print(res[0],xvar,np.mean(A[:,0]),len(ts))
std = np.sqrt(res[0]/len(ts)/xvar) *fac /(2.0*dim)
return a,b,std
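# Hedged usage sketch (synthetic data, not from the original): in 3D an ideal
# MSD grows as 6*D*t, so msd2D should recover a ~= D; e.g. with D = 0.5 Ang^2/fs:
# >>> ts = np.linspace(1.0, 1000.0, 100)
# >>> a, b, std = msd2D(ts, 3.0 * ts, 1.0)   # msds = 6*D*t with D = 0.5
# >>> round(a, 6)
# 0.5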
if __name__ == "__main__":
args = docopt(__doc__)
fname = args['MSD_FILE']
offset = int(args['--offset'])
plot = args['--plot']
ts,msds = read_out_msd(fname,offset)
#...Assuming input MSD unit in A^2/fs and output in cm^2/s
fac = 1.0e-16 /1.0e-15
#...Least square
a,b,std = msd2D(ts,msds,fac)
print(' Diffusion coefficient = {0:12.4e}'.format(a)+
' +/- {0:12.4e} [cm^2/s]'.format(std))
if plot:
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(context='talk',style='ticks')
#...Original time unit == fs
unit = 'fs'
tfac = 1.0
if ts[-1] > 1.0e+5: #...if max t > 100ps, time unit in ps
unit = 'ps'
tfac = 1.0e-3
plt.xlabel('Time ({0:s})'.format(unit))
plt.ylabel('MSD (A^2/{0:s})'.format(unit))
fvals = np.array([ (t*a+b)/fac for t in ts ])
plt.plot(ts*tfac,msds/tfac,'b-',label='MSD data')
plt.plot(ts*tfac,fvals/tfac,'r-',label='Fitted curve')
plt.savefig("graph_msd2D.png", format='png',
dpi=300, bbox_inches='tight')
print(' Wrote graph_msd2D.png')
| 29.186992
| 69
| 0.567688
| 544
| 3,590
| 3.647059
| 0.338235
| 0.00756
| 0.015121
| 0.03125
| 0.058468
| 0.032258
| 0.032258
| 0.032258
| 0.032258
| 0
| 0
| 0.033153
| 0.277437
| 3,590
| 122
| 70
| 29.42623
| 0.731689
| 0.196657
| 0
| 0.071429
| 0
| 0
| 0.108184
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.071429
| 0
| 0.142857
| 0.035714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc186568dd52a9df9e70c87a7b31fe1c1c3e1f4d
| 1,172
|
py
|
Python
|
5/part2.py
|
jcsesznegi/advent-of-code-2017
|
9710e184e092b82aa798076b9ce3915c6e42758d
|
[
"MIT"
] | 1
|
2020-04-12T17:54:52.000Z
|
2020-04-12T17:54:52.000Z
|
5/part2.py
|
jcsesznegi/advent-of-code-2017
|
9710e184e092b82aa798076b9ce3915c6e42758d
|
[
"MIT"
] | null | null | null |
5/part2.py
|
jcsesznegi/advent-of-code-2017
|
9710e184e092b82aa798076b9ce3915c6e42758d
|
[
"MIT"
] | null | null | null |
import os
f = open(os.path.join(os.path.dirname(__file__), '../input/5/part2.txt'), 'r')
class InstructionSet:
def __init__(self, instructions):
self.instructions = instructions
self.currentIndex = 0
self.numberSteps = 0
def _changeOffsetValue(self, index):
if self.instructions[index] >= 3:
self.instructions[index] -= 1
else:
self.instructions[index] += 1
def jump(self):
self.numberSteps += 1
jumpNumber = self.instructions[self.currentIndex]
oldIndex = self.currentIndex
self.currentIndex += jumpNumber
self._changeOffsetValue(oldIndex)
def run(self):
while (self.currentIndex >= 0
and self.currentIndex < len(self.instructions)):
self.jump()
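# Hedged usage sketch (offsets taken from the widely known puzzle example, not this
# puzzle's input): the list 0, 3, 0, 1, -3 takes 10 jumps to escape under the
# part-two rule implemented above.
# >>> demo = InstructionSet([0, 3, 0, 1, -3])
# >>> demo.run(); demo.numberSteps
# 10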
def main():
def formatLine(line):
return int(line.rstrip())
line = f.readline()
instructions = []
while line:
instructions.append(formatLine(line))
line = f.readline()
instructionSet = InstructionSet(instructions)
instructionSet.run()
print(instructionSet.numberSteps)
if __name__ == '__main__':
main()
| 23.44
| 78
| 0.619454
| 118
| 1,172
| 6
| 0.381356
| 0.158192
| 0.084746
| 0.062147
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010453
| 0.265358
| 1,172
| 49
| 79
| 23.918367
| 0.811847
| 0
| 0
| 0.057143
| 0
| 0
| 0.024744
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.171429
| false
| 0
| 0.028571
| 0.028571
| 0.257143
| 0.028571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc18a51ed3a62618a4f8d1b8d53f53c96ae69319
| 11,944
|
py
|
Python
|
tests/test_sync_module.py
|
naveengh6/blinkpy
|
e821687f2b7590b13532ac596c31e8eaa6c7b69a
|
[
"MIT"
] | 272
|
2017-01-29T18:43:25.000Z
|
2022-03-27T20:43:50.000Z
|
tests/test_sync_module.py
|
naveengh6/blinkpy
|
e821687f2b7590b13532ac596c31e8eaa6c7b69a
|
[
"MIT"
] | 434
|
2017-01-23T20:22:51.000Z
|
2022-03-31T18:10:36.000Z
|
tests/test_sync_module.py
|
naveengh6/blinkpy
|
e821687f2b7590b13532ac596c31e8eaa6c7b69a
|
[
"MIT"
] | 77
|
2017-04-15T17:04:04.000Z
|
2022-03-04T10:03:39.000Z
|
"""Tests camera and system functions."""
import unittest
from unittest import mock
from blinkpy.blinkpy import Blink
from blinkpy.helpers.util import BlinkURLHandler
from blinkpy.sync_module import BlinkSyncModule, BlinkOwl
from blinkpy.camera import BlinkCamera, BlinkCameraMini
@mock.patch("blinkpy.auth.Auth.query")
class TestBlinkSyncModule(unittest.TestCase):
"""Test BlinkSyncModule functions in blinkpy."""
def setUp(self):
"""Set up Blink module."""
self.blink = Blink(motion_interval=0)
self.blink.last_refresh = 0
self.blink.urls = BlinkURLHandler("test")
self.blink.sync["test"] = BlinkSyncModule(self.blink, "test", "1234", [])
self.camera = BlinkCamera(self.blink.sync)
self.mock_start = [
{
"syncmodule": {
"id": 1234,
"network_id": 5678,
"serial": "12345678",
"status": "foobar",
}
},
{"event": True},
{},
{},
None,
{"devicestatus": {}},
]
self.blink.sync["test"].network_info = {"network": {"armed": True}}
def tearDown(self):
"""Clean up after test."""
self.blink = None
self.camera = None
self.mock_start = None
def test_bad_status(self, mock_resp):
"""Check that we mark the module unavailable on bad status."""
self.blink.sync["test"].status = None
self.blink.sync["test"].available = True
self.assertFalse(self.blink.sync["test"].online)
self.assertFalse(self.blink.sync["test"].available)
def test_bad_arm(self, mock_resp):
"""Check that we mark the module unavailable on a bad arm status."""
self.blink.sync["test"].network_info = None
self.blink.sync["test"].available = True
self.assertEqual(self.blink.sync["test"].arm, None)
self.assertFalse(self.blink.sync["test"].available)
self.blink.sync["test"].network_info = {}
self.blink.sync["test"].available = True
self.assertEqual(self.blink.sync["test"].arm, None)
self.assertFalse(self.blink.sync["test"].available)
def test_get_events(self, mock_resp):
"""Test get events function."""
mock_resp.return_value = {"event": True}
self.assertEqual(self.blink.sync["test"].get_events(), True)
def test_get_events_fail(self, mock_resp):
"""Test handling of failed get events function."""
mock_resp.return_value = None
self.assertFalse(self.blink.sync["test"].get_events())
mock_resp.return_value = {}
self.assertFalse(self.blink.sync["test"].get_events())
def test_get_camera_info(self, mock_resp):
"""Test get camera info function."""
mock_resp.return_value = {"camera": ["foobar"]}
self.assertEqual(self.blink.sync["test"].get_camera_info("1234"), "foobar")
def test_get_camera_info_fail(self, mock_resp):
"""Test handling of failed get camera info function."""
mock_resp.return_value = None
self.assertEqual(self.blink.sync["test"].get_camera_info("1"), {})
mock_resp.return_value = {}
self.assertEqual(self.blink.sync["test"].get_camera_info("1"), {})
mock_resp.return_value = {"camera": None}
self.assertEqual(self.blink.sync["test"].get_camera_info("1"), {})
def test_get_network_info(self, mock_resp):
"""Test network retrieval."""
mock_resp.return_value = {"network": {"sync_module_error": False}}
self.assertTrue(self.blink.sync["test"].get_network_info())
mock_resp.return_value = {"network": {"sync_module_error": True}}
self.assertFalse(self.blink.sync["test"].get_network_info())
def test_get_network_info_failure(self, mock_resp):
"""Test failed network retrieval."""
mock_resp.return_value = {}
self.blink.sync["test"].available = True
self.assertFalse(self.blink.sync["test"].get_network_info())
self.assertFalse(self.blink.sync["test"].available)
self.blink.sync["test"].available = True
mock_resp.return_value = None
self.assertFalse(self.blink.sync["test"].get_network_info())
self.assertFalse(self.blink.sync["test"].available)
def test_check_new_videos_startup(self, mock_resp):
"""Test that check_new_videos does not block startup."""
sync_module = self.blink.sync["test"]
self.blink.last_refresh = None
self.assertFalse(sync_module.check_new_videos())
def test_check_new_videos(self, mock_resp):
"""Test recent video response."""
mock_resp.return_value = {
"media": [
{
"device_name": "foo",
"media": "/foo/bar.mp4",
"created_at": "1990-01-01T00:00:00+00:00",
}
]
}
sync_module = self.blink.sync["test"]
sync_module.cameras = {"foo": None}
sync_module.blink.last_refresh = 0
self.assertEqual(sync_module.motion, {})
self.assertTrue(sync_module.check_new_videos())
self.assertEqual(
sync_module.last_record["foo"],
{"clip": "/foo/bar.mp4", "time": "1990-01-01T00:00:00+00:00"},
)
self.assertEqual(sync_module.motion, {"foo": True})
mock_resp.return_value = {"media": []}
self.assertTrue(sync_module.check_new_videos())
self.assertEqual(sync_module.motion, {"foo": False})
self.assertEqual(
sync_module.last_record["foo"],
{"clip": "/foo/bar.mp4", "time": "1990-01-01T00:00:00+00:00"},
)
def test_check_new_videos_old_date(self, mock_resp):
"""Test videos return response with old date."""
mock_resp.return_value = {
"media": [
{
"device_name": "foo",
"media": "/foo/bar.mp4",
"created_at": "1970-01-01T00:00:00+00:00",
}
]
}
sync_module = self.blink.sync["test"]
sync_module.cameras = {"foo": None}
sync_module.blink.last_refresh = 1000
self.assertTrue(sync_module.check_new_videos())
self.assertEqual(sync_module.motion, {"foo": False})
def test_check_no_motion_if_not_armed(self, mock_resp):
"""Test that motion detection is not set if module unarmed."""
mock_resp.return_value = {
"media": [
{
"device_name": "foo",
"media": "/foo/bar.mp4",
"created_at": "1990-01-01T00:00:00+00:00",
}
]
}
sync_module = self.blink.sync["test"]
sync_module.cameras = {"foo": None}
sync_module.blink.last_refresh = 1000
self.assertTrue(sync_module.check_new_videos())
self.assertEqual(sync_module.motion, {"foo": True})
sync_module.network_info = {"network": {"armed": False}}
self.assertTrue(sync_module.check_new_videos())
self.assertEqual(sync_module.motion, {"foo": False})
def test_check_multiple_videos(self, mock_resp):
"""Test motion found even with multiple videos."""
mock_resp.return_value = {
"media": [
{
"device_name": "foo",
"media": "/foo/bar.mp4",
"created_at": "1970-01-01T00:00:00+00:00",
},
{
"device_name": "foo",
"media": "/bar/foo.mp4",
"created_at": "1990-01-01T00:00:00+00:00",
},
{
"device_name": "foo",
"media": "/foobar.mp4",
"created_at": "1970-01-01T00:00:01+00:00",
},
]
}
sync_module = self.blink.sync["test"]
sync_module.cameras = {"foo": None}
sync_module.blink.last_refresh = 1000
self.assertTrue(sync_module.check_new_videos())
self.assertEqual(sync_module.motion, {"foo": True})
expected_result = {
"foo": {"clip": "/bar/foo.mp4", "time": "1990-01-01T00:00:00+00:00"}
}
self.assertEqual(sync_module.last_record, expected_result)
def test_check_new_videos_failed(self, mock_resp):
"""Test method when response is unexpected."""
mock_resp.side_effect = [None, "just a string", {}]
sync_module = self.blink.sync["test"]
sync_module.cameras = {"foo": None}
sync_module.motion["foo"] = True
self.assertFalse(sync_module.check_new_videos())
self.assertFalse(sync_module.motion["foo"])
sync_module.motion["foo"] = True
self.assertFalse(sync_module.check_new_videos())
self.assertFalse(sync_module.motion["foo"])
sync_module.motion["foo"] = True
self.assertFalse(sync_module.check_new_videos())
self.assertFalse(sync_module.motion["foo"])
def test_sync_start(self, mock_resp):
"""Test sync start function."""
mock_resp.side_effect = self.mock_start
self.blink.sync["test"].start()
self.assertEqual(self.blink.sync["test"].name, "test")
self.assertEqual(self.blink.sync["test"].sync_id, 1234)
self.assertEqual(self.blink.sync["test"].network_id, 5678)
self.assertEqual(self.blink.sync["test"].serial, "12345678")
self.assertEqual(self.blink.sync["test"].status, "foobar")
def test_unexpected_summary(self, mock_resp):
"""Test unexpected summary response."""
self.mock_start[0] = None
mock_resp.side_effect = self.mock_start
self.assertFalse(self.blink.sync["test"].start())
def test_summary_with_no_network_id(self, mock_resp):
"""Test handling of bad summary."""
self.mock_start[0]["syncmodule"] = None
mock_resp.side_effect = self.mock_start
self.assertFalse(self.blink.sync["test"].start())
def test_summary_with_only_network_id(self, mock_resp):
"""Test handling of sparse summary."""
self.mock_start[0]["syncmodule"] = {"network_id": 8675309}
mock_resp.side_effect = self.mock_start
self.blink.sync["test"].start()
self.assertEqual(self.blink.sync["test"].network_id, 8675309)
def test_unexpected_camera_info(self, mock_resp):
"""Test unexpected camera info response."""
self.blink.sync["test"].cameras["foo"] = None
self.mock_start[5] = None
mock_resp.side_effect = self.mock_start
self.blink.sync["test"].start()
self.assertEqual(self.blink.sync["test"].cameras, {"foo": None})
def test_missing_camera_info(self, mock_resp):
"""Test missing key from camera info response."""
self.blink.sync["test"].cameras["foo"] = None
self.mock_start[5] = {}
self.blink.sync["test"].start()
self.assertEqual(self.blink.sync["test"].cameras, {"foo": None})
def test_sync_attributes(self, mock_resp):
"""Test sync attributes."""
self.assertEqual(self.blink.sync["test"].attributes["name"], "test")
self.assertEqual(self.blink.sync["test"].attributes["network_id"], "1234")
def test_owl_start(self, mock_resp):
"""Test owl camera instantiation."""
response = {
"name": "foo",
"id": 2,
"serial": "foobar123",
"enabled": True,
"network_id": 1,
"thumbnail": "/foo/bar",
}
self.blink.last_refresh = None
self.blink.homescreen = {"owls": [response]}
owl = BlinkOwl(self.blink, "foo", 1234, response)
self.assertTrue(owl.start())
self.assertTrue("foo" in owl.cameras)
self.assertEqual(owl.cameras["foo"].__class__, BlinkCameraMini)
| 40.488136
| 83
| 0.592264
| 1,399
| 11,944
| 4.857756
| 0.107219
| 0.083431
| 0.103296
| 0.132578
| 0.748823
| 0.67319
| 0.609918
| 0.573278
| 0.51236
| 0.471895
| 0
| 0.029636
| 0.262642
| 11,944
| 294
| 84
| 40.62585
| 0.742023
| 0.080124
| 0
| 0.466102
| 0
| 0
| 0.11063
| 0.022845
| 0
| 0
| 0
| 0
| 0.241525
| 1
| 0.101695
| false
| 0
| 0.025424
| 0
| 0.131356
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc1a91eb27f4ff382a15602726e82a1122f6307d
| 2,807
|
py
|
Python
|
dymos/examples/min_time_climb/aero/aero.py
|
naylor-b/dymos
|
56ee72041056ae20c3332d060e291c4da93844b1
|
[
"Apache-2.0"
] | null | null | null |
dymos/examples/min_time_climb/aero/aero.py
|
naylor-b/dymos
|
56ee72041056ae20c3332d060e291c4da93844b1
|
[
"Apache-2.0"
] | null | null | null |
dymos/examples/min_time_climb/aero/aero.py
|
naylor-b/dymos
|
56ee72041056ae20c3332d060e291c4da93844b1
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
import numpy as np
from openmdao.api import Group
from .dynamic_pressure_comp import DynamicPressureComp
from .lift_drag_force_comp import LiftDragForceComp
from .cd0_comp import CD0Comp
from .kappa_comp import KappaComp
from .cla_comp import CLaComp
from .cl_comp import CLComp
from .cd_comp import CDComp
from .mach_comp import MachComp
class AeroGroup(Group):
"""
The purpose of the AeroGroup is to compute the aerodynamic forces on the
aircraft in the body frame.
Parameters
----------
v : float
air-relative velocity (m/s)
sos : float
local speed of sound (m/s)
rho : float
atmospheric density (kg/m**3)
alpha : float
angle of attack (rad)
S : float
aerodynamic reference area (m**2)
"""
def initialize(self):
self.options.declare('num_nodes', types=int,
desc='Number of nodes to be evaluated in the RHS')
def setup(self):
nn = self.options['num_nodes']
self.add_subsystem(name='mach_comp',
subsys=MachComp(num_nodes=nn),
promotes_inputs=['v', 'sos'],
promotes_outputs=['mach'])
self.add_subsystem(name='cd0_comp',
subsys=CD0Comp(num_nodes=nn),
promotes_inputs=['mach'],
promotes_outputs=['CD0'])
self.add_subsystem(name='kappa_comp',
subsys=KappaComp(num_nodes=nn),
promotes_inputs=['mach'],
promotes_outputs=['kappa'])
self.add_subsystem(name='cla_comp',
subsys=CLaComp(num_nodes=nn),
promotes_inputs=['mach'],
promotes_outputs=['CLa'])
self.add_subsystem(name='CL_comp',
subsys=CLComp(num_nodes=nn),
promotes_inputs=['alpha', 'CLa'],
promotes_outputs=['CL'])
self.add_subsystem(name='CD_comp',
subsys=CDComp(num_nodes=nn),
promotes_inputs=['CD0', 'alpha', 'CLa', 'kappa'],
promotes_outputs=['CD'])
self.add_subsystem(name='q_comp',
subsys=DynamicPressureComp(num_nodes=nn),
promotes_inputs=['rho', 'v'],
promotes_outputs=['q'])
self.add_subsystem(name='lift_drag_force_comp',
subsys=LiftDragForceComp(num_nodes=nn),
promotes_inputs=['CL', 'CD', 'q', 'S'],
promotes_outputs=['f_lift', 'f_drag'])
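# A minimal, hypothetical usage sketch: it assumes the sibling component modules above are
# importable and uses placeholder flight-condition values purely for illustration.
if __name__ == '__main__':  # pragma: no cover
    from openmdao.api import Problem

    nn = 5  # number of nodes to evaluate
    prob = Problem(model=AeroGroup(num_nodes=nn))
    prob.setup()
    prob['v'] = np.full(nn, 250.0)      # air-relative velocity (m/s)
    prob['sos'] = np.full(nn, 340.0)    # local speed of sound (m/s)
    prob['rho'] = np.full(nn, 1.2)      # atmospheric density (kg/m**3)
    prob['alpha'] = np.full(nn, 0.02)   # angle of attack (rad)
    prob['S'] = np.full(nn, 49.24)      # aerodynamic reference area (m**2)
    prob.run_model()
    print(prob['f_lift'], prob['f_drag'])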
| 34.231707
| 79
| 0.530816
| 292
| 2,807
| 4.890411
| 0.321918
| 0.056022
| 0.089636
| 0.112045
| 0.17437
| 0.090336
| 0.090336
| 0.090336
| 0
| 0
| 0
| 0.004494
| 0.365871
| 2,807
| 81
| 80
| 34.654321
| 0.797753
| 0.121838
| 0
| 0.061224
| 0
| 0
| 0.090568
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040816
| false
| 0
| 0.22449
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc1d28c4600f03845019e2280e8c9b05ec587f01
| 930
|
py
|
Python
|
1-Python-Programming-Basics (Sep 2020)/Course-Exercises-and-Exams/06_Nested-Loops/02.Exercise-06-Special-Numbers.py
|
karolinanikolova/SoftUni-Software-Engineering
|
7891924956598b11a1e30e2c220457c85c40f064
|
[
"MIT"
] | null | null | null |
1-Python-Programming-Basics (Sep 2020)/Course-Exercises-and-Exams/06_Nested-Loops/02.Exercise-06-Special-Numbers.py
|
karolinanikolova/SoftUni-Software-Engineering
|
7891924956598b11a1e30e2c220457c85c40f064
|
[
"MIT"
] | null | null | null |
1-Python-Programming-Basics (Sep 2020)/Course-Exercises-and-Exams/06_Nested-Loops/02.Exercise-06-Special-Numbers.py
|
karolinanikolova/SoftUni-Software-Engineering
|
7891924956598b11a1e30e2c220457c85c40f064
|
[
"MIT"
] | null | null | null |
# 6. Special Numbers
# Write a program that reads one integer N from the user and generates all possible "special"
# numbers from 1111 to 9999. For a number to be "special", it must satisfy the following condition:
# • N must be divisible, without remainder, by every one of the number's digits.
# Example: for N = 16, 2418 is a special number:
# • 16 / 2 = 8 with no remainder
# • 16 / 4 = 4 with no remainder
# • 16 / 1 = 16 with no remainder
# • 16 / 8 = 2 with no remainder
N = int(input())
for number in range(1111, 9999 + 1):
is_number_special = True
number_as_string = str(number)
    # We could also write "for index, digit in enumerate(number_as_string):", but since we don't need the index, enumerate is unnecessary.
for digit in number_as_string:
if int(digit) == 0 or N % int(digit) != 0:
is_number_special = False
break
if is_number_special:
print(f'{number_as_string}', end = ' ')
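# Illustrative run: for the input 16 the printed sequence begins
#   1111 1112 1114 1118 1121 1122 1124 1128 ...
# since 16 is divisible by every digit of each of those numbers.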
| 35.769231
| 130
| 0.665591
| 157
| 930
| 3.88535
| 0.522293
| 0.081967
| 0.091803
| 0.063934
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.061254
| 0.245161
| 930
| 25
| 131
| 37.2
| 0.80057
| 0.608602
| 0
| 0
| 0
| 0
| 0.054286
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc1f1d11a9a9d323ee25ccd432c9e05f59ae89c2
| 29,526
|
py
|
Python
|
tokenization_numerical.py
|
dspoka/mnm
|
f212e8d5697a4556c6469d469a2930b203667828
|
[
"MIT"
] | 1
|
2021-07-08T04:18:30.000Z
|
2021-07-08T04:18:30.000Z
|
tokenization_numerical.py
|
dspoka/mnm
|
f212e8d5697a4556c6469d469a2930b203667828
|
[
"MIT"
] | 1
|
2021-08-24T03:36:53.000Z
|
2021-08-24T03:36:53.000Z
|
tokenization_numerical.py
|
dspoka/mnm
|
f212e8d5697a4556c6469d469a2930b203667828
|
[
"MIT"
] | 1
|
2021-07-08T04:18:32.000Z
|
2021-07-08T04:18:32.000Z
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import logging
import os
import sys
import unicodedata
from io import open
from transformers import PreTrainedTokenizer
logger = logging.getLogger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file':
{
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt",
'bert-base-german-cased': "https://int-deepset-models-bert.s3.eu-central-1.amazonaws.com/pytorch/bert-base-german-cased-vocab.txt",
'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-vocab.txt",
'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-vocab.txt",
'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-vocab.txt",
'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-vocab.txt",
'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-vocab.txt",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
'bert-base-german-cased': 512,
'bert-large-uncased-whole-word-masking': 512,
'bert-large-cased-whole-word-masking': 512,
'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
'bert-large-cased-whole-word-masking-finetuned-squad': 512,
'bert-base-cased-finetuned-mrpc': 512,
}
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
with open(vocab_file, "r", encoding="utf-8") as reader:
tokens = reader.readlines()
for index, token in enumerate(tokens):
token = token.rstrip('\n')
vocab[token] = index
return vocab
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class BertNumericalTokenizer(PreTrainedTokenizer):
r"""
    Constructs a BertNumericalTokenizer.
:class:`~pytorch_transformers.BertTokenizer` runs end-to-end tokenization: punctuation splitting + wordpiece
Args:
vocab_file: Path to a one-wordpiece-per-line vocabulary file
do_lower_case: Whether to lower case the input. Only has an effect when do_wordpiece_only=False
do_basic_tokenize: Whether to do basic tokenization before wordpiece.
max_len: An artificial maximum length to truncate tokenized sequences to; Effective maximum length is always the
minimum of this value (if specified) and the underlying BERT model's sequence length.
never_split: List of tokens which will never be split during tokenization. Only has an effect when
do_wordpiece_only=False
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None,
unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]",
mask_token="[MASK]", tokenize_chinese_chars=True, **kwargs):
"""Constructs a BertNumericalTokenizer.
Args:
**vocab_file**: Path to a one-wordpiece-per-line vocabulary file
**do_lower_case**: (`optional`) boolean (default True)
Whether to lower case the input
Only has an effect when do_basic_tokenize=True
**do_basic_tokenize**: (`optional`) boolean (default True)
Whether to do basic tokenization before wordpiece.
**never_split**: (`optional`) list of string
List of tokens which will never be split during tokenization.
Only has an effect when do_basic_tokenize=True
**tokenize_chinese_chars**: (`optional`) boolean (default True)
Whether to tokenize Chinese characters.
This should likely be deactivated for Japanese:
see: https://github.com/huggingface/pytorch-pretrained-BERT/issues/328
"""
super(BertNumericalTokenizer, self).__init__(unk_token=unk_token, sep_token=sep_token,
pad_token=pad_token, cls_token=cls_token,
mask_token=mask_token, **kwargs)
if not os.path.isfile(vocab_file):
raise ValueError(
"Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained "
"model use `tokenizer = BertNumericalTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
self.vocab = load_vocab(vocab_file)
self.unk_num = '[UNK_NUM]'
self.default_value = 1.0
never_split = ['[UNK_NUM]']
self.ids_to_tokens = collections.OrderedDict(
[(ids, tok) for tok, ids in self.vocab.items()])
self.do_basic_tokenize = do_basic_tokenize
self.numerical_tokenizer = NumericalTokenizer(do_lower_case=do_lower_case,
never_split=never_split)
if do_basic_tokenize:
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case,
never_split=never_split,
tokenize_chinese_chars=tokenize_chinese_chars)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token, unk_num=self.unk_num)
@property
def vocab_size(self):
return len(self.vocab)
def _tokenize(self, text, get_values=False, get_sigfigs=None, get_numeric_masks=None):
split_tokens = []
numeric_values = []
numeric_masks = []
split_sigfigs = []
i = 0
for (token, sigfig) in self.numerical_tokenizer.tokenize(text, never_split=self.all_special_tokens):
for (sub_token, numeric_value, numeric_mask) in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
numeric_values.append(numeric_value)
numeric_masks.append(numeric_mask)
if numeric_value != self.default_value:
split_sigfigs.append(sigfig)
else:
split_sigfigs.append('-1')
                if numeric_value != self.default_value and sub_token != self.unk_num:
                    # A parsed numeric value should only ever be attached to the numeric
                    # placeholder token; anything else points to a tokenization bug.
                    raise ValueError('Unexpected numeric sub-token {!r} with value {}'.format(
                        sub_token, numeric_value))
if get_numeric_masks:
return numeric_masks
if get_values:
return numeric_values
assert len(split_tokens) == len(numeric_values) == len(split_sigfigs)
if get_sigfigs:
return split_sigfigs
return split_tokens
def _convert_token_to_id(self, token):
""" Converts a token (str/unicode) in an id using the vocab. """
return self.vocab.get(token, self.vocab.get(self.unk_token))
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (string/unicode) using the vocab."""
return self.ids_to_tokens.get(index, self.unk_token)
def convert_tokens_to_string(self, tokens):
""" Converts a sequence of tokens (string) in a single string. """
out_string = ' '.join(tokens).replace(' ##', '').strip()
return out_string
def save_vocabulary(self, vocab_path):
"""Save the tokenizer vocabulary to a directory or file."""
index = 0
if os.path.isdir(vocab_path):
vocab_file = os.path.join(vocab_path, VOCAB_FILES_NAMES['vocab_file'])
with open(vocab_file, "w", encoding="utf-8") as writer:
for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning("Saving vocabulary to {}: vocabulary indices are not consecutive."
" Please check that the vocabulary is not corrupted!".format(vocab_file))
index = token_index
writer.write(token + u'\n')
index += 1
return (vocab_file,)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
""" Instantiate a BertNumericalTokenizer from pre-trained vocabulary files.
"""
if pretrained_model_name_or_path in PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES:
if '-cased' in pretrained_model_name_or_path and kwargs.get('do_lower_case', True):
logger.warning("The pre-trained model you are loading is a cased model but you have not set "
"`do_lower_case` to False. We are setting `do_lower_case=False` for you but "
"you may want to check this behavior.")
kwargs['do_lower_case'] = False
elif '-cased' not in pretrained_model_name_or_path and not kwargs.get('do_lower_case', True):
logger.warning("The pre-trained model you are loading is an uncased model but you have set "
"`do_lower_case` to False. We are setting `do_lower_case=True` for you "
"but you may want to check this behavior.")
kwargs['do_lower_case'] = True
return super(BertNumericalTokenizer, cls)._from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
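# A minimal, hypothetical usage sketch (the vocabulary path is a placeholder): literal
# numbers are replaced by the '[UNK_NUM]' placeholder and their values are exposed via
# get_values=True.
#
#   tokenizer = BertNumericalTokenizer(vocab_file='vocab.txt')
#   tokens = tokenizer._tokenize('The budget was three million dollars')
#   values = tokenizer._tokenize('The budget was three million dollars', get_values=True)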
class NumericalTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self, do_lower_case=True, never_split=None):
""" Constructs a BasicTokenizer.
Args:
**do_lower_case**: Whether to lower case the input.
**never_split**: (`optional`) list of str
Kept for backward compatibility purposes.
Now implemented directly at the base class level (see :func:`PreTrainedTokenizer.tokenize`)
List of token not to split.
"""
if never_split is None:
never_split = []
self.do_lower_case = do_lower_case
self.never_split = never_split
def tokenize(self, text, never_split=None):
""" Basic Numerical Tokenization of a piece of text.
Args:
**never_split**: (`optional`) list of str
Kept for backward compatibility purposes.
Now implemented directly at the base class level (see :func:`PreTrainedTokenizer.tokenize`)
List of token not to split.
"""
# digits = '0123456789'
# punctuation = '$%'
# text = self._clean_text(text)
# orig_tokens = whitespace_tokenize(text)
split_tokens, split_sigfigs = normalize_numbers_in_sent(text)
output_tokens = whitespace_tokenize(" ".join(split_tokens))
output_sigfigs = whitespace_tokenize(" ".join(split_sigfigs))
return zip(output_tokens,split_sigfigs)
# return output_tokens,
# _numbers = '[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?'
# fraction_pattern = re.compile(_fraction)
# number_pattern = re.compile(_numbers)
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True):
""" Constructs a BasicTokenizer.
Args:
**do_lower_case**: Whether to lower case the input.
**never_split**: (`optional`) list of str
Kept for backward compatibility purposes.
Now implemented directly at the base class level (see :func:`PreTrainedTokenizer.tokenize`)
List of token not to split.
**tokenize_chinese_chars**: (`optional`) boolean (default True)
Whether to tokenize Chinese characters.
This should likely be deactivated for Japanese:
see: https://github.com/huggingface/pytorch-pretrained-BERT/issues/328
"""
if never_split is None:
never_split = []
self.do_lower_case = do_lower_case
self.never_split = never_split
self.tokenize_chinese_chars = tokenize_chinese_chars
def tokenize(self, text, never_split=None):
""" Basic Tokenization of a piece of text.
Split on "white spaces" only, for sub-word tokenization, see WordPieceTokenizer.
Args:
**never_split**: (`optional`) list of str
Kept for backward compatibility purposes.
Now implemented directly at the base class level (see :func:`PreTrainedTokenizer.tokenize`)
List of token not to split.
"""
never_split = self.never_split + (never_split if never_split is not None else [])
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia.).
if self.tokenize_chinese_chars:
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case and token not in never_split:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text, never_split=None):
"""Splits punctuation on a piece of text."""
if never_split is not None and text in never_split:
return [text]
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
            # don't split on periods if a digit immediately precedes them
# if _is_punctuation(char) and not chars[i-1].isdigit() or _is_punctuation(char) and i == 0:
if _is_punctuation(char):
if i == 0:
do_split = True
elif i == len(chars)-1:
do_split = True
else:
if not chars[i-1].isdigit():
do_split = True
else:
do_split = False
else:
do_split = False
if do_split:
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
        # like all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenization."""
def __init__(self, vocab, unk_token, unk_num, max_input_chars_per_word=100):
self.vocab = vocab
self.unk_token = unk_token
self.unk_num = unk_num
self.default_value = 1.0
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
output_tokens = []
numeric_values = []
numeric_mask = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
numeric_values.append(self.default_value)
numeric_mask.append(0)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
try:
if token not in ['infinity', 'inf', 'nan']:
numeric_value = float(token)
is_number = True
else:
is_number = False
                except ValueError:
                    is_number = False
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab and is_number == False:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_number:
#ACTUAL NUMBER HERE
output_tokens.append(self.unk_num)
numeric_values.append(numeric_value)
numeric_mask.append(1)
elif is_bad:
output_tokens.append(self.unk_token)
numeric_values.append(self.default_value)#-9e9
numeric_mask.append(0)
else:
numeric_values.extend([self.default_value]*len(sub_tokens))#-9e9
numeric_mask.extend([0]*len(sub_tokens))
output_tokens.extend(sub_tokens)
assert len(numeric_values) == len(output_tokens) == len(numeric_mask)
return zip(output_tokens, numeric_values, numeric_mask)
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
    # \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
# if cat.startswith("P") and cp != 46:
if cat.startswith("P"):
return True
return False
################
#
Small = {
'zero': 0.0,
'one': 1.0,
'two': 2.0,
'three': 3.0,
'four': 4.0,
'five': 5.0,
'six': 6.0,
'seven': 7.0,
'eight': 8.0,
'nine': 9.0,
'ten': 10.0,
'eleven': 11.0,
'twelve': 12.0,
'thirteen': 13.0,
'fourteen': 14.0,
'fifteen': 15.0,
'sixteen': 16.0,
'seventeen': 17.0,
'eighteen': 18.0,
'nineteen': 19.0,
'twenty': 20.0,
'thirty': 30.0,
'forty': 40.0,
'fifty': 50.0,
'sixty': 60.0,
'seventy': 70.0,
'eighty': 80.0,
'ninety': 90.0
}
Magnitude = {
'thousand': 1000.0,
'million': 1000000.0,
'billion': 1000000000.0,
'trillion': 1000000000000.0,
'quadrillion': 1000000000000000.0,
'quintillion': 1000000000000000000.0,
'sextillion': 1000000000000000000000.0,
'septillion': 1000000000000000000000000.0,
'octillion': 1000000000000000000000000000.0,
'nonillion': 1000000000000000000000000000000.0,
}
class NumberException(Exception):
def __init__(self, msg):
Exception.__init__(self, msg)
def text2num(sent):
if type(sent) is str:
words = [word.lower() for word in sent.strip().split()]
elif type(sent) is list:
words = [word.lower() for word in sent]
# n = 0
# g = 0
mantissa = 0
# number = 0.0
for i, word in enumerate(words):
if i == 0:
mantissa = Small.get(word, None)
if mantissa is None:
try:
mantissa = float(word)
except ValueError:
raise NumberException("First must be a number of sorts")
elif i != 0:
magnitude = Magnitude.get(word, None)
if magnitude is not None:
mantissa = mantissa*magnitude
else: # non-number word
raise NumberException("Unknown number: "+word)
return mantissa
def generate_ngrams(sentence, n):
return zip(*[sentence[i:] for i in range(n)])
def check_int(s):
if s[0] in ('-', '+'):
return s[1:].isdigit()
return s.isdigit()
def preprocess(sent, remove_pos=False, never_split=None):
"""
Preprocess the sentence by:
. remove commas from numbers (2,000 -> 2000)
. remove endings from ordinal numbers (2nd -> 2)
. convert "a {hundred,thousand...}" to "one {hundred,thousand,...}" so it can be handled by text2num function
. convert "digit digitword" (24 hundred) -> 2400
and return the sentence's preprocessed list of words that should be passed into text2num.
"""
if remove_pos:
words = [word[:word.rfind('_')] for word in sent.strip().split()]
else:
words = [word for word in sent.strip().split()]
tokenizer = BasicTokenizer(do_lower_case=True, never_split=never_split)
words = tokenizer.tokenize(sent)
# sent = ' '.join(tokens)
words_lower = [word.lower() for word in words]
# remove commas from numbers "2,000" -> 2000 and remove endings from ordinal numbers
for i in range(len(words)):
new_word = words_lower[i].replace(',', '')
if new_word.endswith(('th', 'rd', 'st', 'nd')):
new_word = new_word[:-2]
try:
if new_word not in ['infinity', 'inf', 'nan']:
int_word = float(new_word)
# words[i] = str(int_word)
words[i] = new_word
except ValueError:
pass # only modify this word if it's an int after preprocessing
Magnitude_with_hundred = Magnitude.copy()
Magnitude_with_hundred['hundred'] = 100
# convert "a {hundred,thousand,million,...}" to "one {hundred,thousand,million,...}"
for i in range(len(words)-1):
if words_lower[i] == 'a' and words_lower[i+1] in Magnitude_with_hundred:
words[i] = 'one'
# convert "24 {Magnitude}" -> 24000000000000 (mix of digits and words)
new_words = []
sigs = []
i = 0
while i < len(words)-1:
if check_int(words_lower[i]) and words_lower[i+1] in Magnitude_with_hundred:
new_words.append(str(float(words_lower[i]) * Magnitude_with_hundred[words_lower[i+1]]))
sigs.append(f'{words_lower[i]} {words_lower[i+1]}')
i += 1
else:
new_words.append(words[i])
sigs.append('')
if i == len(words) - 2:
new_words.append(words[i+1])
sigs.append('')
i += 1
return new_words, sigs
#
#
def normalize_numbers_in_sent(sent, remove_pos=False, never_split=None):
"""
Given a sentence, perform preprocessing and normalize number words to digits.
:param sent: sentence (str)
:return: a list of normalized words from the sentence
"""
out_words = []
words, sigfigs = preprocess(sent, remove_pos, never_split)
out_sigfigs = []
i = 0
while i < len(words):
for j in range(len(words), i, -1):
try:
number = str(text2num(words[i:j]))
if sigfigs[i] == '':
out_sigfigs.append(' '.join(words[i:j]))
else:
out_sigfigs.append(sigfigs[i])
out_words.append(number)
i = j-1 # skip this sequence since we replaced it with a number
break
except NumberException:
if j == i+1:
out_sigfigs.append('-1')
out_words.append(words[i])
i += 1
assert len(out_sigfigs) == len(out_words)
return out_words, out_sigfigs
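# A small, hypothetical demonstration of the standalone number-normalisation helpers above;
# the sentence and the expected outputs in the comments are illustrative assumptions.
if __name__ == '__main__':
    demo_words, demo_sigfigs = normalize_numbers_in_sent('The startup raised three million dollars')
    # Expected (roughly): ['the', 'startup', 'raised', '3000000.0', 'dollars']
    print(demo_words)
    # Converted spans keep their original wording as the sigfig entry, '-1' elsewhere,
    # e.g. ['-1', '-1', '-1', 'three million', '-1']
    print(demo_sigfigs)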
| 39.953992
| 183
| 0.601605
| 3,605
| 29,526
| 4.772538
| 0.170042
| 0.022087
| 0.015984
| 0.013252
| 0.384946
| 0.328277
| 0.280732
| 0.242371
| 0.21517
| 0.200407
| 0
| 0.025797
| 0.294994
| 29,526
| 739
| 184
| 39.953992
| 0.80073
| 0.264005
| 0
| 0.227176
| 0
| 0.027601
| 0.151609
| 0.031277
| 0
| 0
| 0.005348
| 0
| 0.006369
| 1
| 0.063694
| false
| 0.002123
| 0.016985
| 0.004246
| 0.178344
| 0.004246
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc1f29f43c293c82628f38a87129e37c79fd02ea
| 6,694
|
py
|
Python
|
dipole/splitting_dipole.py
|
wheelerMT/spin-1_BEC
|
e8ea34699b4001847c6b4c7451c11be241ce598f
|
[
"MIT"
] | null | null | null |
dipole/splitting_dipole.py
|
wheelerMT/spin-1_BEC
|
e8ea34699b4001847c6b4c7451c11be241ce598f
|
[
"MIT"
] | null | null | null |
dipole/splitting_dipole.py
|
wheelerMT/spin-1_BEC
|
e8ea34699b4001847c6b4c7451c11be241ce598f
|
[
"MIT"
] | null | null | null |
import numpy as np
import multiprocessing as mp
import pyfftw
from numpy import pi, exp, sqrt, sin, cos, conj, arctan, tanh, tan
from numpy import heaviside as heav
from include import helper
import h5py
# ---------Spatial and potential parameters--------------
Mx = My = 64
Nx = Ny = 128 # Number of grid pts
dx = dy = 1 / 2 # Grid spacing
dkx = pi / (Mx * dx)
dky = pi / (My * dy) # K-space spacing
len_x = Nx * dx # Box length
len_y = Ny * dy
x = np.arange(-Mx, Mx) * dx
y = np.arange(-My, My) * dy
X, Y = np.meshgrid(x, y) # Spatial meshgrid
data = h5py.File('../data/splitting_dipole_data.hdf5', 'a')
data.create_dataset('grid/x', x.shape, data=x)
data.create_dataset('grid/y', y.shape, data=y)
kx = np.fft.fftshift(np.arange(-Mx, Mx) * dkx)
ky = np.fft.fftshift(np.arange(-My, My) * dky)
Kx, Ky = np.meshgrid(kx, ky) # K-space meshgrid
# Initialising FFTs
cpu_count = mp.cpu_count()
wfn_data = pyfftw.empty_aligned((Nx, Ny), dtype='complex128')
fft_forward = pyfftw.FFTW(wfn_data, wfn_data, axes=(0, 1), threads=cpu_count)
fft_backward = pyfftw.FFTW(wfn_data, wfn_data, direction='FFTW_BACKWARD', axes=(0, 1), threads=cpu_count)
# Framework for wavefunction data
psi_plus_k = pyfftw.empty_aligned((Nx, Ny), dtype='complex128')
psi_0_k = pyfftw.empty_aligned((Nx, Ny), dtype='complex128')
psi_minus_k = pyfftw.empty_aligned((Nx, Ny), dtype='complex128')
# Controlled variables
V = 0. # Doubly periodic box
p = q = 0.
c0 = 2
c1 = 0.5 # Effective 3-component BEC
k = 0 # Array index
# ------------------------------ Generating SQV's -------------------------
# Euler angles
alpha = 0.
beta = pi / 4
gamma = 0.
N_vort = 2 # Number of vortices
pos = [-10, 0, 10, 0]
theta_k = np.empty((N_vort, Nx, Ny))
theta_tot = np.empty((Nx, Ny))
for k in range(N_vort // 2):
# Scaling positional arguments
Y_minus = 2 * pi * (Y - pos[k]) / len_y
X_minus = 2 * pi * (X - pos[N_vort // 2 + k]) / len_x
Y_plus = 2 * pi * (Y - pos[N_vort + k]) / len_y
X_plus = 2 * pi * (X - pos[3 * N_vort // 2 + k]) / len_x
x_plus = 2 * pi * pos[3 * N_vort // 2 + k] / len_x
x_minus = 2 * pi * pos[N_vort // 2 + k] / len_x
for nn in np.arange(-5, 5):
theta_k[k, :, :] += arctan(
tanh((Y_minus + 2 * pi * nn) / 2) * tan((X_minus - pi) / 2)) \
- arctan(tanh((Y_plus + 2 * pi * nn) / 2) * tan((X_plus - pi) / 2)) \
+ pi * (heav(X_plus, 1.) - heav(X_minus, 1.))
theta_k[k, :, :] -= (2 * pi * Y / len_y) * (x_plus - x_minus) / (2 * pi)
theta_tot += theta_k[k, :, :]
# Initial wavefunction
Psi = np.empty((3, Nx, Ny), dtype='complex128')
Psi[0, :, :] = np.zeros((Nx, Ny)) + 0j
Psi[1, :, :] = np.ones((Nx, Ny), dtype='complex128') * exp(1j * theta_tot)
Psi[2, :, :] = np.zeros((Nx, Ny)) + 0j
psi_plus, psi_0, psi_minus = helper.rotation(Psi, Nx, Ny, alpha, beta, gamma) # Performs rotation to wavefunction
# Aligning wavefunction to potentially speed up FFTs
pyfftw.byte_align(psi_plus)
pyfftw.byte_align(psi_0)
pyfftw.byte_align(psi_minus)
# ------------------------------------------------------------------------
# Normalisation constants
N_plus = dx * dy * np.linalg.norm(psi_plus) ** 2
N_0 = dx * dy * np.linalg.norm(psi_0) ** 2
N_minus = dx * dy * np.linalg.norm(psi_minus) ** 2
# Time steps, number and wavefunction save variables
Nt = 80000
Nframe = 200
dt = 5e-3
t = 0.
# Saving time variables:
data.create_dataset('time/Nt', data=Nt)
data.create_dataset('time/dt', data=dt)
data.create_dataset('time/Nframe', data=Nframe)
# Setting up variables to be sequentially saved:
psi_plus_save = data.create_dataset('wavefunction/psi_plus', (Nx, Ny, Nt/Nframe), dtype='complex128')
psi_0_save = data.create_dataset('wavefunction/psi_0', (Nx, Ny, Nt/Nframe), dtype='complex128')
psi_minus_save = data.create_dataset('wavefunction/psi_minus', (Nx, Ny, Nt/Nframe), dtype='complex128')
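# The loop below appears to implement an imaginary-time, second-order split-step scheme:
# a half kinetic/quadratic-Zeeman step in k-space, the spin-rotation and interaction flow
# in real space, another half kinetic step, and a final renormalisation of each component.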
for i in range(Nt):
# Spin vector terms:
F_perp = sqrt(2.) * (conj(psi_plus) * psi_0 + conj(psi_0) * psi_minus)
Fz = abs(psi_plus) ** 2 - abs(psi_minus) ** 2
F = sqrt(abs(Fz) ** 2 + abs(F_perp) ** 2) # Magnitude of spin vector
# Total density
n = abs(psi_minus) ** 2 + abs(psi_0) ** 2 + abs(psi_plus) ** 2
# Sin and cosine terms for solution
C = cos(c1 * F * (-1j * dt))
if F.min() == 0:
S = np.zeros((Nx, Ny), dtype='complex128') # Ensures no division by zero
else:
S = 1j * sin(c1 * F * (-1j * dt)) / F
# Forward FFTs
fft_forward(psi_plus, psi_plus_k)
fft_forward(psi_0, psi_0_k)
fft_forward(psi_minus, psi_minus_k)
# Computing kinetic energy + quadratic Zeeman
psi_plus_k *= exp(-0.25 * dt * (Kx ** 2 + Ky ** 2 + 2 * q)) / (Nx * Ny)
psi_0_k *= exp(-0.25 * dt * (Kx ** 2 + Ky ** 2)) / (Nx * Ny)
psi_minus_k *= exp(-0.25 * dt * (Kx ** 2 + Ky ** 2 + 2 * q)) / (Nx * Ny)
# Inverse FFTs
fft_backward(psi_plus_k, psi_plus)
fft_backward(psi_0_k, psi_0)
fft_backward(psi_minus_k, psi_minus)
# Rescaling
psi_plus *= (Nx * Ny)
psi_0 *= (Nx * Ny)
psi_minus *= (Nx * Ny)
# Trap, linear Zeeman & interaction flow
psi_plus = ((C - S * Fz) * psi_plus - 1. / sqrt(2.) * S * conj(F_perp) * psi_0) * exp(-dt * (V - p + c0 * n))
psi_0 = (-1. / sqrt(2.) * S * F_perp * psi_plus + C * psi_0 - 1. / sqrt(2.) * S * conj(F_perp) * psi_minus) \
* exp(-dt * (V + c0 * n))
psi_minus = (-1. / sqrt(2.) * S * F_perp * psi_0 + (C + S * Fz) * psi_minus) * exp(-dt * (V + p + c0 * n))
# Forward FFTs
fft_forward(psi_plus, psi_plus_k)
fft_forward(psi_0, psi_0_k)
fft_forward(psi_minus, psi_minus_k)
# Computing kinetic energy + quadratic Zeeman
psi_plus_k *= exp(-0.25 * dt * (Kx ** 2 + Ky ** 2 + 2 * q)) / (Nx * Ny)
psi_0_k *= exp(-0.25 * dt * (Kx ** 2 + Ky ** 2)) / (Nx * Ny)
psi_minus_k *= exp(-0.25 * dt * (Kx ** 2 + Ky ** 2 + 2 * q)) / (Nx * Ny)
# Inverse FFTs
fft_backward(psi_plus_k, psi_plus)
fft_backward(psi_0_k, psi_0)
fft_backward(psi_minus_k, psi_minus)
# Rescaling
psi_plus *= (Nx * Ny)
psi_0 *= (Nx * Ny)
psi_minus *= (Nx * Ny)
# Renormalizing wavefunction
psi_plus *= sqrt(N_plus) / sqrt(dx * dy * np.linalg.norm(psi_plus) ** 2)
psi_0 *= sqrt(N_0) / sqrt(dx * dy * np.linalg.norm(psi_0) ** 2)
psi_minus *= sqrt(N_minus) / sqrt(dx * dy * np.linalg.norm(psi_minus) ** 2)
# Prints current time and saves data to an array
if np.mod(i, Nframe) == 0:
print('it = %1.4f' % t)
psi_plus_save[:, :, k] = psi_plus[:, :]
psi_0_save[:, :, k] = psi_0[:, :]
psi_minus_save[:, :, k] = psi_minus[:, :]
k += 1
t += dt
data.close()
| 34.864583
| 114
| 0.586047
| 1,105
| 6,694
| 3.366516
| 0.182805
| 0.032258
| 0.036559
| 0.035753
| 0.412634
| 0.396237
| 0.307796
| 0.262903
| 0.204839
| 0.174194
| 0
| 0.039799
| 0.22677
| 6,694
| 191
| 115
| 35.04712
| 0.678903
| 0.168659
| 0
| 0.190476
| 0
| 0
| 0.046352
| 0.013942
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.055556
| 0
| 0.055556
| 0.007937
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc2274d5bd59faf9232572f6514dafd536557966
| 625
|
py
|
Python
|
mock_file.py
|
MahirGulzar/fpointnet-tiny
|
e79406f648573d50fa3988ca987db652ab1286b8
|
[
"MIT"
] | null | null | null |
mock_file.py
|
MahirGulzar/fpointnet-tiny
|
e79406f648573d50fa3988ca987db652ab1286b8
|
[
"MIT"
] | null | null | null |
mock_file.py
|
MahirGulzar/fpointnet-tiny
|
e79406f648573d50fa3988ca987db652ab1286b8
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
FLIPPING_TENSOR = tf.constant([1.0, -1.0, 1.0])
@tf.function
def sample_data(points, labels, num_point):
if tf.random.uniform(shape=()) >= 0.5:
return points * FLIPPING_TENSOR, labels
return points, labels
mock_data = tf.constant([
[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]
])
mock_labels = tf.constant([
[1.], [0.], [1.]
])
sampling_lambda = lambda x, y: sample_data(x, y, 512)
train_data = tf.data.Dataset.from_tensors((mock_data, mock_labels)) \
.map(sampling_lambda) \
.unbatch() \
.batch(1) \
.repeat(5)
for x, y in train_data:
print(x)
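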
| 19.53125
| 69
| 0.6048
| 95
| 625
| 3.831579
| 0.494737
| 0.021978
| 0.090659
| 0.065934
| 0.071429
| 0
| 0
| 0
| 0
| 0
| 0
| 0.05071
| 0.2112
| 625
| 32
| 70
| 19.53125
| 0.687627
| 0
| 0
| 0.086957
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0.043478
| 0
| 0.173913
| 0.043478
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc24427e78d6696d2cac568f07f35aa2881831bf
| 10,683
|
py
|
Python
|
Blog.py
|
OliverChao/PyWhoAmI
|
8742e0a44c4e673d038779b01b14b0cfb7d5395f
|
[
"MIT"
] | null | null | null |
Blog.py
|
OliverChao/PyWhoAmI
|
8742e0a44c4e673d038779b01b14b0cfb7d5395f
|
[
"MIT"
] | null | null | null |
Blog.py
|
OliverChao/PyWhoAmI
|
8742e0a44c4e673d038779b01b14b0cfb7d5395f
|
[
"MIT"
] | null | null | null |
import aiohttp
import asyncio
import time
import time
import argparse
import glob
import os
import shutil
import random
import re
import requests
import sys
from concurrent import futures
import pdfkit
import time
from retrying import retry
from pygments import highlight
from pygments.lexers import guess_lexer, get_lexer_by_name
from pygments.lexers import CppLexer
from pygments.formatters.terminal import TerminalFormatter
from pygments.util import ClassNotFound
from pyquery import PyQuery as pq
from requests.exceptions import ConnectionError
from requests.exceptions import SSLError
import numbers
if sys.version < '3':
import codecs
from urllib import quote as url_quote
from urllib import getproxies
# Handling Unicode: http://stackoverflow.com/a/6633040/305414
def u(x):
return codecs.unicode_escape_decode(x)[0]
else:
from urllib.request import getproxies
from urllib.parse import quote as url_quote
def u(x):
return x
scripFilePath = os.path.split(os.path.realpath(__file__))[0]
PDF_DIR = os.path.join(scripFilePath,'whoamiPDFdir')
CPP_DIR = os.path.join(scripFilePath,'whoamiCPPdir')
class Result(object):
def __init__(self, host, args):
self.args = args
self.host = host
self._search_url = 'https://www.bing.com/search?q=site:{0}%20{1}'
self._USER_AGENTS = ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:11.0) Gecko/20100101 Firefox/11.0',
'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:22.0) Gecko/20100 101 Firefox/22.0',
# 'Mozilla/5.0 (Windows NT 6.1; rv:11.0) Gecko/20100101 Firefox/11.0',
('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_4) AppleWebKit/536.5 (KHTML, like Gecko) '
'Chrome/19.0.1084.46 Safari/536.5'),
('Mozilla/5.0 (Windows; Windows NT 6.1) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.46'
'Safari/536.5'), )
self.data = self.whoami()
def __call__(self, *args, **kwargs):
return self.show_results()
def __len__(self):
return len(self.data)
def whoami(self):
self.args['query'] = ' '.join(self.args['query']).replace('?', '')
try:
return self.confirm_links() or 'Sorry, couldn\'t find any help with that topic\n'
except (ConnectionError, SSLError):
return 'Failed to establish network connection\n'
def confirm_links(self):
dic = self._get_dict(self.args['query'])
if not dic:
return False
        '''Skip link verification for now... testing against multiple domains...'''
return dic
# def _is_article(link):
# return re.search('article/details/\d+', link)
# # question_links = [link for link in links if _is_article(link)]
# # https://blog.csdn.net/u013177568/article/details/62432761
# confirm_dict = {k: v for k, v in dic.items() if _is_article(v)}
# return confirm_dict
def _get_dict(self, query):
search_url = self._search_url.format(self.host, url_quote(query))
# search_url : site:blog.csdn.net 1173 HDU
result = self._get_result(search_url)
html = pq(result)
        # return the answer links as a dict of {title: url}
return self._extract_links(html, 'bing')
@retry(stop_max_attempt_number=3)
def _get_result(self, url):
try:
return requests.get(url, headers={'User-Agent': random.choice(self._USER_AGENTS)}, ).text
# verify = VERIFY_SSL_CERTIFICATE).text
except requests.exceptions.SSLError as e:
print('[ERROR] Encountered an SSL Error.\n')
print('[*]retrying again automatically ')
raise e
def _extract_links(self, html, search_engine):
if search_engine == 'bing':
return self._extract_dict_from_bing(html)
return None
@staticmethod
def _extract_dict_from_bing(html):
html.remove_namespaces()
dic = {}
for a in html('.b_algo')('h2')('a'):
# name ='[*{0}*] {1}'.format(str(num),a.text)
name = a.text
link = a.attrib['href']
dic[name] = str(link)
# num+=1
return dic
def show_results(self):
if isinstance(self.data,str):
print('[!!] ',self.data)
return
num = 0
for k, v in self.data.items():
print('[*{}*] '.format(str(num)), end='')
print(k, end=' [*link*] ')
print(v)
num += 1
class Blog(Result):
def __init__(self, host, args):
super().__init__(host, args)
self.links = list(self.data.values())
def show_code(self):
url = list(self.data.values())[self.args['print']]
main_page = self._parse_url(url)
s = self._get_code(main_page, self.args) or 'sorry,this article has no code...'
print(s)
def save_to_pdf(self, url):
html_template = u"""
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8" />
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
</head>
<body>
<!-- <center><h1>{title}</h1></center> -->
{content}
</body>
</html>
"""
options = {
'page-size': 'Letter',
'margin-top': '0.75in',
'margin-right': '0.75in',
'margin-bottom': '0.75in',
'margin-left': '0.75in',
'encoding': "UTF-8",
'custom-header': [
('Accept-Encoding', 'gzip')
],
'cookie': [
('cookie-name1', 'cookie-value1'),
('cookie-name2', 'cookie-value2'),
],
'outline-depth': 10,
}
main_page = self._parse_url(url)
title = main_page('h1').eq(0).text()
title = re.sub('[<>\?\\\/:\*\s\[\]\(\)\-]', '.', title)
html = html_template.format(title='Oliver loves Annabelle forever~', content=main_page.html())
if not os.path.exists(PDF_DIR):
os.makedirs(PDF_DIR)
filePath = os.path.join(PDF_DIR, title + '.pdf')
if self._test_is_open_if_exists(filePath):
return
try:
print('[*] save to ', filePath)
self._save_to_pdf(html,filePath)
print('[*] successfully ')
except:
            print('[!!] The page being saved may contain conflicting HTML')
            print('[NOTE] Pages embedding html or similar markup are more likely to conflict')
            print('[!!] save failed')
            print('[!!] If the failure was caused by image paths, the text and code parts will still be rendered to PDF')
try:
            # System-level commands don't seem to be caught by try/except...
self.open_after_save(filePath)
except:
            print('[!!] File was not opened; an IO error may have occurred while saving')
            print('[!!] Please regenerate the PDF, or the page structure may not be suitable for PDF generation')
            print('[~~] Sorry about that...')
@staticmethod
def _save_to_pdf(html, filepath):
wkhtmltopdf_path = scripFilePath + '/wkhtmltox/bin/wkhtmltopdf.exe'
config = pdfkit.configuration(wkhtmltopdf=wkhtmltopdf_path)
pdfkit.from_string(html, filepath, configuration=config)
def open_after_save(self, pdf_path):
if not self.args['open_pdf']:
return
try:
if len(self.args['save']):
return False
except TypeError as e:
pass
# if args['pdf'] and PDFpath.split('.')[-1]!='pdf':
# PDFpath += '.pdf'
os.popen(pdf_path)
def _test_is_open_if_exists(self, file_path):
try:
if len(self.args['save']):
return False
except TypeError as e:
pass
if self.args['open_pdf']:
if os.path.exists(file_path):
                print('File already exists, opening it directly')
os.popen(file_path)
return True
else:
return False
def _parse_url(self, url):
'''
        :param url: page URL
        :return: pyquery object for the main content area of the page
'''
page = self._get_result(url)
html = pq(page)
# the main part of the article
return html('.blog-content-box')
def _get_code(self, main_page, args):
'''
:param main_page:main_page=_parse_url(url)
:param args: args
:return: str
'''
html = main_page('article')('pre')('code') or main_page('article')('pre')
if not html:
return None
ans = []
ans_split = '\n' + '<==>' * 17 + '\n'
if args['all_code']:
for node in html:
node = pq(node)
s = node.html()
# s=re.sub('</?[^>]+>','',s)
s = re.sub('<((span)|(code)|(/span)|(/code)){1}.*?>', '', s)
                s = s.replace('&gt;', '>').replace('&lt;', '<')  # unescape HTML entities
ans.append(self._add_color(s, args))
else:
node = pq(html[-1])
s = node.html()
s = re.sub('<((span)|(code)|(/span)|(/code)){1}.*?>', '', s)
            s = s.replace('&gt;', '>').replace('&lt;', '<')  # unescape HTML entities
ans.append(self._add_color(s, args))
return ans_split.join(ans)
@staticmethod
def _add_color(code, args):
if not args['color']:
return code
lexer = None
try:
lexer = guess_lexer(code)
except ClassNotFound:
return code
return highlight(code, CppLexer(), TerminalFormatter(bg='dark'))
def save_to_cpp(self):
ans_split = '\n' + '<==>' * 17 + '\n'
url = self.links[self.args['number_link']]
main_page = self._parse_url(url)
title = main_page('h1').eq(0).text()
title = re.sub('[<>\?\\\/:\*\s]', '.', title)
s = self._get_code(main_page, self.args)
if not s:
print('sorry , this article has no code...')
print('please try another...')
return
if not os.path.exists(CPP_DIR):
os.makedirs(CPP_DIR)
filePath = os.path.join(CPP_DIR, title + '.cpp')
if self._test_is_open_if_exists(filePath):
return
code = s.split(ans_split)[-1]
with open(filePath, 'w')as f:
f.write(code)
print('[*]save successfully...')
try:
self.open_after_save(filePath)
except:
            print('[!!] File was not opened; an IO error may have occurred while saving')
print('[!!]open failed')
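# A minimal, hypothetical usage sketch: Result/Blog perform a live Bing site-search, so this
# needs network access; the query below is a placeholder.
if __name__ == '__main__':
    demo_args = {'query': ['1173', 'HDU']}
    Result('blog.csdn.net', demo_args).show_results()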
| 33.914286
| 114
| 0.529346
| 1,239
| 10,683
| 4.414044
| 0.263923
| 0.019016
| 0.008228
| 0.005485
| 0.237703
| 0.175901
| 0.167307
| 0.156336
| 0.146096
| 0.1322
| 0
| 0.026158
| 0.330806
| 10,683
| 315
| 115
| 33.914286
| 0.738845
| 0.078255
| 0
| 0.270916
| 0
| 0.01992
| 0.198748
| 0.035017
| 0.007968
| 0
| 0
| 0
| 0
| 1
| 0.087649
| false
| 0.007968
| 0.119522
| 0.015936
| 0.326693
| 0.091633
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc24c739bd5d57047e0ff4c5f882289fbb007117
| 722
|
py
|
Python
|
corehq/apps/app_manager/tests/test_xml_parsing.py
|
dslowikowski/commcare-hq
|
ad8885cf8dab69dc85cb64f37aeaf06106124797
|
[
"BSD-3-Clause"
] | 1
|
2015-02-10T23:26:39.000Z
|
2015-02-10T23:26:39.000Z
|
corehq/apps/app_manager/tests/test_xml_parsing.py
|
SEL-Columbia/commcare-hq
|
992ee34a679c37f063f86200e6df5a197d5e3ff6
|
[
"BSD-3-Clause"
] | null | null | null |
corehq/apps/app_manager/tests/test_xml_parsing.py
|
SEL-Columbia/commcare-hq
|
992ee34a679c37f063f86200e6df5a197d5e3ff6
|
[
"BSD-3-Clause"
] | null | null | null |
from django.test import SimpleTestCase as TestCase
from corehq.apps.app_manager.models import _parse_xml
import os
class XMLParsingTest(TestCase):
def testUnicodeError(self):
"""Tests a bug found in Unicode processing of a form"""
file_path = os.path.join(os.path.dirname(__file__), "data", "unicode_error_form.xhtml")
with open(file_path, "rb") as f:
xml_data = f.read()
try:
_parse_xml(xml_data) # this should not raise an error
except:
self.fail("Parsing normal string data shouldn't fail!")
try:
_parse_xml(unicode(xml_data))
except:
self.fail("Parsing unicode data shouldn't fail!")
| 36.1
| 95
| 0.634349
| 95
| 722
| 4.631579
| 0.568421
| 0.054545
| 0.05
| 0.095455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.272853
| 722
| 19
| 96
| 38
| 0.838095
| 0.112188
| 0
| 0.25
| 0
| 0
| 0.170079
| 0.037795
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.1875
| 0
| 0.3125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc26599fa48fc7ee6289bde05e441a088fd069d9
| 447
|
py
|
Python
|
swapsort.py
|
ArshSood/sorting
|
97e1188ad626420e8ffeab992f7e98a2a91ae4b1
|
[
"Apache-2.0"
] | null | null | null |
swapsort.py
|
ArshSood/sorting
|
97e1188ad626420e8ffeab992f7e98a2a91ae4b1
|
[
"Apache-2.0"
] | null | null | null |
swapsort.py
|
ArshSood/sorting
|
97e1188ad626420e8ffeab992f7e98a2a91ae4b1
|
[
"Apache-2.0"
] | null | null | null |
# Selection sort that records the index pair of every swap, then prints the number of swaps followed by the recorded indices.
n=int(input())
array=list(map(int,input().split()))
i=0
count=[]
counter=0
while i<len(array):
min=i
start=i+1
while(start<len(array)):
if array[start]<array[min]:
min=start
start+=1
if i!=min:
array[i],array[min]=array[min],array[i]
count.append(i)
count.append(min)
counter+=1
i+=1
print(counter)
for i in range(0,len(count)):
print(count[i],end=" ")
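# Worked example (illustrative): for the input
#   5
#   3 1 2 5 4
# the selection sort performs 3 swaps, so the output is
#   3
#   0 1 1 2 3 4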
| 19.434783
| 47
| 0.557047
| 71
| 447
| 3.507042
| 0.338028
| 0.128514
| 0.072289
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021021
| 0.255034
| 447
| 22
| 48
| 20.318182
| 0.726727
| 0.01566
| 0
| 0
| 0
| 0
| 0.002283
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.095238
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc26c7b5181466b2721115acd12b6c40ca2fe4ae
| 7,699
|
py
|
Python
|
preprocessing/booking.py
|
madcat1991/clustered_cars
|
a79b83d9d14360c6c51d4bf462217ef690e62c74
|
[
"Apache-2.0"
] | null | null | null |
preprocessing/booking.py
|
madcat1991/clustered_cars
|
a79b83d9d14360c6c51d4bf462217ef690e62c74
|
[
"Apache-2.0"
] | null | null | null |
preprocessing/booking.py
|
madcat1991/clustered_cars
|
a79b83d9d14360c6c51d4bf462217ef690e62c74
|
[
"Apache-2.0"
] | null | null | null |
"""
This script cleans and prepares the dataset of bookings for future use
"""
import argparse
import logging
import sys
import pandas as pd
from preprocessing.common import canonize_datetime, raw_data_to_df, check_processed_columns, check_data
OLD_BREAKPOINT_MATCHER = {
2001: [
(1, 1, "New Year"), (1, 6, "Winter"),
(2, 17, "Half Terms"), (2, 24, "Spring and Autumn"),
(4, 7, "Easter"), (4, 21, "Spring and Autumn"),
(5, 26, "SBH"),
(6, 2, "Early Summer"),
(7, 21, "Summer holidays"),
(9, 1, "Early Autumn"), (9, 15, "Spring and Autumn"),
(10, 27, "Half Terms"),
(11, 3, "Winter"),
(12, 22, "Christmas"), (12, 29, "New Year"),
],
2002: [
(1, 1, "New Year"), (1, 5, "Winter"),
(2, 16, "Half Terms"), (2, 23, "Spring and Autumn"),
(4, 6, "Easter"), (4, 20, "Spring and Autumn"),
(5, 25, "SBH"),
(6, 1, "Early Summer"),
(7, 20, "Summer holidays"),
(8, 31, "Early Autumn"),
(9, 14, "Spring and Autumn"),
(10, 26, "Half Terms"),
(11, 2, "Winter"),
(12, 21, "Christmas"), (12, 28, "New Year"),
],
2003: [
(1, 1, "New Year"), (1, 4, "Winter"),
(2, 15, "Half Terms"), (2, 22, "Spring and Autumn"),
(4, 5, "Easter"), (4, 19, "Spring and Autumn"),
(5, 24, "SBH"), (5, 31, "Early Summer"),
(7, 19, "Summer holidays"),
(8, 30, "Early Autumn"),
(9, 13, "Spring and Autumn"),
(10, 25, "Half Terms"),
(11, 1, "Winter"),
(12, 20, "Christmas"), (12, 27, "New Year"),
],
2004: [
(1, 1, "New Year"), (1, 3, "Winter"),
(2, 14, "Half Terms"), (2, 21, "Spring and Autumn"),
(4, 3, "Easter"), (4, 17, "Spring and Autumn"),
(5, 22, "SBH"), (5, 29, "Early Summer"),
(7, 17, "Summer holidays"),
(8, 28, "Early Autumn"),
(9, 11, "Spring and Autumn"),
(10, 23, "Half Terms"), (10, 30, "Winter"),
(12, 18, "Christmas"),
],
2005: [
(1, 1, "Winter"),
(2, 12, "Half Terms"), (2, 19, "Spring and Autumn"),
(4, 2, "Easter"), (4, 16, "Spring and Autumn"),
(5, 21, "SBH"), (5, 28, "Early Summer"),
(7, 16, "Summer holidays"),
(8, 27, "Early Autumn"),
(9, 10, "Spring and Autumn"),
(10, 22, "Half Terms"), (10, 29, "Winter"),
(12, 17, "Christmas"), (12, 31, "New Year"),
],
2006: [
(1, 1, "New Year"), (1, 7, "Winter"),
(2, 18, "Half Terms"), (2, 25, "Spring and Autumn"),
(4, 8, "Easter"), (4, 22, "Spring and Autumn"),
(5, 27, "SBH"),
(6, 3, "Early Summer"),
(7, 22, "Summer holidays"),
(9, 2, "Early Autumn"), (9, 16, "Spring and Autumn"),
(10, 28, "Half Terms"),
(11, 4, "Winter"),
(12, 23, "Christmas"), (12, 30, "New Year"),
],
2007: [
(1, 1, "New Year"), (1, 6, "Winter"),
(2, 17, "Half Terms"), (2, 24, "Spring and Autumn"),
(4, 7, "Easter"),
(4, 21, "Spring and Autumn"),
(5, 26, "SBH"),
(6, 2, "Early Summer"),
(7, 21, "Summer holidays"),
(9, 1, "Early Autumn"), (9, 15, "Spring and Autumn"),
(10, 27, "Half Terms"),
(11, 3, "Winter"),
(12, 22, "Christmas"), (12, 29, "New Year"),
],
2008: [
(1, 1, "New Year"), (1, 5, "Winter"),
(2, 16, "Half Terms"), (2, 23, "Spring and Autumn"),
(3, 22, "Easter"),
(4, 19, "Spring and Autumn"),
(5, 24, "SBH"), (5, 31, "Early Summer"),
(7, 19, "Summer holidays"),
(8, 30, "Early Autumn"),
(9, 13, "Spring and Autumn"),
(10, 25, "Half Terms"),
(11, 1, "Winter"),
(12, 20, "Christmas"),
],
}
COLS_TO_DROP = [
'pname', 'region', 'sleeps', 'stars', 'proppostcode', # can be taken from property
'bookdate_scoreboard', 'book_year', 'hh_gross', 'hh_net', 'ho', # HH specific
'holidayprice', # correlates with avg_spend_per_head
'bighouse', 'burghisland', 'boveycastle', # no need
'sourcecostid', # is a pair of u'sourcedesc', u'category'
'drivedistance', # correlates with drivetime
]
NOT_NA_COLS = [u'bookcode', u'code', u'propcode', u'year', u'breakpoint', u'avg_spend_per_head']
DATE_COLS = [u'bookdate', u'sdate', u"fdate"]
FLOAT_COLS = [u'avg_spend_per_head', u'drivetime']
INT_COLS = [u'adults', u'babies', u'children', u'pets']
CATEGORICAL_COLS = [u'sourcedesc', u'category']
def get_breakpoint(dt):
breakpoint = None
matcher = OLD_BREAKPOINT_MATCHER.get(dt.year, [])
for _m, _d, _b in matcher:
if _m > dt.month or (_m == dt.month and _d > dt.day):
break
breakpoint = _b
return breakpoint
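# Illustrative example (hypothetical date): get_breakpoint returns the label of the last
# season whose start date falls on or before the given date, e.g.
#   from datetime import date
#   get_breakpoint(date(2003, 7, 25))  # -> "Summer holidays"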
def fine_tune_df(df):
logging.info(u"DF shape before fine tuning: %s", df.shape)
averages = {col: df[col].dropna().mean() for col in FLOAT_COLS}
zeros = {col: 0 for col in INT_COLS}
most_popular_values = {col: df[col].value_counts().index[0] for col in CATEGORICAL_COLS}
logging.info(u"Filling NA with average: %s", averages)
df = df.fillna(averages)
logging.info(u"Filling NA with zeros: %s", zeros)
df = df.fillna(zeros)
logging.info(u"Filling NA with most populars: %s", most_popular_values)
df = df.fillna(most_popular_values)
df[INT_COLS] = df[INT_COLS].astype(int)
logging.info(u"Before cleaning NA: %s", df.shape)
df = df.dropna(subset=NOT_NA_COLS)
logging.info(u"After cleaning NA: %s", df.shape)
if pd.isnull(df.values).any():
logging.error(u"NA values left in df")
return df
def fill_missed_breakpoints(df):
df = df[pd.notnull(df.breakpoint) | pd.notnull(df.zone_name)]
logging.info(u"Bookings having breakpoint or zone_name: %s", df.shape[0])
logging.info(u"Filling missing breakpoints: %s", df[pd.isnull(df.breakpoint)].shape[0])
df.breakpoint[pd.isnull(df.breakpoint)] = df[pd.isnull(df.breakpoint)].sdate.apply(get_breakpoint)
logging.info(u"Left NA breakpoints: %s", df[pd.isnull(df.breakpoint)].shape[0])
return df.drop(u'zone_name', axis=1)
def main():
check_data(args.input_csv, args.input_csv_delimiter)
df = raw_data_to_df(args.input_csv, args.input_csv_delimiter)
original_columns = df.columns
logging.info(u"DF initial shape: %s", df.shape)
df = df.drop(COLS_TO_DROP, axis=1)
df = canonize_datetime(df, DATE_COLS)
df = fill_missed_breakpoints(df)
df = fine_tune_df(df)
processed_columns = set(df.columns).union(COLS_TO_DROP + [u'zone_name'])
check_processed_columns(processed_columns, original_columns)
logging.info(u"Dumping data to: %s", args.output_csv)
df.to_csv(args.output_csv, index=False)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-i', required=True, dest="input_csv",
help=u'Path to a csv file with bookings')
parser.add_argument('--id', default=";", dest="input_csv_delimiter",
help=u"The input file's delimiter. Default: ';'")
parser.add_argument('-o', default="bookings.csv", dest="output_csv",
help=u'Path to an output file. Default: booking.csv')
parser.add_argument("--log-level", default='INFO', dest="log_level",
choices=['DEBUG', 'INFO', 'WARNINGS', 'ERROR'], help=u"Logging level")
args = parser.parse_args()
logging.basicConfig(
format='%(asctime)s %(levelname)s:%(message)s', stream=sys.stdout, level=getattr(logging, args.log_level)
)
main()
| 36.661905
| 113
| 0.56267
| 1,067
| 7,699
| 3.951265
| 0.226804
| 0.051233
| 0.085389
| 0.030361
| 0.274431
| 0.223672
| 0.205882
| 0.190228
| 0.190228
| 0.171252
| 0
| 0.066794
| 0.251331
| 7,699
| 209
| 114
| 36.837321
| 0.664643
| 0.029354
| 0
| 0.235955
| 0
| 0
| 0.284182
| 0.003351
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022472
| false
| 0
| 0.02809
| 0
| 0.067416
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc272e521d0e985bdda9352e00baa8b30c9ad89c
| 1,309
|
py
|
Python
|
src/api/wish.py
|
PKU-GeekGame/gs-backend
|
d13219609d4e52810540bda6a3bddac1bf5406ce
|
[
"MIT"
] | 7
|
2022-02-06T09:49:27.000Z
|
2022-03-03T14:23:32.000Z
|
src/api/wish.py
|
PKU-GeekGame/gs-backend
|
d13219609d4e52810540bda6a3bddac1bf5406ce
|
[
"MIT"
] | null | null | null |
src/api/wish.py
|
PKU-GeekGame/gs-backend
|
d13219609d4e52810540bda6a3bddac1bf5406ce
|
[
"MIT"
] | null | null | null |
from sanic import Blueprint, Request, HTTPResponse, response
from sanic.models.handler_types import RouteHandler
from functools import wraps
from inspect import isawaitable
from typing import Callable, Dict, Any, Union, Awaitable, List, Optional
ACCEPTED_WISH_VERS = ['wish.alpha.v1']
WishHandler = Callable[..., Union[Dict[str, Any], Awaitable[Dict[str, Any]]]]
def wish_endpoint(bp: Blueprint, uri: str, *, methods: Optional[List[str]] = None) -> Callable[[WishHandler], RouteHandler]:
if methods is None:
methods = ['POST']
def decorator(fn: WishHandler) -> RouteHandler:
@wraps(fn)
async def wrapped(req: Request, *args: Any, **kwargs: Any) -> HTTPResponse:
v = req.headers.get('X-Wish-Version', '(none)')
if v not in ACCEPTED_WISH_VERS:
return response.json({
'error': 'WISH_VERSION_MISMATCH',
'error_msg': f'Frontend version {v} is not up to date',
})
retval_ = fn(req, *args, **kwargs)
retval = (await retval_) if isawaitable(retval_) else retval_
return response.json({
'error': None, # may be overridden by retval
**retval,
})
return bp.route(uri, methods)(wrapped) # type: ignore
return decorator
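# Hedged usage sketch: registering a handler through wish_endpoint. The blueprint and
# handler below are hypothetical and are not part of the original module.
wish_demo = Blueprint('wish_demo', url_prefix='/wish')

@wish_endpoint(wish_demo, '/ping')  # methods defaults to ['POST']
async def ping(req: Request) -> Dict[str, Any]:
    # the wrapper folds this dict into response.json({'error': None, **retval})
    return {'pong': True}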
| 37.4
| 124
| 0.612681
| 148
| 1,309
| 5.331081
| 0.47973
| 0.022814
| 0.040558
| 0.058302
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001046
| 0.269672
| 1,309
| 35
| 125
| 37.4
| 0.824268
| 0.030558
| 0
| 0.148148
| 0
| 0
| 0.071034
| 0.016575
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0.185185
| 0
| 0.407407
| 0.074074
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc27b29cfbfd1ea2e06f38bfeb18691ed058b5af
| 5,579
|
py
|
Python
|
scripts/venv/lib/python2.7/site-packages/cogent/maths/function_optimisation.py
|
sauloal/cnidaria
|
fe6f8c8dfed86d39c80f2804a753c05bb2e485b4
|
[
"MIT"
] | 3
|
2015-11-20T08:44:42.000Z
|
2016-12-14T01:40:03.000Z
|
scripts/venv/lib/python2.7/site-packages/cogent/maths/function_optimisation.py
|
sauloal/cnidaria
|
fe6f8c8dfed86d39c80f2804a753c05bb2e485b4
|
[
"MIT"
] | 1
|
2017-09-04T14:04:32.000Z
|
2020-05-26T19:04:00.000Z
|
scripts/venv/lib/python2.7/site-packages/cogent/maths/function_optimisation.py
|
sauloal/cnidaria
|
fe6f8c8dfed86d39c80f2804a753c05bb2e485b4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Algorthims for function optimisation
great_deluge() is a hillclimbing algorithm based on:
Gunter Dueck: New Optimization Heuristics, The Great Deluge Algorithm
and the Record-to-Record Travel. Journal of Computational Physics, Vol.
104, 1993, pp. 86 - 92
ga_evolve() is a basic genetic algorithm in which all internal functions can
be overridden
NOTE: both optimisation functions are generators.
"""
from numpy.random import normal
__author__ = "Daniel McDonald and Rob Knight"
__copyright__ = "Copyright 2007-2012, The Cogent Project"
__credits__ = ["Daniel McDonald", "Rob Knight"]
__license__ = "GPL"
__version__ = "1.5.3"
__maintainer__ = "Daniel McDonald"
__email__ = "mcdonadt@colorado.edu"
__status__ = "Production"
def _simple_breed(best, num, mutation_rate, random_f):
"""Returns num copies of parent with mutation_rate changes"""
result = []
score, parent = best
for child_number in range(num):
if random_f() <= mutation_rate:
child = parent.mutate()
result.append(child)
else:
result.append(parent)
return result
def _simple_score(child, target):
"""Returns the childs score as defined by the childs scoring function"""
return child.score(target)
def _simple_init(parent, num):
"""Creates a list parent copies"""
return [parent.copy() for i in range(num)]
def _simple_select(population, scores):
"""Returns a tuple: (best_score, best_child)"""
scored = sorted(zip(scores, population))  # sorted() behaves the same under Python 2 and 3
return scored[0]
def great_deluge(a, step_factor=500, max_iter=100, max_total_iters=1000):
"""This generator makes random variations of the object a to minimize cost.
Yields are performed at the end of each iteration and a tuple containing
((iter_count, total_iters), a) is returned. iter_count is used to
kill the while loop in the event that no new objects are found with a
better cost. iter_count gets reset each time an object with a better
cost is found. total_iters will kill the while loop when the total
number of iterations through the loop reaches max_total_iters
Object a must implement methods cost() and perturb() for evaluating
the score and making mutations respectively. Usually, you'll want to
write a wrapper that passes these through to methods of an internal
data object, or functions acting on that object.
"""
water_level = curr_cost = a.cost() # can't be worse than initial guess
step_size = abs(water_level)/step_factor
iter_count = 0
total_iters = 0
while iter_count < max_iter and total_iters < max_total_iters:
new = a.perturb()
new_cost = new.cost()
if new_cost < water_level:
if new_cost < curr_cost:
water_level = max(curr_cost, water_level - step_size)
iter_count = 0 # WARNING: iter_count is reset here!
curr_cost = new_cost
a = new
else:
iter_count += 1
yield ((iter_count, total_iters), a)
total_iters += 1
def ga_evolve(parent, target, num, mutation_rate=0.01, score_f=_simple_score,
breed_f=_simple_breed, select_f=_simple_select,
init_f=_simple_init, random_f=normal, max_generations=1000):
"""Evolves a population based on the parent to the target
Parent must implement methods copy(), mutate(), and score(target) to be
used with the simple default functions.
Yields are performed at the end of each iteration and contain the tuple
(generation, best). The default functions return the tuple
(generation, (best_score, best_obj)).
Arguments:
parent: Object to create initial population from.
target: The goal of the evolution.
num: Population size.
mutation_rate: Rate at which objects in the population are mutated.
score_f: Function to score the object against the target.
breed_f: Function to create new population with mutations
select_f: Function to select best object(s) from the population
random_f: Function to be used in breed_f
max_generations: Kills while loop if max_generations is reached
Overload default functions:
score_f: Must take an object and a target score. Returns the object's
score.
breed_f: Must take a tuple containing (scores, objects), the size of
population, a mutation rate and random function to use.
Returns a list containing the initial population. Default
function takes only the best object, but this may not be
desired behavior.
select_f: Must take a population and scores. Returns a tuple
containing the best scores and objects in the population.
Default function returns only the best score and object.
init_f: Must take an object and the size of the population. Returns
a list containing the starting population
"""
generation = 0
population = init_f(parent, num)
while generation < max_generations:
scores = [score_f(child, target) for child in population]
best = select_f(population, scores)
population = breed_f(best, num, mutation_rate, random_f)
yield (generation, best)
generation += 1
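# Hedged usage sketch for great_deluge(); _Point is a hypothetical object that provides
# the cost()/perturb() interface described in the docstring (minimising x**2 here).
class _Point(object):
    def __init__(self, x):
        self.x = x
    def cost(self):
        return self.x ** 2
    def perturb(self):
        return _Point(self.x + normal(0, 0.1))

def _demo_great_deluge():
    best = _Point(5.0)
    for (_iters, _total), candidate in great_deluge(_Point(5.0), max_total_iters=200):
        best = candidate
    return best.x  # expected to end up near 0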
| 42.915385
| 80
| 0.661767
| 745
| 5,579
| 4.783893
| 0.303356
| 0.025253
| 0.012346
| 0.010662
| 0.075758
| 0.050505
| 0.024691
| 0.024691
| 0.024691
| 0.024691
| 0
| 0.011631
| 0.275677
| 5,579
| 129
| 81
| 43.248062
| 0.870329
| 0.554938
| 0
| 0.071429
| 0
| 0
| 0.069062
| 0.009799
| 0
| 0
| 0
| 0
| 0
| 1
| 0.107143
| false
| 0
| 0.017857
| 0
| 0.196429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc2801c140aa271fa4c9a495e831e1f55bb54ab3
| 6,871
|
py
|
Python
|
collect_policies.py
|
jonathanbglass/parallel_prowler
|
453774a69f078c7fce11c9bb72b6deab6fc04217
|
[
"MIT"
] | 3
|
2021-04-09T12:37:13.000Z
|
2021-10-18T19:41:39.000Z
|
collect_policies.py
|
jonathanbglass/parallel_prowler
|
453774a69f078c7fce11c9bb72b6deab6fc04217
|
[
"MIT"
] | 5
|
2019-04-30T13:08:43.000Z
|
2019-04-30T13:21:25.000Z
|
collect_policies.py
|
jonathanbglass/parallel_prowler
|
453774a69f078c7fce11c9bb72b6deab6fc04217
|
[
"MIT"
] | null | null | null |
import argparse
import boto3
import json
import logging
import os
from progressbar import ProgressBar
import sys
"""
Collects IAM Policies
Evaluates policies looking for badness (*.*, Effect:Allow + NotAction)
Need to add more tests/use cases
"""
def get_policies(profile):
session = boto3.session.Session(profile_name=profile)
myiam = session.client('iam')
marker = None
allPolicies = []
passcount = 1
while True:
pbar = ProgressBar('Collecting Policies')
print("Policy Collection, Pass Number: {}".format(passcount))
passcount += 1
if marker:
response_iterator = myiam.list_policies(OnlyAttached=True,
Marker=marker)
else:
response_iterator = myiam.list_policies(OnlyAttached=True)
for p in pbar(response_iterator['Policies']):
polVers = myiam.get_policy_version(
PolicyArn=p['Arn'], VersionId=p['DefaultVersionId'])
mypol = {'Policy': p, 'PolicyVersion': polVers['PolicyVersion']}
allPolicies.append(mypol)
pfl = open(os.path.join('policies/', p['PolicyName']+'.json'), 'w')
pfl.write(json.dumps(mypol, default=str, indent=4))
pfl.close()
ae = myiam.list_entities_for_policy(PolicyArn=p['Arn'])
pfl = open(os.path.join('attachedentities/',
p['PolicyName']+'.json'), 'w')
pfl.write(json.dumps(ae, default=str, indent=4))
pfl.close()
try:
marker = response_iterator['Marker']
except KeyError:
break
print("\nTotal Policies: {}".format(len(allPolicies)))
pbar = ProgressBar('\tChecking for Dangerous Policies')
for p in pbar(allPolicies):
# This section looks for bad/dangerous patterns
# Pattern 1: Allow *.*
# AWSLambdaRole {
# 'Version': '2012-10-17',
# 'Statement': [
# {'Effect': 'Allow',
# 'Action': '*',
# 'Resource': ['*']
# }
# ]
# }
try:
q = p['PolicyVersion']['Document']['Statement'][0]
except Exception as e:
print("Problem parsing this policy: {}".format(p))
logging.debug("Problem parsing this policy: {}".format(p))
print(e)
continue
try:
if (q['Effect'] == "Allow" and '*' in q['Resource']
and '*' in q['Action']):
print("Review Dangerous Policy: {} -> {}".format(
p['Policy']['PolicyName'],
p['PolicyVersion']['Document']))
except Exception as e:
pass
# Pattern 2: Allow: *, NotAction
# {'Version': '2012-10-17',
# 'Statement': [
# {
# 'Effect': 'Allow',
# 'NotAction': ['iam:*', 'organizations:*', 'account:*'],
# 'Resource': '*'
# },
# {
# 'Effect': 'Allow',
# 'Action': [ 'iam:CreateServiceLinkedRole',
# 'iam:DeleteServiceLinkedRole',
# 'iam:ListRoles',
# 'organizations:DescribeOrganization',
# 'account:ListRegions'
# ],
# 'Resource': '*'
# }
# ]}
# This policy blacklists all 'iam:*', 'organizations:*', and
# 'accounts:*' with the NotAction. Then it grants specific
# access in the next stanza ('iam:ListRoles', etc)
# The fatal flaw is that it grants access to everything else,
# like lambda or ec2 because of the "Allow" in the first stanza.
# This user can create an EC2 instance, attach an admin role to
# it, and login and give themselves access to Admin. Instance
# privilege escalation.
try:
if (q['NotAction'] and q['Effect'] == 'Allow'
and q['Resource'] == '*'):
print("Review Suspect Policy: {} -> {}".format(
p['Policy']['PolicyName'],
p['PolicyVersion']['Document']))
except Exception as e:
pass
return
def check_args_creds(args):
# handle profiles / authentication / credentials
workingCreds = False
global logging
global workingProfiles
workingProfiles = []
if not args.profile:
logging.info("Using AWS Default Profile")
if (not check_profile("default")):
logging.error("Default credentials not working.")
print("Default credentials not working.")
quit()
else:
workingProfiles.append("default")
workingCreds = True
if args.profile and args.profile is not None:
logging.info("Using " + args.profile + " Profile")
if (not check_profile(args.profile)):
logging.error("Profile " + args.profile + " not working")
exit(1)
else:
logging.info("Profile " + args.profile + " working")
workingProfiles.append(args.profile)
workingCreds = True
return args.profile
def check_profile(profile):
global logging
try:
if(profile == "default"):
client = boto3.session.Session()
else:
logging.info("Testing profile: " + profile)
client = boto3.session.Session(profile_name=profile)
except Exception as e:
logging.error("Error connecting: ")
logging.error(e)
return False
try:
iam = client.client('iam')
response = iam.list_users()
except Exception as e:
logging.error("Error listing users: ")
logging.error(e)
return False
if len(response['Users']) == 0:
logging.info("No users")
if len(response['Users']) > 0:
usercnt = len(response['Users'])
if(usercnt > 1):
userresp = " Users"
else:
userresp = " User"
logging.info(str(usercnt) + userresp)
return True
def setup_args(parser):
parser.add_argument("-p", "--profile",
help="AWS Profile")
parser.add_argument("-l", "--log",
help="Log Level")
def main():
global logging
parser = argparse.ArgumentParser()
setup_args(parser)
global args
args = parser.parse_args()
if args.log and args.log.upper() == "DEBUG":
loglevel = "DEBUG"
else:
loglevel = "INFO"
logging.basicConfig(filename='policyAssessment.log',
format='%(levelname)s:%(message)s',
level=loglevel)
profile = check_args_creds(args)
get_policies(profile)
if __name__ == "__main__":
# execute only if run as a script
main()
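# Hedged sketch of the "Allow * on *" check performed above, pulled out for clarity.
# The sample statement is illustrative only; it is not taken from a real account.
def _looks_all_powerful(statement):
    return (statement.get('Effect') == 'Allow'
            and '*' in statement.get('Action', '')
            and '*' in statement.get('Resource', []))

# _looks_all_powerful({'Effect': 'Allow', 'Action': '*', 'Resource': ['*']})  -> True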
| 33.193237
| 79
| 0.536603
| 667
| 6,871
| 5.473763
| 0.316342
| 0.027116
| 0.023281
| 0.024651
| 0.209806
| 0.174199
| 0.123254
| 0.058066
| 0.039989
| 0.039989
| 0
| 0.007251
| 0.337651
| 6,871
| 206
| 80
| 33.354369
| 0.79499
| 0.174647
| 0
| 0.241135
| 0
| 0
| 0.159054
| 0.00455
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035461
| false
| 0.035461
| 0.049645
| 0
| 0.120567
| 0.049645
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc2ae536bffe1db19ce9b95cd5dd88a0d55394cd
| 3,556
|
py
|
Python
|
test/molecule-role/molecule/integrations/tests/test_nagios.py
|
StackVista/stackstate-agent
|
843f66189fae107646c57f71fed962bdaab3b3be
|
[
"Apache-2.0"
] | 2
|
2018-11-12T22:00:56.000Z
|
2019-11-07T22:14:23.000Z
|
test/molecule-role/molecule/integrations/tests/test_nagios.py
|
StackVista/stackstate-agent
|
843f66189fae107646c57f71fed962bdaab3b3be
|
[
"Apache-2.0"
] | 49
|
2018-10-02T18:14:58.000Z
|
2022-01-20T21:06:31.000Z
|
test/molecule-role/molecule/integrations/tests/test_nagios.py
|
StackVista/stackstate-agent
|
843f66189fae107646c57f71fed962bdaab3b3be
|
[
"Apache-2.0"
] | 3
|
2019-05-10T13:06:59.000Z
|
2020-05-21T17:29:33.000Z
|
import json
import os
import re
from testinfra.utils.ansible_runner import AnsibleRunner
import util
testinfra_hosts = AnsibleRunner(os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('agent-integrations')
def _get_key_value(tag_list):
for key, value in (pair.split(':', 1) for pair in tag_list):
yield key, value
def _component_data(json_data, type_name, external_id_assert_fn, tags_assert_fn):
for message in json_data["messages"]:
p = message["message"]["TopologyElement"]["payload"]
if "TopologyComponent" in p and \
p["TopologyComponent"]["typeName"] == type_name and \
external_id_assert_fn(p["TopologyComponent"]["externalId"]):
data = json.loads(p["TopologyComponent"]["data"])
if tags_assert_fn(dict(_get_key_value(data["tags"]))):
return data
return None
def test_nagios_mysql(host):
def assert_topology():
topo_url = "http://localhost:7070/api/topic/sts_topo_process_agents?limit=1500"
data = host.check_output('curl "{}"'.format(topo_url))
json_data = json.loads(data)
with open("./topic-nagios-topo-process-agents.json", 'w') as f:
json.dump(json_data, f, indent=4)
external_id_pattern = re.compile(r"urn:container:/agent-integrations:.*")
components = [
{
"assertion": "Should find the nagios container",
"type": "container",
"external_id": lambda e_id: external_id_pattern.findall(e_id),
"tags": lambda t: t["container_name"] == "ubuntu_nagios_1"
},
{
"assertion": "Should find the mysql container",
"type": "container",
"external_id": lambda e_id: external_id_pattern.findall(e_id),
"tags": lambda t: t["container_name"] == "ubuntu_mysql_1"
}
]
for c in components:
print("Running assertion for: " + c["assertion"])
assert _component_data(
json_data=json_data,
type_name=c["type"],
external_id_assert_fn=c["external_id"],
tags_assert_fn=c["tags"],
) is not None
util.wait_until(assert_topology, 30, 3)
def test_container_metrics(host):
url = "http://localhost:7070/api/topic/sts_multi_metrics?limit=1000"
def wait_for_metrics():
data = host.check_output("curl \"%s\"" % url)
json_data = json.loads(data)
with open("./topic-nagios-sts-multi-metrics.json", 'w') as f:
json.dump(json_data, f, indent=4)
def get_keys(m_host):
return set(
''.join(message["message"]["MultiMetric"]["values"].keys())
for message in json_data["messages"]
if message["message"]["MultiMetric"]["name"] == "convertedMetric" and
message["message"]["MultiMetric"]["host"] == m_host
)
expected = {'nagios.http.size', 'nagios.ping.pl', 'nagios.http.time', 'nagios.current_load.load15',
'nagios.swap_usage.swap', 'nagios.host.pl', 'nagios.root_partition', 'nagios.current_users.users',
'nagios.current_load.load1', 'nagios.host.rta', 'nagios.ping.rta', 'nagios.current_load.load5',
'nagios.total_processes.procs'}
assert all(expectedMetric in get_keys("agent-integrations") for expectedMetric in expected)
util.wait_until(wait_for_metrics, 180, 3)
| 39.955056
| 119
| 0.604331
| 423
| 3,556
| 4.855792
| 0.328605
| 0.035054
| 0.017527
| 0.02629
| 0.268744
| 0.226874
| 0.199611
| 0.169426
| 0.169426
| 0.169426
| 0
| 0.01219
| 0.261811
| 3,556
| 88
| 120
| 40.409091
| 0.770286
| 0
| 0
| 0.114286
| 0
| 0
| 0.29387
| 0.086614
| 0
| 0
| 0
| 0
| 0.171429
| 1
| 0.1
| false
| 0
| 0.071429
| 0.014286
| 0.214286
| 0.014286
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc2b9cb9eed0c84da94b5402d4ee3d9ce1910b43
| 589
|
py
|
Python
|
erudition/util.py
|
papsebestyen/erudition
|
35aa502a96189131baff714a6212eb56de2b1272
|
[
"MIT"
] | null | null | null |
erudition/util.py
|
papsebestyen/erudition
|
35aa502a96189131baff714a6212eb56de2b1272
|
[
"MIT"
] | null | null | null |
erudition/util.py
|
papsebestyen/erudition
|
35aa502a96189131baff714a6212eb56de2b1272
|
[
"MIT"
] | 1
|
2022-02-21T21:17:17.000Z
|
2022-02-21T21:17:17.000Z
|
import os
import sys
from contextlib import contextmanager
from invoke import UnexpectedExit
def git_commit(c, addstr, msg):
try:
c.run("git config --get user.email")
c.run("git config --get user.name")
except UnexpectedExit:
c.run('git config --local user.email "ci@cd.org"')
c.run('git config --local user.name "CI/CD"')
c.run(f'git add {addstr} && git commit -m "{msg}"')
@contextmanager
def cd_into(dirpath):
wd = os.getcwd()
os.chdir(dirpath)
sys.path.insert(0, str(dirpath))
try:
    yield
finally:
    os.chdir(wd)
    sys.path.pop(0)
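# Hedged usage sketch for cd_into(); 'some_project_dir' is a hypothetical path.
# with cd_into('some_project_dir'):
#     ...  # os.getcwd() and sys.path[0] point at the project while inside the block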
| 22.653846
| 58
| 0.634975
| 89
| 589
| 4.179775
| 0.449438
| 0.053763
| 0.075269
| 0.139785
| 0.225806
| 0.225806
| 0
| 0
| 0
| 0
| 0
| 0.004357
| 0.220713
| 589
| 25
| 59
| 23.56
| 0.8061
| 0
| 0
| 0
| 0
| 0
| 0.290323
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.2
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc2d6707aecf302c38a120bd486580b956d4c75c
| 1,263
|
py
|
Python
|
python3/distortion_correct_aksk_demo.py
|
MeekoI/ais-sdk
|
76240abc49795e914988f3cafb6d08f60dbdcb4c
|
[
"Apache-2.0"
] | null | null | null |
python3/distortion_correct_aksk_demo.py
|
MeekoI/ais-sdk
|
76240abc49795e914988f3cafb6d08f60dbdcb4c
|
[
"Apache-2.0"
] | null | null | null |
python3/distortion_correct_aksk_demo.py
|
MeekoI/ais-sdk
|
76240abc49795e914988f3cafb6d08f60dbdcb4c
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
from ais_sdk.utils import encode_to_base64
from ais_sdk.utils import decode_to_wave_file
from ais_sdk.distortion_correct import distortion_correct_aksk
from ais_sdk.utils import init_global_env
import json
if __name__ == '__main__':
#
# Access the moderation distortion-correct service; data is posted with AK/SK credentials.
#
app_key = '*************'
app_secret = '************'
init_global_env(region='cn-north-1')
demo_data_url = 'https://ais-sample-data.obs.cn-north-1.myhuaweicloud.com/vat-invoice.jpg'
# Call the interface with the image URL; the trailing boolean is the correction flag.
result = distortion_correct_aksk(app_key, app_secret, "", demo_data_url, True)
result_obj = json.loads(result)
if result_obj['result']['data'] != '':
decode_to_wave_file(result_obj['result']['data'], 'data/moderation-distortion-aksk-1.png')
else:
print(result)
# Call the interface with a local file, encoded as base64.
result = distortion_correct_aksk(app_key, app_secret, encode_to_base64('data/moderation-distortion.jpg'), '', True)
result_obj = json.loads(result)
if result_obj['result']['data'] != '':
decode_to_wave_file(result_obj['result']['data'], 'data/moderation-distortion-aksk-2.png')
else:
print(result)
| 39.46875
| 119
| 0.695962
| 177
| 1,263
| 4.672316
| 0.367232
| 0.065296
| 0.048368
| 0.091898
| 0.448609
| 0.37243
| 0.37243
| 0.37243
| 0.270859
| 0.270859
| 0
| 0.008491
| 0.160728
| 1,263
| 32
| 120
| 39.46875
| 0.771698
| 0.136975
| 0
| 0.363636
| 0
| 0.045455
| 0.23893
| 0.095941
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.227273
| 0
| 0.227273
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc2e4f714b9faba2c5ecf66e26fac9c7e7da6366
| 1,527
|
py
|
Python
|
rainbow/datasources/cfn_datasource.py
|
omribahumi/rainbow
|
17aad61231b1f1b9d0dca43979e2fa4c8a1603f3
|
[
"BSD-2-Clause-FreeBSD"
] | 35
|
2015-01-04T15:23:49.000Z
|
2020-11-24T16:10:33.000Z
|
rainbow/datasources/cfn_datasource.py
|
omribahumi/rainbow
|
17aad61231b1f1b9d0dca43979e2fa4c8a1603f3
|
[
"BSD-2-Clause-FreeBSD"
] | 10
|
2015-01-20T07:45:41.000Z
|
2015-06-23T15:03:42.000Z
|
rainbow/datasources/cfn_datasource.py
|
omribahumi/rainbow
|
17aad61231b1f1b9d0dca43979e2fa4c8a1603f3
|
[
"BSD-2-Clause-FreeBSD"
] | 17
|
2015-01-04T14:20:31.000Z
|
2020-11-24T16:10:36.000Z
|
from rainbow.cloudformation import Cloudformation
from base import DataSourceBase
__all__ = ['CfnOutputsDataSource', 'CfnResourcesDataSource', 'CfnParametersDataSource']
class CfnDataSourceBase(DataSourceBase):
def __init__(self, data_source):
super(CfnDataSourceBase, self).__init__(data_source)
stack_name = data_source
region = Cloudformation.default_region
if ':' in data_source:
region, stack_name = data_source.split(':', 1)
cfn_connection = Cloudformation(region)
if not cfn_connection:
raise Exception('Invalid region %r' % (region,))
self.stack = cfn_connection.describe_stack(stack_name)
class CfnOutputsDataSource(CfnDataSourceBase):
datasource_name = 'cfn_outputs'
def __init__(self, data_source):
super(CfnOutputsDataSource, self).__init__(data_source)
self.data = {i.key: i.value for i in self.stack.outputs}
class CfnResourcesDataSource(CfnDataSourceBase):
datasource_name = 'cfn_resources'
def __init__(self, data_source):
super(CfnResourcesDataSource, self).__init__(data_source)
self.data = {r.logical_resource_id: r.physical_resource_id for r in self.stack.describe_resources()}
class CfnParametersDataSource(CfnDataSourceBase):
datasource_name = 'cfn_parameters'
def __init__(self, data_source):
super(CfnParametersDataSource, self).__init__(data_source)
self.data = {p.key: p.value for p in self.stack.parameters}
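# Hedged usage sketch: data_source strings are either "<stack-name>" or
# "<region>:<stack-name>"; the stack name below is hypothetical.
# outputs = CfnOutputsDataSource('us-east-1:my-stack')
# outputs.data  # maps each CloudFormation output key to its value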
| 30.54
| 108
| 0.717092
| 163
| 1,527
| 6.312883
| 0.288344
| 0.1069
| 0.04276
| 0.058309
| 0.176871
| 0.176871
| 0
| 0
| 0
| 0
| 0
| 0.000812
| 0.193844
| 1,527
| 49
| 109
| 31.163265
| 0.835093
| 0
| 0
| 0.137931
| 0
| 0
| 0.079895
| 0.02947
| 0
| 0
| 0
| 0
| 0
| 1
| 0.137931
| false
| 0
| 0.068966
| 0
| 0.448276
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc2e92525a1caaa4ddf0a3ef664b415296525d97
| 2,338
|
py
|
Python
|
python/flexflow_cffi_build.py
|
zmxdream/FlexFlow
|
7ea50d71a02e853af7ae573d88c911511b3e82e0
|
[
"Apache-2.0"
] | 455
|
2018-12-09T01:57:46.000Z
|
2022-03-22T01:56:47.000Z
|
python/flexflow_cffi_build.py
|
zmxdream/FlexFlow
|
7ea50d71a02e853af7ae573d88c911511b3e82e0
|
[
"Apache-2.0"
] | 136
|
2019-04-19T08:24:27.000Z
|
2022-03-28T01:39:19.000Z
|
python/flexflow_cffi_build.py
|
zmxdream/FlexFlow
|
7ea50d71a02e853af7ae573d88c911511b3e82e0
|
[
"Apache-2.0"
] | 102
|
2018-12-22T07:38:05.000Z
|
2022-03-30T06:04:39.000Z
|
#!/usr/bin/env python
# Copyright 2020 Stanford University, Los Alamos National Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import os
import subprocess
def find_flexflow_header(ffhome_dir):
def try_prefix(prefix_dir):
flexflow_ch_path = os.path.join(prefix_dir, 'python', 'flexflow_c.h')
flexflow_cxxh_path = os.path.join(prefix_dir, 'include', 'model.h')
if os.path.exists(flexflow_ch_path) and os.path.exists(flexflow_cxxh_path):
flexflow_cxxh_dir = os.path.join(prefix_dir, 'include')
return flexflow_cxxh_dir, flexflow_ch_path
result = try_prefix(ffhome_dir)
if result:
return result
raise Exception('Unable to locate the flexflow_c.h and model.h header files')
def build(output_dir, libname, ffhome_dir):
flexflow_cxxh_dir, flexflow_ch_path = find_flexflow_header(ffhome_dir)
header = subprocess.check_output(['gcc', '-I', flexflow_cxxh_dir, '-E', '-P', flexflow_ch_path]).decode('utf-8')
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'flexflow_cffi_header.py.in')) as f:
content = f.read()
content = content.format(header=repr(header), libname=repr(libname))
if output_dir is None:
output_dir = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(output_dir, 'flexflow_cffi_header.py'), 'wb') as f:
f.write(content.encode('utf-8'))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--ffhome-dir', required=True)
parser.add_argument('--libname', required=True)
parser.add_argument('--output-dir', required=False)
args = parser.parse_args()
build(args.output_dir, args.libname, args.ffhome_dir)
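# Hedged usage sketch (the paths below are hypothetical):
#   python flexflow_cffi_build.py --ffhome-dir /opt/flexflow --libname libflexflow.so --output-dir python/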
| 38.327869
| 116
| 0.727545
| 339
| 2,338
| 4.79351
| 0.418879
| 0.040615
| 0.043077
| 0.031385
| 0.213538
| 0.122462
| 0.038154
| 0
| 0
| 0
| 0
| 0.005102
| 0.161677
| 2,338
| 60
| 117
| 38.966667
| 0.82398
| 0.260479
| 0
| 0
| 0
| 0
| 0.120187
| 0.028588
| 0
| 0
| 0
| 0
| 0
| 1
| 0.09375
| false
| 0
| 0.125
| 0
| 0.28125
| 0.03125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc2eebcbe5bb3cf4ff6427b453a41d0127cdd332
| 1,414
|
py
|
Python
|
gaphor/plugins/xmiexport/__init__.py
|
tuxcell/gaphor
|
22eb13479f589a0105ad25a11aed968e9ad932dc
|
[
"Apache-2.0"
] | null | null | null |
gaphor/plugins/xmiexport/__init__.py
|
tuxcell/gaphor
|
22eb13479f589a0105ad25a11aed968e9ad932dc
|
[
"Apache-2.0"
] | null | null | null |
gaphor/plugins/xmiexport/__init__.py
|
tuxcell/gaphor
|
22eb13479f589a0105ad25a11aed968e9ad932dc
|
[
"Apache-2.0"
] | null | null | null |
"""This plugin extends Gaphor with XMI export functionality."""
import logging
from gaphor.abc import ActionProvider, Service
from gaphor.core import action, gettext
from gaphor.plugins.xmiexport import exportmodel
from gaphor.ui.filedialog import FileDialog
logger = logging.getLogger(__name__)
class XMIExport(Service, ActionProvider):
def __init__(self, element_factory, file_manager, export_menu):
self.element_factory = element_factory
self.file_manager = file_manager
export_menu.add_actions(self)
def shutdown(self):
pass
@action(
name="file-export-xmi",
label=gettext("Export to XMI"),
tooltip=gettext("Export model to XMI (XML Model Interchange) format"),
)
def execute(self):
filename = self.file_manager.filename
filename = filename.replace(".gaphor", ".xmi") if filename else "model.xmi"
file_dialog = FileDialog(
gettext("Export model to XMI file"), action="save", filename=filename
)
filename = file_dialog.selection
if filename and len(filename) > 0:
logger.debug(f"Exporting XMI model to: {filename}")
export = exportmodel.XMIExport(self.element_factory)
try:
export.export(filename)
except Exception as e:
logger.error(f"Error while saving model to file {filename}: {e}")
| 32.883721
| 83
| 0.666195
| 164
| 1,414
| 5.615854
| 0.408537
| 0.043431
| 0.058632
| 0.045603
| 0.049946
| 0
| 0
| 0
| 0
| 0
| 0
| 0.000934
| 0.242574
| 1,414
| 42
| 84
| 33.666667
| 0.85901
| 0.040311
| 0
| 0
| 0
| 0
| 0.15396
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.09375
| false
| 0.03125
| 0.15625
| 0
| 0.28125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc2f49e15f4138f716bca2a01da611b02c245377
| 2,278
|
py
|
Python
|
tests/utils.py
|
btk15049/online-judge-tools
|
22505e98359c50df06e7cc1d53a7d253cb096b14
|
[
"MIT"
] | null | null | null |
tests/utils.py
|
btk15049/online-judge-tools
|
22505e98359c50df06e7cc1d53a7d253cb096b14
|
[
"MIT"
] | null | null | null |
tests/utils.py
|
btk15049/online-judge-tools
|
22505e98359c50df06e7cc1d53a7d253cb096b14
|
[
"MIT"
] | null | null | null |
import contextlib
import os
import pathlib
import subprocess
import sys
import tempfile
@contextlib.contextmanager
def chdir(path):
cwd = os.getcwd()
try:
os.chdir(path)
yield
finally:
os.chdir(cwd)
def prepare_files(files):
for f in files:
path = pathlib.Path(f['path'])
path.parent.mkdir(parents=True, exist_ok=True)
with open(str(path), 'w') as fh:
fh.write(f['data'])
if f.get('executable', False):
path.chmod(0o755)
@contextlib.contextmanager
def sandbox(files):
with tempfile.TemporaryDirectory() as tempdir:
with chdir(tempdir):
prepare_files(files)
yield tempdir
def get_oj_exe():
oj_exe = os.environ.get('TEST_OJ_EXE')
if oj_exe is not None:
return [str(pathlib.Path(oj_exe).resolve())]
else:
return [sys.executable, '-m', 'onlinejudge._implementation.main']
def run(args, *, env=None, check=False, oj_exe=get_oj_exe()):
# oj_exe should be evaluated out of sandboxes
env = env or dict(os.environ)
env['PYTHONPATH'] = str(pathlib.Path(__file__).parent.parent) # this is required to run in sandboxes
return subprocess.run(oj_exe + args, stdout=subprocess.PIPE, stderr=sys.stderr, env=env, check=check)
def run_in_sandbox(args, files):
with sandbox(files) as tempdir:
proc = run(args)
return {
'proc': proc,
'tempdir': tempdir,
}
def cat():
if os.name == 'nt':
return '{} -c "import sys; sys.stdout.buffer.write(sys.stdin.buffer.read())"'.format(sys.executable)
else:
return 'cat'
def sleep_1sec():
if os.name == 'nt':
return '{} -c "import time; time.sleep(1)"'.format(sys.executable)
else:
return 'sleep 1.0'
def python_c(cmd):
assert '"' not in cmd
return '{} -c "{}"'.format(sys.executable, cmd)
def python_script(path):
assert '"' not in path
return '{} "{}"'.format(sys.executable, path)
def is_logged_in(service, memo={}):
# functools.lru_cache is unusable since Service are unhashable
url = service.get_url()
if url not in memo:
proc = run(['login', '--check', url])
memo[url] = proc.returncode == 0
return memo[url]
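# Hedged usage sketch combining sandbox() and run(); the command and file data are illustrative only.
# result = run_in_sandbox(['--version'], files=[
#     {'path': 'test/sample-1.in', 'data': '1 2\n'},
# ])
# result['proc'].returncode  # CompletedProcess returned by subprocess.run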
| 24.76087
| 108
| 0.618086
| 306
| 2,278
| 4.506536
| 0.356209
| 0.032632
| 0.055112
| 0.014503
| 0.094271
| 0.033358
| 0.033358
| 0
| 0
| 0
| 0
| 0.005239
| 0.24583
| 2,278
| 91
| 109
| 25.032967
| 0.797439
| 0.061896
| 0
| 0.104478
| 0
| 0.014925
| 0.109705
| 0.037975
| 0
| 0
| 0
| 0
| 0.029851
| 1
| 0.164179
| false
| 0
| 0.119403
| 0
| 0.447761
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc2f8d6fdf5321bc7fa432fe83690f0311e43ce9
| 303
|
py
|
Python
|
git_operation.py
|
zerzerzerz/Computer-Virus
|
4a3125b45e0e4210fb1b8c970a0d6c6bde77f2e8
|
[
"MIT"
] | null | null | null |
git_operation.py
|
zerzerzerz/Computer-Virus
|
4a3125b45e0e4210fb1b8c970a0d6c6bde77f2e8
|
[
"MIT"
] | null | null | null |
git_operation.py
|
zerzerzerz/Computer-Virus
|
4a3125b45e0e4210fb1b8c970a0d6c6bde77f2e8
|
[
"MIT"
] | null | null | null |
import os
commit_string = "选择data的前多少个维度参与训练"
not_add = ['results', 'data', 'weights']
for item in os.listdir():
if item in not_add:
# print(item)
continue
else:
os.system(f"git add {item}")
os.system(f'git commit -m "{commit_string}"')
os.system("git push origin main")
| 25.25
| 45
| 0.636964
| 44
| 303
| 4.295455
| 0.568182
| 0.126984
| 0.095238
| 0.126984
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.214521
| 303
| 12
| 46
| 25.25
| 0.794118
| 0.036304
| 0
| 0
| 0
| 0
| 0.343643
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.1
| 0
| 0.1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc3035214a995b5b1335519d9f36c232352adce4
| 6,523
|
py
|
Python
|
src/cool_grammar.py
|
peanut-butter-jellyyy/cool-compiler-2021
|
63a668d435ed22cfb8dbb096bc3c82a34f09517b
|
[
"MIT"
] | null | null | null |
src/cool_grammar.py
|
peanut-butter-jellyyy/cool-compiler-2021
|
63a668d435ed22cfb8dbb096bc3c82a34f09517b
|
[
"MIT"
] | null | null | null |
src/cool_grammar.py
|
peanut-butter-jellyyy/cool-compiler-2021
|
63a668d435ed22cfb8dbb096bc3c82a34f09517b
|
[
"MIT"
] | null | null | null |
from src.cmp.pycompiler import Grammar
from src.ast_nodes import (
ProgramNode,
ClassDeclarationNode,
FuncDeclarationNode,
AttrDeclarationNode,
IfNode,
WhileNode,
LetNode,
CaseNode,
IsvoidNode,
AssignNode,
VarDeclarationNode,
CaseItemNode,
NotNode,
LessNode,
LessEqualNode,
EqualNode,
PlusNode,
MinusNode,
StarNode,
DivNode,
NegNode,
InstantiateNode,
BlockNode,
CallNode,
ConstantNumNode,
VariableNode,
BooleanNode,
StringNode,
)
def define_cool_grammar(print_grammar=False):
# grammar
G = Grammar()
# non-terminals
program = G.NonTerminal("<program>", startSymbol=True)
class_list, def_class = G.NonTerminals("<class-list> <def-class>")
feature_list, def_attr, def_func = G.NonTerminals(
"<feature-list> <def-attr> <def-func>"
)
param_list, param = G.NonTerminals("<param-list> <param>")
expr, comp, arith, term, factor, element, atom = G.NonTerminals(
"<expr> <comp> <arith> <term> <factor> <element> <atom>"
)
identifiers_list, identifier_init = G.NonTerminals("<ident-list> <ident-init>")
block, case_block, case_item = G.NonTerminals("<block> <case-block> <case-item>")
func_call, arg_list = G.NonTerminals("<func-call> <arg-list>")
# terminals
classx, inherits, notx, isvoid = G.Terminals("class inherits not isvoid")
let, inx = G.Terminals("let in")
ifx, then, elsex, fi = G.Terminals("if then else fi")
whilex, loop, pool = G.Terminals("while loop pool")
case, of, esac = G.Terminals("case of esac")
semi, colon, comma, dot, opar, cpar, ocur, ccur, at, larrow, rarrow = G.Terminals(
"; : , . ( ) { } @ <- =>"
)
equal, plus, minus, star, div, less, equal, lesseq, neg = G.Terminals(
"= + - * / < = <= ~"
)
idx, num, new, string, true, false = G.Terminals("id int new string true false")
# productions
program %= class_list, lambda h, s: ProgramNode(s[1])
class_list %= def_class + class_list, lambda h, s: [s[1]] + s[2]
class_list %= def_class, lambda h, s: [s[1]]
def_class %= (
classx + idx + ocur + feature_list + ccur + semi,
lambda h, s: ClassDeclarationNode(s[2], s[4]),
)
def_class %= (
classx + idx + inherits + idx + ocur + feature_list + ccur + semi,
lambda h, s: ClassDeclarationNode(s[2], s[6], s[4]),
)
feature_list %= def_attr + semi + feature_list, lambda h, s: [s[1]] + s[3]
feature_list %= def_func + semi + feature_list, lambda h, s: [s[1]] + s[3]
feature_list %= G.Epsilon, lambda h, s: []
def_attr %= (
idx + colon + idx + larrow + expr,
lambda h, s: AttrDeclarationNode(s[1], s[3], s[5]),
)
def_attr %= idx + colon + idx, lambda h, s: AttrDeclarationNode(s[1], s[3])
def_func %= (
idx + opar + param_list + cpar + colon + idx + ocur + expr + ccur,
lambda h, s: FuncDeclarationNode(s[1], s[3], s[6], s[8]),
)
param_list %= param + comma + param_list, lambda h, s: [s[1]] + s[3]
param_list %= param, lambda h, s: [s[1]]
param_list %= G.Epsilon, lambda h, s: []
param %= idx + colon + idx, lambda h, s: (s[1], s[3])
expr %= idx + larrow + expr, lambda h, s: AssignNode(s[1], s[3])
expr %= let + identifiers_list + inx + expr, lambda h, s: LetNode(s[2], s[4])
expr %= (
ifx + expr + then + expr + elsex + expr + fi,
lambda h, s: IfNode(s[2], s[4], s[6]),
)
expr %= whilex + expr + loop + expr + pool, lambda h, s: WhileNode(s[2], s[4])
expr %= case + expr + of + case_block + esac, lambda h, s: CaseNode(s[2], s[4])
expr %= notx + expr, lambda h, s: NotNode(s[2])
expr %= comp, lambda h, s: s[1]
identifiers_list %= (
identifier_init + comma + identifiers_list,
lambda h, s: [s[1]] + s[3],
)
identifiers_list %= identifier_init, lambda h, s: [s[1]]
identifier_init %= (
idx + colon + idx + larrow + expr,
lambda h, s: VarDeclarationNode(s[1], s[3], s[5]),
)
identifier_init %= idx + colon + idx, lambda h, s: VarDeclarationNode(s[1], s[3])
case_block %= case_item + case_block, lambda h, s: [s[1]] + s[2]
case_block %= case_item, lambda h, s: [s[1]]
case_item %= (
idx + colon + idx + rarrow + expr + semi,
lambda h, s: CaseItemNode(s[1], s[3], s[5]),
)
comp %= comp + less + arith, lambda h, s: LessNode(s[1], s[3])
comp %= comp + equal + arith, lambda h, s: EqualNode(s[1], s[3])
comp %= comp + lesseq + arith, lambda h, s: LessEqualNode(s[1], s[3])
comp %= arith, lambda h, s: s[1]
arith %= arith + plus + term, lambda h, s: PlusNode(s[1], s[3])
arith %= arith + minus + term, lambda h, s: MinusNode(s[1], s[3])
arith %= term, lambda h, s: s[1]
term %= term + star + factor, lambda h, s: StarNode(s[1], s[3])
term %= term + div + factor, lambda h, s: DivNode(s[1], s[3])
term %= factor, lambda h, s: s[1]
factor %= isvoid + element, lambda h, s: IsvoidNode(s[2])
factor %= neg + element, lambda h, s: NegNode(s[2])
factor %= new + idx, lambda h, s: InstantiateNode(s[2])
factor %= element, lambda h, s: s[1]
element %= opar + expr + cpar, lambda h, s: s[2]
element %= ocur + block + ccur, lambda h, s: BlockNode(s[2])
element %= (element + dot + func_call, lambda h, s: CallNode(*s[3], obj=s[1]))
element %= (
element + at + idx + dot + func_call,
lambda h, s: CallNode(*s[5], obj=s[1], at_type=s[3]),
)
element %= func_call, lambda h, s: CallNode(*s[1])
element %= atom, lambda h, s: s[1]
atom %= num, lambda h, s: ConstantNumNode(s[1])
atom %= idx, lambda h, s: VariableNode(s[1])
atom %= (
true,
lambda h, s: BooleanNode(s[1]),
)
atom %= false, lambda h, s: BooleanNode(s[1])
atom %= string, lambda h, s: StringNode(s[1])
block %= expr + semi, lambda h, s: [s[1]]
block %= expr + semi + block, lambda h, s: [s[1]] + s[3]
func_call %= idx + opar + arg_list + cpar, lambda h, s: (s[1], s[3])
arg_list %= expr + comma + arg_list, lambda h, s: [s[1]] + s[3]
arg_list %= expr, lambda h, s: [s[1]]
arg_list %= G.Epsilon, lambda h, s: []
if print_grammar:
print(G)
return (G, idx, string, num)
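# Hedged usage sketch: build the grammar once and hand the returned terminals to a lexer.
# G, idx, string, num = define_cool_grammar(print_grammar=False)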
| 36.038674
| 87
| 0.559099
| 917
| 6,523
| 3.904035
| 0.142857
| 0.117318
| 0.134078
| 0.057821
| 0.352793
| 0.248883
| 0.194693
| 0.13743
| 0.065922
| 0.065922
| 0
| 0.020594
| 0.27794
| 6,523
| 180
| 88
| 36.238889
| 0.73949
| 0.006592
| 0
| 0.026667
| 0
| 0.006667
| 0.057824
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.006667
| false
| 0
| 0.013333
| 0
| 0.026667
| 0.02
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc30849700e1ea4826d82e5040dc0a3f7cab1d33
| 1,599
|
py
|
Python
|
userbot/plugins/selfdestruct.py
|
Aliensuniquebot/CatUserbot
|
93561a620fc1198c6fe6c259412088f4bc81d97b
|
[
"MIT"
] | 1
|
2020-07-18T07:42:58.000Z
|
2020-07-18T07:42:58.000Z
|
userbot/plugins/selfdestruct.py
|
praveen368/CatUserbot
|
4b0cd970551ffaf86b9fdd5da584c1b3882821ff
|
[
"MIT"
] | null | null | null |
userbot/plugins/selfdestruct.py
|
praveen368/CatUserbot
|
4b0cd970551ffaf86b9fdd5da584c1b3882821ff
|
[
"MIT"
] | 2
|
2020-06-25T11:14:50.000Z
|
2021-04-04T13:49:13.000Z
|
# For @UniBorg
# courtesy Yasir siddiqui
"""Self Destruct Plugin
.sdm <time in seconds> <text>
"""
import time
from userbot import CMD_HELP
from telethon.errors import rpcbaseerrors
from userbot.utils import admin_cmd
import importlib.util
@borg.on(admin_cmd(pattern="sdm", outgoing=True))
async def selfdestruct(destroy):
if not destroy.text[0].isalpha() and destroy.text[0] not in ("/", "#", "@", "!"):
message = destroy.text
counter = int(message[5:7])
text = str(destroy.text[7:])
text = (
text
)
await destroy.delete()
smsg = await destroy.client.send_message(destroy.chat_id, text)
time.sleep(counter)
await smsg.delete()
@borg.on(admin_cmd(pattern="selfd", outgoing=True ))
async def selfdestruct(destroy):
if not destroy.text[0].isalpha() and destroy.text[0] not in ("/", "#", "@", "!"):
message = destroy.text
counter = int(message[7:9])
text = str(destroy.text[9:])
text = (
text
+ "\n\n`This message shall be self-destructed in "
+ str(counter)
+ " seconds`"
)
await destroy.delete()
smsg = await destroy.client.send_message(destroy.chat_id, text)
time.sleep(counter)
await smsg.delete()
CMD_HELP.update({
"selfdestruct":
".sdm number | [text]\
\nUsage: self destruct this message in number seconds \
\n\n.selfd number | [text]\
\nUsage:self destruct this message in number seconds with showing that it will destruct. \
"
})
| 28.052632
| 90
| 0.602251
| 196
| 1,599
| 4.867347
| 0.352041
| 0.092243
| 0.050314
| 0.02935
| 0.612159
| 0.568134
| 0.568134
| 0.568134
| 0.568134
| 0.568134
| 0
| 0.008525
| 0.266417
| 1,599
| 56
| 91
| 28.553571
| 0.804774
| 0.054409
| 0
| 0.428571
| 0
| 0
| 0.055186
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.119048
| 0
| 0.119048
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc311bab55ecedbbd72f07232e73e6cac438a6b2
| 1,101
|
py
|
Python
|
snippets/basic_render_template_class.py
|
OSAMAMOHAMED1234/python_projects
|
fb4bc7356847c3f46df690a9386cf970377a6f7c
|
[
"MIT"
] | null | null | null |
snippets/basic_render_template_class.py
|
OSAMAMOHAMED1234/python_projects
|
fb4bc7356847c3f46df690a9386cf970377a6f7c
|
[
"MIT"
] | null | null | null |
snippets/basic_render_template_class.py
|
OSAMAMOHAMED1234/python_projects
|
fb4bc7356847c3f46df690a9386cf970377a6f7c
|
[
"MIT"
] | null | null | null |
import os
class Template:
template_name = ''
context = None
def __init__(self, template_name='', context=None, *args, **kwargs):
self.template_name = template_name
self.context = context
def get_template(self):
template_path = os.path.join(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates'), self.template_name)
if not os.path.exists(template_path):
raise Exception(f'This path does not exist : {template_path}')
template_string = ''
with open(template_path, 'r') as f:
template_string = f.read()
return template_string
def render(self, context=None):
render_ctx = context
if self.context != None:
render_ctx = self.context
if not isinstance(render_ctx, dict):
render_ctx = {}
template_string = self.get_template()
return template_string.format(**render_ctx)
obj = Template(template_name='test.html', context={'name': 'OSAMA'})
print(obj.render())
obj.context= None
print(obj.render(context={'name': 'os'}))
obj2 = Template(template_name='test.html')
print(obj2.render(context={'name': 'os'}))
| 30.583333
| 123
| 0.693006
| 149
| 1,101
| 4.912752
| 0.308725
| 0.114754
| 0.081967
| 0.062842
| 0.177596
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002174
| 0.164396
| 1,101
| 36
| 124
| 30.583333
| 0.793478
| 0
| 0
| 0
| 0
| 0
| 0.082577
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.103448
| false
| 0
| 0.034483
| 0
| 0.310345
| 0.103448
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc3abec567aafacd5d2829eabdf814ac53962d6d
| 495
|
py
|
Python
|
tests/comments/test_only_block_comment.py
|
sco1/pylox
|
b4820828306c20cee3f8533c2547fafb92c6c1bd
|
[
"MIT"
] | 2
|
2021-12-18T01:52:50.000Z
|
2022-01-17T19:41:52.000Z
|
tests/comments/test_only_block_comment.py
|
sco1/pylox
|
b4820828306c20cee3f8533c2547fafb92c6c1bd
|
[
"MIT"
] | 18
|
2021-11-30T04:05:53.000Z
|
2022-02-01T03:30:04.000Z
|
tests/comments/test_only_block_comment.py
|
sco1/pylox
|
b4820828306c20cee3f8533c2547fafb92c6c1bd
|
[
"MIT"
] | null | null | null |
from textwrap import dedent
import pytest
from pylox.lox import Lox
TEST_SRC = dedent(
"""\
/*
This is a multiline block comment
*/
"""
)
EXPECTED_STDOUTS: list[str] = []
def test_block_comment_at_eof(capsys: pytest.CaptureFixture) -> None:
interpreter = Lox()
interpreter.run(TEST_SRC)
assert not interpreter.had_error
assert not interpreter.had_runtime_error
all_out = capsys.readouterr().out.splitlines()
assert all_out == EXPECTED_STDOUTS
| 18.333333
| 69
| 0.70101
| 62
| 495
| 5.387097
| 0.580645
| 0.041916
| 0.11976
| 0.137725
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.206061
| 495
| 26
| 70
| 19.038462
| 0.849873
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.230769
| 1
| 0.076923
| false
| 0
| 0.230769
| 0
| 0.307692
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc3d1481782a2c4ff97885d3937f7846223c55ab
| 1,082
|
py
|
Python
|
setup.py
|
sturmianseq/observed
|
d99fb99ff2a470a86efb2763685e8e2c021e799f
|
[
"MIT"
] | 33
|
2015-04-29T08:11:42.000Z
|
2022-02-01T16:50:25.000Z
|
setup.py
|
sturmianseq/observed
|
d99fb99ff2a470a86efb2763685e8e2c021e799f
|
[
"MIT"
] | 15
|
2015-02-04T15:11:17.000Z
|
2022-01-26T19:58:29.000Z
|
setup.py
|
sturmianseq/observed
|
d99fb99ff2a470a86efb2763685e8e2c021e799f
|
[
"MIT"
] | 6
|
2017-06-11T19:40:31.000Z
|
2021-08-05T07:57:28.000Z
|
import re
import setuptools
README_FILENAME = "README.md"
VERSION_FILENAME = "observed.py"
VERSION_RE = r"^__version__ = ['\"]([^'\"]*)['\"]"
# Get version information
with open(VERSION_FILENAME, "r") as version_file:
mo = re.search(VERSION_RE, version_file.read(), re.M)
if mo:
version = mo.group(1)
else:
msg = "Unable to find version string in %s." % (version_file,)
raise RuntimeError(msg)
# Get description information
with open(README_FILENAME, "r") as description_file:
long_description = description_file.read()
setuptools.setup(
name="observed",
version=version,
author="Daniel Sank",
author_email="sank.daniel@gmail.com",
description="Observer pattern for functions and bound methods",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/DanielSank/observed",
py_modules=["observed"],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
| 27.05
| 67
| 0.685767
| 129
| 1,082
| 5.573643
| 0.55814
| 0.083449
| 0.052851
| 0.083449
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00225
| 0.178373
| 1,082
| 39
| 68
| 27.74359
| 0.806524
| 0.047135
| 0
| 0
| 0
| 0
| 0.327821
| 0.020428
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.066667
| 0
| 0.066667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc3d20f3595ad33d1e9d9bf80ce974904075e7ce
| 3,536
|
py
|
Python
|
src/coco.py
|
catalyst-team/detector
|
383c17ba7701d960ca92be0aafbff05207f2de3a
|
[
"Apache-2.0"
] | 15
|
2019-05-15T13:42:51.000Z
|
2020-11-09T23:13:06.000Z
|
src/coco.py
|
catalyst-team/detector
|
383c17ba7701d960ca92be0aafbff05207f2de3a
|
[
"Apache-2.0"
] | 1
|
2020-01-09T08:53:49.000Z
|
2020-01-16T19:41:16.000Z
|
src/coco.py
|
catalyst-team/detection
|
383c17ba7701d960ca92be0aafbff05207f2de3a
|
[
"Apache-2.0"
] | null | null | null |
import os
import json
import numpy as np
import pickle
from typing import Any
from pycocotools.coco import COCO
from torch.utils.data import Dataset
class DetectionMSCOCODataset(Dataset):
def __init__(self, annotation_file: str, image_dir: str):
self._annotation_file = annotation_file
self._image_dir = image_dir
self._cache_file = self._annotation_file + ".cache"
self._coco = COCO(self._annotation_file)
self._img_ids = self._coco.getImgIds()
self._cat_ids = self._coco.getCatIds()
self._ann_ids = self._coco.getAnnIds()
self._data = "coco"
self._classes = {
ind: cat_id for ind, cat_id in enumerate(self._cat_ids)
}
self._coco_to_class_map = {
value: key for key, value in self._classes.items()
}
self._load_data()
self._db_inds = np.arange(len(self._image_names))
self._load_coco_data()
def _load_data(self):
print("loading from cache file: {}".format(self._cache_file))
if not os.path.exists(self._cache_file):
print("No cache file found...")
self._extract_data()
with open(self._cache_file, "wb") as f:
pickle.dump([self._detections, self._image_names], f)
print("Cache file created")
else:
with open(self._cache_file, "rb") as f:
self._detections, self._image_names = pickle.load(f)
def _load_coco_data(self):
with open(self._annotation_file, "r") as f:
data = json.load(f)
coco_ids = self._coco.getImgIds()
eval_ids = {
self._coco.loadImgs(coco_id)[0]["file_name"]: coco_id
for coco_id in coco_ids
}
self._coco_categories = data["categories"]
self._coco_eval_ids = eval_ids
def class_name(self, cid):
cat_id = self._classes[cid]
cat = self._coco.loadCats([cat_id])[0]
return cat["name"]
def _extract_data(self):
self._image_names = [
self._coco.loadImgs(img_id)[0]["file_name"]
for img_id in self._img_ids
]
self._detections = {}
for ind, (coco_image_id, image_name) in enumerate(zip(self._img_ids, self._image_names)):
image = self._coco.loadImgs(coco_image_id)[0]
bboxes = []
categories = []
for cat_id in self._cat_ids:
annotation_ids = self._coco.getAnnIds(imgIds=image["id"], catIds=cat_id)
annotations = self._coco.loadAnns(annotation_ids)
category = self._coco_to_class_map[cat_id]
for annotation in annotations:
bbox = np.array(annotation["bbox"])
bbox[[2, 3]] += bbox[[0, 1]]
bboxes.append(bbox)
categories.append(category)
self._detections[image_name] = [{
'bbox': bbox.astype(np.float32),
'category_id': category,
'category_name': self.class_name(category)
} for bbox, category in zip(bboxes, categories)]
def __getitem__(self, ind: int) -> Any:
image_name = self._image_names[ind]
return {
'image_name': os.path.join(self._image_dir, image_name),
'detections': self._detections[image_name]
}
def __len__(self) -> int:
return len(self._img_ids)
def get_num_classes(self) -> int:
return len(self._cat_ids)
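# Hedged usage sketch; the annotation file and image directory below are hypothetical.
# dataset = DetectionMSCOCODataset('annotations/instances_val2017.json', 'val2017')
# sample = dataset[0]
# sample['image_name'], sample['detections']  # image path plus per-box dicts with bbox/category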
| 32.440367
| 97
| 0.588235
| 438
| 3,536
| 4.399543
| 0.226027
| 0.062273
| 0.045667
| 0.021796
| 0.104826
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00406
| 0.30345
| 3,536
| 108
| 98
| 32.740741
| 0.778319
| 0
| 0
| 0
| 0
| 0
| 0.047511
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.094118
| false
| 0
| 0.082353
| 0.023529
| 0.235294
| 0.035294
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc3e56f1b6dc2446fe20c8456364bfd95e849dd0
| 7,538
|
py
|
Python
|
infrastructure-provisioning/src/general/scripts/azure/common_notebook_configure_dataengine.py
|
DmytroLiaskovskyi/incubator-dlab
|
af995e98b3b3cf526fb9741a3e5117dd1e04f3aa
|
[
"Apache-2.0"
] | null | null | null |
infrastructure-provisioning/src/general/scripts/azure/common_notebook_configure_dataengine.py
|
DmytroLiaskovskyi/incubator-dlab
|
af995e98b3b3cf526fb9741a3e5117dd1e04f3aa
|
[
"Apache-2.0"
] | null | null | null |
infrastructure-provisioning/src/general/scripts/azure/common_notebook_configure_dataengine.py
|
DmytroLiaskovskyi/incubator-dlab
|
af995e98b3b3cf526fb9741a3e5117dd1e04f3aa
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# *****************************************************************************
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# ******************************************************************************
import logging
import json
import sys
from dlab.fab import *
from dlab.meta_lib import *
from dlab.actions_lib import *
import os
import uuid
if __name__ == "__main__":
local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
os.environ['request_id'])
local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
try:
# generating variables dictionary
print('Generating infrastructure names and tags')
notebook_config = dict()
try:
notebook_config['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
except:
notebook_config['exploratory_name'] = ''
try:
notebook_config['computational_name'] = os.environ['computational_name'].replace('_', '-')
except:
notebook_config['computational_name'] = ''
notebook_config['service_base_name'] = os.environ['conf_service_base_name']
notebook_config['resource_group_name'] = os.environ['azure_resource_group_name']
notebook_config['region'] = os.environ['azure_region']
notebook_config['user_name'] = os.environ['edge_user_name'].replace('_', '-')
notebook_config['project_name'] = os.environ['project_name'].replace('_', '-')
notebook_config['project_tag'] = os.environ['project_name'].replace('_', '-')
notebook_config['endpoint_tag'] = os.environ['endpoint_name'].replace('_', '-')
notebook_config['cluster_name'] = notebook_config['service_base_name'] + '-' + notebook_config['project_name'] + \
'-de-' + notebook_config['exploratory_name'] + '-' + \
notebook_config['computational_name']
notebook_config['master_node_name'] = notebook_config['cluster_name'] + '-m'
notebook_config['slave_node_name'] = notebook_config['cluster_name'] + '-s'
notebook_config['notebook_name'] = os.environ['notebook_instance_name']
notebook_config['key_path'] = os.environ['conf_key_dir'] + '/' + os.environ['conf_key_name'] + '.pem'
notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
notebook_config['instance_count'] = int(os.environ['dataengine_instance_count'])
try:
notebook_config['spark_master_ip'] = AzureMeta().get_private_ip_address(
notebook_config['resource_group_name'], notebook_config['master_node_name'])
notebook_config['notebook_ip'] = AzureMeta().get_private_ip_address(
notebook_config['resource_group_name'], notebook_config['notebook_name'])
except Exception as err:
print('Error: {0}'.format(err))
sys.exit(1)
notebook_config['spark_master_url'] = 'spark://{}:7077'.format(notebook_config['spark_master_ip'])
except Exception as err:
for i in range(notebook_config['instance_count'] - 1):
slave_name = notebook_config['slave_node_name'] + '{}'.format(i+1)
AzureActions().remove_instance(notebook_config['resource_group_name'], slave_name)
AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['master_node_name'])
append_result("Failed to generate infrastructure names", str(err))
sys.exit(1)
try:
logging.info('[INSTALLING KERNELS INTO SPECIFIED NOTEBOOK]')
print('[INSTALLING KERNELS INTO SPECIFIED NOTEBOOK]')
params = "--cluster_name {0} --spark_version {1} --hadoop_version {2} --os_user {3} --spark_master {4}" \
" --keyfile {5} --notebook_ip {6} --datalake_enabled {7} --spark_master_ip {8}".\
format(notebook_config['cluster_name'], os.environ['notebook_spark_version'],
os.environ['notebook_hadoop_version'], notebook_config['dlab_ssh_user'],
notebook_config['spark_master_url'], notebook_config['key_path'], notebook_config['notebook_ip'],
os.environ['azure_datalake_enable'], notebook_config['spark_master_ip'])
try:
local("~/scripts/{}_{}.py {}".format(os.environ['application'], 'install_dataengine_kernels', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
print('Error: {0}'.format(err))
for i in range(notebook_config['instance_count'] - 1):
slave_name = notebook_config['slave_node_name'] + '{}'.format(i+1)
AzureActions().remove_instance(notebook_config['resource_group_name'], slave_name)
AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['master_node_name'])
append_result("Failed installing Dataengine kernels.", str(err))
sys.exit(1)
try:
logging.info('[UPDATING SPARK CONFIGURATION FILES ON NOTEBOOK]')
print('[UPDATING SPARK CONFIGURATION FILES ON NOTEBOOK]')
params = "--hostname {0} " \
"--keyfile {1} " \
"--os_user {2} " \
"--cluster_name {3} " \
.format(notebook_config['notebook_ip'],
notebook_config['key_path'],
notebook_config['dlab_ssh_user'],
notebook_config['cluster_name'])
try:
local("~/scripts/{0}.py {1}".format('common_configure_spark', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
print('Error: {0}'.format(err))
for i in range(notebook_config['instance_count'] - 1):
slave_name = notebook_config['slave_node_name'] + '{}'.format(i+1)
AzureActions().remove_instance(notebook_config['resource_group_name'], slave_name)
AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['master_node_name'])
append_result("Failed to configure Spark.", str(err))
sys.exit(1)
try:
with open("/root/result.json", 'w') as result:
res = {"notebook_name": notebook_config['notebook_name'],
"Action": "Configure notebook server"}
print(json.dumps(res))
result.write(json.dumps(res))
except:
print("Failed writing results.")
sys.exit(0)
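For context, the kernel-installation and Spark-configuration steps above both follow the same pattern: assemble a "--flag value" parameter string and hand it to a per-application helper script. A minimal sketch of that pattern using only the standard library follows; the argument values are hypothetical, and the real script runs its helpers through Fabric's local() rather than subprocess.
import shlex
import subprocess

def run_helper(script_path, **kwargs):
    # Mirror the "--flag value" params strings assembled above.
    params = " ".join("--{} {}".format(k, v) for k, v in kwargs.items())
    return subprocess.run(shlex.split("{} {}".format(script_path, params)), check=True)

# Hypothetical invocation, mirroring the common_configure_spark step above:
# run_helper("/root/scripts/common_configure_spark.py", hostname="10.0.1.5", os_user="dlab-user")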
| 51.986207
| 122
| 0.628549
| 837
| 7,538
| 5.369176
| 0.249701
| 0.183801
| 0.080107
| 0.054072
| 0.487094
| 0.384513
| 0.313529
| 0.277926
| 0.252114
| 0.243213
| 0
| 0.006487
| 0.222871
| 7,538
| 144
| 123
| 52.347222
| 0.760669
| 0.127089
| 0
| 0.362832
| 0
| 0.00885
| 0.31534
| 0.031717
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.070796
| 0
| 0.070796
| 0.088496
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc40aa5a0884df8e751f2fa5cfb93216f3c13768
| 16,560
|
py
|
Python
|
magenta/models/sketch_rnn/rnn.py
|
laurens-in/magenta
|
be6ed8d5b1eb2986ca277aa9c574a7912dd5ed0f
|
[
"Apache-2.0"
] | 1
|
2021-12-27T10:43:39.000Z
|
2021-12-27T10:43:39.000Z
|
magenta/models/sketch_rnn/rnn.py
|
kyungyunlee/magenta
|
cf80d19fc0c2e935821f284ebb64a8885f793717
|
[
"Apache-2.0"
] | null | null | null |
magenta/models/sketch_rnn/rnn.py
|
kyungyunlee/magenta
|
cf80d19fc0c2e935821f284ebb64a8885f793717
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SketchRNN RNN definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.contrib import rnn as contrib_rnn
def orthogonal(shape):
"""Orthogonal initilaizer."""
flat_shape = (shape[0], np.prod(shape[1:]))
a = np.random.normal(0.0, 1.0, flat_shape)
u, _, v = np.linalg.svd(a, full_matrices=False)
q = u if u.shape == flat_shape else v
return q.reshape(shape)
def orthogonal_initializer(scale=1.0):
"""Orthogonal initializer."""
def _initializer(shape, dtype=tf.float32,
partition_info=None): # pylint: disable=unused-argument
return tf.constant(orthogonal(shape) * scale, dtype)
return _initializer
def lstm_ortho_initializer(scale=1.0):
"""LSTM orthogonal initializer."""
def _initializer(shape, dtype=tf.float32,
partition_info=None): # pylint: disable=unused-argument
size_x = shape[0]
size_h = shape[1] // 4 # assumes lstm.
t = np.zeros(shape)
t[:, :size_h] = orthogonal([size_x, size_h]) * scale
t[:, size_h:size_h * 2] = orthogonal([size_x, size_h]) * scale
t[:, size_h * 2:size_h * 3] = orthogonal([size_x, size_h]) * scale
t[:, size_h * 3:] = orthogonal([size_x, size_h]) * scale
return tf.constant(t, dtype)
return _initializer
class LSTMCell(contrib_rnn.RNNCell):
"""Vanilla LSTM cell.
Uses ortho initializer, and also recurrent dropout without memory loss
(https://arxiv.org/abs/1603.05118)
"""
def __init__(self,
num_units,
forget_bias=1.0,
use_recurrent_dropout=False,
dropout_keep_prob=0.9):
self.num_units = num_units
self.forget_bias = forget_bias
self.use_recurrent_dropout = use_recurrent_dropout
self.dropout_keep_prob = dropout_keep_prob
@property
def state_size(self):
return 2 * self.num_units
@property
def output_size(self):
return self.num_units
def get_output(self, state):
unused_c, h = tf.split(state, 2, 1)
return h
def __call__(self, x, state, scope=None):
with tf.variable_scope(scope or type(self).__name__):
c, h = tf.split(state, 2, 1)
x_size = x.get_shape().as_list()[1]
w_init = None # uniform
h_init = lstm_ortho_initializer(1.0)
# Keep W_xh and W_hh separate here as well to use different init methods.
w_xh = tf.get_variable(
'W_xh', [x_size, 4 * self.num_units], initializer=w_init)
w_hh = tf.get_variable(
'W_hh', [self.num_units, 4 * self.num_units], initializer=h_init)
bias = tf.get_variable(
'bias', [4 * self.num_units],
initializer=tf.constant_initializer(0.0))
concat = tf.concat([x, h], 1)
w_full = tf.concat([w_xh, w_hh], 0)
hidden = tf.matmul(concat, w_full) + bias
i, j, f, o = tf.split(hidden, 4, 1)
if self.use_recurrent_dropout:
g = tf.nn.dropout(tf.tanh(j), self.dropout_keep_prob)
else:
g = tf.tanh(j)
new_c = c * tf.sigmoid(f + self.forget_bias) + tf.sigmoid(i) * g
new_h = tf.tanh(new_c) * tf.sigmoid(o)
      return new_h, tf.concat([new_c, new_h], 1)  # state returned as one concatenated tensor, not a tuple.
def layer_norm_all(h,
batch_size,
base,
num_units,
scope='layer_norm',
reuse=False,
gamma_start=1.0,
epsilon=1e-3,
use_bias=True):
"""Layer Norm (faster version, but not using defun)."""
# Performs layer norm on multiple base at once (ie, i, g, j, o for lstm)
  # Reshapes h in order to perform layer norm in parallel
h_reshape = tf.reshape(h, [batch_size, base, num_units])
mean = tf.reduce_mean(h_reshape, [2], keep_dims=True)
var = tf.reduce_mean(tf.square(h_reshape - mean), [2], keep_dims=True)
epsilon = tf.constant(epsilon)
rstd = tf.rsqrt(var + epsilon)
h_reshape = (h_reshape - mean) * rstd
# reshape back to original
h = tf.reshape(h_reshape, [batch_size, base * num_units])
with tf.variable_scope(scope):
if reuse:
tf.get_variable_scope().reuse_variables()
gamma = tf.get_variable(
'ln_gamma', [4 * num_units],
initializer=tf.constant_initializer(gamma_start))
if use_bias:
beta = tf.get_variable(
'ln_beta', [4 * num_units], initializer=tf.constant_initializer(0.0))
if use_bias:
return gamma * h + beta
return gamma * h
def layer_norm(x,
num_units,
scope='layer_norm',
reuse=False,
gamma_start=1.0,
epsilon=1e-3,
use_bias=True):
"""Calculate layer norm."""
axes = [1]
mean = tf.reduce_mean(x, axes, keep_dims=True)
x_shifted = x - mean
var = tf.reduce_mean(tf.square(x_shifted), axes, keep_dims=True)
inv_std = tf.rsqrt(var + epsilon)
with tf.variable_scope(scope):
if reuse:
tf.get_variable_scope().reuse_variables()
gamma = tf.get_variable(
'ln_gamma', [num_units],
initializer=tf.constant_initializer(gamma_start))
if use_bias:
beta = tf.get_variable(
'ln_beta', [num_units], initializer=tf.constant_initializer(0.0))
output = gamma * (x_shifted) * inv_std
if use_bias:
output += beta
return output
def raw_layer_norm(x, epsilon=1e-3):
axes = [1]
mean = tf.reduce_mean(x, axes, keep_dims=True)
std = tf.sqrt(
tf.reduce_mean(tf.square(x - mean), axes, keep_dims=True) + epsilon)
output = (x - mean) / (std)
return output
def super_linear(x,
output_size,
scope=None,
reuse=False,
init_w='ortho',
weight_start=0.0,
use_bias=True,
bias_start=0.0,
input_size=None):
"""Performs linear operation. Uses ortho init defined earlier."""
shape = x.get_shape().as_list()
with tf.variable_scope(scope or 'linear'):
if reuse:
tf.get_variable_scope().reuse_variables()
w_init = None # uniform
if input_size is None:
x_size = shape[1]
else:
x_size = input_size
if init_w == 'zeros':
w_init = tf.constant_initializer(0.0)
elif init_w == 'constant':
w_init = tf.constant_initializer(weight_start)
elif init_w == 'gaussian':
w_init = tf.random_normal_initializer(stddev=weight_start)
elif init_w == 'ortho':
w_init = lstm_ortho_initializer(1.0)
w = tf.get_variable(
'super_linear_w', [x_size, output_size], tf.float32, initializer=w_init)
if use_bias:
b = tf.get_variable(
'super_linear_b', [output_size],
tf.float32,
initializer=tf.constant_initializer(bias_start))
return tf.matmul(x, w) + b
return tf.matmul(x, w)
class LayerNormLSTMCell(contrib_rnn.RNNCell):
"""Layer-Norm, with Ortho Init. and Recurrent Dropout without Memory Loss.
https://arxiv.org/abs/1607.06450 - Layer Norm
https://arxiv.org/abs/1603.05118 - Recurrent Dropout without Memory Loss
"""
def __init__(self,
num_units,
forget_bias=1.0,
use_recurrent_dropout=False,
dropout_keep_prob=0.90):
"""Initialize the Layer Norm LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
forget_bias: float, The bias added to forget gates (default 1.0).
use_recurrent_dropout: Whether to use Recurrent Dropout (default False)
dropout_keep_prob: float, dropout keep probability (default 0.90)
"""
self.num_units = num_units
self.forget_bias = forget_bias
self.use_recurrent_dropout = use_recurrent_dropout
self.dropout_keep_prob = dropout_keep_prob
@property
def input_size(self):
return self.num_units
@property
def output_size(self):
return self.num_units
@property
def state_size(self):
return 2 * self.num_units
def get_output(self, state):
h, unused_c = tf.split(state, 2, 1)
return h
def __call__(self, x, state, timestep=0, scope=None):
with tf.variable_scope(scope or type(self).__name__):
h, c = tf.split(state, 2, 1)
h_size = self.num_units
x_size = x.get_shape().as_list()[1]
batch_size = x.get_shape().as_list()[0]
w_init = None # uniform
h_init = lstm_ortho_initializer(1.0)
w_xh = tf.get_variable(
'W_xh', [x_size, 4 * self.num_units], initializer=w_init)
w_hh = tf.get_variable(
'W_hh', [self.num_units, 4 * self.num_units], initializer=h_init)
concat = tf.concat([x, h], 1) # concat for speed.
w_full = tf.concat([w_xh, w_hh], 0)
concat = tf.matmul(concat, w_full) #+ bias # live life without garbage.
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
concat = layer_norm_all(concat, batch_size, 4, h_size, 'ln_all')
i, j, f, o = tf.split(concat, 4, 1)
if self.use_recurrent_dropout:
g = tf.nn.dropout(tf.tanh(j), self.dropout_keep_prob)
else:
g = tf.tanh(j)
new_c = c * tf.sigmoid(f + self.forget_bias) + tf.sigmoid(i) * g
new_h = tf.tanh(layer_norm(new_c, h_size, 'ln_c')) * tf.sigmoid(o)
return new_h, tf.concat([new_h, new_c], 1)
class HyperLSTMCell(contrib_rnn.RNNCell):
"""HyperLSTM with Ortho Init, Layer Norm, Recurrent Dropout, no Memory Loss.
https://arxiv.org/abs/1609.09106
http://blog.otoro.net/2016/09/28/hyper-networks/
"""
def __init__(self,
num_units,
forget_bias=1.0,
use_recurrent_dropout=False,
dropout_keep_prob=0.90,
use_layer_norm=True,
hyper_num_units=256,
hyper_embedding_size=32,
hyper_use_recurrent_dropout=False):
"""Initialize the Layer Norm HyperLSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
forget_bias: float, The bias added to forget gates (default 1.0).
use_recurrent_dropout: Whether to use Recurrent Dropout (default False)
dropout_keep_prob: float, dropout keep probability (default 0.90)
use_layer_norm: boolean. (default True)
Controls whether we use LayerNorm layers in main LSTM & HyperLSTM cell.
hyper_num_units: int, number of units in HyperLSTM cell.
        (default is 256; recommend experimenting with larger values for larger tasks)
      hyper_embedding_size: int, size of signals emitted from HyperLSTM cell.
        (default is 32; recommend trying larger values for large datasets)
hyper_use_recurrent_dropout: boolean. (default False)
Controls whether HyperLSTM cell also uses recurrent dropout.
Recommend turning this on only if hyper_num_units becomes large (>= 512)
"""
self.num_units = num_units
self.forget_bias = forget_bias
self.use_recurrent_dropout = use_recurrent_dropout
self.dropout_keep_prob = dropout_keep_prob
self.use_layer_norm = use_layer_norm
self.hyper_num_units = hyper_num_units
self.hyper_embedding_size = hyper_embedding_size
self.hyper_use_recurrent_dropout = hyper_use_recurrent_dropout
self.total_num_units = self.num_units + self.hyper_num_units
if self.use_layer_norm:
cell_fn = LayerNormLSTMCell
else:
cell_fn = LSTMCell
self.hyper_cell = cell_fn(
hyper_num_units,
use_recurrent_dropout=hyper_use_recurrent_dropout,
dropout_keep_prob=dropout_keep_prob)
@property
def input_size(self):
return self._input_size
@property
def output_size(self):
return self.num_units
@property
def state_size(self):
return 2 * self.total_num_units
def get_output(self, state):
total_h, unused_total_c = tf.split(state, 2, 1)
h = total_h[:, 0:self.num_units]
return h
def hyper_norm(self, layer, scope='hyper', use_bias=True):
num_units = self.num_units
embedding_size = self.hyper_embedding_size
# recurrent batch norm init trick (https://arxiv.org/abs/1603.09025).
init_gamma = 0.10 # cooijmans' da man.
with tf.variable_scope(scope):
zw = super_linear(
self.hyper_output,
embedding_size,
init_w='constant',
weight_start=0.00,
use_bias=True,
bias_start=1.0,
scope='zw')
alpha = super_linear(
zw,
num_units,
init_w='constant',
weight_start=init_gamma / embedding_size,
use_bias=False,
scope='alpha')
result = tf.multiply(alpha, layer)
if use_bias:
zb = super_linear(
self.hyper_output,
embedding_size,
init_w='gaussian',
weight_start=0.01,
use_bias=False,
bias_start=0.0,
scope='zb')
beta = super_linear(
zb,
num_units,
init_w='constant',
weight_start=0.00,
use_bias=False,
scope='beta')
result += beta
return result
def __call__(self, x, state, timestep=0, scope=None):
with tf.variable_scope(scope or type(self).__name__):
total_h, total_c = tf.split(state, 2, 1)
h = total_h[:, 0:self.num_units]
c = total_c[:, 0:self.num_units]
self.hyper_state = tf.concat(
[total_h[:, self.num_units:], total_c[:, self.num_units:]], 1)
batch_size = x.get_shape().as_list()[0]
x_size = x.get_shape().as_list()[1]
self._input_size = x_size
w_init = None # uniform
h_init = lstm_ortho_initializer(1.0)
w_xh = tf.get_variable(
'W_xh', [x_size, 4 * self.num_units], initializer=w_init)
w_hh = tf.get_variable(
'W_hh', [self.num_units, 4 * self.num_units], initializer=h_init)
bias = tf.get_variable(
'bias', [4 * self.num_units],
initializer=tf.constant_initializer(0.0))
# concatenate the input and hidden states for hyperlstm input
hyper_input = tf.concat([x, h], 1)
hyper_output, hyper_new_state = self.hyper_cell(hyper_input,
self.hyper_state)
self.hyper_output = hyper_output
self.hyper_state = hyper_new_state
xh = tf.matmul(x, w_xh)
hh = tf.matmul(h, w_hh)
# split Wxh contributions
ix, jx, fx, ox = tf.split(xh, 4, 1)
ix = self.hyper_norm(ix, 'hyper_ix', use_bias=False)
jx = self.hyper_norm(jx, 'hyper_jx', use_bias=False)
fx = self.hyper_norm(fx, 'hyper_fx', use_bias=False)
ox = self.hyper_norm(ox, 'hyper_ox', use_bias=False)
# split Whh contributions
ih, jh, fh, oh = tf.split(hh, 4, 1)
ih = self.hyper_norm(ih, 'hyper_ih', use_bias=True)
jh = self.hyper_norm(jh, 'hyper_jh', use_bias=True)
fh = self.hyper_norm(fh, 'hyper_fh', use_bias=True)
oh = self.hyper_norm(oh, 'hyper_oh', use_bias=True)
# split bias
ib, jb, fb, ob = tf.split(bias, 4, 0) # bias is to be broadcasted.
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
i = ix + ih + ib
j = jx + jh + jb
f = fx + fh + fb
o = ox + oh + ob
if self.use_layer_norm:
concat = tf.concat([i, j, f, o], 1)
concat = layer_norm_all(concat, batch_size, 4, self.num_units, 'ln_all')
i, j, f, o = tf.split(concat, 4, 1)
if self.use_recurrent_dropout:
g = tf.nn.dropout(tf.tanh(j), self.dropout_keep_prob)
else:
g = tf.tanh(j)
new_c = c * tf.sigmoid(f + self.forget_bias) + tf.sigmoid(i) * g
new_h = tf.tanh(layer_norm(new_c, self.num_units, 'ln_c')) * tf.sigmoid(o)
hyper_h, hyper_c = tf.split(hyper_new_state, 2, 1)
new_total_h = tf.concat([new_h, hyper_h], 1)
new_total_c = tf.concat([new_c, hyper_c], 1)
new_total_state = tf.concat([new_total_h, new_total_c], 1)
return new_h, new_total_state
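The orthogonal initializers above can be sanity-checked without building a TensorFlow graph. A minimal NumPy sketch (not part of the original file) verifying the property that lstm_ortho_initializer relies on for each of the four gate blocks:
# Illustrative check: orthogonal() should return a matrix with orthonormal
# columns; lstm_ortho_initializer tiles four such blocks across the gate axis.
_w = orthogonal([8, 8])
assert np.allclose(np.dot(_w.T, _w), np.eye(8), atol=1e-6)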
| 33.186373
| 80
| 0.634964
| 2,423
| 16,560
| 4.089971
| 0.134957
| 0.046821
| 0.03996
| 0.011806
| 0.549647
| 0.492936
| 0.46448
| 0.442482
| 0.412714
| 0.365691
| 0
| 0.019692
| 0.254831
| 16,560
| 498
| 81
| 33.253012
| 0.783387
| 0.195411
| 0
| 0.511494
| 0
| 0
| 0.021418
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.077586
| false
| 0
| 0.017241
| 0.025862
| 0.178161
| 0.002874
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc412db90075a83ae4e5731ee32b0fb7611791ff
| 6,034
|
py
|
Python
|
src/cogent3/cluster/UPGMA.py
|
u6052029/cogent3
|
ca0efcb7f60b715bcbfbecd924cdb98a53cefe20
|
[
"BSD-3-Clause"
] | null | null | null |
src/cogent3/cluster/UPGMA.py
|
u6052029/cogent3
|
ca0efcb7f60b715bcbfbecd924cdb98a53cefe20
|
[
"BSD-3-Clause"
] | null | null | null |
src/cogent3/cluster/UPGMA.py
|
u6052029/cogent3
|
ca0efcb7f60b715bcbfbecd924cdb98a53cefe20
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
"""Functions to cluster using UPGMA
upgma takes a dictionary of pair tuples mapped to distances as input.
UPGMA_cluster takes an array and a list of PhyloNode objects corresponding
to the array as input. This type of input can also be generated from a DictArray
using the inputs_from_dict_array function.
Both return a PhyloNode object of the UPGMA cluster
"""
import numpy
from numpy import argmin, array, average, diag, ma, ravel, sum, take
from cogent3.core.tree import PhyloNode
from cogent3.util.dict_array import DictArray
__author__ = "Catherine Lozupone"
__copyright__ = "Copyright 2007-2020, The Cogent Project"
__credits__ = ["Catherine Lozuopone", "Rob Knight", "Peter Maxwell"]
__license__ = "BSD-3"
__version__ = "2020.7.2a"
__maintainer__ = "Catherine Lozupone"
__email__ = "lozupone@colorado.edu"
__status__ = "Production"
numerictypes = numpy.core.numerictypes.sctype2char
Float = numerictypes(float)
BIG_NUM = 1e305
def upgma(pairwise_distances):
"""Uses the UPGMA algorithm to cluster sequences
pairwise_distances: a dictionary with pair tuples mapped to a distance
returns a PhyloNode object of the UPGMA cluster
"""
darr = DictArray(pairwise_distances)
matrix_a, node_order = inputs_from_dict_array(darr)
tree = UPGMA_cluster(matrix_a, node_order, BIG_NUM)
index = 0
for node in tree.traverse():
if not node.parent:
node.name = "root"
elif not node.name:
node.name = "edge." + str(index)
index += 1
return tree
def find_smallest_index(matrix):
"""returns the index of the smallest element in a numpy array
for UPGMA clustering elements on the diagonal should first be
substituted with a very large number so that they are always
    larger than the rest of the values in the array."""
# get the shape of the array as a tuple (e.g. (3,3))
shape = matrix.shape
# turn into a 1 by x array and get the index of the lowest number
matrix1D = ravel(matrix)
lowest_index = argmin(matrix1D)
# convert the lowest_index derived from matrix1D to one for the original
# square matrix and return
row_len = shape[0]
return divmod(lowest_index, row_len)
def condense_matrix(matrix, smallest_index, large_value):
"""converges the rows and columns indicated by smallest_index
Smallest index is returned from find_smallest_index.
For both the rows and columns, the values for the two indices are
averaged. The resulting vector replaces the first index in the array
and the second index is replaced by an array with large numbers so that
it is never chosen again with find_smallest_index.
"""
first_index, second_index = smallest_index
# get the rows and make a new vector that has their average
rows = take(matrix, smallest_index, 0)
new_vector = average(rows, 0)
# replace info in the row and column for first index with new_vector
matrix[first_index] = new_vector
matrix[:, first_index] = new_vector
# replace the info in the row and column for the second index with
# high numbers so that it is ignored
matrix[second_index] = large_value
matrix[:, second_index] = large_value
return matrix
def condense_node_order(matrix, smallest_index, node_order):
"""condenses two nodes in node_order based on smallest_index info
This function is used to create a tree while condensing a matrix
with the condense_matrix function. The smallest_index is retrieved
with find_smallest_index. The first index is replaced with a node object
that combines the two nodes corresponding to the indices in node order.
The second index in smallest_index is replaced with None.
Also sets the branch length of the nodes to 1/2 of the distance between
the nodes in the matrix"""
index1, index2 = smallest_index
node1 = node_order[index1]
node2 = node_order[index2]
# get the distance between the nodes and assign 1/2 the distance to the
# lengthproperty of each node
distance = matrix[index1, index2]
nodes = [node1, node2]
d = distance / 2.0
for n in nodes:
if n.children:
n.length = d - n.children[0].TipLength
else:
n.length = d
n.TipLength = d
# combine the two nodes into a new PhyloNode object
new_node = PhyloNode()
new_node.children.append(node1)
new_node.children.append(node2)
node1.parent = new_node
node2.parent = new_node
# replace the object at index1 with the combined node
node_order[index1] = new_node
# replace the object at index2 with None
node_order[index2] = None
return node_order
def UPGMA_cluster(matrix, node_order, large_number):
"""cluster with UPGMA
matrix is a numpy array.
node_order is a list of PhyloNode objects corresponding to the matrix.
large_number will be assigned to the matrix during the process and
should be much larger than any value already in the matrix.
WARNING: Changes matrix in-place.
WARNING: Expects matrix to already have diagonals assigned to large_number
before this function is called.
"""
num_entries = len(node_order)
tree = None
for i in range(num_entries - 1):
smallest_index = find_smallest_index(matrix)
index1, index2 = smallest_index
# if smallest_index is on the diagonal set the diagonal to large_number
if index1 == index2:
matrix[diag([True] * len(matrix))] = large_number
smallest_index = find_smallest_index(matrix)
row_order = condense_node_order(matrix, smallest_index, node_order)
matrix = condense_matrix(matrix, smallest_index, large_number)
tree = node_order[smallest_index[0]]
return tree
def inputs_from_dict_array(darr):
"""makes inputs for UPGMA_cluster from a DictArray object
"""
darr.array += numpy.eye(darr.shape[0]) * BIG_NUM
nodes = list(map(PhyloNode, darr.keys()))
return darr.array, nodes
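The UPGMA_cluster docstring above fully specifies its inputs, so a small worked example is easy to sketch (not part of the original file; it assumes PhyloNode accepts a name keyword):
# Illustrative sketch: cluster three taxa, with the diagonal pre-set to
# BIG_NUM as UPGMA_cluster's docstring requires.
_dists = array([[BIG_NUM, 1.0, 4.0],
                [1.0, BIG_NUM, 4.0],
                [4.0, 4.0, BIG_NUM]])
_nodes = [PhyloNode(name=n) for n in "abc"]
_tree = UPGMA_cluster(_dists, _nodes, BIG_NUM)
# "a" and "b" (distance 1.0) are joined first, then "c"; each branch length is
# half of the corresponding join distance (0.5 for a and b).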
| 36.569697
| 82
| 0.716937
| 881
| 6,034
| 4.755959
| 0.270148
| 0.07136
| 0.024344
| 0.013604
| 0.187351
| 0.129594
| 0.082339
| 0.04105
| 0
| 0
| 0
| 0.01385
| 0.222241
| 6,034
| 164
| 83
| 36.792683
| 0.878969
| 0.46934
| 0
| 0.075
| 0
| 0
| 0.05625
| 0.006908
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075
| false
| 0
| 0.05
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc417b4336f77e529dd64d425d37722b3edade09
| 1,007
|
py
|
Python
|
module1/api.py
|
oceandelee/tac
|
62ffbcb31b374a9fa83a1ee6010b2e00f2de8a7c
|
[
"MIT"
] | null | null | null |
module1/api.py
|
oceandelee/tac
|
62ffbcb31b374a9fa83a1ee6010b2e00f2de8a7c
|
[
"MIT"
] | null | null | null |
module1/api.py
|
oceandelee/tac
|
62ffbcb31b374a9fa83a1ee6010b2e00f2de8a7c
|
[
"MIT"
] | null | null | null |
"""API for AVB"""
import json
import sys
import requests
def actualite_found():
osm = "https://opendata.bruxelles.be/api/datasets/1.0/search/?q="
data = {
"nhits":0,
"parameters":{
"dataset":"actualites-ville-de-bruxelles",
"timezone":"UTC",
"q":"actualite",
"language": "fr",
"rows":10,
"start":0,
"sort":[
"published"
]
,
"format":"json"
}
,
"records":[]
}
resp = requests.get(osm, data)
if resp.status_code == 200:
print(resp.json()["datasets"][0]["metas"])
else:
print("actualite not found")
return resp
def get_result(resp,n,attribut):
metas = resp.json()["datasets"][n]["metas"]
return metas[attribut]
def nb_result(resp):
return len(resp.json()["datasets"])
#Example of use
if __name__ == "__main__":
resp = actualite_found()
result = get_result(resp,2,"description")
print(result)
print(nb_result(resp))
| 19.365385
| 69
| 0.5571
| 115
| 1,007
| 4.747826
| 0.530435
| 0.07326
| 0.087912
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014946
| 0.269116
| 1,007
| 52
| 70
| 19.365385
| 0.726902
| 0.025819
| 0
| 0
| 0
| 0
| 0.256148
| 0.029713
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.076923
| 0.025641
| 0.230769
| 0.102564
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc422cd23ef7241b5d35bfeb10b87ff16ba77128
| 7,782
|
py
|
Python
|
improver/cli/nbhood.py
|
cpelley/improver
|
ebf77fe2adc85ed7aec74c26671872a2e4388ded
|
[
"BSD-3-Clause"
] | 77
|
2017-04-26T07:47:40.000Z
|
2022-03-31T09:40:49.000Z
|
improver/cli/nbhood.py
|
cpelley/improver
|
ebf77fe2adc85ed7aec74c26671872a2e4388ded
|
[
"BSD-3-Clause"
] | 1,440
|
2017-03-29T10:04:15.000Z
|
2022-03-28T10:11:29.000Z
|
improver/cli/nbhood.py
|
MoseleyS/improver
|
ca028e3a1c842e3ff00b188c8ea6eaedd0a07149
|
[
"BSD-3-Clause"
] | 72
|
2017-03-17T16:53:45.000Z
|
2022-02-16T09:41:37.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2021 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Script to run neighbourhood processing."""
from improver import cli
from improver.constants import DEFAULT_PERCENTILES
@cli.clizefy
@cli.with_output
def process(
cube: cli.inputcube,
mask: cli.inputcube = None,
*,
neighbourhood_output,
neighbourhood_shape,
radii: cli.comma_separated_list,
lead_times: cli.comma_separated_list = None,
degrees_as_complex=False,
weighted_mode=False,
area_sum=False,
remask=False,
percentiles: cli.comma_separated_list = DEFAULT_PERCENTILES,
halo_radius: float = None,
):
"""Runs neighbourhood processing.
Apply the requested neighbourhood method via the
NeighbourhoodProcessing plugin to a Cube.
Args:
cube (iris.cube.Cube):
The Cube to be processed.
mask (iris.cube.Cube):
A cube to mask the input cube. The data should contain 1 for
usable points and 0 for discarded points.
Only supported with square neighbourhoods. (Optional)
neighbourhood_output (str):
The form of the results generated using neighbourhood processing.
If "probabilities" is selected, the mean probability with a
neighbourhood is calculated. If "percentiles" is selected, then
the percentiles are calculated with a neighbourhood. Calculating
percentiles from a neighbourhood is only supported for a circular
neighbourhood.
Options: "probabilities", "percentiles".
neighbourhood_shape (str):
Name of the neighbourhood method to use. Only a "circular"
neighbourhood shape is applicable for calculating "percentiles"
output.
Options: "circular", "square".
radii (list of float):
The radius or a list of radii in metres of the neighbourhood to
apply.
If it is a list, it must be the same length as lead_times, which
defines at which lead time to use which nbhood radius. The radius
will be interpolated for intermediate lead times.
lead_times (list of int):
The lead times in hours that correspond to the radii to be used.
If lead_times are set, radii must be a list the same length as
lead_times.
degrees_as_complex (bool):
Include this option to process angles as complex numbers.
Not compatible with circular kernel or percentiles.
weighted_mode (bool):
Include this option to set the weighting to decrease with radius.
Otherwise a constant weighting is assumed.
weighted_mode is only applicable for calculating "probability"
neighbourhood output using the circular kernel.
area_sum (bool):
Return sum rather than fraction over the neighbourhood area.
remask (bool):
Include this option to apply the original un-neighbourhood
processed mask to the neighbourhood processed cube.
Otherwise the original un-neighbourhood processed mask
is not applied. Therefore, the neighbourhood processing may result
            in values being present in areas that were originally masked.
percentiles (float):
Calculates value at the specified percentiles from the
neighbourhood surrounding each grid point. This argument has no
effect if the output is probabilities.
halo_radius (float):
Set this radius in metres to define the excess halo to clip. Used
where a larger grid was defined than the standard grid and we want
to clip the grid back to the standard grid. Otherwise no clipping
is applied.
Returns:
iris.cube.Cube:
A processed Cube.
Raises:
RuntimeError:
If weighted_mode is used with the wrong neighbourhood_output.
RuntimeError:
If degree_as_complex is used with
neighbourhood_output='percentiles'.
RuntimeError:
If degree_as_complex is used with neighbourhood_shape='circular'.
"""
from improver.nbhood import radius_by_lead_time
from improver.nbhood.nbhood import (
GeneratePercentilesFromANeighbourhood,
NeighbourhoodProcessing,
)
from improver.utilities.pad_spatial import remove_cube_halo
from improver.wind_calculations.wind_direction import WindDirection
sum_or_fraction = "sum" if area_sum else "fraction"
if neighbourhood_output == "percentiles":
if weighted_mode:
raise RuntimeError(
"weighted_mode cannot be used with" 'neighbourhood_output="percentiles"'
)
if degrees_as_complex:
raise RuntimeError("Cannot generate percentiles from complex " "numbers")
if neighbourhood_shape == "circular":
if degrees_as_complex:
raise RuntimeError(
"Cannot process complex numbers with circular neighbourhoods"
)
if degrees_as_complex:
# convert cube data into complex numbers
cube.data = WindDirection.deg_to_complex(cube.data)
radius_or_radii, lead_times = radius_by_lead_time(radii, lead_times)
if neighbourhood_output == "probabilities":
result = NeighbourhoodProcessing(
neighbourhood_shape,
radius_or_radii,
lead_times=lead_times,
weighted_mode=weighted_mode,
sum_or_fraction=sum_or_fraction,
re_mask=remask,
)(cube, mask_cube=mask)
elif neighbourhood_output == "percentiles":
result = GeneratePercentilesFromANeighbourhood(
neighbourhood_shape,
radius_or_radii,
lead_times=lead_times,
percentiles=percentiles,
)(cube)
if degrees_as_complex:
# convert neighbourhooded cube back to degrees
result.data = WindDirection.complex_to_deg(result.data)
if halo_radius is not None:
result = remove_cube_halo(result, halo_radius)
return result
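For orientation only, a hedged sketch of how the documented keyword arguments line up in a direct call to process(). The cli.clizefy and cli.with_output decorators normally drive this function from the improver command line, so treat this as an illustration rather than the supported entry point; cube is assumed to be an iris.cube.Cube loaded elsewhere.
# result = process(
#     cube,                                  # iris.cube.Cube to be processed
#     neighbourhood_output="probabilities",
#     neighbourhood_shape="square",
#     radii=["20000"],                       # metres; one radius, or one per lead time
#     weighted_mode=False,
#     area_sum=False,
# )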
| 42.758242
| 88
| 0.681187
| 936
| 7,782
| 5.560897
| 0.313034
| 0.022478
| 0.018444
| 0.013833
| 0.151777
| 0.104899
| 0.080692
| 0.064938
| 0.064938
| 0.026129
| 0
| 0.001912
| 0.260601
| 7,782
| 181
| 89
| 42.994475
| 0.902676
| 0.648933
| 0
| 0.206349
| 0
| 0
| 0.094527
| 0.014096
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015873
| false
| 0
| 0.095238
| 0
| 0.126984
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc461d0fe4c1ef7384477f1e053ae3080c54c6a9
| 1,541
|
py
|
Python
|
donation/migrations/0043_auto_20180109_0012.py
|
effective-altruism-australia/donation-portal
|
45fe58edc44d0c4444b493e4ac025fc53897c799
|
[
"MIT"
] | 1
|
2019-04-23T01:29:26.000Z
|
2019-04-23T01:29:26.000Z
|
donation/migrations/0043_auto_20180109_0012.py
|
effective-altruism-australia/donation-portal
|
45fe58edc44d0c4444b493e4ac025fc53897c799
|
[
"MIT"
] | 68
|
2017-02-10T21:33:39.000Z
|
2019-06-22T13:40:02.000Z
|
donation/migrations/0043_auto_20180109_0012.py
|
effective-altruism-australia/donation-portal
|
45fe58edc44d0c4444b493e4ac025fc53897c799
|
[
"MIT"
] | 5
|
2016-11-08T01:35:47.000Z
|
2020-12-08T07:32:34.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
def copy_existing_referrals_into_new_field(apps, schema_editor):
Pledge = apps.get_model('donation', 'Pledge')
Referral = apps.get_model('donation', 'Referral')
reasons = Pledge.objects.values_list('how_did_you_hear_about_us', flat=True).distinct()
for reason in reasons:
if reason: # Filter out None and u''
Referral.objects.create(reason=reason)
for pledge in Pledge.objects.all():
reason = pledge.how_did_you_hear_about_us
if reason:
pledge.how_did_you_hear_about_us_db = Referral.objects.get(reason=reason)
pledge.save()
class Migration(migrations.Migration):
dependencies = [
('donation', '0042_amend_donation_view'),
]
operations = [
migrations.CreateModel(
name='Referral',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('reason', models.CharField(max_length=256)),
],
),
migrations.AddField(
model_name='pledge',
name='how_did_you_hear_about_us_db',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, verbose_name='How did you hear about us?', blank=True, to='donation.Referral', null=True),
),
migrations.RunPython(copy_existing_referrals_into_new_field)
]
| 35.022727
| 171
| 0.658663
| 184
| 1,541
| 5.23913
| 0.461957
| 0.03112
| 0.046681
| 0.067427
| 0.209544
| 0.209544
| 0.120332
| 0.06639
| 0
| 0
| 0
| 0.006734
| 0.229072
| 1,541
| 43
| 172
| 35.837209
| 0.804714
| 0.029202
| 0
| 0.117647
| 0
| 0
| 0.121902
| 0.051574
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029412
| false
| 0
| 0.088235
| 0
| 0.205882
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc48029eb6bc6d9c3b97d0e2970ae2bc11eb162e
| 5,626
|
py
|
Python
|
graph_search/week2/assignment_dijkstra_shortest_paths.py
|
liaoaoyuan97/standford_algorithms_specialization
|
2914fdd397ce895d986ac855e78afd7a51ceff68
|
[
"MIT"
] | null | null | null |
graph_search/week2/assignment_dijkstra_shortest_paths.py
|
liaoaoyuan97/standford_algorithms_specialization
|
2914fdd397ce895d986ac855e78afd7a51ceff68
|
[
"MIT"
] | null | null | null |
graph_search/week2/assignment_dijkstra_shortest_paths.py
|
liaoaoyuan97/standford_algorithms_specialization
|
2914fdd397ce895d986ac855e78afd7a51ceff68
|
[
"MIT"
] | 1
|
2021-01-18T19:35:48.000Z
|
2021-01-18T19:35:48.000Z
|
import heapq
import time
from os import path
from math import floor
class Heap:
def __init__(self):
self.size = 0
self.array = []
self.v2index_map = {}
def __get_parent_index(self, idx):
return int(floor((idx - 1) / 2))
def __get_left_child_index(self, idx):
return 2 * idx + 1
def __get_right_child_index(self, idx):
return 2 * idx + 2
def __swap_value(self, i, j):
t = self.array[i]
self.v2index_map[t[0]] = j
self.v2index_map[self.array[j][0]] = i
self.array[i] = self.array[j]
self.array[j] = t
def __bubble_up(self, idx):
parent_idx = self.__get_parent_index(idx)
while parent_idx >= 0:
if self.array[parent_idx][1] <= self.array[idx][1]:
break
self.__swap_value(parent_idx, idx)
idx = parent_idx
parent_idx = self.__get_parent_index(idx)
def __bubble_down(self, idx):
left_idx = self.__get_left_child_index(idx)
right_idx = self.__get_right_child_index(idx)
while left_idx < self.size or right_idx < self.size:
min_idx = left_idx
if left_idx >= self.size or (right_idx < self.size and self.array[right_idx][1] < self.array[left_idx][1]):
min_idx = right_idx
if self.array[idx][1] < self.array[min_idx][1]:
break
self.__swap_value(idx, min_idx)
idx = min_idx
left_idx = self.__get_left_child_index(idx)
right_idx = self.__get_right_child_index(idx)
def get_vertex_key(self, v_id):
return self.array[self.v2index_map[v_id]][1]
def pop(self):
if self.size < 1:
raise IndexError
min_node = self.array[0]
self.size = self.size - 1
self.__swap_value(0, self.size)
self.array.pop()
if self.size > 1:
self.__bubble_down(0)
del self.v2index_map[min_node[0]]
return min_node
def insert(self, node):
self.array.append(node)
self.v2index_map[node[0]] = self.size
self.size = self.size + 1
if self.size > 1:
self.__bubble_up(self.size - 1)
def modify_key(self, v_id, update_val):
idx = self.v2index_map[v_id]
self.array[idx] = (v_id, update_val)
parent_idx = self.__get_parent_index(idx)
if parent_idx >= 0 and self.array[idx][1] < self.array[parent_idx][1]:
self.__bubble_up(idx)
else:
self.__bubble_down(idx)
def read_graph(filename):
graph = dict()
with open(path.join('.', filename), 'r') as f:
for row in f.readlines():
edges = row.strip('\t\n').split('\t')
s = int(edges[0])
graph[s] = []
for i in range(1, len(edges)):
edge = edges[i].split(',')
graph[s].append((int(edge[0]), int(edge[1])))
return graph
def get_shortest_paths_heapq(graph):
heap = []
heapq.heappush(heap, (0, 1)) # (dj_score, vertex_id)
distances = {i: 1000000 for i in graph}
distances[1] = 0
X = []
while heap:
cur_distance, cur_v = heapq.heappop(heap)
if cur_distance > distances[cur_v]:
continue
# added to X
X.append(cur_v)
for neighbor, weight in graph[cur_v]:
dj_score = cur_distance + weight
if dj_score < distances[neighbor]:
distances[neighbor] = dj_score
heapq.heappush(heap, (dj_score, neighbor))
return distances, X
def get_shortest_paths_self_defined_heap(graph):
heap = Heap()
heap.insert((1, 0)) # (vertex_id, dj_score)
for v in graph:
if v != 1:
heap.insert((v, 1000000))
shortest_paths = dict()
n_v = len(graph)
while len(shortest_paths) < n_v:
assert len(shortest_paths) + heap.size == n_v
cur_v, v_score = heap.pop()
shortest_paths[cur_v] = v_score
for neighbor, weight in graph[cur_v]:
dj_score = v_score + weight
# import pdb;pdb.set_trace()
if neighbor not in shortest_paths and dj_score < heap.get_vertex_key(neighbor):
heap.modify_key(neighbor, dj_score)
return shortest_paths
if __name__ == "__main__":
# test case 1, output: {1: 0, 2: 1, 3: 2, 4: 2, 5: 3, 6: 4}
# graph = {
# 1: [(6, 7), (5, 3), (2, 1), (4, 2), (3, 3)],
# 2: [(1, 1), (3, 1), (4, 1), (6, 6)],
# 3: [(1, 3), (2, 1), (6, 2)],
# 4: [(2, 1), (1, 2), (6, 5)],
# 5: [(1, 3), (6, 3)],
# 6: [(1, 7), (3, 2), (2, 6), (4, 5), (5, 3)]
# }
graph = read_graph("Dijkstra.txt")
dedup_edges = set()
for k, _ in graph.items():
for v in _:
dedup_edges.add((k, v[0], v[1]))
dedup_edges.add((v[0], k, v[1]))
assert len(dedup_edges) == sum([len(e) for e in graph.values()])
# graph = {}
# heap = Heap()
# heap.insert((1,0))
# heap.insert((2,0))
# heap.pop()
start_t = time.time()
    min_distances, X = get_shortest_paths_heapq(graph)
print(time.time() - start_t)
# print(min_distances)
e = [7, 37, 59, 82, 99, 115, 133, 165, 188, 197]
print(",".join([str(int(min_distances[i])) for i in e]))
start_t = time.time()
    min_distances = get_shortest_paths_self_defined_heap(graph)  # this variant takes only the graph
print(time.time() - start_t)
# print(min_distances)
e = [7, 37, 59, 82, 99, 115, 133, 165, 188, 197]
print(",".join([str(int(min_distances[i])) for i in e]))
| 27.714286
| 119
| 0.551369
| 824
| 5,626
| 3.515777
| 0.161408
| 0.059027
| 0.033828
| 0.01795
| 0.383155
| 0.327235
| 0.25164
| 0.182258
| 0.159475
| 0.11253
| 0
| 0.046691
| 0.307145
| 5,626
| 202
| 120
| 27.851485
| 0.696511
| 0.088162
| 0
| 0.159091
| 0
| 0
| 0.006068
| 0
| 0
| 0
| 0
| 0
| 0.015152
| 1
| 0.106061
| false
| 0
| 0.030303
| 0.030303
| 0.204545
| 0.030303
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc480677b321e1843fe0812d2b7ce6bbeeb5090e
| 4,345
|
py
|
Python
|
ssod/utils/structure_utils.py
|
huimlight/SoftTeacher
|
97064fbcce1ab87b40977544ba7a9c488274d66f
|
[
"MIT"
] | 604
|
2021-08-09T03:00:35.000Z
|
2022-03-31T13:43:14.000Z
|
ssod/utils/structure_utils.py
|
huimlight/SoftTeacher
|
97064fbcce1ab87b40977544ba7a9c488274d66f
|
[
"MIT"
] | 158
|
2021-08-29T07:58:22.000Z
|
2022-03-31T15:23:27.000Z
|
ssod/utils/structure_utils.py
|
huimlight/SoftTeacher
|
97064fbcce1ab87b40977544ba7a9c488274d66f
|
[
"MIT"
] | 92
|
2021-08-24T07:29:37.000Z
|
2022-03-29T03:01:34.000Z
|
import warnings
from collections import Counter
from collections.abc import Mapping, Sequence
from numbers import Number
from typing import Dict, List
import numpy as np
import torch
from mmdet.core.mask.structures import BitmapMasks
from torch.nn import functional as F
_step_counter = Counter()
def list_concat(data_list: List[list]):
if isinstance(data_list[0], torch.Tensor):
return torch.cat(data_list)
else:
endpoint = [d for d in data_list[0]]
for i in range(1, len(data_list)):
endpoint.extend(data_list[i])
return endpoint
def sequence_concat(a, b):
if isinstance(a, Sequence) and isinstance(b, Sequence):
return a + b
else:
return None
def dict_concat(dicts: List[Dict[str, list]]):
return {k: list_concat([d[k] for d in dicts]) for k in dicts[0].keys()}
def dict_fuse(obj_list, reference_obj):
if isinstance(reference_obj, torch.Tensor):
return torch.stack(obj_list)
return obj_list
def dict_select(dict1: Dict[str, list], key: str, value: str):
flag = [v == value for v in dict1[key]]
return {
k: dict_fuse([vv for vv, ff in zip(v, flag) if ff], v) for k, v in dict1.items()
}
def dict_split(dict1, key):
group_names = list(set(dict1[key]))
dict_groups = {k: dict_select(dict1, key, k) for k in group_names}
return dict_groups
def dict_sum(a, b):
if isinstance(a, dict):
assert isinstance(b, dict)
return {k: dict_sum(v, b[k]) for k, v in a.items()}
elif isinstance(a, list):
assert len(a) == len(b)
return [dict_sum(aa, bb) for aa, bb in zip(a, b)]
else:
return a + b
def zero_like(tensor_pack, prefix=""):
if isinstance(tensor_pack, Sequence):
return [zero_like(t) for t in tensor_pack]
elif isinstance(tensor_pack, Mapping):
return {prefix + k: zero_like(v) for k, v in tensor_pack.items()}
elif isinstance(tensor_pack, torch.Tensor):
return tensor_pack.new_zeros(tensor_pack.shape)
elif isinstance(tensor_pack, np.ndarray):
return np.zeros_like(tensor_pack)
else:
warnings.warn("Unexpected data type {}".format(type(tensor_pack)))
return 0
def pad_stack(tensors, shape, pad_value=255):
tensors = torch.stack(
[
F.pad(
tensor,
pad=[0, shape[1] - tensor.shape[1], 0, shape[0] - tensor.shape[0]],
value=pad_value,
)
for tensor in tensors
]
)
return tensors
def result2bbox(result):
num_class = len(result)
bbox = np.concatenate(result)
if bbox.shape[0] == 0:
label = np.zeros(0, dtype=np.uint8)
else:
label = np.concatenate(
[[i] * len(result[i]) for i in range(num_class) if len(result[i]) > 0]
).reshape((-1,))
return bbox, label
def result2mask(result):
num_class = len(result)
mask = [np.stack(result[i]) for i in range(num_class) if len(result[i]) > 0]
if len(mask) > 0:
mask = np.concatenate(mask)
else:
mask = np.zeros((0, 1, 1))
return BitmapMasks(mask, mask.shape[1], mask.shape[2]), None
def sequence_mul(obj, multiplier):
if isinstance(obj, Sequence):
return [o * multiplier for o in obj]
else:
return obj * multiplier
def is_match(word, word_list):
for keyword in word_list:
if keyword in word:
return True
return False
def weighted_loss(loss: dict, weight, ignore_keys=[], warmup=0):
_step_counter["weight"] += 1
lambda_weight = (
lambda x: x * (_step_counter["weight"] - 1) / warmup
if _step_counter["weight"] <= warmup
else x
)
if isinstance(weight, Mapping):
for k, v in weight.items():
for name, loss_item in loss.items():
if (k in name) and ("loss" in name):
loss[name] = sequence_mul(loss[name], lambda_weight(v))
elif isinstance(weight, Number):
for name, loss_item in loss.items():
if "loss" in name:
if not is_match(name, ignore_keys):
loss[name] = sequence_mul(loss[name], lambda_weight(weight))
else:
loss[name] = sequence_mul(loss[name], 0.0)
else:
raise NotImplementedError()
return loss
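dict_select and dict_split above operate on dictionaries of parallel lists; a minimal sketch (not part of the original file) with plain lists and hypothetical field names:
# Illustrative sketch: split a dict of parallel lists into per-group dicts.
_batch = {
    "img": ["a.png", "b.png", "c.png"],
    "tag": ["sup", "unsup", "sup"],
}
_groups = dict_split(_batch, "tag")
# _groups["sup"]   == {"img": ["a.png", "c.png"], "tag": ["sup", "sup"]}
# _groups["unsup"] == {"img": ["b.png"], "tag": ["unsup"]}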
| 28.214286
| 88
| 0.607595
| 621
| 4,345
| 4.132045
| 0.201288
| 0.042868
| 0.007794
| 0.010912
| 0.128995
| 0.093141
| 0.082619
| 0.082619
| 0.030398
| 0.030398
| 0
| 0.012698
| 0.275029
| 4,345
| 153
| 89
| 28.398693
| 0.801905
| 0
| 0
| 0.125
| 0
| 0
| 0.011277
| 0
| 0
| 0
| 0
| 0
| 0.016667
| 1
| 0.116667
| false
| 0
| 0.066667
| 0.008333
| 0.391667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc4a04571ae8ad033810ff66b391deb8c9d55bed
| 1,642
|
py
|
Python
|
dev/Tools/build/waf-1.7.13/waflib/extras/fc_xlf.py
|
jeikabu/lumberyard
|
07228c605ce16cbf5aaa209a94a3cb9d6c1a4115
|
[
"AML"
] | 1,738
|
2017-09-21T10:59:12.000Z
|
2022-03-31T21:05:46.000Z
|
dev/Tools/build/waf-1.7.13/waflib/extras/fc_xlf.py
|
jeikabu/lumberyard
|
07228c605ce16cbf5aaa209a94a3cb9d6c1a4115
|
[
"AML"
] | 427
|
2017-09-29T22:54:36.000Z
|
2022-02-15T19:26:50.000Z
|
dev/Tools/build/waf-1.7.13/waflib/extras/fc_xlf.py
|
jeikabu/lumberyard
|
07228c605ce16cbf5aaa209a94a3cb9d6c1a4115
|
[
"AML"
] | 671
|
2017-09-21T08:04:01.000Z
|
2022-03-29T14:30:07.000Z
|
#! /usr/bin/env python
# encoding: utf-8
# harald at klimachs.de
import re
from waflib import Utils,Errors
from waflib.Tools import fc,fc_config,fc_scan
from waflib.Configure import conf
from waflib.Tools.compiler_fc import fc_compiler
fc_compiler['aix'].insert(0, 'fc_xlf')
@conf
def find_xlf(conf):
"""Find the xlf program (will look in the environment variable 'FC')"""
fc = conf.find_program(['xlf2003_r', 'xlf2003', 'xlf95_r', 'xlf95', 'xlf90_r', 'xlf90', 'xlf_r', 'xlf'], var='FC')
fc = conf.cmd_to_list(fc)
conf.get_xlf_version(fc)
conf.env.FC_NAME='XLF'
@conf
def xlf_flags(conf):
v = conf.env
v['FCDEFINES_ST'] = '-WF,-D%s'
v['FCFLAGS_fcshlib'] = ['-qpic=small']
v['FCFLAGS_DEBUG'] = ['-qhalt=w']
v['LINKFLAGS_fcshlib'] = ['-Wl,-shared']
@conf
def xlf_modifier_platform(conf):
dest_os = conf.env['DEST_OS'] or Utils.unversioned_sys_platform()
xlf_modifier_func = getattr(conf, 'xlf_modifier_' + dest_os, None)
if xlf_modifier_func:
xlf_modifier_func()
@conf
def get_xlf_version(conf, fc):
"""Get the compiler version"""
cmd = fc + ['-qversion']
try:
out, err = conf.cmd_and_log(cmd, output=0)
except Errors.WafError:
conf.fatal('Could not find xlf %r' % cmd)
for v in (r"IBM XL Fortran.* V(?P<major>\d*)\.(?P<minor>\d*)",):
version_re = re.compile(v, re.I).search
match = version_re(out or err)
if match:
k = match.groupdict()
conf.env['FC_VERSION'] = (k['major'], k['minor'])
break
else:
conf.fatal('Could not determine the XLF version.')
def configure(conf):
conf.find_xlf()
conf.find_ar()
conf.fc_flags()
conf.fc_add_flags()
conf.xlf_flags()
conf.xlf_modifier_platform()
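The version regex in get_xlf_version can be exercised on its own; a minimal sketch (not part of the original file) with a made-up -qversion banner, since the exact banner text is an assumption:
# Illustrative only: apply the get_xlf_version regex to a hypothetical banner.
_sample = "IBM XL Fortran for AIX, V15.1.3"
_match = re.compile(r"IBM XL Fortran.* V(?P<major>\d*)\.(?P<minor>\d*)", re.I).search(_sample)
# _match.groupdict() -> {'major': '15', 'minor': '1'}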
| 25.261538
| 115
| 0.693057
| 269
| 1,642
| 4.037175
| 0.394052
| 0.060773
| 0.041436
| 0.027624
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013399
| 0.136419
| 1,642
| 64
| 116
| 25.65625
| 0.752468
| 0.091352
| 0
| 0.083333
| 0
| 0
| 0.210419
| 0.020974
| 0
| 0
| 0
| 0
| 0
| 1
| 0.104167
| false
| 0
| 0.104167
| 0
| 0.208333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc519cd073372b79ff5e315d6c117f1de77e8ef5
| 602
|
py
|
Python
|
examples/bathymetricGradient.py
|
usgs/water-datapreptools
|
49c852a0c189e142a351331ba6e0d1ef9e7a408b
|
[
"CC0-1.0"
] | 2
|
2021-06-22T18:18:47.000Z
|
2021-09-25T18:16:26.000Z
|
examples/bathymetricGradient.py
|
usgs/water-datapreptools
|
49c852a0c189e142a351331ba6e0d1ef9e7a408b
|
[
"CC0-1.0"
] | null | null | null |
examples/bathymetricGradient.py
|
usgs/water-datapreptools
|
49c852a0c189e142a351331ba6e0d1ef9e7a408b
|
[
"CC0-1.0"
] | null | null | null |
import sys
sys.path.append("..") # change environment to see tools
from make_hydrodem import bathymetricGradient
workspace = r"" # path to geodatabase to use as a workspace
snapGrid = r"" # path to snapping grid
hucPoly = r"" # path to local folder polygon
hydrographyArea = r"" # path to NHD area feature class
hydrographyFlowline = r"" # path to NHD flowline feature class
hydrographyWaterbody = r"" # path to NHD water body feature class
cellsize = '' # cell size
bathymetricGradient(workspace, snapGrid, hucPoly, hydrographyArea,
hydrographyFlowline, hydrographyWaterbody,cellsize)
| 43
| 67
| 0.757475
| 74
| 602
| 6.148649
| 0.527027
| 0.065934
| 0.092308
| 0.065934
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.167774
| 602
| 14
| 68
| 43
| 0.908184
| 0.393688
| 0
| 0
| 0
| 0
| 0.005814
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc52596785d1ffc33b3982ed9e7fa9443b9fefb7
| 9,799
|
py
|
Python
|
out/flowContext.py
|
hxb1997/Menge
|
7a09a6236d8eef23e3d15d08873d5918d064761b
|
[
"Apache-2.0"
] | null | null | null |
out/flowContext.py
|
hxb1997/Menge
|
7a09a6236d8eef23e3d15d08873d5918d064761b
|
[
"Apache-2.0"
] | null | null | null |
out/flowContext.py
|
hxb1997/Menge
|
7a09a6236d8eef23e3d15d08873d5918d064761b
|
[
"Apache-2.0"
] | 1
|
2021-07-01T09:40:01.000Z
|
2021-07-01T09:40:01.000Z
|
# This is the OpenGL context for drawing flow calculation lines
from Context import *
from primitives import Vector2, Segment
from OpenGL.GL import *
from copy import deepcopy
class GLFlowSegment( Segment ):
'''The OpenGL representation of a flow line. Basically a segment
    with a direction indicator. The direction indicator shows which
way flow is expected to cross the line. The flow direction is to
the RIGHT of the segment. The forward direction is the direction
from p1 to p2.'''
def __init__( self, p1, p2 ):
'''Constructor.
@param p1 An instance of Vector2. The start point of the segment.
@param p2 An instance of Vector2. The end point of the segment.
'''
Segment.__init__( self, p1, p2 )
def __str__( self ):
return "GLFlowSegment (%s, %s)" % ( self.p1, self.p2 )
def __repr__( self ):
return str( self )
def drawGL( self, color=(0.1, 1.0, 0.1) ):
'''Draw the flow segment into a GL context.
        @param color A 3-tuple of floats. The color of the line.
All values should lie in the range [0, 1], to be
interpreted as r, g, b color values.
'''
glPushAttrib( GL_COLOR_BUFFER_BIT )
glBegin( GL_LINES )
glColor3fv( color )
glVertex2f( self.p1.x, self.p1.y )
glVertex2f( self.p2.x, self.p2.y )
mp = self.midPoint()
l = self.magnitude()
n = self.normal() * (0.25 * l )
end = mp + n
glVertex2f( mp.x, mp.y )
glVertex2f( end.x, end.y )
glEnd()
glPopAttrib()
class FlowLineContext( BaseContext ):
'''Context for drawing, creating and editing lines'''
MIN_LINE_LENGTH = 2 # the minimum drag required to draw a line
# edit state - used for knowing what to do with the active line and cancellation
NO_EDIT = 0
EDIT = 1
ADD = 2
def __init__( self, cancelCB=None, editCB=None ):
'''Constructor.
@param cancelCB A callable. An optional callback object
for when flow line drawing is canceled.
@param editCB A callable. An optional callback object
for when a flow line values are edited.
'''
BaseContext.__init__( self )
self.lines = []
self.names = []
self.activeID = -1 # the line currently affected by modifications
self.editState = self.NO_EDIT
self.cancelCB = cancelCB
self.editCB = editCB
self.activeLine = None
self.canDraw = False
self.dragging = False
        self.downPos = None    # mouse-down position, set on left press
def copy( self, context ):
'''Copy the state of the given FlowLineContext into this'''
assert( isinstance( context, FlowLineContext ) )
self.clear()
self.names = [ a for a in context.names ]
self.lines = deepcopy( context.lines )
def clear( self ):
'''Clears out all of the lines'''
self.lines = []
self.names = []
self.activeID = -1
self.editState = self.NO_EDIT
self.activeLine = None
self.canDraw = False
self.dragging = False
        self.downPos = None
def lineCount( self ):
return len( self.lines )
def getName( self, id ):
'''Returns the name associated with the line index, id.
@param id An integer. The index into the stored set of lines.
@return A string. The stored name.
'''
return self.names[ id ]
def getLine( self, id ):
        '''Returns the line associated with the line index, id.
@param id An integer. The index into the stored set of lines.
@return An instance of a FlowLine.
'''
return self.lines[ id ]
def addLine( self ):
'''Causes the context to go into new line mode. Returning the new name.'''
self.canDraw = True
self.editState = self.ADD
self.activeID = -1
self.names.append( 'Line %d' % len( self.names ) )
self.lines.append( GLFlowSegment( Vector2(0, 0), Vector2(0, 0) ) )
self.activeLine = self.lines[-1]
return self.names[-1]
def editLine( self, idx ):
'''Edits the indicated line'''
if ( self.editState == self.ADD): return
if ( idx < 0 ):
self.editState = self.NO_EDIT
self.canDraw = False
self.activeID = -1
else:
self.editState = self.EDIT
self.canDraw = True
self.activeID = idx
def setLineName( self, idx, name ):
'''Sets the name for the line with the given index'''
self.names[ idx ] = name
def deleteLine( self, idx ):
'''Removes a line from the set'''
assert( idx >= 0 and idx < len( self.lines ) )
self.lines.pop( idx )
self.names.pop( idx )
self.activeID = -1
def flipLine( self, idx ):
'''Flips the direction of the line in the set'''
assert( idx >= 0 and idx < len( self.lines ) )
self.lines[ idx ].flip()
def setActive( self, idx ):
'''Sets the active line'''
self.activeID = idx
def stopEdit( self ):
'''Stops the ability to edit'''
self.editState = self.NO_EDIT
self.canDraw = False
def getLineCount( self ):
"""Returns the number of defined lines"""
return len( self.lines )
def setMultiLines( self, names, lines ):
'''Sets the lines in the context with the given names and lines.
It is asserted that len( names ) == len( lines ).
@param names A list of strings. One name per line.
@param lines A list of Segment instaces. One line per name.
'''
self.lines = map( lambda x: GLFlowSegment( x.p1, x.p2 ), lines )
self.names = names
self.activeID = -1
self.editState = self.NO_EDIT
def handleMouse ( self, evt, view ):
"""Detects click, drag, release and creates a line"""
result = ContextResult()
try:
event = self.canonicalEvent( evt )
except ValueError as e:
return result
if ( not self.canDraw ):
return result
if ( event.noModifiers() ):
btn = event.button
eX = event.x
eY = event.y
if ( event.type == MouseEvent.DOWN ): #QtCore.QEvent.MouseButtonPress ):
if ( btn == MouseEvent.LEFT ):
self.downPos = Vector2( eX, eY )
x, y = view.screenToWorld( ( eX, eY ) )
p1 = Vector2( x, y )
self.activeLine = GLFlowSegment( p1, p1 )
result.set( True, True, False )
self.dragging = True
self.notifyEdit( self.activeLine )
elif ( btn == MouseEvent.RIGHT and self.dragging ):
# cancel the edit
if ( self.editState == self.ADD ):
self.editState = self.NO_EDIT
self.lines.pop(-1)
self.names.pop(-1)
if ( not self.cancelCB is None ):
self.cancelCB()
self.notifyEdit( None )
canceled = self.activeLine is not None
self.activeLine = None
self.dragging = False
result.set( canceled, canceled, False )
elif ( event.type == MouseEvent.UP ):
if ( btn == MouseEvent.LEFT and self.dragging ):
endPos = Vector2( eX, eY )
if ( (endPos - self.downPos).magnitude() >= self.MIN_LINE_LENGTH ):
if ( self.editState == self.ADD ):
self.activeID = len( self.lines ) - 1
self.lines[self.activeID] = self.activeLine
self.editState = self.EDIT
self.notifyEdit( self.activeLine )
elif ( self.editState == self.EDIT ):
assert( self.activeID > -1 )
self.lines[ self.activeID ] = self.activeLine
self.notifyEdit( self.activeLine )
self.activeLine = None
self.activeLine = None
self.dragging = False
result.set( True, True, False )
elif ( event.type == MouseEvent.MOVE ):
if ( self.dragging ):
x, y = view.screenToWorld( ( eX, eY ) )
p2 = Vector2( x, y )
self.activeLine.p2 = p2
result.set( True, True, False )
self.notifyEdit( self.activeLine )
return result
def notifyEdit( self, line ):
'''Notifies the callback of a line that has changed'''
if ( not self.editCB is None ):
self.editCB( line )
def drawGL( self ):
'''Basic lines are drawn in default (green), the active line is drawn in yellow,
and when it is being edited, the original disappears and the new line is drawn in
cyan.'''
if ( self.activeLine ):
self.activeLine.drawGL( ( 0.1, 1.0, 1.0 ) )
elif ( self.activeID > -1 and self.editState != self.ADD ):
self.lines[ self.activeID ].drawGL( ( 1.0, 1.0, 0.1 ) )
for i, line in enumerate( self.lines ):
if ( i == self.activeID ): continue
line.drawGL()
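# A minimal usage sketch for the context above (hypothetical host-GUI wiring;
# only FlowLineContext's own methods and the cancelCB/editCB hooks come from
# this file, the view/event objects are placeholders supplied by the host app):
#
#   def onEdit( line ):
#       if line is not None:
#           print( 'flow line now spans %s -> %s' % ( line.p1, line.p2 ) )
#
#   def onCancel():
#       print( 'flow line drawing canceled' )
#
#   context = FlowLineContext( cancelCB=onCancel, editCB=onEdit )
#   name = context.addLine()                 # enter ADD mode, returns e.g. 'Line 0'
#   # the host view then forwards each mouse event:
#   #   result = context.handleMouse( evt, view )
#   # and redraws with context.drawGL() whenever result asks for it.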
| 37.98062
| 89
| 0.525462
| 1,122
| 9,799
| 4.554367
| 0.221925
| 0.033464
| 0.046575
| 0.022309
| 0.294325
| 0.226223
| 0.179256
| 0.170646
| 0.12407
| 0.108023
| 0
| 0.013866
| 0.381774
| 9,799
| 257
| 90
| 38.128405
| 0.829647
| 0.253087
| 0
| 0.333333
| 0
| 0
| 0.004146
| 0
| 0
| 0
| 0
| 0
| 0.023392
| 1
| 0.128655
| false
| 0
| 0.023392
| 0.017544
| 0.245614
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc5346e19911a49d8686625f457f771311d07483
| 324
|
py
|
Python
|
Codes/gracekoo/test.py
|
ghoslation/algorithm
|
5708bf89e59a80cd0f50f2e6138f069b4f9bc96e
|
[
"Apache-2.0"
] | 256
|
2017-10-25T13:02:15.000Z
|
2022-02-25T13:47:59.000Z
|
Codes/gracekoo/test.py
|
IYoreI/Algorithm
|
0addf0cda0ec9e3f46c480eeda3a8ecb64c94121
|
[
"Apache-2.0"
] | 56
|
2017-10-27T01:34:20.000Z
|
2022-03-01T00:20:55.000Z
|
Codes/gracekoo/test.py
|
IYoreI/Algorithm
|
0addf0cda0ec9e3f46c480eeda3a8ecb64c94121
|
[
"Apache-2.0"
] | 83
|
2017-10-25T12:51:53.000Z
|
2022-02-15T08:27:03.000Z
|
# -*- coding: utf-8 -*-
# @Time: 2020/11/8 23:47
# @Author: GraceKoo
# @File: test.py
# @Desc:
from threading import Thread
import time
def print_numbers():
time.sleep(0.2)
print("子线程结束")
if __name__ == "__main__":
t1 = Thread(target=print_numbers)
t1.setDaemon(True)
t1.start()
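# Note: t1 is a daemon thread, so the interpreter may exit as soon as the main
# thread falls off the end of the script -- the child thread is still inside
# time.sleep(0.2) and is killed before its print ("child thread finished") runs.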
# print("主线程结束")
| 16.2
| 37
| 0.623457
| 45
| 324
| 4.266667
| 0.755556
| 0.125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065891
| 0.203704
| 324
| 19
| 38
| 17.052632
| 0.678295
| 0.305556
| 0
| 0
| 0
| 0
| 0.059633
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.222222
| 0
| 0.333333
| 0.333333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc545ada34aef15e72804247df9cc885de6ee820
| 2,657
|
py
|
Python
|
aiorpcgrid/client.py
|
urands/aiorpcgrid
|
7bc9ee9a80fa843998b2604d7c0803b323628480
|
[
"Apache-2.0"
] | null | null | null |
aiorpcgrid/client.py
|
urands/aiorpcgrid
|
7bc9ee9a80fa843998b2604d7c0803b323628480
|
[
"Apache-2.0"
] | null | null | null |
aiorpcgrid/client.py
|
urands/aiorpcgrid
|
7bc9ee9a80fa843998b2604d7c0803b323628480
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
# from aiorpcgrid.client import Client
from aiorpcgrid.task import AsyncTask, State
class AsyncClient:
_provider = None
_method = None
_requests: dict = {}
_running = True
_request_queue: asyncio.Queue = asyncio.Queue()
_loop = None
def __init__(self, provider, loop=None):
self._provider = provider
if loop is None:
loop = asyncio.get_event_loop()
self._loop = loop
async def open(self):
await self._provider.open()
asyncio.ensure_future(self.request_loop(), loop=self._loop)
asyncio.ensure_future(self.run(), loop=self._loop)
return self
async def close(self):
self._running = False
await self._provider.close()
await self._request_queue.put(None)
async def request_loop(self):
while self._running:
task = await self._request_queue.get()
if task is not None:
await self._provider.call_method(task)
task.status = State.RUNNING
if self._request_queue.empty():
self._request_queue.task_done()
async def run(self):
while self._running:
responses = await self._provider.recv()
if responses is not None:
for response in responses:
if response.id in self._requests:
task = self._requests[response.id]
task.result = response.result
task.error = response.error
if task.error is None:
self._requests[
response.id
].status = State.COMPLETED
else:
self._requests[response.id].status = State.FAILED
task.event.set()
del self._requests[response.id]
if task._callback is not None:
asyncio.ensure_future(
task.callback(task), loop=self._loop
)
def __call__(self, *args, **kwargs):
if not self._provider.is_connected():
raise ConnectionError(f'Connection lost. {self._provider}')
task = AsyncTask().create(self._method, *args, **kwargs)
if 'parallel' in kwargs:
task._parallel = kwargs['parallel']
self._method = None
task.status = State.PENDING
self._requests[task.id] = task
self._request_queue.put_nowait(self._requests[task.id])
return self._requests[task.id]
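# A minimal usage sketch, assuming a provider object that implements the open(),
# close(), call_method(), recv() and is_connected() methods used above, and that
# AsyncTask exposes an asyncio-style `event` which run() sets on completion (as
# the code above relies on). MyProvider and the method-selection step are
# placeholders, not part of this module.
#
#   async def main():
#       client = await AsyncClient(MyProvider()).open()
#       client._method = 'echo'          # normally selected elsewhere in the client
#       task = client('hello')           # __call__ queues an AsyncTask
#       await task.event.wait()          # released by run() when a response arrives
#       print(task.result)
#       await client.close()
#
#   asyncio.get_event_loop().run_until_complete(main())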
| 34.960526
| 77
| 0.546481
| 277
| 2,657
| 5.025271
| 0.249097
| 0.068966
| 0.057471
| 0.063218
| 0.047414
| 0.047414
| 0
| 0
| 0
| 0
| 0
| 0
| 0.368837
| 2,657
| 75
| 78
| 35.426667
| 0.830054
| 0.013549
| 0
| 0.031746
| 0
| 0
| 0.018709
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.031746
| false
| 0
| 0.031746
| 0
| 0.206349
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc58e1c32b322dbf5e028fbcbb5c81a4dc6ff07a
| 1,348
|
py
|
Python
|
sopa/src/models/utils.py
|
SamplingAndEnsemblingSolvers/SamplingAndEnsemblingSolvers
|
5ad3cae76c3cc9cec4d347807012e61121ea61b9
|
[
"MIT"
] | 25
|
2021-03-16T13:40:45.000Z
|
2021-08-12T04:54:39.000Z
|
sopa/src/models/utils.py
|
MetaSolver/icml2021
|
619774abe4a834ae371434af8b23379e9524e7da
|
[
"BSD-3-Clause"
] | null | null | null |
sopa/src/models/utils.py
|
MetaSolver/icml2021
|
619774abe4a834ae371434af8b23379e9524e7da
|
[
"BSD-3-Clause"
] | 1
|
2021-03-31T02:58:03.000Z
|
2021-03-31T02:58:03.000Z
|
import numpy as np
import torch
import random
from .odenet_mnist.layers import MetaNODE
def fix_seeds(seed=502):
np.random.seed(seed)
random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.set_printoptions(precision=10)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
class RunningAverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, momentum=0.99):
self.momentum = momentum
self.reset()
def reset(self):
self.val = None
self.avg = 0
def update(self, val):
if self.val is None:
self.avg = val
else:
self.avg = self.avg * self.momentum + val * (1 - self.momentum)
self.val = val
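# A quick sketch of the exponential moving average kept above: with momentum m,
# update() computes avg = m * avg + (1 - m) * val once a first value has been seen.
#
#   meter = RunningAverageMeter(momentum=0.9)
#   meter.update(1.0)   # avg == 1.0   (the first value is taken as-is)
#   meter.update(2.0)   # avg == 0.9 * 1.0 + 0.1 * 2.0 == 1.1
#   meter.update(3.0)   # avg == 0.9 * 1.1 + 0.1 * 3.0 == 1.29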
def load_model(path):
(_, state_dict), (_, model_args), (_, solver_id) = torch.load(path, map_location='cpu').items()
is_odenet = model_args.network == 'odenet'
if not hasattr(model_args, 'in_channels'):
model_args.in_channels = 1
model = MetaNODE(downsampling_method=model_args.downsampling_method,
is_odenet=is_odenet,
in_channels=model_args.in_channels)
model.load_state_dict(state_dict)
return model, model_args
| 27.510204
| 99
| 0.635015
| 171
| 1,348
| 4.795322
| 0.426901
| 0.076829
| 0.040244
| 0.069512
| 0.087805
| 0.070732
| 0
| 0
| 0
| 0
| 0
| 0.011111
| 0.265579
| 1,348
| 49
| 100
| 27.510204
| 0.817172
| 0.03635
| 0
| 0
| 0
| 0
| 0.015456
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.114286
| 0
| 0.314286
| 0.028571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc5b4e12e35ec5a1123e4672989f9b50567b330a
| 3,141
|
py
|
Python
|
jv/test_jv.py
|
chenwang/QuantEcon.lectures.code
|
8832a74acd219a71cb0a99dc63c5e976598ac999
|
[
"BSD-3-Clause"
] | 56
|
2017-05-09T10:45:23.000Z
|
2022-01-20T20:33:27.000Z
|
jv/test_jv.py
|
chenwang/QuantEcon.lectures.code
|
8832a74acd219a71cb0a99dc63c5e976598ac999
|
[
"BSD-3-Clause"
] | 7
|
2017-06-30T01:52:46.000Z
|
2019-05-01T20:09:47.000Z
|
jv/test_jv.py
|
QuantEcon/QuantEcon.lectures.code
|
d61ac7bc54529dd5c77470c17539eb2418b047c9
|
[
"BSD-3-Clause"
] | 117
|
2017-04-25T16:09:17.000Z
|
2022-03-23T02:30:29.000Z
|
"""
@author : Spencer Lyon
"""
from __future__ import division
import sys
import unittest
from nose.plugins.skip import SkipTest
from jv import JvWorker
from quantecon import compute_fixed_point
from quantecon.tests import get_h5_data_file, write_array, max_abs_diff
# specify params -- use defaults
A = 1.4
alpha = 0.6
beta = 0.96
grid_size = 50
if sys.version_info[0] == 2:
v_nm = "V"
else: # python 3
raise SkipTest("Python 3 tests aren't ready.")
v_nm = "V_py3"
def _new_solution(jv, f, grp):
"gets new solution and updates data file"
V = _solve_via_vfi(jv)
write_array(f, grp, V, v_nm)
return V
def _solve_via_vfi(jv):
"compute policy rules via value function iteration"
v_init = jv.x_grid * 0.6
V = compute_fixed_point(jv.bellman_operator, v_init,
max_iter=3000,
error_tol=1e-5)
return V
def _get_vf_guess(jv, force_new=False):
with get_h5_data_file() as f:
# See if the jv group already exists
group_existed = True
try:
jv_group = f.getNode("/jv")
except:
# doesn't exist
group_existed = False
jv_group = f.create_group("/", "jv", "data for jv.py tests")
if force_new or not group_existed:
# group doesn't exist, or forced to create new data.
# This function updates f in place and returns v_vfi, c_vfi, c_pfi
V = _new_solution(jv, f, jv_group)
return V
# if we made it here, the group exists and we should try to read
# existing solutions
try:
# Try reading vfi
if sys.version_info[0] == 2:
V = jv_group.V[:]
else: # python 3
V = jv_group.V_py3[:]
except:
# doesn't exist. Let's create it
V = _new_solution(jv, f, jv_group)
return V
class TestJvWorkder(unittest.TestCase):
@classmethod
def setUpClass(cls):
jv = JvWorker(A=A, alpha=alpha, beta=beta, grid_size=grid_size)
cls.jv = jv
# compute solution
v_init = _get_vf_guess(jv)
cls.V = compute_fixed_point(jv.bellman_operator, v_init)
cls.s_pol, cls.phi_pol = jv.bellman_operator(cls.V * 0.999,
return_policies=True)
def test_low_x_prefer_s(self):
"jv: s preferred to phi with low x?"
# low x is an early index
self.assertGreaterEqual(self.s_pol[0], self.phi_pol[0])
def test_high_x_prefer_phi(self):
"jv: phi preferred to s with high x?"
# low x is an early index
self.assertGreaterEqual(self.phi_pol[-1], self.s_pol[-1])
def test_policy_sizes(self):
"jv: policies correct size"
n = self.jv.x_grid.size
self.assertEqual(self.s_pol.size, n)
self.assertEqual(self.phi_pol.size, n)
def test_bellman_sol_fixed_point(self):
"jv: solution to bellman is fixed point"
new_V = self.jv.bellman_operator(self.V)
self.assertLessEqual(max_abs_diff(new_V, self.V), 1e-4)
| 28.044643
| 78
| 0.606176
| 469
| 3,141
| 3.842217
| 0.328358
| 0.027192
| 0.037736
| 0.023307
| 0.147614
| 0.147614
| 0.147614
| 0.126526
| 0.126526
| 0.049945
| 0
| 0.017383
| 0.304043
| 3,141
| 111
| 79
| 28.297297
| 0.806953
| 0.209169
| 0
| 0.197183
| 0
| 0
| 0.104205
| 0
| 0
| 0
| 0
| 0
| 0.070423
| 1
| 0.112676
| false
| 0
| 0.098592
| 0
| 0.28169
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc5bfb461089e67c5b2c46ef4db3208ad1a8b352
| 9,820
|
py
|
Python
|
excentury/command/config.py
|
LaudateCorpus1/excentury
|
8d0f20bb3e543382170e042fac51a56377c4024b
|
[
"BSD-2-Clause"
] | null | null | null |
excentury/command/config.py
|
LaudateCorpus1/excentury
|
8d0f20bb3e543382170e042fac51a56377c4024b
|
[
"BSD-2-Clause"
] | null | null | null |
excentury/command/config.py
|
LaudateCorpus1/excentury
|
8d0f20bb3e543382170e042fac51a56377c4024b
|
[
"BSD-2-Clause"
] | 1
|
2021-12-31T13:24:16.000Z
|
2021-12-31T13:24:16.000Z
|
"""Config
This module is in charge of providing all the necessary settings to
the rest of the modules in excentury.
"""
import os
import re
import sys
import textwrap
import argparse
from collections import OrderedDict
from excentury.command import error, trace, import_mod
DESC = """Edit a configuration file for excentury.
Some actions performed by excentury can be overwritten by using
configuration files.
To see the values that the configuration file can overwrite use the
`defaults` command. This will print a list of the keys and values
excentury uses for the given command.
"""
RE = re.compile(r'\${(?P<key>.*?)}')
RE_IF = re.compile(
r'(?P<iftrue>.*?) IF\[\[(?P<cond>.*?)\]\]'
)
RE_IFELSE = re.compile(
r'(?P<iftrue>.*?) IF\[\[(?P<cond>.*?)\]\]ELSE (?P<iffalse>.*)'
)
def disp(msg):
"""Wrapper around sys.stdout.write which is meant to behave as
the print function but it does not add the newline character. """
sys.stdout.write(msg)
def _replacer(*key_val):
"""Helper function for replace.
Source: <http://stackoverflow.com/a/15221068/788553>
"""
replace_dict = dict(key_val)
replacement_function = lambda match: replace_dict[match.group(0)]
pattern = re.compile("|".join([re.escape(k) for k, _ in key_val]), re.M)
return lambda string: pattern.sub(replacement_function, string)
def replace(string, *key_val):
"""Replacement of strings done in one pass. Example:
>>> replace("a < b && b < c", ('<', '<'), ('&', '&'))
'a < b && b < c'
Source: <http://stackoverflow.com/a/15221068/788553>
"""
return _replacer(*key_val)(string)
class ConfigDispAction(argparse.Action): # pylint: disable=R0903
"""Derived argparse Action class to use when displaying the
configuration file and location."""
def __call__(self, parser, namespace, values, option_string=None):
try:
read_config(namespace)
except IOError:
disp('xcpp.config not found in %r\n' % namespace.cfg)
else:
disp('path to xcpp.config: "%s"\n' % namespace.cfg)
with open('%s/xcpp.config' % namespace.cfg, 'r') as _fp:
disp(_fp.read())
exit(0)
def add_parser(subp, raw):
"Add a parser to the main subparser. "
tmpp = subp.add_parser('config', help='configure excentury',
formatter_class=raw,
description=textwrap.dedent(DESC))
tmpp.add_argument('var', type=str, nargs='?', default=None,
help='Must be in the form of sec.key')
tmpp.add_argument('-v', action='store_true',
help='print config file location')
tmpp.add_argument('--print', action=ConfigDispAction,
nargs=0,
help='print config file and exit')
def _get_replacements(tokens, data, sec):
"""Helper function for _read_config. """
replacements = list()
for token in tokens:
if ':' in token:
tsec, tkey = token.split(':')
tval = ''
if tsec in data:
if tkey in data[tsec]:
tval = data[tsec][tkey]
else:
if token in data[sec]:
tval = data[sec][token]
else:
tval = ''
replacements.append(
('${%s}' % token, tval)
)
return replacements
# pylint: disable=invalid-name
# ARG and CFG are names that may be used in the configuration file.
# ARG gives us access to the command line arguments and CFG gives us
# access to the current configuration. Note that using CFG[key][sec]
# is equivalent to ${key:sec}. These names go against the convention
# so that they may be easy to spot in a configuration file.
def _eval_condition(cond, ARG, CFG, line_num, fname):
"""Evaluates a string using the eval function. It prints a
warning if there are any errors. Returns the result of the
evaluation and an error number: 0 if everything is fine, 1 if
there was an error. """
ARG.FILEPATH = '%s/%s/%s' % (ARG.cfg, CFG['xcpp']['path'], ARG.inputfile)
try:
# pylint: disable=eval-used
# To be able to evaluate a condition without creating a whole
# new parser we can use the eval function. We could have use
# a python file as a configuration but then there would be
# no simple structure to the files.
cond = eval(cond)
enum = 0
# pylint: disable=broad-except
# Anything can go wrong during the execution of the `eval`
# function. For this reason we must try to catch anything that
# may come our way so that we may give out a warning message
# and ignore it.
except Exception as exception:
cond = None
enum = 1
trace(
'WARNING: error in line %d of %r: %s\n' % (
line_num, fname, exception.message
)
)
return cond, enum
def _read_config(fname, arg):
"""Simple parser to read configuration files. """
data = OrderedDict()
sec = None
line_num = 0
with open(fname, 'r') as fhandle:
for line in fhandle:
line_num += 1
if line[0] == '[':
sec = line[1:-2]
data[sec] = OrderedDict()
elif '=' in line:
tmp = line.split('=', 1)
key = tmp[0].strip()
val = tmp[1].strip()
val = os.path.expandvars(val)
replacements = _get_replacements(
RE.findall(val), data, sec
)
# pylint: disable=star-args
if replacements:
val = replace(val, *replacements)
match = RE_IFELSE.match(val)
if match:
cond, enum = _eval_condition(
match.group('cond'), arg, data, line_num, fname
)
if enum == 1:
continue
groups = match.groups()
val = groups[0] if cond else groups[2]
else:
match = RE_IF.match(val)
if match:
cond, enum = _eval_condition(
match.group('cond'), arg, data, line_num, fname
)
if enum == 1:
continue
if cond:
val = match.group('iftrue')
else:
continue
data[sec][key] = val
return data
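# For illustration only -- a hypothetical xcpp.config in the format parsed by
# _read_config above (bracketed section headers, key = value pairs, ${...}
# substitution and the IF[[...]]ELSE construct matched by RE, RE_IF and
# RE_IFELSE). The section and key names below are made up, except for the
# xcpp section with root/path, which get_cfg reads:
#
#   [xcpp]
#   root = .
#   path = src
#
#   [doc]
#   build = ${xcpp:root}/build
#   out = ${build}/verbose IF[[ ARG.v ]]ELSE ${build}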
def read_config(arg):
"""Read the configuration file xcpp.config"""
path = arg.cfg
if path == '.' and not os.path.exists('xcpp.config'):
if 'XCPP_CONFIG_PATH' in os.environ:
tmp_path = os.environ['XCPP_CONFIG_PATH']
if os.path.exists('%s/xcpp.config' % tmp_path):
trace("Configured with: '%s/xcpp.config'\n" % tmp_path)
path = tmp_path
elif not os.path.exists('%s/xcpp.config' % path):
error("ERROR: %s/xcpp.config does not exist\n" % path)
arg.cfg = path
try:
config = _read_config('%s/xcpp.config' % path, arg)
except IOError:
config = OrderedDict()
return config
def run(arg):
"""Run command. """
config = read_config(arg)
if arg.v:
disp('path to xcpp.config: "%s"\n' % arg.cfg)
if arg.var is None:
for sec in config:
disp('[%s]\n' % sec)
for key in config[sec]:
disp(' %s = %s\n' % (key, config[sec][key]))
disp('\n')
return
try:
command, var = arg.var.split('.', 1)
except ValueError:
error("ERROR: '%s' is not of the form sec.key\n" % arg.var)
try:
disp(config[command][var]+'\n')
except KeyError:
pass
return
def _update_single(cfg, name, defaults=None):
"Helper function for get_cfg."
if defaults:
for var, val in defaults.iteritems():
cfg[name][var] = os.path.expandvars(str(val))
else:
mod = import_mod('excentury.command.%s' % name)
if hasattr(mod, "DEFAULTS"):
for var, val in mod.DEFAULTS.iteritems():
cfg[name][var] = os.path.expandvars(val)
def _update_from_file(cfg, name, cfg_file):
"Helper function for get_cfg."
if name in cfg_file:
for var, val in cfg_file[name].iteritems():
cfg[name][var] = os.path.expandvars(val)
def _update_from_arg(cfg, argdict, key):
"Helper function for get_cfg."
for var in cfg[key]:
if var in argdict and argdict[var] is not None:
cfg[key][var] = argdict[var]
def get_cfg(arg, names, defaults=None):
"""Obtain the settings for a command. """
cfg = {
'xcpp': {
'root': '.',
'path': '.'
}
}
cfg_file = read_config(arg)
if 'xcpp' in cfg_file:
for var, val in cfg_file['xcpp'].iteritems():
cfg['xcpp'][var] = os.path.expandvars(val)
cfg['xcpp']['root'] = arg.cfg
if isinstance(names, list):
for name in names:
cfg[name] = dict()
_update_single(cfg, name)
_update_from_file(cfg, name, cfg_file)
else:
if names != 'xcpp':
cfg[names] = dict()
_update_single(cfg, names, defaults)
_update_from_file(cfg, names, cfg_file)
argdict = vars(arg)
if arg.parser_name in cfg:
_update_from_arg(cfg, argdict, arg.parser_name)
elif arg.parser_name == 'to' and arg.lang in cfg:
_update_from_arg(cfg, argdict, arg.lang)
_update_from_arg(cfg, argdict, 'xcpp')
return cfg
| 33.175676
| 77
| 0.559063
| 1,253
| 9,820
| 4.295291
| 0.227454
| 0.024155
| 0.012263
| 0.014121
| 0.168711
| 0.139911
| 0.122074
| 0.090115
| 0.060201
| 0.050167
| 0
| 0.007828
| 0.323523
| 9,820
| 295
| 78
| 33.288136
| 0.802348
| 0.200102
| 0
| 0.159624
| 0
| 0
| 0.144457
| 0.006364
| 0
| 0
| 0
| 0
| 0
| 1
| 0.065728
| false
| 0.004695
| 0.037559
| 0
| 0.150235
| 0.018779
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc5d1f91e8b522de235f963587514841692890ab
| 4,696
|
py
|
Python
|
tests/test_urls.py
|
pkjmesra/nseta
|
28cd8cede465efe9f506a38c5933602c463e5185
|
[
"MIT"
] | 8
|
2020-10-12T02:59:03.000Z
|
2022-03-20T15:06:50.000Z
|
tests/test_urls.py
|
pkjmesra/nseta
|
28cd8cede465efe9f506a38c5933602c463e5185
|
[
"MIT"
] | 3
|
2020-10-13T16:30:09.000Z
|
2021-01-07T23:57:05.000Z
|
tests/test_urls.py
|
pkjmesra/nseta
|
28cd8cede465efe9f506a38c5933602c463e5185
|
[
"MIT"
] | 5
|
2020-10-12T14:57:41.000Z
|
2021-12-30T11:52:34.000Z
|
# -*- coding: utf-8 -*-
'''
Created on Thu Nov 19 20:52:33 2015
@author: SW274998
'''
from nseta.common.commons import *
import datetime
import unittest
import time
from bs4 import BeautifulSoup
from tests import htmls
import json
import requests
import six
from nseta.common.urls import *
import nseta.common.urls as urls
from six.moves.urllib.parse import urlparse
from baseUnitTest import baseUnitTest
class TestUrls(baseUnitTest):
def setUp(self, redirect_logs=True):
super().setUp()
proxy_on = False
if proxy_on:
urls.session.proxies.update({'http': 'proxy1.wipro.com:8080'})
def runTest(self):
for key in TestUrls.__dict__.keys():
if key.find('test') == 0:
TestUrls.__dict__[key](self)
def test_get_symbol_count(self):
count = get_symbol_count(symbol='SBIN')
self.assertEqual(count, '1')
force_count = get_symbol_count(symbol='SBIN', force_refresh=True)
self.assertEqual(force_count, '1')
def test_equity_history_url(self):
sym_count = get_symbol_count(symbol='SBIN')
txt = 'Data for SBIN - EQ'
resp = equity_history_url(symbol='SBIN',
symbolCount=sym_count,
series='EQ',
fromDate='01-01-2000',
toDate='10-01-2000',
dateRange='')
self.assertGreaterEqual(resp.text.find(txt), 0, resp.text)
def test_nse_intraday_url(self):
txt = 'date|g1_o|g1_h|g1_l|g1_c|g2|g2_CUMVOL' #'<columns><column>date</column><column>pltp</column><column>nltp</column><column>previousclose</column><column>allltp</column>'
resp = nse_intraday_url(CDSymbol='SBIN', Periodicity='1')
self.assertIn(txt, resp.text)
def test_price_list_url(self):
resp = price_list_url('2019', 'DEC', '31DEC2019')
csv = unzip_str(resp.content)
self.assertGreaterEqual(csv.find('SBIN'), 0)
def tests_daily_volatility_url(self):
resp = daily_volatility_url('19112015')
self.assertGreaterEqual(resp.text.find('SBIN'), 0)
def test_pr_price_list_zipped_url(self):
resp = pr_price_list_zipped_url('191115')
csv = unzip_str(resp.content)
def test_index_history_url(self):
resp = index_history_url(indexType='NIFTY 50',
fromDate='01-01-2015',
toDate='10-01-2015')
self.assertGreaterEqual(resp.text.find('High'), 0)
self.assertGreaterEqual(resp.text.find('Low'), 0)
def test_index_daily_snapshot_url(self):
resp = index_daily_snapshot_url('06012020')
csv = str(resp.content)
self.assertGreaterEqual(csv.find('Nifty 50'), 0)
self.assertGreaterEqual(csv.find('Nifty IT'), 0)
self.assertGreaterEqual(csv.find('Nifty Bank'), 0)
self.assertGreaterEqual(csv.find('Nifty Next 50'), 0)
def test_index_pe_history_url(self):
resp = index_pe_history_url(fromDate='01-01-2015',
toDate='10-01-2015',
indexName='NIFTY 50')
self.assertGreaterEqual(resp.text.find('<th>P/E'), 0)
self.assertGreaterEqual(resp.text.find('<th>P/B'), 0)
def test_index_vix_history_url(self):
resp = index_vix_history_url(fromDate='01-Jan-2015',
toDate='10-Jan-2015',
)
self.assertGreaterEqual(resp.text.find('VIX'), 0)
self.assertGreaterEqual(resp.text.find('Change'), 0)
def test_derivative_derivative_expiry_dates_url(self):
resp = derivative_expiry_dates_url()
self.assertGreaterEqual(resp.text.find('vixExpryDt'), 0)
def test_derivative_history_url(self):
resp = derivative_history_url(instrumentType='FUTIDX',
symbol='NIFTY',
expiryDate='26-12-2019',
optionType='select',
strikePrice='',
dateRange='',
fromDate='25-Dec-2019',
toDate='26-Dec-2019')
self.assertGreaterEqual(resp.text.find('NIFTY'), 0)
self.assertGreaterEqual(resp.text.find('Expiry'), 0)
def test_derivative_price_list_url(self):
resp = derivative_price_list_url('2019', 'JUL', '19JUL2019')
csv = unzip_str(resp.content)
def tearDown(self):
super().tearDown()
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestUrls)
result = unittest.TextTestRunner(verbosity=2).run(suite)
if six.PY2:
if result.wasSuccessful():
print('tests OK')
for (test, error) in result.errors:
print('=========Error in: %s===========' % test)
print(error)
print('======================================')
for (test, failures) in result.failures:
print('=========Error in: %s===========' % test)
print(failures)
print('======================================')
| 34.277372
| 178
| 0.643526
| 590
| 4,696
| 4.925424
| 0.30339
| 0.121129
| 0.098417
| 0.113558
| 0.355816
| 0.235375
| 0.075705
| 0.020647
| 0
| 0
| 0
| 0.048696
| 0.199744
| 4,696
| 136
| 179
| 34.529412
| 0.724588
| 0.043441
| 0
| 0.063636
| 0
| 0
| 0.125809
| 0.029891
| 0
| 0
| 0
| 0
| 0.172727
| 1
| 0.145455
| false
| 0
| 0.118182
| 0
| 0.272727
| 0.063636
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc5d4359e9534912a4f50ac4cf894cf8797005d0
| 3,207
|
py
|
Python
|
accounts/forms.py
|
cheradenine/Django-CRM
|
692572ced050d314c1f880af8b4000c97cbf7440
|
[
"MIT"
] | 2
|
2019-08-30T14:42:45.000Z
|
2019-09-01T01:49:38.000Z
|
accounts/forms.py
|
cheradenine/Django-CRM
|
692572ced050d314c1f880af8b4000c97cbf7440
|
[
"MIT"
] | 7
|
2021-03-31T20:01:14.000Z
|
2022-03-12T00:47:10.000Z
|
accounts/forms.py
|
gthreepwood/Django-CRM
|
12de7e6c622d9d7483c210212c8b7fe3dbde2739
|
[
"MIT"
] | 1
|
2021-10-09T10:03:46.000Z
|
2021-10-09T10:03:46.000Z
|
from django import forms
from .models import Account
from common.models import Comment, Attachments
from leads.models import Lead
from contacts.models import Contact
from django.db.models import Q
class AccountForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
account_view = kwargs.pop('account', False)
request_user = kwargs.pop('request_user', None)
super(AccountForm, self).__init__(*args, **kwargs)
for field in self.fields.values():
field.widget.attrs = {"class": "form-control"}
self.fields['description'].widget.attrs.update({'rows': '8'})
self.fields['status'].choices = [
(each[0], each[1]) for each in Account.ACCOUNT_STATUS_CHOICE]
self.fields['status'].required = False
for key, value in self.fields.items():
if key == 'phone':
value.widget.attrs['placeholder'] = "+91-123-456-7890"
else:
value.widget.attrs['placeholder'] = value.label
self.fields['billing_address_line'].widget.attrs.update({
'placeholder': 'Address Line'})
self.fields['billing_street'].widget.attrs.update({
'placeholder': 'Street'})
self.fields['billing_city'].widget.attrs.update({
'placeholder': 'City'})
self.fields['billing_state'].widget.attrs.update({
'placeholder': 'State'})
self.fields['billing_postcode'].widget.attrs.update({
'placeholder': 'Postcode'})
self.fields["billing_country"].choices = [
("", "--Country--"), ] + list(self.fields["billing_country"].choices)[1:]
self.fields["lead"].queryset = Lead.objects.all(
).exclude(status='closed')
if request_user:
self.fields["lead"].queryset = Lead.objects.filter(
Q(assigned_to__in=[request_user]) | Q(created_by=request_user)).exclude(status='closed')
self.fields["contacts"].queryset = Contact.objects.filter(
Q(assigned_to__in=[request_user]) | Q(created_by=request_user))
if account_view:
self.fields['billing_address_line'].required = True
self.fields['billing_street'].required = True
self.fields['billing_city'].required = True
self.fields['billing_state'].required = True
self.fields['billing_postcode'].required = True
self.fields['billing_country'].required = True
class Meta:
model = Account
fields = ('name', 'phone', 'email', 'website', 'industry',
'description', 'status',
'billing_address_line', 'billing_street',
'billing_city', 'billing_state',
'billing_postcode', 'billing_country', 'lead', 'contacts')
class AccountCommentForm(forms.ModelForm):
comment = forms.CharField(max_length=64, required=True)
class Meta:
model = Comment
fields = ('comment', 'account', 'commented_by')
class AccountAttachmentForm(forms.ModelForm):
attachment = forms.FileField(max_length=1001, required=True)
class Meta:
model = Attachments
fields = ('attachment', 'account')
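# A minimal sketch of how the extra keyword arguments might be supplied from a
# view; the view function and request object are placeholders, only the
# 'account' and 'request_user' kwargs popped in AccountForm.__init__ come from
# the code above.
#
#   def create_account(request):
#       form = AccountForm(request.POST or None,
#                          request_user=request.user,  # narrows lead/contact querysets
#                          account=True)                # makes the billing fields required
#       if form.is_valid():
#           form.save()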
| 41.115385
| 104
| 0.617399
| 342
| 3,207
| 5.637427
| 0.28655
| 0.108921
| 0.114627
| 0.072614
| 0.267635
| 0.090768
| 0.060166
| 0.060166
| 0.060166
| 0.060166
| 0
| 0.009046
| 0.241659
| 3,207
| 77
| 105
| 41.649351
| 0.783717
| 0
| 0
| 0.046154
| 0
| 0
| 0.193951
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015385
| false
| 0
| 0.092308
| 0
| 0.230769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc5dc71b519a1377907665d2b2ecee494faf08a3
| 2,408
|
py
|
Python
|
pywren/pywren_ibm_cloud/invokers.py
|
thetolga/pywren-ibm-cloud
|
ce48c158cf469b55100ab68a75d3dcd6ae9a3ffe
|
[
"Apache-2.0"
] | null | null | null |
pywren/pywren_ibm_cloud/invokers.py
|
thetolga/pywren-ibm-cloud
|
ce48c158cf469b55100ab68a75d3dcd6ae9a3ffe
|
[
"Apache-2.0"
] | null | null | null |
pywren/pywren_ibm_cloud/invokers.py
|
thetolga/pywren-ibm-cloud
|
ce48c158cf469b55100ab68a75d3dcd6ae9a3ffe
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2018 PyWren Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
import logging
import random
from pywren_ibm_cloud.cf_connector import CloudFunctions
logger = logging.getLogger(__name__)
class IBMCloudFunctionsInvoker:
def __init__(self, cf_config, retry_config):
self.namespace = cf_config['namespace']
self.endpoint = cf_config['endpoint']
self.cf_action_name = cf_config['action_name'] # Runtime
self.invocation_retry = retry_config['invocation_retry']
self.retry_sleeps = retry_config['retry_sleeps']
self.retries = retry_config['retries']
self.client = CloudFunctions(cf_config)
log_msg = 'IBM Cloud Functions init for {}'.format(self.cf_action_name)
logger.info(log_msg)
if(logger.getEffectiveLevel() == logging.WARNING):
print(log_msg)
def invoke(self, payload):
"""
Invoke -- return information about this invocation
"""
act_id = self.client.invoke(self.cf_action_name, payload)
attempts = 1
while not act_id and self.invocation_retry and attempts < self.retries:
attempts += 1
selected_sleep = random.choice(self.retry_sleeps)
exec_id = payload['executor_id']
call_id = payload['call_id']
log_msg = ('Executor ID {} Function {} - Invocation failed - retry {} in {} seconds'.format(exec_id, call_id, attempts, selected_sleep))
logger.debug(log_msg)
time.sleep(selected_sleep)
act_id = self.client.invoke(self.cf_action_name, payload)
return act_id
def config(self):
"""
Return config dict
"""
return {'cf_action_name': self.cf_action_name,
'cf_namespace': self.namespace,
'cf_endpoint': self.endpoint}
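# For illustration only -- the shape of the two dicts consumed by __init__;
# the values are placeholders, the keys are the ones read above (plus whatever
# the CloudFunctions connector itself expects inside cf_config).
#
#   cf_config = {'namespace': '<namespace>',
#                'endpoint': '<openwhisk endpoint>',
#                'action_name': '<runtime action>'}
#   retry_config = {'invocation_retry': True,
#                   'retry_sleeps': [1, 2, 5, 10, 30],
#                   'retries': 5}
#   invoker = IBMCloudFunctionsInvoker(cf_config, retry_config)
#   activation_id = invoker.invoke({'executor_id': '...', 'call_id': '...'})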
| 34.898551
| 148
| 0.658638
| 298
| 2,408
| 5.124161
| 0.40604
| 0.045842
| 0.047151
| 0.05239
| 0.081205
| 0.057629
| 0.057629
| 0.057629
| 0.057629
| 0.057629
| 0
| 0.005556
| 0.252492
| 2,408
| 68
| 149
| 35.411765
| 0.842778
| 0.260382
| 0
| 0.057143
| 0
| 0
| 0.127981
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085714
| false
| 0
| 0.114286
| 0
| 0.285714
| 0.028571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc5f5a1b908ccb47f94225746e71f15650a97363
| 4,160
|
py
|
Python
|
Projet1/Dataset/addlinkRealExample.py
|
Arugakente/DataScienceP1
|
94ca874ed8a76a89a3da9ecf2fe6e554700f0507
|
[
"MIT"
] | null | null | null |
Projet1/Dataset/addlinkRealExample.py
|
Arugakente/DataScienceP1
|
94ca874ed8a76a89a3da9ecf2fe6e554700f0507
|
[
"MIT"
] | null | null | null |
Projet1/Dataset/addlinkRealExample.py
|
Arugakente/DataScienceP1
|
94ca874ed8a76a89a3da9ecf2fe6e554700f0507
|
[
"MIT"
] | null | null | null |
import os
import random
inputDirectory = "./original"
outputDirectory = "./processed"
#probability parameters
TopLevel = 0.6
SecondLevel = 0.5
ThirdLevel = 0.4
FourAndAbove = 0.2
pickInside = 0.5
pickOutside = 0.25
topics = []
siteLevel = []
fileStructure = []
count = 0
def findPossibleIndex(toParse):
toReturn = []
for current in range(0,len(toParse)):
if toParse[current] == " ":
toReturn.append(current)
toReturn.append(len(toParse))
return toReturn
def manageFile(inputPath,outputPath,topicIndex,currentLevel,filename):
count = 0
content = open(inputPath , 'r')
output = open(outputPath ,"w")
currentLine = content.readline()
outputFile = ""
while currentLine:
currentLine = content.readline()
randomPick = random.uniform(0.0,2.0)
if randomPick <= pickInside+pickOutside :
possibleIndexes = findPossibleIndex(currentLine)
insertPosition = possibleIndexes[random.randint(0,len(possibleIndexes)-1)]
selectedTopic = topicIndex
if(randomPick<=pickOutside):
while(selectedTopic == topicIndex):
selectedTopic = random.randint(0,len(topics)-1)
randomPick = random.uniform(0.0,4.0)
if(randomPick <= TopLevel + SecondLevel + ThirdLevel + FourAndAbove):
selectedLevel = 0
if(randomPick <= TopLevel):
selectedLevel = 1
if(randomPick <= TopLevel+ SecondLevel and randomPick > TopLevel):
selectedLevel = 2
if(randomPick <= TopLevel + SecondLevel + ThirdLevel and randomPick > TopLevel+ SecondLevel):
selectedLevel = 3
if(randomPick <= TopLevel + SecondLevel + ThirdLevel + FourAndAbove and randomPick > TopLevel + SecondLevel + ThirdLevel):
if(len(siteLevel[selectedTopic]) == 4):
selectedLevel = 4
else:
selectedLevel = random.randint(4,len(siteLevel[selectedTopic]))
i = 0
found = False
while i<len(siteLevel[selectedTopic]):
if siteLevel[selectedTopic][i] == str(selectedLevel)+"grade":
found = True
selectedLevel = i
i+=1
if(selectedLevel>=currentLevel):
fileLink = filename
while(fileLink == filename):
fileLink = fileStructure[selectedTopic][selectedLevel][random.randint(0,len(fileStructure[selectedTopic][selectedLevel])-1)]
fileLink = " linkTo:"+fileLink
count += 1
print(count)
if insertPosition == len(currentLine):
currentLine += fileLink
else:
currentLine = currentLine[0:insertPosition]+fileLink+currentLine[insertPosition:]
outputFile += currentLine
output.write(outputFile)
return count
topicIndex=0
for foldername in os.listdir(inputDirectory) :
if(foldername[0] != "."):
topics.append(foldername)
siteLevel.append([])
fileStructure.append([])
levelIndex=0
for categoryName in os.listdir(inputDirectory+"/"+foldername):
if(categoryName[0] != "."):
siteLevel[topicIndex].append(categoryName)
fileStructure[topicIndex].append([])
for filename in os.listdir(inputDirectory+"/"+foldername+"/"+categoryName):
if(filename[0] != "."):
fileStructure[topicIndex][levelIndex].append(filename)
levelIndex += 1
topicIndex += 1
for i in range(0,len(topics)):
for j in range(0,len(siteLevel[i])):
for k in range(0,len(fileStructure[i][j])):
count += manageFile(inputDirectory+"/"+topics[i]+"/"+siteLevel[i][j]+"/"+fileStructure[i][j][k],outputDirectory+"/"+fileStructure[i][j][k],i,j,fileStructure[i][j][k])
print(str(count)+" links created")
| 33.821138
| 178
| 0.571394
| 364
| 4,160
| 6.53022
| 0.222527
| 0.060581
| 0.073202
| 0.018511
| 0.127472
| 0.059739
| 0
| 0
| 0
| 0
| 0
| 0.017926
| 0.316106
| 4,160
| 123
| 179
| 33.821138
| 0.817575
| 0.005288
| 0
| 0.064516
| 0
| 0
| 0.014258
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021505
| false
| 0
| 0.021505
| 0
| 0.064516
| 0.021505
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc61f699dd50ec363bb2a766f77f3f5058fefd54
| 13,616
|
py
|
Python
|
kkcalc/kk.py
|
benajamin/kkcalc
|
fcabfba288442dd297e3bd9910062c5db2231a91
|
[
"Zlib"
] | null | null | null |
kkcalc/kk.py
|
benajamin/kkcalc
|
fcabfba288442dd297e3bd9910062c5db2231a91
|
[
"Zlib"
] | 1
|
2021-02-09T10:18:14.000Z
|
2021-02-17T08:28:58.000Z
|
kkcalc/kk.py
|
benajamin/kkcalc
|
fcabfba288442dd297e3bd9910062c5db2231a91
|
[
"Zlib"
] | 3
|
2021-02-06T23:37:14.000Z
|
2022-01-19T15:26:26.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of the Kramers-Kronig Calculator software package.
#
# Copyright (c) 2013 Benjamin Watts, Daniel J. Lauk
#
# The software is licensed under the terms of the zlib/libpng license.
# For details see LICENSE.txt
"""This module implements the Kramers-Kronig transformation."""
import logging, sys
logger = logging.getLogger(__name__)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
logging.StreamHandler(stream=sys.stdout)
import math
import numpy
import os
import data
def calc_relativistic_correction(stoichiometry):
"""Calculate the relativistic correction to the Kramers-Kronig transform.
Parameters:
-----------
stoichiometry : array of integer/float pairs
Each pair in the list consists of an atomic number and the relative proportion of that element.
Returns
-------
This function returns a ``float`` holding the relativistic
corection to the Kramers-Kronig transform.
"""
correction = 0
for z, n in stoichiometry:
correction += (z - (z/82.5)**2.37) * n
return correction
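# A quick worked example of the stoichiometry format, assuming water (H2O):
# two hydrogens (Z=1) and one oxygen (Z=8) give
#   calc_relativistic_correction([(1, 2), (8, 1)])
#   == (1 - (1/82.5)**2.37)*2 + (8 - (8/82.5)**2.37)*1  ~= 9.996
# i.e. roughly the total electron count minus a small relativistic term.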
def KK_General_PP(Eval_Energy, Energy, imaginary_spectrum, orders, relativistic_correction):
"""Calculate Kramers-Kronig transform with "Piecewise Polynomial"
algorithm plus the Biggs and Lighthill extended data.
Parameters
----------
Eval_Energy : numpy vector of `float`
Set of photon energies describing points at which to evaluate the real spectrum
Energy : numpy vector of `float`
Set of photon energies describing intervals for which each row of `imaginary_spectrum` is valid
imaginary_spectrum : two-dimensional `numpy.array` of `float`
The array consists of columns of polynomial coefficients belonging to the power terms indicated by 'order'
orders : numpy vector of integers
The vector represents the polynomial indices corresponding to the columns of imaginary_spectrum
relativistic_correction : float
The relativistic correction to the Kramers-Kronig transform.
You can calculate the value using the `calc_relativistic_correction` function.
Returns
-------
This function returns the real part of the scattering factors evaluated at photon energies specified by Eval_Energy.
"""
logger = logging.getLogger(__name__)
logger.info("Calculate Kramers-Kronig transform using general piecewise-polynomial algorithm")
# Need to build x-E-n arrays
X = numpy.tile(Energy[:,numpy.newaxis,numpy.newaxis],(1,len(Eval_Energy),len(orders)))
E = numpy.tile(Eval_Energy[numpy.newaxis,:,numpy.newaxis],(len(Energy)-1,1,len(orders)))
C = numpy.tile(imaginary_spectrum[:,numpy.newaxis,:],(1,len(Eval_Energy),1))
N = numpy.tile(orders[numpy.newaxis,numpy.newaxis,:],(len(Energy)-1,len(Eval_Energy),1))
poles = numpy.equal(X,numpy.tile(Eval_Energy[numpy.newaxis,:,numpy.newaxis],(len(Energy),1,len(orders))))
# all N, ln(x+E) and ln(x-E) terms and poles
Integral = numpy.sum(-C*(-E)**N*numpy.log(numpy.absolute((X[1:,:,:]+E)/(X[:-1,:,:]+E)))-C*E**N*(1-poles[1:,:,:])*numpy.log(numpy.absolute((X[1:,:,:]-E+poles[1:,:,:])/((1-poles[:-1,:,:])*X[:-1,:,:]+poles[:-1,:,:]*X[[0]+list(range(len(Energy)-2)),:,:]-E))),axis=(0,2))
if numpy.any(orders<=-2): # N<=-2, ln(x) terms
i = [slice(None,None,None),slice(None,None,None),orders<=-2]
Integral += numpy.sum(C[i]*((-E[i])**N[i]+E[i]**N[i])*numpy.log(numpy.absolute((X[1:,:,orders<=-2])/(X[:-1,:,orders<=-2]))),axis=(0,2))
if numpy.any(orders>=0): # N>=0, x^k terms
for ni in numpy.where(orders>=0)[0]:
i = [slice(None,None,None),slice(None,None,None),ni]
n = orders[ni]
for k in range(n,0,-2):
Integral += numpy.sum(C[i]/float(-k)*2*E[i]**(n-k)*(X[1:,:,ni]**k-X[:-1,:,ni]**k),axis=0)
if numpy.any(orders <=-3): # N<=-3, x^k terms
for ni in numpy.where(orders<=-3)[0]:
i = [slice(None,None,None),slice(None,None,None),ni]
n = orders[ni]
for k in range(n+2,0,2):
Integral += numpy.sum(C[i]/float(k)*((-1)**(n-k)+1)*E[i]**(n-k)*(X[1:,:,ni]**k-X[:-1,:,ni]**k),axis=0)
logger.debug("Done!")
return Integral / math.pi + relativistic_correction
def KK_PP(Eval_Energy, Energy, imaginary_spectrum, relativistic_correction):
"""Calculate Kramers-Kronig transform with "Piecewise Polynomial"
algorithm plus the Biggs and Lighthill extended data.
Parameters
----------
Eval_Energy : numpy vector of `float`
Set of photon energies describing points at which to evaluate the real spectrum
Energy : numpy vector of `float`
Set of photon energies describing intervals for which each row of `imaginary_spectrum` is valid
imaginary_spectrum : two-dimensional `numpy.array` of `float`
The array consists of five columns of polynomial coefficients: A_1, A_0, A_-1, A_-2, A_-3
relativistic_correction : float
The relativistic correction to the Kramers-Kronig transform.
You can calculate the value using the `calc_relativistic_correction` function.
Returns
-------
This function returns the real part of the scattering factors evaluated at photon energies specified by Eval_Energy.
"""
logger = logging.getLogger(__name__)
logger.info("Calculate Kramers-Kronig transform using (n from 1 to -3) piecewise-polynomial algorithm")
X1 = Energy[0:-1]
X2 = Energy[1:]
E = numpy.tile(Eval_Energy, (len(Energy)-1, 1)).T
Full_coeffs = imaginary_spectrum.T
Symb_1 = (( Full_coeffs[0, :]*E+Full_coeffs[1, :])*(X2-X1)+0.5*Full_coeffs[0, :]*(X2**2-X1**2)-(Full_coeffs[3, :]/E+Full_coeffs[4, :]*E**-2)*numpy.log(numpy.absolute(X2/X1))+Full_coeffs[4, :]/E*(X2**-1-X1**-1))
Symb_2 = ((-Full_coeffs[0, :]*E+Full_coeffs[1, :])*(X2-X1)+0.5*Full_coeffs[0, :]*(X2**2-X1**2)+(Full_coeffs[3, :]/E-Full_coeffs[4, :]*E**-2)*numpy.log(numpy.absolute(X2/X1))-Full_coeffs[4, :]/E*(X2**-1-X1**-1))+(Full_coeffs[0, :]*E**2-Full_coeffs[1, :]*E+Full_coeffs[2, :]-Full_coeffs[3, :]*E**-1+Full_coeffs[4, :]*E**-2)*numpy.log(numpy.absolute((X2+E)/(X1+E)))
Symb_3 = (1-1*((X2==E)|(X1==E)))*(Full_coeffs[0, :]*E**2+Full_coeffs[1, :]*E+Full_coeffs[2, :]+Full_coeffs[3, :]*E**-1+Full_coeffs[4, :]*E**-2)*numpy.log(numpy.absolute((X2-E+1*(X2==E))/(X1-E+1*(X1==E))))
Symb_B = numpy.sum(Symb_2 - Symb_1 - Symb_3, axis=1) # Sum areas for approximate integral
# Patch singularities
hits = Energy[1:-1]==E[:,0:-1]
E_hits = numpy.append(numpy.insert(numpy.any(hits, axis=0),[0,0],False),[False,False])
Eval_hits = numpy.any(hits, axis=1)
X1 = Energy[E_hits[2:]]
XE = Energy[E_hits[1:-1]]
X2 = Energy[E_hits[:-2]]
C1 = Full_coeffs[:, E_hits[2:-1]]
C2 = Full_coeffs[:, E_hits[1:-2]]
Symb_singularities = numpy.zeros(len(Eval_Energy))
Symb_singularities[Eval_hits] = (C2[0, :]*XE**2+C2[1, :]*XE+C2[2, :]+C2[3, :]*XE**-1+C2[4, :]*XE**-2)*numpy.log(numpy.absolute((X2-XE)/(X1-XE)))
# Finish things off
KK_Re = (Symb_B-Symb_singularities) / (math.pi*Eval_Energy) + relativistic_correction
logger.debug("Done!")
return KK_Re
def improve_accuracy(Full_E, Real_Spectrum, Imaginary_Spectrum, relativistic_correction, tolerance, recursion=50):
"""Calculate extra data points so that a linear interpolation is more accurate.
Parameters
----------
Full_E : numpy vector of `float`
Set of photon energies describing intervals for which each row of `imaginary_spectrum` is valid
Real_Spectrum : numpy vector of `float`
The real part of the spectrum corresponding to magnitudes at photon energies in Full_E
Imaginary_Spectrum : two-dimensional `numpy.array` of `float`
The array consists of five columns of polynomial coefficients: A_1, A_0, A_-1, A_-2, A_-3
relativistic_correction : float
The relativistic correction to the Kramers-Kronig transform.
(You can calculate the value using the `calc_relativistic_correction` function.)
tolerance : float
Level of error in linear extrapolation of data values to be allowed.
recursion : integer
Number of times an energy interval can be halved before giving up.
Returns
-------
This function returns a numpy array with three columns respectively representing photon energy, the real spectrum and the imaginary spectrum.
"""
logger.debug("Improve data accuracy")
new_points = numpy.cumsum(numpy.ones((len(Full_E)-2,1),dtype=numpy.int8))+1
Im_values = data.coeffs_to_ASF(Full_E, numpy.vstack((Imaginary_Spectrum,Imaginary_Spectrum[-1])))
#plot_Im_values = Im_values
Re_values = Real_Spectrum
E_values = Full_E
temp_Im_spectrum = Imaginary_Spectrum[1:]
count = 0
improved = 1
total_improved_points = 0
while count<recursion and numpy.sum(improved)>0:
#get E_midpoints
midpoints = (E_values[new_points-1]+E_values[new_points])/2.
#evaluate at new points
Im_midpoints = data.coeffs_to_ASF(midpoints, temp_Im_spectrum)
Re_midpoints = KK_PP(midpoints, Full_E, Imaginary_Spectrum, relativistic_correction)
#evaluate error levels
Im_error = abs((Im_values[new_points-1]+Im_values[new_points])/2. - Im_midpoints)
Re_error = abs((Re_values[new_points-1]+Re_values[new_points])/2. - Re_midpoints)
improved = (Im_error>tolerance) | (Re_error>tolerance)
logger.debug(str(numpy.sum(improved))+" points (out of "+str(len(improved))+") can be improved in pass number "+str(count+1)+".")
total_improved_points += numpy.sum(improved)
#insert new points and values
Im_values = numpy.insert(Im_values,new_points[improved],Im_midpoints[improved])
Re_values = numpy.insert(Re_values,new_points[improved],Re_midpoints[improved])
E_values = numpy.insert(E_values,new_points[improved],midpoints[improved])
#prepare for next loop
temp_Im_spectrum =numpy.repeat(temp_Im_spectrum[improved],2,axis=0)
new_points = numpy.where(numpy.insert(numpy.zeros(Im_values.shape, dtype=numpy.bool),new_points[improved],True))[0]
new_points = numpy.vstack((new_points, new_points+1)).T.flatten()
count += 1
#import matplotlib
#matplotlib.use('WXAgg')
#import pylab
#pylab.figure()
#pylab.plot(Full_E,plot_Im_values,'ok')
#pylab.plot(Full_E,Real_Spectrum,'og')
#pylab.plot(midpoints,Im_midpoints,'+b')
#pylab.plot(midpoints,Re_midpoints,'+r')
#pylab.plot(E_values,Im_values,'b-')
#pylab.plot(E_values,Re_values,'r-')
#pylab.plot(midpoints,Im_error,'b-')
#pylab.plot(midpoints,Re_error,'r-')
#pylab.xscale('log')
#pylab.show()
logger.info("Improved data accuracy by inserting "+str(total_improved_points)+" extra points.")
return numpy.vstack((E_values,Re_values,Im_values)).T
def kk_calculate_real(NearEdgeDataFile, ChemicalFormula, load_options=None, input_data_type=None, merge_points=None, add_background=False, fix_distortions=False, curve_tolerance=None, curve_recursion=50):
"""Do all data loading and processing and then calculate the kramers-Kronig transform.
Parameters
----------
NearEdgeDataFile : string
Path to file containg near-edge data
ChemicalFormula : string
A standard chemical formula string consisting of element symbols, numbers and parentheses.
merge_points : list or tuple pair of `float` values, or None
The photon energy values (low, high) at which the near-edge and scattering factor data values
are set equal so as to ensure continuity of the merged data set.
Returns
-------
This function returns a numpy array with columns consisting of the photon energy, the real and the imaginary parts of the scattering factors.
"""
Stoichiometry = data.ParseChemicalFormula(ChemicalFormula)
Relativistic_Correction = calc_relativistic_correction(Stoichiometry)
Full_E, Imaginary_Spectrum = data.calculate_asf(Stoichiometry)
if NearEdgeDataFile is not None:
NearEdge_Data = data.convert_data(data.load_data(NearEdgeDataFile, load_options),FromType=input_data_type,ToType='asf')
Full_E, Imaginary_Spectrum = data.merge_spectra(NearEdge_Data, Full_E, Imaginary_Spectrum, merge_points=merge_points, add_background=add_background, fix_distortions=fix_distortions)
Real_Spectrum = KK_PP(Full_E, Full_E, Imaginary_Spectrum, Relativistic_Correction)
if curve_tolerance is not None:
output_data = improve_accuracy(Full_E,Real_Spectrum,Imaginary_Spectrum, Relativistic_Correction, curve_tolerance, curve_recursion)
else:
Imaginary_Spectrum_Values = data.coeffs_to_ASF(Full_E, numpy.vstack((Imaginary_Spectrum,Imaginary_Spectrum[-1])))
output_data = numpy.vstack((Full_E,Real_Spectrum,Imaginary_Spectrum_Values)).T
return output_data
if __name__ == '__main__':
#use argparse here to get command line arguments
#process arguments and pass to a pythonic function
#I will abuse this section of code for initial testing
#Output = kk_calculate_real('../../data/Xy_norm_bgsub.txt', 'C10SH14', input_data_type='NEXAFS')
Output = kk_calculate_real('../../data/LaAlO3/LaAlO3_Exp.csv', 'LaAlO3', input_data_type='NEXAFS', fix_distortions=True, curve_tolerance=0.05)
#Output = kk_calculate_real('../../data/GaAs/As.xmu.csv', 'GaAs', input_data_type='NEXAFS', fix_distortions=True, curve_tolerance=0.05)
Stoichiometry = data.ParseChemicalFormula('LaAlO3')
#Stoichiometry = data.ParseChemicalFormula('GaAs')
Relativistic_Correction = calc_relativistic_correction(Stoichiometry)
ASF_E, ASF_Data = data.calculate_asf(Stoichiometry)
ASF_Data3 = data.coeffs_to_linear(ASF_E, ASF_Data, 0.1)
ASF_Data2 = data.coeffs_to_ASF(ASF_E, numpy.vstack((ASF_Data,ASF_Data[-1])))
#Test_E = (Output[1:,0]+Output[0:-1,0])*0.5
#Test_E = numpy.linspace(41257.87,41259.87,num=21)
#Real_Spectrum2 = KK_PP(Test_E, Output[:,0], Im, Relativistic_Correction)
import matplotlib
matplotlib.use('WXAgg')
import pylab
pylab.figure()
pylab.plot(Output[:,0],Output[:,1],'xg-',Output[:,0],Output[:,2],'xb-')
pylab.plot(ASF_E,ASF_Data2,'+r')
#pylab.plot(ASF_E,ASF_Data22,'xr')
pylab.plot(ASF_Data3[0],ASF_Data3[1],'r-')
#pylab.plot(Test_E,Real_Spectrum2,'*y')
pylab.xscale('log')
pylab.show()
| 47.608392
| 363
| 0.735238
| 2,109
| 13,616
| 4.586534
| 0.168326
| 0.045694
| 0.022744
| 0.017368
| 0.444123
| 0.399049
| 0.354389
| 0.344877
| 0.326889
| 0.311692
| 0
| 0.023127
| 0.113984
| 13,616
| 285
| 364
| 47.775439
| 0.77868
| 0.422444
| 0
| 0.106557
| 0
| 0
| 0.048957
| 0.004069
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040984
| false
| 0.008197
| 0.057377
| 0
| 0.139344
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc63326e97a96ff49b392fe1692ec3ec3a6b80ad
| 16,626
|
py
|
Python
|
src/plugins/maimaidx.py
|
LonelyFantasy/Chiyuki-Bot
|
16a91b96661825c2a367a12c30d6a28ad13b95a9
|
[
"MIT"
] | null | null | null |
src/plugins/maimaidx.py
|
LonelyFantasy/Chiyuki-Bot
|
16a91b96661825c2a367a12c30d6a28ad13b95a9
|
[
"MIT"
] | null | null | null |
src/plugins/maimaidx.py
|
LonelyFantasy/Chiyuki-Bot
|
16a91b96661825c2a367a12c30d6a28ad13b95a9
|
[
"MIT"
] | null | null | null |
import math
from collections import defaultdict
from typing import List, Dict, Any
from nonebot import on_command, on_message, on_notice, on_regex, get_driver
from nonebot.log import logger
from nonebot.permission import Permission
from nonebot.typing import T_State
from nonebot.adapters import Event, Bot
from nonebot.adapters.cqhttp import Message, MessageSegment, GroupMessageEvent, PrivateMessageEvent
from src.libraries.maimaidx_guess import GuessObject
from src.libraries.tool import hash
from src.libraries.maimaidx_music import *
from src.libraries.image import *
from src.libraries.maimai_best_40 import generate
import requests
import json
import random
import time
import re
from urllib import parse
driver = get_driver()
@driver.on_startup
def _():
logger.info("Load help text successfully")
help_text: dict = get_driver().config.help_text
help_text['mai'] = ('查看舞萌相关功能', """桜千雪です、よろしく。
可用命令如下:
今日舞萌 查看今天的舞萌运势
XXXmaimaiXXX什么 随机一首歌
随个[dx/标准][绿黄红紫白]<难度> 随机一首指定条件的乐曲
查歌<乐曲标题的一部分> 查询符合条件的乐曲
[绿黄红紫白]id<歌曲编号> 查询乐曲信息或谱面信息
<歌曲别名>是什么歌 查询乐曲别名对应的乐曲
定数查歌 <定数> 查询定数对应的乐曲
定数查歌 <定数下限> <定数上限>
分数线 <难度+歌曲id> <分数线> 详情请输入“分数线 帮助”查看""")
def song_txt(music: Music):
return Message([
{
"type": "text",
"data": {
"text": f"{music.id}. {music.title}\n"
}
},
{
"type": "image",
"data": {
"file": f"https://www.diving-fish.com/covers/{music.id}.jpg"
}
},
{
"type": "text",
"data": {
"text": f"\n{'/'.join(music.level)}"
}
}
])
def inner_level_q(ds1, ds2=None):
result_set = []
diff_label = ['Bas', 'Adv', 'Exp', 'Mst', 'ReM']
if ds2 is not None:
music_data = total_list.filter(ds=(ds1, ds2))
else:
music_data = total_list.filter(ds=ds1)
for music in music_data:
for i in music.diff:
result_set.append((music['id'], music['title'], music['ds'][i], diff_label[i], music['level'][i]))
return result_set
inner_level = on_command('inner_level ', aliases={'定数查歌 '})
@inner_level.handle()
async def _(bot: Bot, event: Event, state: T_State):
argv = str(event.get_message()).strip().split(" ")
if len(argv) > 2 or len(argv) == 0:
await inner_level.finish("命令格式为\n定数查歌 <定数>\n定数查歌 <定数下限> <定数上限>")
return
if len(argv) == 1:
result_set = inner_level_q(float(argv[0]))
else:
result_set = inner_level_q(float(argv[0]), float(argv[1]))
if len(result_set) > 50:
await inner_level.finish("数据超出 50 条,请尝试缩小查询范围")
return
s = ""
for elem in result_set:
s += f"{elem[0]}. {elem[1]} {elem[3]} {elem[4]}({elem[2]})\n"
await inner_level.finish(s.strip())
spec_rand = on_regex(r"^随个(?:dx|sd|标准)?[绿黄红紫白]?[0-9]+\+?")
@spec_rand.handle()
async def _(bot: Bot, event: Event, state: T_State):
level_labels = ['绿', '黄', '红', '紫', '白']
regex = "随个((?:dx|sd|标准))?([绿黄红紫白]?)([0-9]+\+?)"
res = re.match(regex, str(event.get_message()).lower())
try:
if res.groups()[0] == "dx":
tp = ["DX"]
elif res.groups()[0] == "sd" or res.groups()[0] == "标准":
tp = ["SD"]
else:
tp = ["SD", "DX"]
level = res.groups()[2]
if res.groups()[1] == "":
music_data = total_list.filter(level=level, type=tp)
else:
music_data = total_list.filter(level=level, diff=['绿黄红紫白'.index(res.groups()[1])], type=tp)
await spec_rand.send(song_txt(music_data.random()))
except Exception as e:
print(e)
await spec_rand.finish("随机命令错误,请检查语法")
mr = on_regex(r".*maimai.*什么")
@mr.handle()
async def _(bot: Bot, event: Event, state: T_State):
await mr.finish(song_txt(total_list.random()))
search_music = on_regex(r"^查歌.+")
@search_music.handle()
async def _(bot: Bot, event: Event, state: T_State):
regex = "查歌(.+)"
name = re.match(regex, str(event.get_message())).groups()[0].strip()
if name == "":
return
res = total_list.filter(title_search=name)
await search_music.finish(Message([
{"type": "text",
"data": {
"text": f"{music['id']}. {music['title']}\n"
}} for music in res]))
query_chart = on_regex(r"^([绿黄红紫白]?)id([0-9]+)")
@query_chart.handle()
async def _(bot: Bot, event: Event, state: T_State):
regex = "([绿黄红紫白]?)id([0-9]+)"
groups = re.match(regex, str(event.get_message())).groups()
level_labels = ['绿', '黄', '红', '紫', '白']
if groups[0] != "":
try:
level_index = level_labels.index(groups[0])
level_name = ['Basic', 'Advanced', 'Expert', 'Master', 'Re: MASTER']
name = groups[1]
music = total_list.by_id(name)
chart = music['charts'][level_index]
ds = music['ds'][level_index]
level = music['level'][level_index]
file = f"https://www.diving-fish.com/covers/{music['id']}.jpg"
if len(chart['notes']) == 4:
msg = f'''{level_name[level_index]} {level}({ds})
TAP: {chart['notes'][0]}
HOLD: {chart['notes'][1]}
SLIDE: {chart['notes'][2]}
BREAK: {chart['notes'][3]}
谱师: {chart['charter']}
'''
else:
msg = f'''{level_name[level_index]} {level}({ds})
TAP: {chart['notes'][0]}
HOLD: {chart['notes'][1]}
SLIDE: {chart['notes'][2]}
TOUCH: {chart['notes'][3]}
BREAK: {chart['notes'][4]}
谱师: {chart['charter']}
'''
await query_chart.send(Message([
{
"type": "text",
"data": {
"text": f"{music['id']}. {music['title']}\n"
}
},
{
"type": "image",
"data": {
"file": f"{file}"
}
},
{
"type": "text",
"data": {
"text": msg
}
}
]))
except Exception:
await query_chart.send("未找到该谱面")
else:
name = groups[1]
music = total_list.by_id(name)
try:
file = f"https://www.diving-fish.com/covers/{music['id']}.jpg"
await query_chart.send(Message([
{
"type": "text",
"data": {
"text": f"{music['id']}. {music['title']}\n"
}
},
{
"type": "image",
"data": {
"file": f"{file}"
}
},
{
"type": "text",
"data": {
"text": f"艺术家: {music['basic_info']['artist']}\n分类: {music['basic_info']['genre']}\nBPM: {music['basic_info']['bpm']}\n版本: {music['basic_info']['from']}\n难度: {'/'.join(music['level'])}"
}
}
]))
except Exception:
await query_chart.send("未找到该乐曲")
wm_list = ['拼机', '推分', '越级', '下埋', '夜勤', '练底力', '练手法', '打旧框', '干饭', '抓绝赞', '收歌']
jrwm = on_command('今日舞萌', aliases={'今日mai'})
@jrwm.handle()
async def _(bot: Bot, event: Event, state: T_State):
qq = int(event.get_user_id())
h2 = hash(qq)
h = h2
rp = h % 100
wm_value = []
for i in range(11):
wm_value.append(h & 3)
h >>= 2
s = f"今日人品值:{rp}\n"
for i in range(11):
if wm_value[i] == 3:
s += f'宜 {wm_list[i]}\n'
elif wm_value[i] == 0:
s += f'忌 {wm_list[i]}\n'
s += "千雪提醒您:打机时不要大力拍打或滑动哦\n今日推荐歌曲:"
music = total_list[h2 % len(total_list)]
await jrwm.finish(Message([
{"type": "text", "data": {"text": s}}
] + song_txt(music)))
music_aliases = defaultdict(list)
f = open('src/static/aliases.csv', 'r', encoding='utf-8')
tmp = f.readlines()
f.close()
for t in tmp:
arr = t.strip().split('\t')
for i in range(len(arr)):
if arr[i] != "":
music_aliases[arr[i].lower()].append(arr[0])
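# The aliases file is assumed (illustration, not taken from the original source) to be
# tab-separated: the first column is the canonical song title and the remaining columns
# are aliases, e.g. "Some Title<TAB>alias one<TAB>alias two". Every non-empty cell,
# lowercased, is mapped back to the first column's title.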
find_song = on_regex(r".+是什么歌")
@find_song.handle()
async def _(bot: Bot, event: Event, state: T_State):
regex = "(.+)是什么歌"
name = re.match(regex, str(event.get_message())).groups()[0].strip().lower()
if name not in music_aliases:
await find_song.finish("未找到此歌曲\n舞萌 DX 歌曲别名收集计划:https://docs.qq.com/sheet/DSXhaUXVsRlhxRmtJ")
return
result_set = music_aliases[name]
if len(result_set) == 1:
music = total_list.by_title(result_set[0])
await find_song.finish(Message([{"type": "text", "data": {"text": "您要找的是不是"}}] + song_txt(music)))
else:
s = '\n'.join(result_set)
await find_song.finish(f"您要找的可能是以下歌曲中的其中一首:\n{ s }")
query_score = on_command('分数线')
query_score_text = '''此功能为查找某首歌分数线设计。
命令格式:分数线 <难度+歌曲id> <分数线>
例如:分数线 白337 100
命令将返回分数线允许的 TAP GREAT 容错以及 BREAK 50落等价的 TAP GREAT 数。
以下为 TAP GREAT 的对应表:
GREAT/GOOD/MISS
TAP 1/2.5/5
HOLD 2/5/10
SLIDE 3/7.5/15
TOUCH 1/2.5/5
BREAK 5/12.5/25(外加200落)'''
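# Worked example of the arithmetic used in the handler below (hypothetical note
# counts, not a real chart): with 500 TAP, 100 HOLD, 50 SLIDE, 50 TOUCH and
# 20 BREAK notes, total_score = 500*500 + 100*1000 + 50*1500 + 50*500 + 20*2500
# = 500000. For a 100% score line, reduce = 101 - 100 = 1, so at most
# 500000 * 1 / 10000 = 50 TAP GREATs are allowed, each costing
# 10000 / 500000 = 0.02%; one BREAK 50落 costs 500000 * (0.01 / 20) / 4 = 62.5
# score units, i.e. 62.5 / 100 = 0.625 TAP GREAT.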
query_score_mes = Message([{
"type": "image",
"data": {
"file": f"base64://{str(image_to_base64(text_to_image(query_score_text)), encoding='utf-8')}"
}
}])
@query_score.handle()
async def _(bot: Bot, event: Event, state: T_State):
r = "([绿黄红紫白])(?:id)?([0-9]+)"
argv = str(event.get_message()).strip().split(" ")
if len(argv) == 1 and argv[0] == '帮助':
await query_score.send(query_score_mes)
elif len(argv) == 2:
try:
grp = re.match(r, argv[0]).groups()
level_labels = ['绿', '黄', '红', '紫', '白']
level_labels2 = ['Basic', 'Advanced', 'Expert', 'Master', 'Re:MASTER']
level_index = level_labels.index(grp[0])
chart_id = grp[1]
line = float(argv[1])
music = total_list.by_id(chart_id)
chart: Dict[str, Any] = music['charts'][level_index]
tap = int(chart['notes'][0])
slide = int(chart['notes'][2])
hold = int(chart['notes'][1])
touch = int(chart['notes'][3]) if len(chart['notes']) == 5 else 0
brk = int(chart['notes'][-1])
total_score = 500 * tap + slide * 1500 + hold * 1000 + touch * 500 + brk * 2500
break_bonus = 0.01 / brk
break_50_reduce = total_score * break_bonus / 4
reduce = 101 - line
if reduce <= 0 or reduce >= 101:
raise ValueError
await query_score.send(f'''{music['title']} {level_labels2[level_index]}
分数线 {line}% 允许的最多 TAP GREAT 数量为 {(total_score * reduce / 10000):.2f}(每个-{10000 / total_score:.4f}%),
BREAK 50落(一共{brk}个)等价于 {(break_50_reduce / 100):.3f} 个 TAP GREAT(-{break_50_reduce / total_score * 100:.4f}%)''')
except Exception:
await query_chart.send("格式错误或未找到乐曲,输入“分数线 帮助”以查看帮助信息")
best_40_pic = on_command('b40')
@best_40_pic.handle()
async def _(bot: Bot, event: Event, state: T_State):
username = str(event.get_message()).strip()
print(event.message_id)
if username == "":
payload = {'qq': str(event.get_user_id())}
else:
payload = {'username': username}
img, success = await generate(payload)
if success == 400:
await best_40_pic.send("未找到此玩家,请确保此玩家的用户名和查分器中的用户名相同。")
elif success == 403:
await best_40_pic.send("该用户禁止了其他人获取数据。")
else:
await best_40_pic.send(Message([
MessageSegment.reply(event.message_id),
MessageSegment.image(f"base64://{str(image_to_base64(img), encoding='utf-8')}")
]))
disable_guess_music = on_command('猜歌设置', priority=0)
@disable_guess_music.handle()
async def _(bot: Bot, event: Event):
if event.message_type != "group":
return
arg = str(event.get_message())
group_members = await bot.get_group_member_list(group_id=event.group_id)
for m in group_members:
if m['user_id'] == event.user_id:
break
su = get_driver().config.superusers
if m['role'] != 'owner' and m['role'] != 'admin' and str(m['user_id']) not in su:
await disable_guess_music.finish("只有管理员可以设置猜歌")
return
db = get_driver().config.db
c = await db.cursor()
if arg == '启用':
await c.execute(f'update guess_table set enabled=1 where group_id={event.group_id}')
elif arg == '禁用':
await c.execute(f'update guess_table set enabled=0 where group_id={event.group_id}')
else:
await disable_guess_music.finish("请输入 猜歌设置 启用/禁用")
await db.commit()
await disable_guess_music.finish("设置成功")
guess_dict: Dict[Tuple[str, str], GuessObject] = {}
guess_cd_dict: Dict[Tuple[str, str], float] = {}
guess_music = on_command('猜歌', priority=0)
async def guess_music_loop(bot: Bot, event: Event, state: T_State):
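# Hint loop: sleep 10 seconds per cycle; cycles 0-5 post one textual hint
# ("{cycle+1}/7 这首歌 ..."), cycle 6 posts part of the cover image (7/7) and
# schedules give_answer() 30 seconds later. The loop stops early once
# guess.is_end has been set by a correct answer.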
await asyncio.sleep(10)
guess: GuessObject = state["guess_object"]
if guess.is_end:
return
cycle = state["cycle"]
if cycle < 6:
asyncio.create_task(bot.send(event, f"{cycle + 1}/7 这首歌" + guess.guess_options[cycle]))
else:
asyncio.create_task(bot.send(event, Message([
MessageSegment.text("7/7 这首歌封面的一部分是:"),
MessageSegment.image("base64://" + str(guess.b64image, encoding="utf-8")),
MessageSegment.text("答案将在 30 秒后揭晓")
])))
asyncio.create_task(give_answer(bot, event, state))
return
state["cycle"] += 1
asyncio.create_task(guess_music_loop(bot, event, state))
async def give_answer(bot: Bot, event: Event, state: T_State):
await asyncio.sleep(30)
guess: GuessObject = state["guess_object"]
if guess.is_end:
return
asyncio.create_task(bot.send(event, Message([MessageSegment.text("答案是:" + f"{guess.music['id']}. {guess.music['title']}\n"), MessageSegment.image(f"https://www.diving-fish.com/covers/{guess.music['id']}.jpg")])))
del guess_dict[state["k"]]
@guess_music.handle()
async def _(bot: Bot, event: Event, state: T_State):
mt = event.message_type
k = (mt, event.user_id if mt == "private" else event.group_id)
if mt == "group":
gid = event.group_id
db = get_driver().config.db
c = await db.cursor()
await c.execute(f"select * from guess_table where group_id={gid}")
data = await c.fetchone()
if data is None:
await c.execute(f'insert into guess_table values ({gid}, 1)')
elif data[1] == 0:
await guess_music.send("本群已禁用猜歌")
return
if k in guess_dict:
if k in guess_cd_dict and time.time() > guess_cd_dict[k] - 400:
# if more than 200 seconds have passed, automatically end the previous round
del guess_dict[k]
else:
await guess_music.send("当前已有正在进行的猜歌")
return
whitelists = get_driver().config.whitelists
if not (mt == "group" and gid in whitelists):
if len(guess_dict) >= 5:
await guess_music.finish("千雪有点忙不过来了。现在正在猜的群有点多,晚点再试试如何?")
return
if k in guess_cd_dict and time.time() < guess_cd_dict[k]:
await guess_music.finish(f"已经猜过啦,下次猜歌会在 {time.strftime('%H:%M', time.localtime(guess_cd_dict[k]))} 可用噢")
return
guess = GuessObject()
guess_dict[k] = guess
state["k"] = k
state["guess_object"] = guess
state["cycle"] = 0
guess_cd_dict[k] = time.time() + 600
await guess_music.send("我将从热门乐曲中选择一首歌,并描述它的一些特征,请输入歌曲的【id】、【歌曲标题】或【歌曲标题中 5 个以上连续的字符】进行猜歌(DX乐谱和标准乐谱视为两首歌)。猜歌时查歌等其他命令依然可用。\n警告:这个命令可能会很刷屏,管理员可以使用【猜歌设置】指令进行设置。")
asyncio.create_task(guess_music_loop(bot, event, state))
guess_music_solve = on_message(priority=20)
@guess_music_solve.handle()
async def _(bot: Bot, event: Event, state: T_State):
mt = event.message_type
k = (mt, event.user_id if mt == "private" else event.group_id)
if k not in guess_dict:
return
ans = str(event.get_message())
guess = guess_dict[k]
# await guess_music_solve.send(ans + "|" + guess.music['id'])
if ans == guess.music['id'] or (ans.lower() == guess.music['title'].lower()) or (len(ans) >= 5 and ans.lower() in guess.music['title'].lower()):
guess.is_end = True
del guess_dict[k]
await guess_music_solve.finish(Message([
MessageSegment.reply(event.message_id),
MessageSegment.text("猜对了,答案是:" + f"{guess.music['id']}. {guess.music['title']}\n"),
MessageSegment.image(f"https://www.diving-fish.com/covers/{guess.music['id']}.jpg")
]))
| 33.318637
| 216
| 0.566582
| 2,199
| 16,626
| 4.138699
| 0.192815
| 0.031865
| 0.016921
| 0.024613
| 0.394682
| 0.345127
| 0.303154
| 0.262499
| 0.247226
| 0.197451
| 0
| 0.020239
| 0.265969
| 16,626
| 498
| 217
| 33.385542
| 0.7255
| 0.004812
| 0
| 0.277389
| 0
| 0.013986
| 0.224687
| 0.058091
| 0
| 0
| 0
| 0
| 0
| 1
| 0.006993
| false
| 0
| 0.04662
| 0.002331
| 0.090909
| 0.004662
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc66871c7a70ed30f6605efa0e99f0abf3ceaa25
| 4,854
|
py
|
Python
|
layers/gin_layer.py
|
JakeStevens/benchmarking-gnns
|
a17fdf1b1d758fc65d5eeaf3726f5efa747a4081
|
[
"MIT"
] | 275
|
2020-10-22T22:03:33.000Z
|
2022-03-25T06:08:05.000Z
|
semisupervised_MNIST_CIFAR10/pre-training/layers/gin_layer.py
|
xgbt/GraphCL
|
d857849d51bb168568267e07007c0b0c8bb6d869
|
[
"MIT"
] | 43
|
2020-10-30T08:28:01.000Z
|
2022-03-31T16:55:12.000Z
|
semisupervised_MNIST_CIFAR10/pre-training/layers/gin_layer.py
|
xgbt/GraphCL
|
d857849d51bb168568267e07007c0b0c8bb6d869
|
[
"MIT"
] | 70
|
2020-10-28T19:14:18.000Z
|
2022-03-27T06:11:51.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl.function as fn
"""
GIN: Graph Isomorphism Networks
HOW POWERFUL ARE GRAPH NEURAL NETWORKS? (Keyulu Xu, Weihua Hu, Jure Leskovec and Stefanie Jegelka, ICLR 2019)
https://arxiv.org/pdf/1810.00826.pdf
"""
class GINLayer(nn.Module):
"""
[!] code adapted from dgl implementation of GINConv
Parameters
----------
apply_func : callable activation function/layer or None
If not None, apply this function to the updated node feature,
the :math:`f_\Theta` in the formula.
aggr_type :
Aggregator type to use (``sum``, ``max`` or ``mean``).
out_dim :
Required for batch norm layer; should match out_dim of apply_func if not None.
dropout :
Required for dropout of output features.
graph_norm :
boolean flag for output features normalization w.r.t. graph sizes.
batch_norm :
boolean flag for batch_norm layer.
residual :
boolean flag for using residual connection.
init_eps : optional
Initial :math:`\epsilon` value, default: ``0``.
learn_eps : bool, optional
If True, :math:`\epsilon` will be a learnable parameter.
"""
def __init__(self, apply_func, aggr_type, dropout, graph_norm, batch_norm, residual=False, init_eps=0, learn_eps=False):
super().__init__()
self.apply_func = apply_func
if aggr_type == 'sum':
self._reducer = fn.sum
elif aggr_type == 'max':
self._reducer = fn.max
elif aggr_type == 'mean':
self._reducer = fn.mean
else:
raise KeyError('Aggregator type {} not recognized.'.format(aggr_type))
self.graph_norm = graph_norm
self.batch_norm = batch_norm
self.residual = residual
self.dropout = dropout
in_dim = apply_func.mlp.input_dim
out_dim = apply_func.mlp.output_dim
if in_dim != out_dim:
self.residual = False
# to specify whether eps is trainable or not.
if learn_eps:
self.eps = torch.nn.Parameter(torch.FloatTensor([init_eps]))
else:
self.register_buffer('eps', torch.FloatTensor([init_eps]))
self.bn_node_h = nn.BatchNorm1d(out_dim)
def forward(self, g, h, snorm_n):
h_in = h # for residual connection
g = g.local_var()
g.ndata['h'] = h
g.update_all(fn.copy_u('h', 'm'), self._reducer('m', 'neigh'))
h = (1 + self.eps) * h + g.ndata['neigh']
if self.apply_func is not None:
h = self.apply_func(h)
if self.graph_norm:
h = h * snorm_n # normalize activation w.r.t. graph size
if self.batch_norm:
h = self.bn_node_h(h) # batch normalization
h = F.relu(h) # non-linear activation
if self.residual:
h = h_in + h # residual connection
h = F.dropout(h, self.dropout, training=self.training)
return h
class ApplyNodeFunc(nn.Module):
"""
This class is used in class GINNet
Update the node feature hv with MLP
"""
def __init__(self, mlp):
super().__init__()
self.mlp = mlp
def forward(self, h):
h = self.mlp(h)
return h
class MLP(nn.Module):
"""MLP with linear output"""
def __init__(self, num_layers, input_dim, hidden_dim, output_dim):
super().__init__()
self.linear_or_not = True # default is linear model
self.num_layers = num_layers
self.output_dim = output_dim
self.input_dim = input_dim
if num_layers < 1:
raise ValueError("number of layers should be positive!")
elif num_layers == 1:
# Linear model
self.linear = nn.Linear(input_dim, output_dim)
else:
# Multi-layer model
self.linear_or_not = False
self.linears = torch.nn.ModuleList()
self.batch_norms = torch.nn.ModuleList()
self.linears.append(nn.Linear(input_dim, hidden_dim))
for layer in range(num_layers - 2):
self.linears.append(nn.Linear(hidden_dim, hidden_dim))
self.linears.append(nn.Linear(hidden_dim, output_dim))
for layer in range(num_layers - 1):
self.batch_norms.append(nn.BatchNorm1d((hidden_dim)))
def forward(self, x):
if self.linear_or_not:
# If linear model
return self.linear(x)
else:
# If MLP
h = x
for i in range(self.num_layers - 1):
h = F.relu(self.batch_norms[i](self.linears[i](h)))
return self.linears[-1](h)
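# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): wiring
# MLP -> ApplyNodeFunc -> GINLayer on a tiny toy graph. Shapes and
# hyperparameters are arbitrary example values; assumes dgl and torch are
# installed.
if __name__ == "__main__":
    import dgl
    g = dgl.graph(([0, 1, 2], [1, 2, 0]))                  # 3-node toy graph
    h = torch.randn(3, 16)                                  # node features
    snorm_n = torch.ones(3, 1)                              # graph-size norm factors
    mlp = MLP(num_layers=2, input_dim=16, hidden_dim=32, output_dim=16)
    gin = GINLayer(ApplyNodeFunc(mlp), aggr_type='sum', dropout=0.1,
                   graph_norm=True, batch_norm=True, residual=True)
    out = gin(g, h, snorm_n)                                # -> shape (3, 16)
    print(out.shape)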
| 32.577181
| 124
| 0.581994
| 633
| 4,854
| 4.279621
| 0.273302
| 0.0299
| 0.019195
| 0.016611
| 0.054264
| 0.045035
| 0.045035
| 0
| 0
| 0
| 0
| 0.00726
| 0.318912
| 4,854
| 149
| 125
| 32.577181
| 0.81216
| 0.237948
| 0
| 0.109756
| 0
| 0
| 0.028912
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.073171
| false
| 0
| 0.04878
| 0
| 0.207317
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc68e0edf30588586dedbfe1358fdf97ab01598d
| 4,783
|
py
|
Python
|
papirus_renderer.py
|
ryuchihoon/WeatherStation
|
e3fd210939a961bc1724197f3885964cb4ae5a28
|
[
"Apache-2.0"
] | null | null | null |
papirus_renderer.py
|
ryuchihoon/WeatherStation
|
e3fd210939a961bc1724197f3885964cb4ae5a28
|
[
"Apache-2.0"
] | null | null | null |
papirus_renderer.py
|
ryuchihoon/WeatherStation
|
e3fd210939a961bc1724197f3885964cb4ae5a28
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import os
import time
import collections
from PIL import Image, ImageOps, ImageDraw, ImageFont
code_2_icono = collections.defaultdict(lambda : '38')
kor_2_eng = collections.defaultdict(lambda : 'UNKNOWN')
code_2_icono['SKY_O00'] = ['38']
code_2_icono['SKY_O01'] = ['01', '08']
code_2_icono['SKY_O02'] = ['02', '09']
code_2_icono['SKY_O03'] = ['03', '10']
code_2_icono['SKY_O04'] = ['12', '40']
code_2_icono['SKY_O05'] = ['13', '41']
code_2_icono['SKY_O06'] = ['14', '42']
code_2_icono['SKY_O07'] = ['18']
code_2_icono['SKY_O08'] = ['21']
code_2_icono['SKY_O09'] = ['32']
code_2_icono['SKY_O10'] = ['04']
code_2_icono['SKY_O11'] = ['29']
code_2_icono['SKY_O12'] = ['26']
code_2_icono['SKY_O13'] = ['27']
code_2_icono['SKY_O14'] = ['28']
code_2_icono['SKY_W00'] = ['38']
code_2_icono['SKY_W01'] = ['01', '08']
code_2_icono['SKY_W02'] = ['02', '09']
code_2_icono['SKY_W03'] = ['03', '10']
code_2_icono['SKY_W04'] = ['18']
code_2_icono['SKY_W07'] = ['21']
code_2_icono['SKY_W09'] = ['12', '40']
code_2_icono['SKY_W10'] = ['21']
code_2_icono['SKY_W11'] = ['04']
code_2_icono['SKY_W12'] = ['13', '41']
code_2_icono['SKY_W13'] = ['32']
kor_2_eng[u'좋음'] = ['GOOD']
kor_2_eng[u'보통'] = ['NORMAL']
kor_2_eng[u'나쁨'] = ['BAD']
kor_2_eng[u'매우 나쁨'] = ['V BAD']
def geticonfname(code, drawNight=False):
l = code_2_icono[code]
dname = os.path.join(os.path.dirname(__file__), "resources", "weather_icons_mod")
if len(l) > 1 and drawNight:
cur_hour = time.localtime().tm_hour
is_night = cur_hour < 5 or cur_hour > 18
if is_night:
return os.path.join(dname, l[1] + '.png')
else:
return os.path.join(dname, l[0] + '.png')
else:
return os.path.join(dname, l[0] + '.png')
BLACK = 0
WHITE = 1
class PapirusRenderer:
"""Renderer for Papirus HAT"""
def __init__(self, rotate=0, font_path=None):
if font_path:
self.font_path = font_path
else:
self.font_path = "/usr/share/fonts/truetype/freefont/FreeMono.ttf"
print("rotate:",rotate)
try:
from papirus import Papirus
self.papirus = Papirus(rotate=rotate)
self.canvas_size = self.papirus.size
print("papirus size : %s"%str(self.canvas_size))
except ImportError:
print("papirus import failed")
self.papirus = None
self.canvas_size = (264,176)
def render(self, weather, weather_forecast):
canvas = Image.new('1', self.canvas_size, WHITE)
print("font_path:",self.font_path)
fname = geticonfname(weather.weather_code, drawNight=True)
print("file:",fname)
self._drawImage(canvas, fname, 20,10,(100,100))
print("cur desc : %s"%str(weather.weather_desc))
print("cur airq : %s"%str(weather.air_quality))
temperature = str(weather.cur_temperature).split('.')[0] + u" \u2103"
self._drawText(canvas, temperature, 70,115, font_size=20, center_horizontal=True)
translated = kor_2_eng[weather.air_quality][0]
print("cur airq translated: %s"%translated)
self._drawText(canvas, translated, 70,140, font_size=20, center_horizontal=True)
base_x,base_y = 145,5
for i,w in enumerate(weather_forecast):
fname = geticonfname(w.weather_code)
self._drawImage(canvas, fname, base_x, base_y+55*i, (50,50))
temperature = str(w.min_temperature) + " / " + str(w.max_temperature)
self._drawText(canvas, temperature, base_x+80, base_y+28+55*i, font_size=14, center_horizontal=True)
# update time
self._drawText(canvas, time.strftime("%Y-%m-%d %H:%M",time.localtime()), 136, 165, font_size=9, center_horizontal=True)
if self.papirus is None:
# save a image for debugging purpose
with open("result.jpg", "wb") as fp:
canvas.save(fp)
print("result file saved")
else:
self.papirus.display(canvas)
self.papirus.update()
def _drawImage(self, canvas, image_path, x, y, size):
image = Image.open(image_path)
image = ImageOps.grayscale(image)
image = image.resize(size)
image = image.convert("1", dither=Image.FLOYDSTEINBERG)
canvas.paste(image,(x,y))
def _drawText(self, canvas, text, x, y, font_size=20, center_horizontal=False):
draw = ImageDraw.Draw(canvas)
font = ImageFont.truetype(self.font_path, font_size)
text_draw_size = draw.textsize(text, font=font)
if center_horizontal:
x = x - text_draw_size[0]/2
draw.text( (x, y) , text, font=font, fill=BLACK)
| 32.986207
| 127
| 0.615931
| 679
| 4,783
| 4.097202
| 0.306333
| 0.050324
| 0.100647
| 0.121495
| 0.185478
| 0.113228
| 0.022646
| 0.022646
| 0.022646
| 0.022646
| 0
| 0.064785
| 0.222245
| 4,783
| 144
| 128
| 33.215278
| 0.683065
| 0.019026
| 0
| 0.055046
| 0
| 0
| 0.117208
| 0.010034
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045872
| false
| 0
| 0.073395
| 0
| 0.155963
| 0.082569
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc6ae61e76507e8c85be28c293840912ca2612a4
| 7,683
|
py
|
Python
|
src/lava/lib/dl/slayer/utils/assistant.py
|
timcheck/lava-dl
|
e680722071129fde952ea0d744984aa2a038797a
|
[
"BSD-3-Clause"
] | 37
|
2021-09-30T16:47:15.000Z
|
2022-03-07T22:29:21.000Z
|
src/lava/lib/dl/slayer/utils/assistant.py
|
timcheck/lava-dl
|
e680722071129fde952ea0d744984aa2a038797a
|
[
"BSD-3-Clause"
] | 36
|
2021-11-04T16:54:55.000Z
|
2022-03-31T02:26:29.000Z
|
src/lava/lib/dl/slayer/utils/assistant.py
|
timcheck/lava-dl
|
e680722071129fde952ea0d744984aa2a038797a
|
[
"BSD-3-Clause"
] | 20
|
2021-10-29T22:55:58.000Z
|
2022-03-22T17:27:16.000Z
|
# Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
"""Assistant utility for automatically load network from network
description."""
import torch
class Assistant:
"""Assistant that bundles training, validation and testing workflow.
Parameters
----------
net : torch.nn.Module
network to train.
error : object or lambda
an error object or a lambda function that evaluates error.
It is expected to take ``(output, target)`` | ``(output, label)``
as its argument and return a scalar value.
optimizer : torch optimizer
the learning optimizer.
stats : slayer.utils.stats
learning stats logger. If None, stats will not be logged.
Defaults to None.
classifier : slayer.classifier or lambda
classifier object or lambda function that takes output and
returns the network prediction. None means regression mode.
Classification steps are bypassed.
Defaults to None.
count_log : bool
flag to enable count log. Defaults to False.
lam : float
lagrangian to merge network layer based loss.
None means no such additional loss.
If not None, net is expected to return the accumulated loss as second
argument. It is intended to be used with layer wise sparsity loss.
Defaults to None.
Attributes
----------
net
error
optimizer
stats
classifier
count_log
lam
device : torch.device or None
the main device memory where the network is placed. It is None at the start and
gets initialized on the first call.
"""
def __init__(
self,
net, error, optimizer,
stats=None, classifier=None, count_log=False,
lam=None
):
self.net = net
self.error = error
self.optimizer = optimizer
self.classifier = classifier
self.stats = stats
self.count_log = count_log
self.lam = lam
self.device = None
def reduce_lr(self, factor=10 / 3):
"""Reduces the learning rate of the optimizer by ``factor``.
Parameters
----------
factor : float
learning rate reduction factor. Defaults to 10/3.
Returns
-------
"""
for param_group in self.optimizer.param_groups:
print('\nLearning rate reduction from', param_group['lr'])
param_group['lr'] /= factor
def train(self, input, target):
"""Training assistant.
Parameters
----------
input : torch tensor
input tensor.
target : torch tensor
ground truth or label.
Returns
-------
output
network's output.
count : optional
spike count if ``count_log`` is enabled
"""
self.net.train()
if self.device is None:
for p in self.net.parameters():
self.device = p.device
break
device = self.device
input = input.to(device)
target = target.to(device)
count = None
if self.count_log is True:
if self.lam is None:
output, count = self.net(input)
else:
output, net_loss, count = self.net(input)
else:
if self.lam is None:
output = self.net(input)
else:
output, net_loss = self.net(input)
loss = self.error(output, target)
if self.stats is not None:
self.stats.training.num_samples += input.shape[0]
self.stats.training.loss_sum += loss.cpu().data.item() \
* output.shape[0]
if self.classifier is not None: # classification
self.stats.training.correct_samples += torch.sum(
self.classifier(output) == target
).cpu().data.item()
if self.lam is not None: # add net_loss before backward step
loss += self.lam * net_loss
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
if count is None:
return output
return output, count
def test(self, input, target):
"""Testing assistant.
Parameters
----------
input : torch tensor
input tensor.
target : torch tensor
ground truth or label.
Returns
-------
output
network's output.
count : optional
spike count if ``count_log`` is enabled
"""
self.net.eval()
if self.device is None:
for p in self.net.parameters():
self.device = p.device
break
device = self.device
with torch.no_grad():
input = input.to(device)
target = target.to(device)
count = None
if self.count_log is True:
if self.lam is None:
output, count = self.net(input)
else:
output, _, count = self.net(input)
else:
if self.lam is None:
output = self.net(input)
else:
output, _ = self.net(input)
loss = self.error(output, target)
if self.stats is not None:
self.stats.testing.num_samples += input.shape[0]
self.stats.testing.loss_sum += loss.cpu().data.item() \
* output.shape[0]
if self.classifier is not None: # classification
self.stats.testing.correct_samples += torch.sum(
self.classifier(output) == target
).cpu().data.item()
if count is None:
return output
return output, count
def valid(self, input, target):
"""Validation assistant.
Parameters
----------
input : torch tensor
input tensor.
target : torch tensor
ground truth or label.
Returns
-------
output
network's output.
count : optional
spike count if ``count_log`` is enabled
"""
self.net.eval()
with torch.no_grad():
device = self.net.device
input = input.to(device)
target = target.to(device)
count = None
if self.count_log is True:
if self.lam is None:
output, count = self.net(input)
else:
output, _, count = self.net(input)
else:
if self.lam is None:
output = self.net(input)
else:
output, _ = self.net(input)
loss = self.error(output, target)
if self.stats is not None:
self.stats.validation.num_samples += input.shape[0]
self.stats.validation.loss_sum += loss.cpu().data.item() * output.shape[0]
if self.classifier is not None: # classification
self.stats.validation.correct_samples += torch.sum(
self.classifier(output) == target
).cpu().data.item()
if count is None:
return output
return output, count
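# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): one regression-style
# training step with the Assistant. The network, loss and data below are
# placeholders chosen for the example, not anything prescribed by lava-dl.
if __name__ == "__main__":
    net = torch.nn.Linear(8, 2)
    error = torch.nn.MSELoss()                        # takes (output, target)
    optimizer = torch.optim.SGD(net.parameters(), lr=0.1)
    assistant = Assistant(net, error, optimizer)      # stats/classifier omitted
    x = torch.randn(4, 8)
    y = torch.randn(4, 2)
    out = assistant.train(x, y)                       # one forward/backward/step
    print(out.shape)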
| 29.436782
| 78
| 0.521281
| 834
| 7,683
| 4.752998
| 0.196643
| 0.035318
| 0.036327
| 0.036327
| 0.534057
| 0.524975
| 0.524975
| 0.506307
| 0.506307
| 0.506307
| 0
| 0.003847
| 0.390993
| 7,683
| 260
| 79
| 29.55
| 0.843343
| 0.312508
| 0
| 0.674797
| 0
| 0
| 0.007214
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04065
| false
| 0
| 0.00813
| 0
| 0.105691
| 0.00813
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc6bfc96896fcc886f4088ddc53f2aa20f638760
| 1,309
|
py
|
Python
|
hpc-historias-clinicas/historias/migrations/0007_auto_20150425_1459.py
|
btenaglia/hpc-historias-clinicas
|
649d8660381381b1c591667760c122d73071d5ec
|
[
"BSD-3-Clause"
] | null | null | null |
hpc-historias-clinicas/historias/migrations/0007_auto_20150425_1459.py
|
btenaglia/hpc-historias-clinicas
|
649d8660381381b1c591667760c122d73071d5ec
|
[
"BSD-3-Clause"
] | null | null | null |
hpc-historias-clinicas/historias/migrations/0007_auto_20150425_1459.py
|
btenaglia/hpc-historias-clinicas
|
649d8660381381b1c591667760c122d73071d5ec
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):

    dependencies = [
        ('historias', '0006_auto_20150413_0001'),
    ]

    operations = [
        migrations.AlterField(
            model_name='historias',
            name='fecha_ingreso',
            field=models.DateField(default=datetime.datetime(2015, 4, 25, 14, 59, 14, 468359), help_text='Formato: dd/mm/yyyy', verbose_name='Fecha de Ingreso'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='historias',
            name='hora_ingreso',
            field=models.TimeField(default=datetime.datetime(2015, 4, 25, 14, 59, 14, 468307), help_text='Formato: hh:mm', verbose_name='Hora de Ingreso'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='ubicaciones',
            name='sala',
            field=models.CharField(max_length=10, choices=[(b'SALA 1', b'SALA 1'), (b'SALA 2', b'SALA 2'), (b'SALA 3', b'SALA 3'), (b'SALA 4', b'SALA 4'), (b'SALA 5', b'SALA 5'), (b'GAURDIA', b'GAURDIA'), (b'NEO', b'NEO'), (b'UTI', b'UTI'), (b'UCO', b'UCO'), (b'PRE PARTO', b'PRE PARTO')]),
            preserve_default=True,
        ),
    ]
| 38.5
| 290
| 0.593583
| 165
| 1,309
| 4.581818
| 0.412121
| 0.066138
| 0.099206
| 0.115079
| 0.388889
| 0.318783
| 0.246032
| 0.246032
| 0.246032
| 0
| 0
| 0.067814
| 0.245225
| 1,309
| 33
| 291
| 39.666667
| 0.697368
| 0.016043
| 0
| 0.407407
| 0
| 0
| 0.205288
| 0.017885
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc6eb694558519bbe4bb770c6ebebf2c2317f744
| 2,160
|
py
|
Python
|
app_id_utils.py
|
woctezuma/match-steam-banners
|
dff1bc2ddf35a37bcdea46a220f5d0257d47e017
|
[
"MIT"
] | null | null | null |
app_id_utils.py
|
woctezuma/match-steam-banners
|
dff1bc2ddf35a37bcdea46a220f5d0257d47e017
|
[
"MIT"
] | 10
|
2021-05-01T19:57:06.000Z
|
2022-03-12T00:54:04.000Z
|
app_id_utils.py
|
woctezuma/match-steam-banners
|
dff1bc2ddf35a37bcdea46a220f5d0257d47e017
|
[
"MIT"
] | null | null | null |
import os
from pathlib import Path
from data_utils import get_data_path, get_image_data_path, get_image_extension
def app_id_to_image_filename(app_id, is_horizontal_banner=False):
image_data_path = get_image_data_path(is_horizontal_banner)
image_filename = image_data_path + str(app_id) + get_image_extension()
return image_filename
def image_filename_to_app_id(image_filename):
base_name = os.path.basename(image_filename)
app_id = base_name.strip(get_image_extension())
return app_id
def list_app_ids(is_horizontal_banner=False):
image_data_path = get_image_data_path(is_horizontal_banner)
image_filenames = Path(image_data_path).glob("*" + get_image_extension())
app_ids = [image_filename_to_app_id(filename) for filename in image_filenames]
app_ids = sorted(app_ids, key=int)
return app_ids
def get_frozen_app_ids_filename():
frozen_app_ids_filename = get_data_path() + "frozen_app_ids.txt"
return frozen_app_ids_filename
def freeze_app_ids(app_ids, output_file_name=None):
if output_file_name is None:
output_file_name = get_frozen_app_ids_filename()
with open(output_file_name, "w", encoding="utf8") as f:
for app_id in app_ids:
f.write("{}\n".format(app_id))
return
def load_frozen_app_ids(input_file_name=None):
if input_file_name is None:
input_file_name = get_frozen_app_ids_filename()
with open(input_file_name, "r", encoding="utf8") as f:
# Do not convert to a set object, or any other conversion, because we want to keep the list order as it is.
# Just read the list from the file. That is all there is to do. Otherwise, appIDs will be scrambled!
frozen_app_ids = [app_id.strip() for app_id in f.readlines()]
return frozen_app_ids
def get_frozen_app_ids(is_horizontal_banner=False):
try:
frozen_app_ids = load_frozen_app_ids()
except FileNotFoundError:
print("Creating {}".format(get_frozen_app_ids_filename()))
frozen_app_ids = list_app_ids(is_horizontal_banner=is_horizontal_banner)
freeze_app_ids(frozen_app_ids)
return frozen_app_ids
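# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): freezing a list of
# appIDs to a temporary file and reading it back, bypassing the default data
# path by passing explicit file names. The appID values are arbitrary examples.
if __name__ == "__main__":
    import tempfile
    example_app_ids = ["570", "620", "730"]
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as tmp:
        frozen_path = tmp.name
    freeze_app_ids(example_app_ids, output_file_name=frozen_path)
    assert load_frozen_app_ids(input_file_name=frozen_path) == example_app_ids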
| 29.589041
| 115
| 0.747222
| 341
| 2,160
| 4.302053
| 0.255132
| 0.106339
| 0.130879
| 0.0818
| 0.316292
| 0.283572
| 0.225631
| 0.203136
| 0.155419
| 0.102249
| 0
| 0.001125
| 0.177315
| 2,160
| 72
| 116
| 30
| 0.824423
| 0.094444
| 0
| 0.097561
| 0
| 0
| 0.022529
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.170732
| false
| 0
| 0.073171
| 0
| 0.414634
| 0.02439
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc6f65558ddc1ad343876953a1a42d3ab2c832a9
| 348
|
py
|
Python
|
upload.py
|
sjm446/aMAZEd
|
38789f9898097991b19e686fd76ef4abd5bfe94c
|
[
"MIT"
] | null | null | null |
upload.py
|
sjm446/aMAZEd
|
38789f9898097991b19e686fd76ef4abd5bfe94c
|
[
"MIT"
] | null | null | null |
upload.py
|
sjm446/aMAZEd
|
38789f9898097991b19e686fd76ef4abd5bfe94c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import boto3
import random
import os
BUCKET=os.environ.get('EXPORT_S3_BUCKET_URL')
if BUCKET is not None:
    s3 = boto3.client('s3')
    with open("maze.txt", "rb") as f:
        s3.upload_fileobj(f, BUCKET, "maze"+str(random.randrange(100000))+".txt")
else:
    print("EXPORT_S3_BUCKET_URL was not set so not uploading file")
| 29
| 81
| 0.692529
| 56
| 348
| 4.178571
| 0.660714
| 0.068376
| 0.119658
| 0.145299
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.044068
| 0.152299
| 348
| 11
| 82
| 31.636364
| 0.749153
| 0.057471
| 0
| 0
| 0
| 0
| 0.287462
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.3
| 0
| 0.3
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc709a454475435a2a06bba2371531a53e2d11c0
| 2,790
|
py
|
Python
|
zerver/management/commands/list_realms.py
|
rtzll/zulip
|
b831df8f7fc2f5b89ec998266901ac491d52a7fc
|
[
"Apache-2.0"
] | null | null | null |
zerver/management/commands/list_realms.py
|
rtzll/zulip
|
b831df8f7fc2f5b89ec998266901ac491d52a7fc
|
[
"Apache-2.0"
] | null | null | null |
zerver/management/commands/list_realms.py
|
rtzll/zulip
|
b831df8f7fc2f5b89ec998266901ac491d52a7fc
|
[
"Apache-2.0"
] | 1
|
2019-10-14T23:36:14.000Z
|
2019-10-14T23:36:14.000Z
|
import sys
from typing import Any
from argparse import ArgumentParser
from zerver.models import Realm
from zerver.lib.management import ZulipBaseCommand
class Command(ZulipBaseCommand):
help = """List realms in the server and it's configuration settings(optional).
Usage examples:
./manage.py list_realms
./manage.py list_realms --all"""
def add_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument("--all",
dest="all",
action="store_true",
default=False,
help="Print all the configuration settings of the realms.")
def handle(self, *args: Any, **options: Any) -> None:
realms = Realm.objects.all()
outer_format = "%-5s %-40s %-40s"
inner_format = "%-40s %s"
deactivated = False
if not options["all"]:
print(outer_format % ("id", "string_id", "name"))
print(outer_format % ("--", "---------", "----"))
for realm in realms:
if realm.deactivated:
print(self.style.ERROR(outer_format % (realm.id, realm.string_id, realm.name)))
deactivated = True
else:
print(outer_format % (realm.id, realm.string_id, realm.name))
if deactivated:
print(self.style.WARNING("\nRed rows represent deactivated realms."))
sys.exit(0)
# The remaining code path is the --all case.
identifier_attributes = ["id", "name", "string_id"]
for realm in realms:
# Start with just all the fields on the object, which is
# hacky but doesn't require any work to maintain.
realm_dict = realm.__dict__
# Remove a field that is confusingly useless
del realm_dict['_state']
# Fix the one bitfield to display useful data
realm_dict['authentication_methods'] = str(realm.authentication_methods_dict())
for key in identifier_attributes:
if realm.deactivated:
print(self.style.ERROR(inner_format % (key, realm_dict[key])))
deactivated = True
else:
print(inner_format % (key, realm_dict[key]))
for key, value in sorted(realm_dict.items()):
if key not in identifier_attributes:
if realm.deactivated:
print(self.style.ERROR(inner_format % (key, value)))
else:
print(inner_format % (key, value))
print("-" * 80)
if deactivated:
print(self.style.WARNING("\nRed is used to highlight deactivated realms."))
| 37.702703
| 99
| 0.557348
| 302
| 2,790
| 5.029801
| 0.39404
| 0.041475
| 0.065833
| 0.082291
| 0.269256
| 0.24819
| 0.223173
| 0.148782
| 0.148782
| 0.096116
| 0
| 0.005435
| 0.340502
| 2,790
| 73
| 100
| 38.219178
| 0.820109
| 0.083154
| 0
| 0.226415
| 0
| 0
| 0.155233
| 0.008624
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037736
| false
| 0
| 0.09434
| 0
| 0.169811
| 0.207547
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc72c71783ded10cb6bea92ec124528a890dca34
| 627
|
py
|
Python
|
Other/transactionlog entries since timestamp.py
|
DJHig/TM1py-samples
|
da4050380447472a02e2a107a2c5be79ac284d0a
|
[
"MIT"
] | 1
|
2019-05-30T10:10:20.000Z
|
2019-05-30T10:10:20.000Z
|
Other/transactionlog entries since timestamp.py
|
DJHig/TM1py-samples
|
da4050380447472a02e2a107a2c5be79ac284d0a
|
[
"MIT"
] | null | null | null |
Other/transactionlog entries since timestamp.py
|
DJHig/TM1py-samples
|
da4050380447472a02e2a107a2c5be79ac284d0a
|
[
"MIT"
] | 1
|
2017-09-01T03:35:18.000Z
|
2017-09-01T03:35:18.000Z
|
"""
Get all TM1 transactions for all cubes starting from a specific date.
"""
import configparser
config = configparser.ConfigParser()
config.read(r'..\config.ini')
from datetime import datetime
from TM1py.Services import TM1Service
with TM1Service(**config['tm1srv01']) as tm1:
    # Timestamp for Message-Log parsing
    timestamp = datetime(year=2018, month=2, day=15, hour=16, minute=2, second=0)
    # Get all entries since timestamp
    entries = tm1.server.get_transaction_log_entries(since=timestamp)
    # loop through entries
    for entry in entries:
        # Do stuff
        print(entry['TimeStamp'], entry)
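# Illustrative note (an assumption, not taken from this script): the
# [tm1srv01] section of config.ini is expected to hold TM1py connection
# settings, along the lines of
#
#   [tm1srv01]
#   address=localhost
#   port=8001
#   user=admin
#   password=apple
#   ssl=True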
| 24.115385
| 81
| 0.716108
| 82
| 627
| 5.439024
| 0.609756
| 0.026906
| 0.09417
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.039063
| 0.183413
| 627
| 25
| 82
| 25.08
| 0.832031
| 0.261563
| 0
| 0
| 0
| 0
| 0.066372
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.3
| 0
| 0.3
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc73faf5f8cb49244c505ccd988f122eb6b59c66
| 6,019
|
py
|
Python
|
src/patteRNA/Dataset.py
|
AviranLab/patteRNA
|
88b900844016717a71b6ec8e4f2d10d8888600ce
|
[
"BSD-2-Clause"
] | 12
|
2018-03-02T21:48:46.000Z
|
2022-01-31T02:58:59.000Z
|
src/patteRNA/Dataset.py
|
AviranLab/patteRNA
|
88b900844016717a71b6ec8e4f2d10d8888600ce
|
[
"BSD-2-Clause"
] | 2
|
2018-07-19T01:11:11.000Z
|
2019-04-08T23:54:08.000Z
|
src/patteRNA/Dataset.py
|
AviranLab/patteRNA
|
88b900844016717a71b6ec8e4f2d10d8888600ce
|
[
"BSD-2-Clause"
] | 5
|
2018-03-06T18:13:36.000Z
|
2021-01-08T20:54:28.000Z
|
import logging
import numpy as np
from scipy.stats import entropy
from patteRNA.Transcript import Transcript
from patteRNA import filelib
logger = logging.getLogger(__name__)
class Dataset:
def __init__(self, fp_observations, fp_sequences=None, fp_references=None):
self.fp_obs = fp_observations
self.fp_fasta = fp_sequences
self.fp_refs = fp_references
self.rnas = dict()
self.stats = dict()
def load_rnas(self, log_flag=False):
observations_dict = filelib.parse_observations(self.fp_obs)
observations_rnas = set(observations_dict.keys())
dataset_rnas = observations_rnas
sequences_dict = None
if self.fp_fasta:
sequences_dict = filelib.parse_fasta(self.fp_fasta)
sequences_rnas = set(sequences_dict.keys())
# Cross reference input files to confirm all transcripts
for rna in observations_rnas.difference(sequences_rnas):
print('WARNING - No sequence found for RNA: {}'.format(rna))
sequences_dict[rna] = ''.join(['N'] * len(observations_dict[rna]))
for rna in sequences_rnas.difference(observations_rnas):
print('WARNING - No probing data found for RNA: {}'.format(rna))
observations_dict[rna] = np.tile(np.nan, len(sequences_dict[rna]))
dataset_rnas.update(sequences_rnas)
for rna_name in dataset_rnas:
if self.fp_fasta:
self.rnas[rna_name] = Transcript(rna_name, sequences_dict[rna_name], observations_dict[rna_name])
else:
self.rnas[rna_name] = Transcript(rna_name, 'N' * len(observations_dict[rna_name]),
observations_dict[rna_name])
if log_flag:
for rna in self.rnas:
self.rnas[rna].log_transform()
self.compute_stats()
def compute_stats(self):
"""
Parse all finite observations in the input file and compute some statistics on the data.
These statistics are mostly used to initialize parameters of the emission model before training.
"""
finite_obs = []
total_obs = 0
up_ref = 0
p_ref = 0
for rna in self.rnas:
finite_obs.extend(self.rnas[rna].obs[np.isfinite(self.rnas[rna].obs)])
total_obs += len(self.rnas[rna].obs)
up_ref += int(np.sum(self.rnas[rna].ref == 0))
p_ref += int(np.sum(self.rnas[rna].ref == 1))
self.stats['quantile_basis'] = np.linspace(0, 1, 1000)
self.stats['quantiles'] = np.quantile(finite_obs, self.stats["quantile_basis"])
self.stats['P25'], self.stats['P75'] = np.percentile(finite_obs, (25, 75))
self.stats['P40'], self.stats['P60'] = np.percentile(finite_obs, (40, 60))
self.stats['n_obs'] = len(finite_obs)
self.stats['up_ref'] = up_ref
self.stats['p_ref'] = p_ref
self.stats['total_obs'] = total_obs
self.stats['continuous_variance'] = np.var(finite_obs)
self.stats['minimum'] = np.min(finite_obs)
self.stats['maximum'] = np.max(finite_obs)
self.stats['finite_obs'] = finite_obs
self.stats['histogram_bins'] = np.linspace(self.stats['minimum'], self.stats['maximum'], 20)
self.stats['histogram'], _ = np.histogram(finite_obs,
bins=self.stats['histogram_bins'],
density=True)
def spawn_training_set(self, kl_div):
"""
Spawn a training set (smaller than or equal size to overall data) based on KL divergence criteria.
Transcripts are incrementally added to a training Dataset (high quality transcripts first) until
the training set's KL divergence from the overall data falls below the provided threshold.
"""
training_transcripts = []
training_obs = []
kl_div_set = 1.0
group_size = 20
logger.info(' ... sorting')
rnas_sd = sorted(self.rnas.values(), key=lambda transcript: transcript.density, reverse=True)
logger.info(' ... selecting')
while kl_div_set > kl_div and rnas_sd:
rnas = rnas_sd[:group_size]
rnas_sd[:group_size] = []
for rna in rnas:
training_transcripts.append(rna.name)
training_obs.extend(rna.obs[rna.mask_finite])
training_histogram, _ = np.histogram(training_obs,
bins=self.stats['histogram_bins'],
density=True)
kl_div_set = entropy(training_histogram, self.stats['histogram'])
training_set = self.spawn_set(rnas=training_transcripts)
training_set.compute_stats()
return training_set, kl_div_set
def pre_process(self, model, scoring=False):
if model.emission_model.type == 'DOM':
for rna in self.rnas:
model.emission_model.discretize(self.rnas[rna])
if scoring:
for rna in self.rnas.values():
model.e_step(rna)
rna.compute_log_B_ratios()
def get_emissions(self, model):
for rna in self.rnas:
model.emission_model.compute_emissions(self.rnas[rna])
def spawn_set(self, rnas):
spawned_set = Dataset(fp_observations=None, fp_sequences=None, fp_references=None)
spawned_set.rnas = {rna: self.rnas[rna] for rna in rnas}
return spawned_set
def spawn_reference_set(self):
spawned_set = Dataset(fp_observations=None, fp_references=None, fp_sequences=None)
references = [rna for rna in self.rnas if self.rnas[rna].ref is not None]
spawned_set.rnas = {rna: self.rnas[rna] for rna in references}
spawned_set.compute_stats()
return spawned_set
def clear(self):
self.rnas = None
self.stats = None
| 39.339869
| 113
| 0.6104
| 752
| 6,019
| 4.679521
| 0.232713
| 0.061381
| 0.040637
| 0.02046
| 0.194089
| 0.153453
| 0.137539
| 0.079
| 0.022734
| 0.022734
| 0
| 0.007694
| 0.287423
| 6,019
| 152
| 114
| 39.598684
| 0.812777
| 0.088054
| 0
| 0.110092
| 0
| 0
| 0.056109
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.082569
| false
| 0
| 0.045872
| 0
| 0.165138
| 0.018349
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc74cac1c4d460520f828712a631b873e6f7c8d7
| 3,118
|
py
|
Python
|
src/Simulation/developer_0/main.py
|
GYRY-NEU/CS7610-Experiments
|
3731b45c4a9cba2a1d7e44d37f28d1046a38de47
|
[
"MIT"
] | null | null | null |
src/Simulation/developer_0/main.py
|
GYRY-NEU/CS7610-Experiments
|
3731b45c4a9cba2a1d7e44d37f28d1046a38de47
|
[
"MIT"
] | 1
|
2021-12-02T20:45:02.000Z
|
2021-12-02T20:45:02.000Z
|
src/Simulation/developer_0/main.py
|
GYRY-NEU/Simulation
|
3731b45c4a9cba2a1d7e44d37f28d1046a38de47
|
[
"MIT"
] | null | null | null |
import library
import json
@library.export
def init(args):
model = [[9.2, 0.21, 0.21],
[8.2, 0.22, 0.21],
[7.2, 1.21, 2.41],
[1.2, 2.21, 0.29]]
library.put("model", model)
ROUND = 0
library.put("ROUND", ROUND)
alpha = 0.2
library.put("alpha", alpha)
@library.export
def clientUpload(args):
# get client model
client = json.loads(args["data"])
# client round
k = "round" + str(client["round"])
# save model to buckets
library.put_bucket(k, client["model"])
# if enough models
if library.count_bucket(k) > 20:
ROUND = library.get("ROUND")
# check client rounds == current rounds
if ROUND != client["round"]:
return False
# set round to -1 to prevent clients uploading to this bucket
library.put("ROUND", -1)
model = library.get("model")
list_weights = library.get_bucket(k)
model = updateModel(model, list_weights)
# save calculated model and restore round
library.put("model", model)
library.put("ROUND", ROUND+1)
return True
def updateModel(model, list_weights):
"""
list_weights : 3D list of shape : (clientNumber,modelOuter, modelInner)
It contains all the models for each client
"""
# this part will change from developer to developer
# one can just take avg
# or one can discard smallest and largest than take average
# this example just takes avg without use of external library
alpha = library.get("alpha")
# getting shape of 3D array
number_clients = len(list_weights)
size_outer = len(list_weights[0])
size_inner = len(list_weights[0][0])
# constructing a new 2D array of zeros of same size
newModel = [ [0 for j in range(size_inner)] for i in range(size_outer)]
# validate new created shape
assert(len(newModel) == size_outer)
assert(len(newModel[0]) == size_inner)
# sum for all the clients
for weights in list_weights:
for outerIndex, outerList in enumerate(weights):
for innerIndex, innerVal in enumerate(outerList):
newModel[outerIndex][innerIndex] += innerVal
# average it by number of clients
for outerIndex, outerList in enumerate(newModel):
for innerIndex, innerVal in enumerate(outerList):
newModel[outerIndex][innerIndex] /= number_clients
# now update the model using the learning rate using below formula
# model = (1-a) * model + a * new_model
# The previous part and the next part could be merged for efficiency, but for readability they are implemented as two loops
# Iterate over model
for outerIndex, outerList in enumerate(newModel):
for innerIndex, innerVal in enumerate(outerList):
model[outerIndex][innerIndex] *= 1-alpha
model[outerIndex][innerIndex] += alpha * newModel[outerIndex][innerIndex]
# Finally update round number
return model
@library.export
def getModel(args):
return library.get("model")
@library.export
def getRound(args):
return library.get("ROUND")
| 29.695238
| 109
| 0.645606
| 407
| 3,118
| 4.896806
| 0.341523
| 0.044155
| 0.032112
| 0.036126
| 0.147516
| 0.130958
| 0.130958
| 0.130958
| 0.130958
| 0.082288
| 0
| 0.021524
| 0.254971
| 3,118
| 104
| 110
| 29.980769
| 0.836418
| 0.295382
| 0
| 0.2
| 0
| 0
| 0.036761
| 0
| 0
| 0
| 0
| 0
| 0.036364
| 1
| 0.090909
| false
| 0
| 0.036364
| 0.036364
| 0.218182
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc78a988ef549d86a9021df34f1480ea70a43721
| 11,516
|
py
|
Python
|
submissions/Chouard/mygames.py
|
dysomni/aima-python
|
c67104e50007ec5ac2a9aa37f0cb972cb6315528
|
[
"MIT"
] | null | null | null |
submissions/Chouard/mygames.py
|
dysomni/aima-python
|
c67104e50007ec5ac2a9aa37f0cb972cb6315528
|
[
"MIT"
] | null | null | null |
submissions/Chouard/mygames.py
|
dysomni/aima-python
|
c67104e50007ec5ac2a9aa37f0cb972cb6315528
|
[
"MIT"
] | 1
|
2018-08-23T19:27:23.000Z
|
2018-08-23T19:27:23.000Z
|
from games import Game
from math import nan, isnan
from queue import PriorityQueue
from copy import deepcopy
from utils import isnumber
from grading.util import print_table
class GameState:
def __init__(self, to_move, position, board, label=None):
self.to_move = to_move
self.position = position
self.board = board
self.label = label
self.scores = {'H': 0, 'V': 0}
def __str__(self):
if self.label is None:
return super(GameState, self).__str__()
return self.label
class Move:
def __init__(self, r, c, v):
self.row = r
self.col = c
self.value = v
def rcv(self):
return self.row, self.col, self.value
def __lt__(self, other):
return self.value > other.value
def q2list(mq):
list = []
while not mq.empty():
list.append(mq.get(1).rcv())
return list
def movesInRow(board, r):
mQueue = PriorityQueue()
row = board[r]
for c in range(len(row)):
if isnan(row[c]):
continue
v = row[c]
move = Move(r, c, v)
mQueue.put(move)
return q2list(mQueue)
def movesInCol(board, c):
mQueue = PriorityQueue()
for r in range(len(board)):
if isnan(board[r][c]):
continue
v = board[r][c]
move = Move(r, c, v)
mQueue.put(move)
return q2list(mQueue)
class ThinkAhead(Game):
"""
An implementation of ThinkAhead
"""
def __init__(self, state):
self.initial = state
def actions(self, state):
"Legal moves are any square not yet taken."
r, c = state.position
if state.to_move == 'H':
moves = movesInRow(state.board, r)
return moves
if state.to_move == 'V':
moves = movesInCol(state.board, c)
return moves
return []
# defines the order of play
def opponent(self, player):
if player == 'H':
return 'V'
if player == 'V':
return 'H'
return None
def result(self, state, move):
r, c, v = move
assert state.board[r][c] == v
currMover = state.to_move
nextMover = self.opponent(currMover)
newState = deepcopy(state)
newState.to_move = nextMover
newState.position = r, c
newState.board[r][c] = nan
newState.scores[currMover] += v
return newState
def utility(self, state, player):
"Player relative score"
opponent = self.opponent(player)
return state.scores[player] - state.scores[opponent]
def terminal_test(self, state):
"A state is terminal if it is won or there are no empty squares."
return len(self.actions(state)) == 0
def display(self, state):
print_table(state.board, njust='center', sep=',')
print('Score: ' + str(state.scores))
won = GameState(
to_move='H',
position=(0, 1),
board=[[nan, nan],
[9, nan]],
label='won'
)
won.scores = {'H': 9, 'V': 0}
lost = GameState(
to_move='V',
position=(0, 1),
board=[[nan, nan],
[9, nan]],
label='lost'
)
lost.scores = {'H': 0, 'V': 9}
winin1 = GameState(
to_move='H',
position=(1, 1),
board=[[nan, nan],
[9, nan]],
label='winin1'
)
losein1 = GameState(
to_move='V',
position=(0, 0),
board=[[nan, nan],
[9, nan]],
label='losein1'
)
winin2 = GameState(
to_move='H',
position=(0, 0),
board=[[nan, 3, 2],
[nan, 9, nan],
[nan, nan, 1]],
label='winin2'
)
losein2 = GameState(
to_move='V',
position=(0, 0),
board=[[nan, nan, nan],
[3, 9, nan],
[2, nan, 1]],
label='losein2'
)
losein2.maxDepth = 3
# http://www.kongregate.com/games/zolli/thinkahead-brain-trainer
stolen = GameState(
to_move='H',
position=(3, 1),
board=[[3, 8, 9, 5],
[9, 1, 3, 2],
[8, 6, 4, 4],
[9, nan, 1, 5]],
label='stolen'
)
choose1 = GameState(
to_move='H',
position=(1, 0),
board=[[3, 8, 9, 5],
[nan, 1, 3, 2],
[8, 6, 4, 4],
[nan, nan, 1, 5]],
label='choose1'
)
winby10 = GameState(
to_move='H',
position=(2, 0),
board=[[nan, nan, nan, nan],
[nan, nan, nan, nan],
[nan, 6, 4, 5],
[nan, nan, 1, 3]],
label='winby10'
)
thinkA = ThinkAhead(stolen)
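# Illustrative sketch (not part of the original file): one ply of ThinkAhead
# from the 'stolen' position, using only the classes defined above. The
# horizontal player picks from row 3; moves come back highest value first:
#     moves = thinkA.actions(stolen)         # [(3, 0, 9), (3, 3, 5), (3, 2, 1)]
#     nxt = thinkA.result(stolen, moves[0])  # H takes the 9; V moves next in column 0
#     thinkA.display(nxt)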
def availableMoves(board):
sides = ['T', 'B', 'L', 'R']
moves = PriorityQueue()
for row in range(0, len(board)):
for col in range(0, len(board)):
if board[row][col]['winner'] == '':
for side in sides:
if side not in board[row][col]['lines']:
moves.put((row, col, side))
moveList = []
while not moves.empty():
moveList.append(moves.get(1))
return moveList
def applyMove(board, size, row, col, side, currMover):
board[row][col]['lines'].append(side)
if row <= size - 1 and row != 0 and side == 'T':
board[row - 1][col]['lines'].append('B')
if row >= 0 and row != size - 1 and side == 'B':
board[row + 1][col]['lines'].append('T')
if col <= size - 1 and col != 0 and side == 'L':
board[row][col - 1]['lines'].append('R')
if col >= 0 and col != size - 1 and side == 'R':
board[row][col + 1]['lines'].append('L')
sides = ['T', 'B', 'L', 'R']
complete = True
for side in sides:
if side in board[row][col]['lines']:
continue
complete = False
if complete:
board[row][col]['winner'] = currMover
return board
def countScore(board):
scores = {'A': 0, 'B': 0}
for row in range(0, len(board)):
for col in range(0, len(board)):
if board[row][col]['winner'] == 'A':
scores['A'] += 1
if board[row][col]['winner'] == 'B':
scores['B'] += 1
return scores
board = '''
***
***
***
'''
def printDotsBoard(board):
board_string = ''
for row in range(0, len(board)):
for col in range(0, len(board[row])):
board_string += '*'
if 'T' in board[row][col]['lines']:
board_string += '---'
else:
board_string += ' '
if col == len(board[row]) - 1:
board_string += '*\n'
for space in range(0, len(board[row])):
if 'L' in board[row][space]['lines']:
board_string += '| '
else:
board_string += ' '
if '' != board[row][space]['winner']:
board_string += board[row][space]['winner']
else:
board_string += ' '
if space == len(board[row]) - 1 and 'R' in board[row][space]['lines']:
board_string += ' |'
else:
board_string += ' '
board_string += '\n'
if row == len(board) - 1:
for col in range(0, len(board[row])):
board_string += '*'
if 'B' in board[row][col]['lines']:
board_string += '---'
else:
board_string += ' '
board_string += '*'
print(board_string)
class DotLineState:
def __init__(self, to_move, board, label=None, scores={'A': 0, 'B': 0}):
self.to_move = to_move
self.board = board
self.label = label
self.scores = scores
def __str__(self):
if self.label is None:
return super(DotLineState, self).__str__()
return self.label
class DotsAndLines(Game):
"""
An implementation of Dots and Lines
"""
def __init__(self, state):
self.initial = state
self.size = len(state.board)
def actions(self, state):
"Legal moves are any square not yet taken."
moves = availableMoves(state.board)
return moves
# defines the order of play
def opponent(self, player):
if player == 'A':
return 'B'
if player == 'B':
return 'A'
return None
def result(self, state, move):
row, col, side = move
currMover = state.to_move
nextMover = self.opponent(currMover)
newState = deepcopy(state)
newState.to_move = nextMover
newState.board = applyMove(newState.board, self.size, row, col, side, currMover)
newState.scores = countScore(newState.board)
return newState
def utility(self, state, player):
"Player relative score"
opponent = self.opponent(player)
return state.scores[player] - state.scores[opponent]
def terminal_test(self, state):
"A state is terminal if it is won or there are no empty squares."
return len(self.actions(state)) == 0
def display(self, state):
# print_table(state.board, njust='center', sep=',')
printDotsBoard(state.board)
print('Score: ' + str(state.scores))
'''
Board represents the squares, whether the top, bottom, left, and
right have been filled, and which player owns the square.
'''
dotLineBoard = [[{'winner': 'A', 'lines': ['T', 'B', 'L', 'R']}, {'winner': 'A', 'lines': ['T', 'B', 'L', 'R']}],
[{'winner': 'A', 'lines': ['T', 'B', 'L', 'R']}, {'winner': 'B', 'lines': ['T', 'B', 'L', 'R']}]]
won = DotLineState(board=dotLineBoard, to_move='A', label='Won', scores={'A': 3, 'B': 1})
dotLineBoard = [[{'winner': 'A', 'lines': ['T', 'B', 'L', 'R']}, {'winner': 'B', 'lines': ['T', 'B', 'L', 'R']}],
[{'winner': 'B', 'lines': ['T', 'B', 'L', 'R']}, {'winner': 'B', 'lines': ['T', 'B', 'L', 'R']}]]
lost = DotLineState(board=dotLineBoard, to_move='A', label='Lost', scores={'A': 1, 'B': 3})
dotLineBoard = [[{'winner': 'A', 'lines': ['T', 'B', 'L', 'R']}, {'winner': 'B', 'lines': ['T', 'B', 'L', 'R']}],
[{'winner': 'A', 'lines': ['T', 'B', 'L', 'R']}, {'winner': 'B', 'lines': ['T', 'B', 'L', 'R']}]]
tied = DotLineState(board=dotLineBoard, to_move='A', label='Tied', scores={'A': 2, 'B': 2})
dotLineBoard = [[{'winner': 'A', 'lines': ['T', 'B', 'L', 'R']}, {'winner': 'A', 'lines': ['T', 'B', 'L', 'R']}],
[{'winner': 'A', 'lines': ['T', 'B', 'L', 'R']}, {'winner': '', 'lines': ['T', 'L']}]]
winin1Dots = DotLineState(board=dotLineBoard, to_move='A', label='Win in 1', scores={'A': 2, 'B': 1})
dotLineBoard = [[{'winner': '', 'lines': ['L', 'R']}, {'winner': '', 'lines': ['T', 'L']}, {'winner': '', 'lines': ['R']}],
[{'winner': '', 'lines': ['L', 'R']}, {'winner': '', 'lines': ['L', 'R']}, {'winner': '', 'lines': ['L', 'R']}],
[{'winner': '', 'lines': ['B', 'L', 'R']}, {'winner': '', 'lines': ['L', 'B']}, {'winner': '', 'lines': ['B', 'R']}],
]
winIn5_3x3 = DotLineState(board=dotLineBoard, to_move='A', label='Win in 5', scores={'A': 0, 'B': 0})
play = DotLineState(
board=[[{'winner': '', 'lines': []}, {'winner': '', 'lines': []}],
[{'winner': '', 'lines': []}, {'winner': '', 'lines': []}]],
to_move='A', label='Start')
#amended by whh
dotLine = DotsAndLines(play)
#dotLine = DotsAndLines(winIn5_3x3)
myGames = {
dotLine: [
won,
lost,
tied,
winin1Dots,
winIn5_3x3,
play
]
}
| 27.951456
| 133
| 0.500174
| 1,414
| 11,516
| 4.008487
| 0.123762
| 0.028582
| 0.009527
| 0.011997
| 0.572689
| 0.514467
| 0.430487
| 0.379323
| 0.357269
| 0.330452
| 0
| 0.01817
| 0.311827
| 11,516
| 411
| 134
| 28.019465
| 0.697035
| 0.046631
| 0
| 0.403125
| 0
| 0
| 0.08773
| 0
| 0
| 0
| 0
| 0
| 0.003125
| 1
| 0.0875
| false
| 0
| 0.01875
| 0.00625
| 0.209375
| 0.021875
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc798568d1c4c7d74cc7db30deace979155e8ddb
| 4,797
|
py
|
Python
|
discordbot/stocks/options/opt_chain.py
|
minhhoang1023/GamestonkTerminal
|
195dc19b491052df080178c0cc6a9d535a91a704
|
[
"MIT"
] | 1
|
2022-02-18T04:02:52.000Z
|
2022-02-18T04:02:52.000Z
|
discordbot/stocks/options/opt_chain.py
|
minhhoang1023/GamestonkTerminal
|
195dc19b491052df080178c0cc6a9d535a91a704
|
[
"MIT"
] | null | null | null |
discordbot/stocks/options/opt_chain.py
|
minhhoang1023/GamestonkTerminal
|
195dc19b491052df080178c0cc6a9d535a91a704
|
[
"MIT"
] | null | null | null |
import os
import df2img
import disnake
import numpy as np
import pandas as pd
from menus.menu import Menu
from PIL import Image
import discordbot.config_discordbot as cfg
from discordbot.config_discordbot import gst_imgur, logger
from discordbot.helpers import autocrop_image
from gamestonk_terminal.stocks.options import yfinance_model
async def chain_command(
ctx,
ticker: str = None,
expiry: str = None,
opt_type: str = None,
min_sp: float = None,
max_sp: float = None,
):
"""Show calls/puts for given ticker and expiration"""
try:
# Debug
if cfg.DEBUG:
logger.debug(
"opt-chain %s %s %s %s %s", ticker, expiry, opt_type, min_sp, max_sp
)
# Check for argument
if not ticker:
raise Exception("Stock ticker is required")
dates = yfinance_model.option_expirations(ticker)
if not dates:
raise Exception("Stock ticker is invalid")
options = yfinance_model.get_option_chain(ticker, str(expiry))
calls_df = options.calls
puts_df = options.puts
column_map = {"openInterest": "oi", "volume": "vol", "impliedVolatility": "iv"}
columns = [
"strike",
"bid",
"ask",
"volume",
"openInterest",
"impliedVolatility",
]
if opt_type == "Calls":
df = calls_df[columns].rename(columns=column_map)
if opt_type == "Puts":
df = puts_df[columns].rename(columns=column_map)
min_strike = np.percentile(df["strike"], 1)
max_strike = np.percentile(df["strike"], 100)
if min_sp:
min_strike = min_sp
if max_sp:
max_strike = max_sp
if min_sp and max_sp and min_sp > max_sp:
min_strike, max_strike = max_strike, min_strike
df = df[df["strike"] >= min_strike]
df = df[df["strike"] <= max_strike]
df["iv"] = pd.to_numeric(df["iv"].astype(float))
formats = {"iv": "{:.2f}"}
for col, f in formats.items():
df[col] = df[col].map(lambda x: f.format(x)) # pylint: disable=W0640
df.set_index("strike", inplace=True)
title = f"Stocks: {opt_type} Option Chain for {ticker.upper()} on {expiry} [yfinance]"
embeds: list = []
# Weekly Calls Pages
i, i2, end = 0, 0, 20
df_pg = []
embeds_img = []
dindex = len(df.index)
while i < dindex:
df_pg = df.iloc[i:end]
figp = df2img.plot_dataframe(
df_pg,
fig_size=(1000, (40 + (40 * 20))),
col_width=[3, 3, 3, 3],
tbl_cells=dict(
height=35,
),
font=dict(
family="Consolas",
size=20,
),
template="plotly_dark",
paper_bgcolor="rgba(0, 0, 0, 0)",
)
imagefile = f"opt-chain{i}.png"
df2img.save_dataframe(fig=figp, filename=imagefile)
image = Image.open(imagefile)
image = autocrop_image(image, 0)
image.save(imagefile, "PNG", quality=100)
uploaded_image = gst_imgur.upload_image(imagefile, title="something")
image_link = uploaded_image.link
embeds_img.append(
f"{image_link}",
)
embeds.append(
disnake.Embed(
title=title,
colour=cfg.COLOR,
),
)
i2 += 1
i += 20
end += 20
os.remove(imagefile)
# Author/Footer
for i in range(0, i2):
embeds[i].set_author(
name=cfg.AUTHOR_NAME,
url=cfg.AUTHOR_URL,
icon_url=cfg.AUTHOR_ICON_URL,
)
embeds[i].set_footer(
text=cfg.AUTHOR_NAME,
icon_url=cfg.AUTHOR_ICON_URL,
)
for i in range(0, i2):
embeds[i].set_image(url=embeds_img[i])
embeds[0].set_footer(text=f"Page 1 of {len(embeds)}")
options = [
disnake.SelectOption(label="Home", value="0", emoji="🟢"),
]
await ctx.send(embed=embeds[0], view=Menu(embeds, options))
except Exception as e:
embed = disnake.Embed(
title="ERROR Stock-Options: Expirations",
colour=cfg.COLOR,
description=e,
)
embed.set_author(
name=cfg.AUTHOR_NAME,
icon_url=cfg.AUTHOR_ICON_URL,
)
await ctx.send(embed=embed, delete_after=30.0)
| 29.429448
| 94
| 0.518449
| 557
| 4,797
| 4.310592
| 0.332136
| 0.026239
| 0.019992
| 0.012495
| 0.164515
| 0.120367
| 0.069971
| 0.049979
| 0.049979
| 0
| 0
| 0.02055
| 0.371065
| 4,797
| 162
| 95
| 29.611111
| 0.774942
| 0.019179
| 0
| 0.090226
| 0
| 0
| 0.09085
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.082707
| 0
| 0.082707
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc7a203231a6818121284acfef4c18f0b9192863
| 2,312
|
py
|
Python
|
scripts/get_lenderprofit.py
|
xujiahuayz/premfin
|
0e90d876ef7c9ef4f3da7d4842b8ec5ae9ac7e68
|
[
"MIT"
] | 4
|
2021-05-03T16:03:24.000Z
|
2022-02-17T16:08:49.000Z
|
scripts/get_lenderprofit.py
|
xujiahuayz/premfin
|
0e90d876ef7c9ef4f3da7d4842b8ec5ae9ac7e68
|
[
"MIT"
] | null | null | null |
scripts/get_lenderprofit.py
|
xujiahuayz/premfin
|
0e90d876ef7c9ef4f3da7d4842b8ec5ae9ac7e68
|
[
"MIT"
] | 1
|
2021-06-30T11:27:56.000Z
|
2021-06-30T11:27:56.000Z
|
#%% import packages
import numpy as np
import pandas as pd
import multiprocessing
from time import time
import json
from premiumFinance.constants import (
MORTALITY_TABLE_CLEANED_PATH,
PROCESSED_PROFITABILITY_PATH,
)
from premiumFinance.financing import calculate_lender_profit, yield_curve
mortality_experience = pd.read_excel(MORTALITY_TABLE_CLEANED_PATH)
#%% calculate profit rate
def get_average_profitability(
is_level_premium=True,
lapse_assumption=True,
policyholder_rate=yield_curve,
statutory_interest=0.035,
premium_markup=0.0,
cash_interest=0.001,
lender_coc=0.01,
data_frame=mortality_experience,
):
profit_columns = data_frame.apply(
lambda row: calculate_lender_profit(
row=row,
is_level_premium=is_level_premium,
lapse_assumption=lapse_assumption,
policyholder_rate=policyholder_rate,
statutory_interest=statutory_interest,
premium_markup=premium_markup,
cash_interest=cash_interest,
lender_coc=lender_coc,
),
axis=1,
result_type="expand",
)
data_frame[["Breakeven Loan rate", "Lender profit"]] = profit_columns
data_frame["Dollar profit"] = (
data_frame["Lender profit"] * data_frame["Amount Exposed"]
)
average_profitability = (
data_frame["Dollar profit"].sum() / data_frame["Amount Exposed"].sum()
)
return average_profitability, data_frame
def tempfunc_t(x):
a, _ = get_average_profitability(lender_coc=x, lapse_assumption=True)
return a
def tempfunc_f(x):
a, _ = get_average_profitability(lender_coc=x, lapse_assumption=False)
return a
lender_coc_value = np.arange(start=0.01, stop=0.2, step=0.01)
#%% tbd
if __name__ == "__main__":
pool = multiprocessing.Pool()
start_time = time()
foo = []
for tempfunc in (tempfunc_t, tempfunc_f):
foo.append(
pool.map(
tempfunc,
lender_coc_value,
)
)
print(f"it took {time() - start_time}")
lender_profitability = {
"lender_coc": lender_coc_value.tolist(),
"profitability": foo,
}
with open(PROCESSED_PROFITABILITY_PATH, "w") as outfile:
json.dump(lender_profitability, outfile)
| 26.574713
| 78
| 0.675606
| 272
| 2,312
| 5.404412
| 0.356618
| 0.055102
| 0.046939
| 0.034014
| 0.068027
| 0.068027
| 0.068027
| 0.068027
| 0.068027
| 0.068027
| 0
| 0.012415
| 0.233564
| 2,312
| 86
| 79
| 26.883721
| 0.817156
| 0.020761
| 0
| 0.029412
| 0
| 0
| 0.073419
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044118
| false
| 0
| 0.102941
| 0
| 0.191176
| 0.014706
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc7acbb33a6b536b7d8fb8f0b3208c55dac034b1
| 5,700
|
py
|
Python
|
dashboard/dashboard/common/layered_cache.py
|
BearerPipelineTest/catapult
|
3800a67cd916200046a50748893bbd0dcf3d7f4a
|
[
"BSD-3-Clause"
] | null | null | null |
dashboard/dashboard/common/layered_cache.py
|
BearerPipelineTest/catapult
|
3800a67cd916200046a50748893bbd0dcf3d7f4a
|
[
"BSD-3-Clause"
] | 1
|
2022-01-12T14:28:55.000Z
|
2022-01-12T14:28:55.000Z
|
dashboard/dashboard/common/layered_cache.py
|
atuchin-m/catapult
|
108ea3e2ec108e68216b1250a3d79cc642600294
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Caches processed query results in memcache and datastore.
Memcache is not very reliable for the perf dashboard. Prometheus team explained
that memcache is LRU and shared between multiple applications, so their activity
may result in our data being evicted. To prevent this, we cache processed
query results in the data store. Using NDB, the values are also cached in
memcache if possible. This improves performance because doing a get()
for a key which has a single BlobProperty is much quicker than a complex query
over a large dataset.
(Background: http://g/prometheus-discuss/othVtufGIyM/wjAS5djyG8kJ)
When an item is cached, layered_cache does the following:
1) Namespaces the key based on whether datastore_hooks says the request is
internal_only.
2) Pickles the value (memcache does this internally), and adds a data store
entity with the key and a BlobProperty with the pickled value.
Retrieving values checks memcache via NDB first, and if datastore is used it
unpickles.
When an item is removed from the cache, it is removed from both internal and
external caches, since removals are usually caused by large changes that affect
both caches.
Although this module contains ndb.Model classes, these are not intended
to be used directly by other modules.
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import six.moves.cPickle as cPickle
import datetime
import logging
from google.appengine.api import datastore_errors
from google.appengine.runtime import apiproxy_errors
from google.appengine.ext import ndb
from dashboard.common import datastore_hooks
from dashboard.common import namespaced_stored_object
from dashboard.common import stored_object
class CachedPickledString(ndb.Model):
value = ndb.BlobProperty()
expire_time = ndb.DateTimeProperty()
@classmethod
def NamespacedKey(cls, key, namespace):
return ndb.Key(cls.__name__,
namespaced_stored_object.NamespaceKey(key, namespace))
@classmethod
def GetExpiredKeys(cls):
"""Gets keys of expired entities.
Returns:
List of keys for items which are expired.
"""
current_time = datetime.datetime.now()
query = cls.query(cls.expire_time < current_time)
query = query.filter(cls.expire_time != None)
return query.fetch(keys_only=True)
def Get(key):
"""Gets the value from the datastore."""
if key is None:
return None
namespaced_key = namespaced_stored_object.NamespaceKey(key)
entity = ndb.Key('CachedPickledString',
namespaced_key).get(read_policy=ndb.EVENTUAL_CONSISTENCY)
if entity:
return cPickle.loads(entity.value)
return stored_object.Get(key)
def GetExternal(key):
"""Gets the value from the datastore for the externally namespaced key."""
if key is None:
return None
namespaced_key = namespaced_stored_object.NamespaceKey(
key, datastore_hooks.EXTERNAL)
entity = ndb.Key('CachedPickledString',
namespaced_key).get(read_policy=ndb.EVENTUAL_CONSISTENCY)
if entity:
return cPickle.loads(entity.value)
return stored_object.Get(key)
def Set(key, value, days_to_keep=None, namespace=None):
"""Sets the value in the datastore.
Args:
key: The key name, which will be namespaced.
value: The value to set.
days_to_keep: Number of days to keep entity in datastore, default is None.
Entity will not expire when this value is 0 or None.
namespace: Optional namespace, otherwise namespace will be retrieved
using datastore_hooks.GetNamespace().
"""
# When number of days to keep is given, calculate expiration time for
# the entity and store it in datastore.
# Once the entity expires, it will be deleted from the datastore.
expire_time = None
if days_to_keep:
expire_time = datetime.datetime.now() + datetime.timedelta(
days=days_to_keep)
namespaced_key = namespaced_stored_object.NamespaceKey(key, namespace)
try:
CachedPickledString(
id=namespaced_key, value=cPickle.dumps(value),
expire_time=expire_time).put()
except datastore_errors.BadRequestError as e:
logging.warning('BadRequestError for key %s: %s', key, e)
except apiproxy_errors.RequestTooLargeError as e:
stored_object.Set(key, value)
def SetExternal(key, value, days_to_keep=None):
"""Sets the value in the datastore for the externally namespaced key.
Needed for things like /add_point that update internal/external data at the
same time.
Args:
key: The key name, which will be namespaced as externally_visible.
value: The value to set.
days_to_keep: Number of days to keep entity in datastore, default is None.
Entity will not expire when this value is 0 or None.
"""
Set(key, value, days_to_keep, datastore_hooks.EXTERNAL)
@ndb.synctasklet
def Delete(key):
"""Clears the value from the datastore."""
yield DeleteAsync(key)
@ndb.tasklet
def DeleteAsync(key):
unnamespaced_future = stored_object.DeleteAsync(key)
# See the comment in stored_object.DeleteAsync() about this get().
entities = yield ndb.get_multi_async([
CachedPickledString.NamespacedKey(key, datastore_hooks.INTERNAL),
CachedPickledString.NamespacedKey(key, datastore_hooks.EXTERNAL),
])
keys = [entity.key for entity in entities if entity]
yield (unnamespaced_future, ndb.delete_multi_async(keys))
def DeleteAllExpiredEntities():
"""Deletes all expired entities from the datastore."""
ndb.delete_multi(CachedPickledString.GetExpiredKeys())
| 35.403727
| 80
| 0.759649
| 801
| 5,700
| 5.293383
| 0.318352
| 0.031132
| 0.023585
| 0.032075
| 0.289387
| 0.256368
| 0.228302
| 0.174057
| 0.174057
| 0.156132
| 0
| 0.002119
| 0.172105
| 5,700
| 160
| 81
| 35.625
| 0.896376
| 0.47193
| 0
| 0.216216
| 0
| 0
| 0.023328
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.121622
| false
| 0
| 0.162162
| 0.013514
| 0.432432
| 0.013514
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc7af78333f9477d4091f8e1379313d34cf45c32
| 2,241
|
py
|
Python
|
hypergbm/tests/cuml_/run_experiment_cuml.py
|
BigAndSweet/HyperGBM
|
f3bc4e0d877b82a264d35158f9bc974f43a2a5ee
|
[
"Apache-2.0"
] | null | null | null |
hypergbm/tests/cuml_/run_experiment_cuml.py
|
BigAndSweet/HyperGBM
|
f3bc4e0d877b82a264d35158f9bc974f43a2a5ee
|
[
"Apache-2.0"
] | null | null | null |
hypergbm/tests/cuml_/run_experiment_cuml.py
|
BigAndSweet/HyperGBM
|
f3bc4e0d877b82a264d35158f9bc974f43a2a5ee
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
"""
"""
import cudf
from hypergbm import make_experiment
from hypernets.tabular import get_tool_box
from hypernets.tabular.datasets import dsutils
def main(target='y', dtype=None, max_trials=3, drift_detection=False, clear_cache=True, **kwargs):
tb = get_tool_box(cudf.DataFrame)
assert isinstance(tb, type) and tb.__name__ == 'CumlToolBox'
print("preparing...")
df = dsutils.load_bank()
if dtype is not None:
df[target] = df[target].astype(dtype)
df, = tb.from_local(df)
assert isinstance(df, cudf.DataFrame)
df_train, df_test = tb.train_test_split(df, test_size=0.5, random_state=123)
X_test = df_test
y_test = X_test.pop(target)
exp = make_experiment(df_train, target=target,
test_data=X_test.copy(),
max_trials=max_trials,
drift_detection=drift_detection,
clear_cache=clear_cache,
**kwargs)
print('experiment:', f'{[s.name for s in exp.steps]}', 'random_state', exp.random_state)
print("training...")
estimator = exp.run()
print('estimator pipeline:', f'{[s[0] for s in estimator.steps]}')
print("scoring...")
y_pred = estimator.predict(X_test)
y_proba = estimator.predict_proba(X_test)
task = exp.task
if task == 'regression':
metrics = ['mse', 'mae', 'msle', 'rmse', 'r2']
else:
metrics = ['auc', 'accuracy', 'f1', 'recall']
result = tb.metrics.calc_score(y_test, y_pred, y_proba, task=task, metrics=metrics,
pos_label=kwargs.get('pos_label', None))
print(result)
return exp, estimator
if __name__ == '__main__':
main(target='y', reward_metric='auc', ensemble_size=10, pos_label='yes', log_level='info', max_trials=10)
# main(target='y', max_trials=10, cv=False, ensemble_size=0, verbose=0, pos_label='yes', )
# main(target='day', reward_metric='f1', ensemble_size=10, log_level='info', max_trials=5)
# main(target='day', dtype='str', reward_metric='f1', ensemble_size=0, log_level='info', max_trials=6)
# main(target='age', dtype='float', ensemble_size=10, log_level='info', max_trials=8)
| 35.571429
| 109
| 0.630076
| 305
| 2,241
| 4.386885
| 0.363934
| 0.053812
| 0.035874
| 0.044843
| 0.113602
| 0.052317
| 0.052317
| 0.052317
| 0
| 0
| 0
| 0.016037
| 0.220884
| 2,241
| 62
| 110
| 36.145161
| 0.750286
| 0.171798
| 0
| 0
| 0
| 0
| 0.120521
| 0
| 0
| 0
| 0
| 0
| 0.05
| 1
| 0.025
| false
| 0
| 0.1
| 0
| 0.15
| 0.15
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc7dad2207dc0af9eee6f35c716072b33850e1f6
| 252
|
py
|
Python
|
Inserter.py
|
DarthSpector/Poster-Adder
|
97a86338987dd8cbcdf56414f53932c0370dcfc2
|
[
"MIT"
] | null | null | null |
Inserter.py
|
DarthSpector/Poster-Adder
|
97a86338987dd8cbcdf56414f53932c0370dcfc2
|
[
"MIT"
] | null | null | null |
Inserter.py
|
DarthSpector/Poster-Adder
|
97a86338987dd8cbcdf56414f53932c0370dcfc2
|
[
"MIT"
] | null | null | null |
def pictureInserter(og, address, list):
"""Append a TMDB cover URL line to each file in `og`.
`og` is an iterable of file names inside directory `address`; `list`
holds the TMDB poster paths, aligned with `og` by index."""
for j, name in enumerate(og):
with open(address + '/' + name, "a") as file1:
file1.write("\ncover::https://image.tmdb.org/t/p/original/" + list[j])
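# Illustrative usage (added for clarity; the file names and poster paths below
# are hypothetical placeholders):
#   pictureInserter(["Dune.txt", "Tenet.txt"], "/path/to/notes",
#                   ["d5NXSklzo0l4.jpg", "k68nPLbIST6N.jpg"])
# appends a "cover::https://image.tmdb.org/t/p/original/<poster>" line to each file.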
| 25.2
| 66
| 0.503968
| 35
| 252
| 3.628571
| 0.714286
| 0.07874
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02924
| 0.321429
| 252
| 10
| 67
| 25.2
| 0.71345
| 0
| 0
| 0
| 0
| 0
| 0.192623
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc80bac77478aab3a7595a594bb0b4822d3d20bb
| 8,746
|
py
|
Python
|
sm4.py
|
ZelKnow/sm4
|
2bb232f46a5033b2d89ce097e004e53eb13d90d8
|
[
"MIT"
] | null | null | null |
sm4.py
|
ZelKnow/sm4
|
2bb232f46a5033b2d89ce097e004e53eb13d90d8
|
[
"MIT"
] | null | null | null |
sm4.py
|
ZelKnow/sm4
|
2bb232f46a5033b2d89ce097e004e53eb13d90d8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File : sm4.py
@Description : Implementation of the SM4 encryption algorithm
@Date : 2021/10/28 15:59:51
@Author : ZelKnow
@Github : https://github.com/ZelKnow
"""
__author__ = "ZelKnow"
from argparse import ArgumentParser, ArgumentError
from binascii import hexlify, unhexlify
from utils import S_BOX, BLOCK_BYTE, FK, CK, BLOCK_HEX
from utils import rotl, num2hex, bytes_to_list, list_to_bytes, padding, unpadding
ENCRYPT = 0  # encryption mode
DECRYPT = 1  # decryption mode
class CryptSM4(object):
def __init__(self):
self.rk = []
def T(self, A, L_func):
"""合成置换函数T
T(.) = L(\tau(.))
Args:
A (int): 输入数据
L_func (function): 线性变换L
Returns:
int: 输出数据
"""
B = [S_BOX[(A >> i) & (0x000000ff)] for i in range(0, 32, 8)]
B = [B[i] << (i * 8) for i in range(4)]
C = L_func(sum(B))
return C
def L(self, input):
"""线性变换L,用于轮函数中
L(B) = B ^ (B <<< 2) ^ (B <<< 10) ^ (B <<< 18) ^ (B <<< 24)
Args:
input (int): 输入数据
Returns:
int: 输出数据
"""
return input ^ rotl(input, 2) ^ rotl(input, 10) ^ rotl(
input, 18) ^ rotl(input, 24)
def L_prime(self, input):
"""线性变换L',用于密钥扩展算法
L'(B) = B ^ (B <<< 13) ^ (B <<< 23)
Args:
input (int): 输入数据
Returns:
int: 输出数据
"""
return input ^ rotl(input, 13) ^ rotl(input, 23)
def check_key_iv(self, key_iv):
"""检验key或iv的合法性并转换成字节串
Args:
key_iv (int, str or bytes): key或iv
Raises:
TypeError: 密钥或初始化向量类型错误
ValueError: 密钥或初始化向量长度过长
Returns:
bytes: key或iv
"""
if isinstance(key_iv, str):
key_iv = key_iv.encode(encoding='UTF8')
elif isinstance(key_iv, int):
print(len(num2hex(key_iv, width=32)))
key_iv = unhexlify(num2hex(key_iv, width=32))
elif not isinstance(key_iv, bytes):
raise TypeError("密钥或初始化向量类型错误")
if len(key_iv) > BLOCK_BYTE:
raise ValueError('密钥或初始化向量长度不能大于{}'.format(BLOCK_BYTE))
return unhexlify('00') * (BLOCK_BYTE - len(key_iv)) + key_iv
def set_key(self, key):
"""设置key
Args:
key (int, str or bytes): 密钥
"""
key = self.check_key_iv(key)
input = bytes_to_list(hexlify(key), BLOCK_HEX // 4)  # integer division keeps the chunk size an int
input = [int(i, 16) for i in input]
K = [input[i] ^ FK[i] for i in range(4)]  # holds the round keys
for i in range(32):  # key expansion algorithm
K.append(K[i] ^ self.T(K[i + 1] ^ K[i + 2] ^ K[i + 3]
^ CK[i], self.L_prime))
self.rk = K[4:]
def F(self, X, i):
"""轮函数F
F = X_0 ^ T(X_1 ^ X_2 ^ X_3 ^ rk)
其中输入为(X_0, X_1, X_2, X_3),轮密钥为rk
Args:
X (list): 输入
i (int): 轮密钥的下标
Returns:
int: 输出
"""
return X[0] ^ self.T(X[1] ^ X[2] ^ X[3] ^ self.rk[i], self.L)
def _crypt(self, x, mode=ENCRYPT):
"""加解密函数
Args:
x (int): 需加解密的数据
mode (int, optional): 加密或解密. Defaults to ENCRYPT.
Returns:
int: 输出
"""
input = [(x >> i) & (0xffffffff) for i in reversed(range(0, 128, 32))]
# The round keys are applied in opposite order for decryption
for i in range(32) if mode == ENCRYPT else reversed(range(32)):
input.append(self.F(input[-4:], i))  # 32 rounds of iteration
output = input[-4:]
output = [output[i] << (i * 32) for i in range(4)]  # reverse transformation
return sum(output)
def encrypt(self, x):
"""加密函数
Args:
x (int): 需加密的数据
Returns:
int: 输出
"""
return self._crypt(x, ENCRYPT)
def decrypt(self, x):
"""解密函数
Args:
x (int): 需解密的数据
Returns:
int: 输出
"""
return self._crypt(x, DECRYPT)
def _crypt_ECB(self, input, mode=ENCRYPT):
"""ECB加解密函数
Args:
x (int): 需加解密的数据
mode (int, optional): 加密或解密. Defaults to ENCRYPT.
Returns:
int: 输出
"""
input_list = bytes_to_list(input, BLOCK_BYTE)  # split the input into blocks
input_list = [int(hexlify(i), 16) for i in input_list]
output_list = [self._crypt(x, mode) for x in input_list]  # encrypt/decrypt each block
output_list = [
unhexlify(num2hex(o, width=BLOCK_HEX)) for o in output_list
]  # convert back to a byte stream
return list_to_bytes(output_list)  # concatenate the blocks
def encrypt_ECB(self, plain_text):
"""ECB加密函数
Args:
x (int): 需加密的数据
Returns:
int: 输出
"""
return self._crypt_ECB(padding(plain_text), ENCRYPT)
def decrypt_ECB(self, cipher_text):
"""ECB解密函数
Args:
x (int): 需解密的数据
Returns:
int: 输出
"""
try:
cipher_text = unhexlify(cipher_text)
except:
pass
return unpadding(self._crypt_ECB(cipher_text, DECRYPT))
def _crypt_CBC(self, input, iv, mode=ENCRYPT):
"""CBC加解密函数
Args:
x (int): 需加解密的数据
mode (int, optional): 加密或解密. Defaults to ENCRYPT.
Returns:
int: 输出
"""
iv = int(hexlify(self.check_key_iv(iv)), 16)  # initialization vector
input_list = bytes_to_list(input, BLOCK_BYTE)  # split into blocks
input_list = [int(hexlify(i), 16) for i in input_list]
output_list = []
for x in input_list:
if mode == ENCRYPT:
output_list.append(self._crypt(x ^ iv, mode))
iv = output_list[-1]
else:
output_list.append(self._crypt(x, mode) ^ iv)
iv = x
output_list = [
unhexlify(num2hex(o, width=BLOCK_HEX)) for o in output_list
]
return list_to_bytes(output_list)
def encrypt_CBC(self, plain_text, iv):
"""CBC加密函数
Args:
x (int): 需加密的数据
Returns:
int: 输出
"""
return self._crypt_CBC(padding(plain_text), iv, ENCRYPT)
def decrypt_CBC(self, cipher_text, iv):
"""CBC解密函数
Args:
x (int): 需解密的数据
Returns:
int: 输出
"""
try:
cipher_text = unhexlify(cipher_text)
except:
pass
return unpadding(self._crypt_CBC(cipher_text, iv, DECRYPT))
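# Illustrative library usage (added for clarity; the key and IV values are
# arbitrary examples, and padding/unpadding from utils handle block alignment):
#
#   crypt = CryptSM4()
#   crypt.set_key('0123456789abcdef')             # 16-byte key (str, bytes or int)
#   cipher = crypt.encrypt_ECB(b'hello, sm4')     # padded ciphertext bytes
#   plain = crypt.decrypt_ECB(cipher)             # recovers the original plaintext
#   cbc_cipher = crypt.encrypt_CBC(b'hello, sm4', iv='fedcba9876543210')
#   crypt.decrypt_CBC(cbc_cipher, iv='fedcba9876543210')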
if __name__ == '__main__':
parser = ArgumentParser(description="SM4 encryption/decryption")
parser.add_argument('crypt', choices=['encrypt', 'decrypt'], help='encrypt or decrypt')
parser.add_argument('mode', choices=['ecb', 'cbc'], help='cipher mode')
parser.add_argument('source', help='target to encrypt/decrypt')
parser.add_argument('key', help='encryption key')
parser.add_argument('--iv', help='initialization vector, used in CBC mode')
parser.add_argument('--source_type',
choices=['input', 'bin_file', 'image'],
help='type of the encryption target',
default='input')
parser.add_argument('--output', help='output file name; if omitted, write to standard output')
args = parser.parse_args()
c = CryptSM4()
c.set_key(args.key)
if args.mode == 'cbc' and args.iv is None:
parser.error("An initialization vector (--iv) is required in CBC mode")
if args.source_type == 'input':
input = args.source
if input[:2].lower() == '0x':
input = int(input[2:], 16)
elif args.source_type == 'bin_file':
with open(args.source, 'rb') as f:
input = f.read()
else:
from PIL import Image
import numpy as np
source = Image.open(args.source)
img = np.array(source.convert('RGBA'))
shape = img.shape
size = img.size
input = unhexlify(''.join([num2hex(i, width=2)
for i in img.flatten()]))
if args.crypt == 'encrypt':
output = c.encrypt_ECB(input) if args.mode == 'ecb' else c.encrypt_CBC(
input, args.iv)
else:
output = c.decrypt_ECB(input) if args.mode == 'ecb' else c.decrypt_CBC(
input, args.iv)
if args.source_type == 'image':
output = hexlify(output).decode()
output = output[:size * 2]
output = [[int(output[i + j:i + j + 2], 16) for j in range(0, 8, 2)]
for i in range(0, len(output), 8)]
output = np.array(output)
output = Image.fromarray(output.reshape(shape).astype('uint8'))
output.save(args.output)
elif args.output:
with open(args.output, "wb") as f:
f.write(output)
else:
try:
print(output.decode())
except:
print(hexlify(output).decode())
| 27.677215
| 81
| 0.511091
| 1,078
| 8,746
| 4.010204
| 0.20872
| 0.018506
| 0.016655
| 0.017812
| 0.293084
| 0.25746
| 0.228314
| 0.215128
| 0.188526
| 0.188526
| 0
| 0.02364
| 0.356735
| 8,746
| 315
| 82
| 27.765079
| 0.744756
| 0.1882
| 0
| 0.175676
| 0
| 0
| 0.040603
| 0
| 0
| 0
| 0.003172
| 0
| 0
| 1
| 0.108108
| false
| 0.013514
| 0.040541
| 0
| 0.25
| 0.02027
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc80de56a04c7ef4be7293dbeb997c760a19a788
| 1,750
|
py
|
Python
|
sendotp/sendotp.py
|
saadmk11/sendotp-python
|
b0cd5c3da969d00a753d9614c5bea0e2978859c9
|
[
"MIT"
] | 5
|
2017-05-15T07:21:29.000Z
|
2022-03-02T01:01:47.000Z
|
sendotp/sendotp.py
|
saadmk11/sendotp-python
|
b0cd5c3da969d00a753d9614c5bea0e2978859c9
|
[
"MIT"
] | 2
|
2017-05-15T07:57:36.000Z
|
2021-09-23T06:22:34.000Z
|
sendotp/sendotp.py
|
saadmk11/sendotp-python
|
b0cd5c3da969d00a753d9614c5bea0e2978859c9
|
[
"MIT"
] | 10
|
2017-05-29T06:53:42.000Z
|
2020-05-22T10:29:00.000Z
|
import json
import requests
from random import randint
class sendotp:
def __init__(self, key, msg):
self.baseUrl = "http://control.msg91.com"
self.authkey = key
# Fall back to a default template when no message text is supplied.
if msg:
self.msg = msg
else:
self.msg = "Your otp is {{otp}}. Please do not share it with anybody"
def actionURLBuilder(self, actionurl):
# print self.baseUrl + '/api/' +str(actionurl)
print (actionurl)
return self.baseUrl + '/api/' + str(actionurl)
def generateOtp(self):
return randint(1000, 9999)
def send(self, contactNumber, senderId, otp):
values = {
'authkey': self.authkey,
'mobile': contactNumber,
'message': self.msg.replace("{{otp}}", str(otp)),
'sender': senderId,
'otp': otp
}
print (self.call('sendotp.php', values))
return otp
def retry(self, contactNumber, retrytype='voice'):
values = {
'authkey': self.authkey,
'mobile': contactNumber,
'retrytype': retrytype
}
print (values)
response = self.call('retryotp.php', values)
return
def verify(self, contactNumber, otp):
values = {
'authkey': self.authkey,
'mobile': contactNumber,
'otp': otp
}
response = self.call('verifyRequestOTP.php', values)
return response
def call(self, actionurl, args):
url = self.actionURLBuilder(actionurl)
print (url)
payload = (args)
response = requests.post(url, data=payload, verify=False)
print (response.text)
return response.status_code
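# Illustrative usage (added for clarity; the auth key and phone number below are
# placeholders, not real credentials):
#
#   otp_client = sendotp("YOUR_MSG91_AUTH_KEY", "Your otp is {{otp}}")
#   otp = otp_client.send("919999999999", "SENDER", otp_client.generateOtp())
#   status = otp_client.verify("919999999999", otp)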
| 26.515152
| 81
| 0.552
| 173
| 1,750
| 5.554913
| 0.393064
| 0.045786
| 0.05307
| 0.074922
| 0.194589
| 0.140479
| 0.095734
| 0
| 0
| 0
| 0
| 0.008562
| 0.332571
| 1,750
| 65
| 82
| 26.923077
| 0.814212
| 0.025143
| 0
| 0.211538
| 0
| 0
| 0.121479
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.134615
| false
| 0
| 0.057692
| 0.019231
| 0.307692
| 0.096154
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc8179bb642e9880741040dd5588b31584a47da9
| 528
|
py
|
Python
|
leetcode/1021-remove-outermost-parentheses.py
|
tjeubaoit/algorithm
|
a1f2a30e0f736cc3d8b45ed845f724b9a4ed2e9a
|
[
"MIT"
] | null | null | null |
leetcode/1021-remove-outermost-parentheses.py
|
tjeubaoit/algorithm
|
a1f2a30e0f736cc3d8b45ed845f724b9a4ed2e9a
|
[
"MIT"
] | null | null | null |
leetcode/1021-remove-outermost-parentheses.py
|
tjeubaoit/algorithm
|
a1f2a30e0f736cc3d8b45ed845f724b9a4ed2e9a
|
[
"MIT"
] | null | null | null |
class Solution:
def removeOuterParentheses(self, s: str) -> str:
ans = []
ct = 0
for ch in s:
if ch == '(':
ct += 1
if ct != 1:
ans.append(ch)
else:
ct -= 1
if ct != 0:
ans.append(ch)
return ''.join(ans)
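# Worked example (added for illustration): for s = '(()())(())' the counter-based
# scan keeps every '(' seen when ct != 1 and every ')' seen when ct != 0, dropping
# exactly the outermost pair of each primitive part, so the result is
# '()()' + '()' = '()()()'.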
if __name__ == '__main__':
# s = '(()())(())'
# s = '(()())(())(()(()))'
s = '()()'
ret = Solution().removeOuterParentheses(s)
print(ret)
| 22.956522
| 52
| 0.350379
| 49
| 528
| 3.612245
| 0.469388
| 0.050847
| 0.056497
| 0.079096
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017731
| 0.465909
| 528
| 22
| 53
| 24
| 0.609929
| 0.077652
| 0
| 0.111111
| 0
| 0
| 0.02686
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0
| 0
| 0.166667
| 0.055556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc821cb035c84e746a53eb83ce1e63b3b5c31ae6
| 9,911
|
py
|
Python
|
js2py/evaljs.py
|
inprod/Js2Py
|
0af8cb100b7840e23358d220c685507163f2344e
|
[
"MIT"
] | null | null | null |
js2py/evaljs.py
|
inprod/Js2Py
|
0af8cb100b7840e23358d220c685507163f2344e
|
[
"MIT"
] | null | null | null |
js2py/evaljs.py
|
inprod/Js2Py
|
0af8cb100b7840e23358d220c685507163f2344e
|
[
"MIT"
] | null | null | null |
# coding=utf-8
from .translators import translate_js, DEFAULT_HEADER
from .es6 import js6_to_js5
import sys
import time
import json
import six
import os
import hashlib
import codecs
__all__ = [
'EvalJs', 'translate_js', 'import_js', 'eval_js', 'translate_file',
'eval_js6', 'translate_js6', 'run_file', 'disable_pyimport',
'get_file_contents', 'write_file_contents'
]
DEBUG = False
def disable_pyimport():
import pyjsparser.parser
pyjsparser.parser.ENABLE_PYIMPORT = False
def path_as_local(path):
if os.path.isabs(path):
return path
# relative to cwd
return os.path.join(os.getcwd(), path)
def import_js(path, lib_name, globals):
"""Imports from javascript source file.
globals is your globals()"""
with codecs.open(path_as_local(path), "r", "utf-8") as f:
js = f.read()
e = EvalJs()
e.execute(js)
var = e.context['var']
globals[lib_name] = var.to_python()
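# Example (illustrative; 'example.js' and the 'example' name are placeholders):
#   import_js('example.js', 'example', globals())
# makes the JS library defined in example.js available as the Python object `example`.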
def get_file_contents(path_or_file):
if hasattr(path_or_file, 'read'):
js = path_or_file.read()
else:
with codecs.open(path_as_local(path_or_file), "r", "utf-8") as f:
js = f.read()
return js
def write_file_contents(path_or_file, contents):
if hasattr(path_or_file, 'write'):
path_or_file.write(contents)
else:
with open(path_as_local(path_or_file), 'w') as f:
f.write(contents)
def translate_file(input_path, output_path):
'''
Translates input JS file to python and saves the it to the output path.
It appends some convenience code at the end so that it is easy to import JS objects.
For example we have a file 'example.js' with: var a = function(x) {return x}
translate_file('example.js', 'example.py')
Now example.py can be easily imported and used:
>>> from example import example
>>> example.a(30)
30
'''
js = get_file_contents(input_path)
py_code = translate_js(js)
lib_name = os.path.basename(output_path).split('.')[0]
head = '__all__ = [%s]\n\n# Don\'t look below, you will not understand this Python code :) I don\'t.\n\n' % repr(
lib_name)
tail = '\n\n# Add lib to the module scope\n%s = var.to_python()' % lib_name
out = head + py_code + tail
write_file_contents(output_path, out)
def run_file(path_or_file, context=None):
''' Context must be EvalJS object. Runs given path as a JS program. Returns (eval_value, context).
'''
if context is None:
context = EvalJs()
if not isinstance(context, EvalJs):
raise TypeError('context must be the instance of EvalJs')
eval_value = context.eval(get_file_contents(path_or_file))
return eval_value, context
def eval_js(js):
"""Just like javascript eval. Translates javascript to python,
executes and returns python object.
js is javascript source code
EXAMPLE:
>>> import js2py
>>> add = js2py.eval_js('function add(a, b) {return a + b}')
>>> add(1, 2) + 3
6
>>> add('1', 2, 3)
u'12'
>>> add.constructor
function Function() { [python code] }
NOTE: For Js Number, String, Boolean and other base types returns appropriate python BUILTIN type.
For Js functions and objects, returns Python wrapper - basically behaves like normal python object.
If you really want to convert object to python dict you can use to_dict method.
"""
e = EvalJs()
return e.eval(js)
def eval_js6(js):
"""Just like eval_js but with experimental support for js6 via babel."""
return eval_js(js6_to_js5(js))
def translate_js6(js):
"""Just like translate_js but with experimental support for js6 via babel."""
return translate_js(js6_to_js5(js))
class EvalJs(object):
"""This class supports continuous execution of javascript under same context.
>>> ctx = EvalJs()
>>> ctx.execute('var a = 10;function f(x) {return x*x};')
>>> ctx.f(9)
81
>>> ctx.a
10
context is a python dict or object that contains python variables that should be available to JavaScript
For example:
>>> ctx = EvalJs({'a': 30})
>>> ctx.execute('var x = a')
>>> ctx.x
30
You can enable JS require function via enable_require. With this feature enabled you can use js modules
from npm, for example:
>>> ctx = EvalJs(enable_require=True)
>>> ctx.execute("var esprima = require('esprima');")
>>> ctx.execute("esprima.parse('var a = 1')")
You can run interactive javascript console with console method!"""
def __init__(self, context={}, enable_require=False):
self.__dict__['_context'] = {}
exec (DEFAULT_HEADER, self._context)
self.__dict__['_var'] = self._context['var'].to_python()
if enable_require:
def _js_require_impl(npm_module_name):
from .node_import import require
from .base import to_python
return require(to_python(npm_module_name), context=self._context)
setattr(self._var, 'require', _js_require_impl)
if not isinstance(context, dict):
try:
context = context.__dict__
except:
raise TypeError(
'context has to be either a dict or have __dict__ attr')
for k, v in six.iteritems(context):
setattr(self._var, k, v)
def execute(self, js=None, use_compilation_plan=False):
"""executes javascript js in current context
During initial execute() the converted js is cached for re-use. That means next time you
run the same javascript snippet you save many instructions needed to parse and convert the
js code to python code.
This cache causes minor overhead (a cache dict is updated) but the Js=>Py conversion process
is typically expensive compared to actually running the generated python code.
Note that the cache is just a dict, it has no expiration or cleanup so when running this
in automated situations with vast amounts of snippets it might increase memory usage.
"""
try:
cache = self.__dict__['cache']
except KeyError:
cache = self.__dict__['cache'] = {}
hashkey = hashlib.md5(js.encode('utf-8')).digest()
try:
compiled = cache[hashkey]
except KeyError:
code = translate_js(
js, '', use_compilation_plan=use_compilation_plan)
compiled = cache[hashkey] = compile(code, '<EvalJS snippet>',
'exec')
exec (compiled, self._context)
def eval(self, expression, use_compilation_plan=False):
"""evaluates expression in current context and returns its value"""
code = 'PyJsEvalResult = eval(%s)' % json.dumps(expression)
self.execute(code, use_compilation_plan=use_compilation_plan)
return self['PyJsEvalResult']
def eval_js6(self, expression, use_compilation_plan=False):
"""same as eval, except that the JS code gets translated from es6 to es5 before being executed."""
es5_expression = js6_to_js5(expression)
return self.eval(es5_expression, use_compilation_plan)
def execute_debug(self, js):
"""executes javascript js in current context
as opposed to the (faster) self.execute method, you can use your regular debugger
to set breakpoints and inspect the generated python code
"""
code = translate_js(js, '')
# make sure you have a temp folder:
filename = 'temp' + os.sep + '_' + hashlib.md5(
code.encode("utf-8")).hexdigest() + '.py'
try:
with open(filename, mode='w') as f:
f.write(code)
with open(filename, "r") as f:
pyCode = compile(f.read(), filename, 'exec')
exec(pyCode, self._context)
except Exception as err:
raise err
finally:
os.remove(filename)
try:
os.remove(filename + 'c')
except:
pass
def eval_debug(self, expression):
"""evaluates expression in current context and returns its value
as opposed to the (faster) self.execute method, you can use your regular debugger
to set breakpoints and inspect the generated python code
"""
code = 'PyJsEvalResult = eval(%s)' % json.dumps(expression)
self.execute_debug(code)
return self['PyJsEvalResult']
@property
def context(self):
return self._context
def __getattr__(self, var):
return getattr(self._var, var)
def __getitem__(self, var):
return getattr(self._var, var)
def __setattr__(self, var, val):
return setattr(self._var, var, val)
def __setitem__(self, var, val):
return setattr(self._var, var, val)
def console(self):
"""starts to interact (starts interactive console) Something like code.InteractiveConsole"""
while True:
if six.PY2:
code = raw_input('>>> ')
else:
code = input('>>>')
try:
print(self.eval(code))
except KeyboardInterrupt:
break
except Exception as e:
import traceback
if DEBUG:
sys.stderr.write(traceback.format_exc())
else:
sys.stderr.write('EXCEPTION: ' + str(e) + '\n')
time.sleep(0.01)
#print x
if __name__ == '__main__':
#with open('C:\Users\Piotrek\Desktop\esprima.js', 'rb') as f:
# x = f.read()
e = EvalJs()
e.execute('square(x)')
#e.execute(x)
e.console()
| 34.058419
| 117
| 0.613258
| 1,289
| 9,911
| 4.550039
| 0.250582
| 0.01023
| 0.01705
| 0.01023
| 0.205968
| 0.188235
| 0.136573
| 0.121398
| 0.10503
| 0.069565
| 0
| 0.008329
| 0.285239
| 9,911
| 290
| 118
| 34.175862
| 0.819593
| 0.341641
| 0
| 0.169811
| 0
| 0
| 0.086289
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.144654
| false
| 0.006289
| 0.113208
| 0.031447
| 0.36478
| 0.006289
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc836898d52446a26166934c5d0c314e5b3ac86f
| 24,883
|
py
|
Python
|
tools/verity_utils.py
|
FabriSC/Alioth-SC
|
bbe9723401b351c2a34b09a30978373d456d20a2
|
[
"MIT"
] | 3
|
2022-03-16T12:31:10.000Z
|
2022-03-23T04:20:20.000Z
|
bin/verity_utils.py
|
affggh/NH4RomTool
|
84b06f9cc5f268c14c7af25e91c8b242188c70f7
|
[
"Apache-2.0"
] | null | null | null |
bin/verity_utils.py
|
affggh/NH4RomTool
|
84b06f9cc5f268c14c7af25e91c8b242188c70f7
|
[
"Apache-2.0"
] | 1
|
2022-03-30T04:47:35.000Z
|
2022-03-30T04:47:35.000Z
|
#!/usr/bin/env python
#
# Copyright (C) 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import logging
import os.path
import shlex
import struct
import common
import sparse_img
from rangelib import RangeSet
logger = logging.getLogger(__name__)
OPTIONS = common.OPTIONS
BLOCK_SIZE = common.BLOCK_SIZE
FIXED_SALT = "aee087a5be3b982978c923f566a94613496b417f2af592639bc80d141e34dfe7"
class BuildVerityImageError(Exception):
"""An Exception raised during verity image building."""
def __init__(self, message):
Exception.__init__(self, message)
def GetVerityFECSize(image_size):
cmd = ["fec", "-s", str(image_size)]
output = common.RunAndCheckOutput(cmd, verbose=False)
return int(output)
def GetVerityTreeSize(image_size):
cmd = ["build_verity_tree", "-s", str(image_size)]
output = common.RunAndCheckOutput(cmd, verbose=False)
return int(output)
def GetVerityMetadataSize(image_size):
cmd = ["build_verity_metadata", "size", str(image_size)]
output = common.RunAndCheckOutput(cmd, verbose=False)
return int(output)
def GetVeritySize(image_size, fec_supported):
verity_tree_size = GetVerityTreeSize(image_size)
verity_metadata_size = GetVerityMetadataSize(image_size)
verity_size = verity_tree_size + verity_metadata_size
if fec_supported:
fec_size = GetVerityFECSize(image_size + verity_size)
return verity_size + fec_size
return verity_size
def GetSimgSize(image_file):
simg = sparse_img.SparseImage(image_file, build_map=False)
return simg.blocksize * simg.total_blocks
def ZeroPadSimg(image_file, pad_size):
blocks = pad_size // BLOCK_SIZE
logger.info("Padding %d blocks (%d bytes)", blocks, pad_size)
simg = sparse_img.SparseImage(image_file, mode="r+b", build_map=False)
simg.AppendFillChunk(0, blocks)
def BuildVerityFEC(sparse_image_path, verity_path, verity_fec_path,
padding_size):
cmd = ["fec", "-e", "-p", str(padding_size), sparse_image_path,
verity_path, verity_fec_path]
common.RunAndCheckOutput(cmd)
def BuildVerityTree(sparse_image_path, verity_image_path):
cmd = ["build_verity_tree", "-A", FIXED_SALT, sparse_image_path,
verity_image_path]
output = common.RunAndCheckOutput(cmd)
root, salt = output.split()
return root, salt
def BuildVerityMetadata(image_size, verity_metadata_path, root_hash, salt,
block_device, signer_path, key, signer_args,
verity_disable):
cmd = ["build_verity_metadata", "build", str(image_size),
verity_metadata_path, root_hash, salt, block_device, signer_path, key]
if signer_args:
cmd.append("--signer_args=\"%s\"" % (' '.join(signer_args),))
if verity_disable:
cmd.append("--verity_disable")
common.RunAndCheckOutput(cmd)
def Append2Simg(sparse_image_path, unsparse_image_path, error_message):
"""Appends the unsparse image to the given sparse image.
Args:
sparse_image_path: the path to the (sparse) image
unsparse_image_path: the path to the (unsparse) image
Raises:
BuildVerityImageError: On error.
"""
cmd = ["append2simg", sparse_image_path, unsparse_image_path]
try:
common.RunAndCheckOutput(cmd)
except:
logger.exception(error_message)
raise BuildVerityImageError(error_message)
def Append(target, file_to_append, error_message):
"""Appends file_to_append to target.
Raises:
BuildVerityImageError: On error.
"""
try:
with open(target, 'ab') as out_file, \
open(file_to_append, 'rb') as input_file:
for line in input_file:
out_file.write(line)
except IOError:
logger.exception(error_message)
raise BuildVerityImageError(error_message)
def CreateVerityImageBuilder(prop_dict):
"""Returns a verity image builder based on the given build properties.
Args:
prop_dict: A dict that contains the build properties. In particular, it will
look for verity-related property values.
Returns:
A VerityImageBuilder instance for Verified Boot 1.0 or Verified Boot 2.0; or
None if the given build doesn't support Verified Boot.
"""
partition_size = prop_dict.get("partition_size")
# partition_size could be None at this point, if using dynamic partitions.
if partition_size:
partition_size = int(partition_size)
# Verified Boot 1.0
verity_supported = prop_dict.get("verity") == "true"
is_verity_partition = "verity_block_device" in prop_dict
if verity_supported and is_verity_partition:
if OPTIONS.verity_signer_path is not None:
signer_path = OPTIONS.verity_signer_path
else:
signer_path = prop_dict["verity_signer_cmd"]
return Version1VerityImageBuilder(
partition_size,
prop_dict["verity_block_device"],
prop_dict.get("verity_fec") == "true",
signer_path,
prop_dict["verity_key"] + ".pk8",
OPTIONS.verity_signer_args,
"verity_disable" in prop_dict)
# Verified Boot 2.0
if (prop_dict.get("avb_hash_enable") == "true" or
prop_dict.get("avb_hashtree_enable") == "true"):
# key_path and algorithm are only available when chain partition is used.
key_path = prop_dict.get("avb_key_path")
algorithm = prop_dict.get("avb_algorithm")
# Image uses hash footer.
if prop_dict.get("avb_hash_enable") == "true":
return VerifiedBootVersion2VerityImageBuilder(
prop_dict["partition_name"],
partition_size,
VerifiedBootVersion2VerityImageBuilder.AVB_HASH_FOOTER,
prop_dict["avb_avbtool"],
key_path,
algorithm,
prop_dict.get("avb_salt"),
prop_dict["avb_add_hash_footer_args"])
# Image uses hashtree footer.
return VerifiedBootVersion2VerityImageBuilder(
prop_dict["partition_name"],
partition_size,
VerifiedBootVersion2VerityImageBuilder.AVB_HASHTREE_FOOTER,
prop_dict["avb_avbtool"],
key_path,
algorithm,
prop_dict.get("avb_salt"),
prop_dict["avb_add_hashtree_footer_args"])
return None
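# Illustrative sketch (added for clarity; the property values below are
# hypothetical): a Verified Boot 1.0 style prop_dict selects
# Version1VerityImageBuilder, e.g.
#
#   builder = CreateVerityImageBuilder({
#       "partition_size": "1073741824",
#       "verity": "true",
#       "verity_block_device": "/dev/block/bootdevice/by-name/system",
#       "verity_key": "build/target/product/security/verity",
#       "verity_signer_cmd": "verity_signer",
#   })
#   max_image_size = builder.CalculateMaxImageSize()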
class VerityImageBuilder(object):
"""A builder that generates an image with verity metadata for Verified Boot.
A VerityImageBuilder instance handles the works for building an image with
verity metadata for supporting Android Verified Boot. This class defines the
common interface between Verified Boot 1.0 and Verified Boot 2.0. A matching
builder will be returned based on the given build properties.
More info on the verity image generation can be found at the following link.
https://source.android.com/security/verifiedboot/dm-verity#implementation
"""
def CalculateMaxImageSize(self, partition_size):
"""Calculates the filesystem image size for the given partition size."""
raise NotImplementedError
def CalculateDynamicPartitionSize(self, image_size):
"""Calculates and sets the partition size for a dynamic partition."""
raise NotImplementedError
def PadSparseImage(self, out_file):
"""Adds padding to the generated sparse image."""
raise NotImplementedError
def Build(self, out_file):
"""Builds the verity image and writes it to the given file."""
raise NotImplementedError
class Version1VerityImageBuilder(VerityImageBuilder):
"""A VerityImageBuilder for Verified Boot 1.0."""
def __init__(self, partition_size, block_dev, fec_supported, signer_path,
signer_key, signer_args, verity_disable):
self.version = 1
self.partition_size = partition_size
self.block_device = block_dev
self.fec_supported = fec_supported
self.signer_path = signer_path
self.signer_key = signer_key
self.signer_args = signer_args
self.verity_disable = verity_disable
self.image_size = None
self.verity_size = None
def CalculateDynamicPartitionSize(self, image_size):
# This needs to be implemented. Note that returning the given image size as
# the partition size doesn't make sense, as it will fail later.
raise NotImplementedError
def CalculateMaxImageSize(self, partition_size=None):
"""Calculates the max image size by accounting for the verity metadata.
Args:
partition_size: The partition size, which defaults to self.partition_size
if unspecified.
Returns:
The size of the image adjusted for verity metadata.
"""
if partition_size is None:
partition_size = self.partition_size
assert partition_size > 0, \
"Invalid partition size: {}".format(partition_size)
hi = partition_size
if hi % BLOCK_SIZE != 0:
hi = (hi // BLOCK_SIZE) * BLOCK_SIZE
# verity tree and fec sizes depend on the partition size, which
# means this estimate is always going to be unnecessarily small
verity_size = GetVeritySize(hi, self.fec_supported)
lo = partition_size - verity_size
result = lo
# do a binary search for the optimal size
while lo < hi:
i = ((lo + hi) // (2 * BLOCK_SIZE)) * BLOCK_SIZE
v = GetVeritySize(i, self.fec_supported)
if i + v <= partition_size:
if result < i:
result = i
verity_size = v
lo = i + BLOCK_SIZE
else:
hi = i
self.image_size = result
self.verity_size = verity_size
logger.info(
"Calculated image size for verity: partition_size %d, image_size %d, "
"verity_size %d", partition_size, result, verity_size)
return result
def Build(self, out_file):
"""Creates an image that is verifiable using dm-verity.
Args:
out_file: the output image.
Raises:
AssertionError: On invalid partition sizes.
BuildVerityImageError: On other errors.
"""
image_size = int(self.image_size)
tempdir_name = common.MakeTempDir(suffix="_verity_images")
# Get partial image paths.
verity_image_path = os.path.join(tempdir_name, "verity.img")
verity_metadata_path = os.path.join(tempdir_name, "verity_metadata.img")
# Build the verity tree and get the root hash and salt.
root_hash, salt = BuildVerityTree(out_file, verity_image_path)
# Build the metadata blocks.
BuildVerityMetadata(
image_size, verity_metadata_path, root_hash, salt, self.block_device,
self.signer_path, self.signer_key, self.signer_args,
self.verity_disable)
padding_size = self.partition_size - self.image_size - self.verity_size
assert padding_size >= 0
# Build the full verified image.
Append(
verity_image_path, verity_metadata_path,
"Failed to append verity metadata")
if self.fec_supported:
# Build FEC for the entire partition, including metadata.
verity_fec_path = os.path.join(tempdir_name, "verity_fec.img")
BuildVerityFEC(
out_file, verity_image_path, verity_fec_path, padding_size)
Append(verity_image_path, verity_fec_path, "Failed to append FEC")
Append2Simg(
out_file, verity_image_path, "Failed to append verity data")
def PadSparseImage(self, out_file):
sparse_image_size = GetSimgSize(out_file)
if sparse_image_size > self.image_size:
raise BuildVerityImageError(
"Error: image size of {} is larger than partition size of "
"{}".format(sparse_image_size, self.image_size))
ZeroPadSimg(out_file, self.image_size - sparse_image_size)
class VerifiedBootVersion2VerityImageBuilder(VerityImageBuilder):
"""A VerityImageBuilder for Verified Boot 2.0."""
AVB_HASH_FOOTER = 1
AVB_HASHTREE_FOOTER = 2
def __init__(self, partition_name, partition_size, footer_type, avbtool,
key_path, algorithm, salt, signing_args):
self.version = 2
self.partition_name = partition_name
self.partition_size = partition_size
self.footer_type = footer_type
self.avbtool = avbtool
self.algorithm = algorithm
self.key_path = key_path
self.salt = salt
self.signing_args = signing_args
self.image_size = None
def CalculateMinPartitionSize(self, image_size, size_calculator=None):
"""Calculates min partition size for a given image size.
This is used when determining the partition size for a dynamic partition,
which should cover the given image size (for filesystem files) as well as
the verity metadata size.
Args:
image_size: The size of the image in question.
size_calculator: The function to calculate max image size
for a given partition size.
Returns:
The minimum partition size required to accommodate the image size.
"""
if size_calculator is None:
size_calculator = self.CalculateMaxImageSize
# Use image size as partition size to approximate final partition size.
image_ratio = size_calculator(image_size) / float(image_size)
# Prepare a binary search for the optimal partition size.
lo = int(image_size / image_ratio) // BLOCK_SIZE * BLOCK_SIZE - BLOCK_SIZE
# Ensure lo is small enough: max_image_size should <= image_size.
delta = BLOCK_SIZE
max_image_size = size_calculator(lo)
while max_image_size > image_size:
image_ratio = max_image_size / float(lo)
lo = int(image_size / image_ratio) // BLOCK_SIZE * BLOCK_SIZE - delta
delta *= 2
max_image_size = size_calculator(lo)
hi = lo + BLOCK_SIZE
# Ensure hi is large enough: max_image_size should >= image_size.
delta = BLOCK_SIZE
max_image_size = size_calculator(hi)
while max_image_size < image_size:
image_ratio = max_image_size / float(hi)
hi = int(image_size / image_ratio) // BLOCK_SIZE * BLOCK_SIZE + delta
delta *= 2
max_image_size = size_calculator(hi)
partition_size = hi
# Start to binary search.
while lo < hi:
mid = ((lo + hi) // (2 * BLOCK_SIZE)) * BLOCK_SIZE
max_image_size = size_calculator(mid)
if max_image_size >= image_size: # if mid can accommodate image_size
if mid < partition_size: # if a smaller partition size is found
partition_size = mid
hi = mid
else:
lo = mid + BLOCK_SIZE
logger.info(
"CalculateMinPartitionSize(%d): partition_size %d.", image_size,
partition_size)
return partition_size
def CalculateDynamicPartitionSize(self, image_size):
self.partition_size = self.CalculateMinPartitionSize(image_size)
return self.partition_size
def CalculateMaxImageSize(self, partition_size=None):
"""Calculates max image size for a given partition size.
Args:
partition_size: The partition size, which defaults to self.partition_size
if unspecified.
Returns:
The maximum image size.
Raises:
BuildVerityImageError: On error or getting invalid image size.
"""
if partition_size is None:
partition_size = self.partition_size
assert partition_size > 0, \
"Invalid partition size: {}".format(partition_size)
add_footer = ("add_hash_footer" if self.footer_type == self.AVB_HASH_FOOTER
else "add_hashtree_footer")
cmd = [self.avbtool, add_footer, "--partition_size",
str(partition_size), "--calc_max_image_size"]
cmd.extend(shlex.split(self.signing_args))
proc = common.Run(cmd)
output, _ = proc.communicate()
if proc.returncode != 0:
raise BuildVerityImageError(
"Failed to calculate max image size:\n{}".format(output))
image_size = int(output)
if image_size <= 0:
raise BuildVerityImageError(
"Invalid max image size: {}".format(output))
self.image_size = image_size
return image_size
def PadSparseImage(self, out_file):
# No-op as the padding is taken care of by avbtool.
pass
def Build(self, out_file):
"""Adds dm-verity hashtree and AVB metadata to an image.
Args:
out_file: Path to image to modify.
"""
add_footer = ("add_hash_footer" if self.footer_type == self.AVB_HASH_FOOTER
else "add_hashtree_footer")
cmd = [self.avbtool, add_footer,
"--partition_size", str(self.partition_size),
"--partition_name", self.partition_name,
"--image", out_file]
if self.key_path and self.algorithm:
cmd.extend(["--key", self.key_path, "--algorithm", self.algorithm])
if self.salt:
cmd.extend(["--salt", self.salt])
cmd.extend(shlex.split(self.signing_args))
proc = common.Run(cmd)
output, _ = proc.communicate()
if proc.returncode != 0:
raise BuildVerityImageError("Failed to add AVB footer: {}".format(output))
class HashtreeInfoGenerationError(Exception):
"""An Exception raised during hashtree info generation."""
def __init__(self, message):
Exception.__init__(self, message)
class HashtreeInfo(object):
def __init__(self):
self.hashtree_range = None
self.filesystem_range = None
self.hash_algorithm = None
self.salt = None
self.root_hash = None
def CreateHashtreeInfoGenerator(partition_name, block_size, info_dict):
generator = None
if (info_dict.get("verity") == "true" and
info_dict.get("{}_verity_block_device".format(partition_name))):
partition_size = info_dict["{}_size".format(partition_name)]
fec_supported = info_dict.get("verity_fec") == "true"
generator = VerifiedBootVersion1HashtreeInfoGenerator(
partition_size, block_size, fec_supported)
return generator
class HashtreeInfoGenerator(object):
def Generate(self, image):
raise NotImplementedError
def DecomposeSparseImage(self, image):
raise NotImplementedError
def ValidateHashtree(self):
raise NotImplementedError
class VerifiedBootVersion1HashtreeInfoGenerator(HashtreeInfoGenerator):
"""A class that parses the metadata of hashtree for a given partition."""
def __init__(self, partition_size, block_size, fec_supported):
"""Initialize VerityTreeInfo with the sparse image and input property.
Arguments:
partition_size: The whole size in bytes of a partition, including the
filesystem size, padding size, and verity size.
block_size: Expected size in bytes of each block for the sparse image.
fec_supported: True if the verity section contains fec data.
"""
self.block_size = block_size
self.partition_size = partition_size
self.fec_supported = fec_supported
self.image = None
self.filesystem_size = None
self.hashtree_size = None
self.metadata_size = None
prop_dict = {
'partition_size': str(partition_size),
'verity': 'true',
'verity_fec': 'true' if fec_supported else None,
# 'verity_block_device' needs to be present to indicate a verity-enabled
# partition.
'verity_block_device': '',
# We don't need the following properties that are needed for signing the
# verity metadata.
'verity_key': '',
'verity_signer_cmd': None,
}
self.verity_image_builder = CreateVerityImageBuilder(prop_dict)
self.hashtree_info = HashtreeInfo()
def DecomposeSparseImage(self, image):
"""Calculate the verity size based on the size of the input image.
Since we already know the structure of a verity enabled image to be:
[filesystem, verity_hashtree, verity_metadata, fec_data]. We can then
calculate the size and offset of each section.
"""
self.image = image
assert self.block_size == image.blocksize
assert self.partition_size == image.total_blocks * self.block_size, \
"partition size {} doesn't match with the calculated image size." \
" total_blocks: {}".format(self.partition_size, image.total_blocks)
adjusted_size = self.verity_image_builder.CalculateMaxImageSize()
assert adjusted_size % self.block_size == 0
verity_tree_size = GetVerityTreeSize(adjusted_size)
assert verity_tree_size % self.block_size == 0
metadata_size = GetVerityMetadataSize(adjusted_size)
assert metadata_size % self.block_size == 0
self.filesystem_size = adjusted_size
self.hashtree_size = verity_tree_size
self.metadata_size = metadata_size
self.hashtree_info.filesystem_range = RangeSet(
data=[0, adjusted_size // self.block_size])
self.hashtree_info.hashtree_range = RangeSet(
data=[adjusted_size // self.block_size,
(adjusted_size + verity_tree_size) // self.block_size])
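  # Worked example (illustrative numbers, not from the original source): for a
  # 1 GiB partition with 4096-byte blocks, CalculateMaxImageSize() returns the
  # largest filesystem size that still leaves room for the hashtree, metadata
  # and (optionally) FEC data, so the image decomposes as
  #   [0, adjusted_size)                                  -> filesystem
  #   [adjusted_size, adjusted_size + verity_tree_size)   -> verity hashtree
  #   [..., ... + metadata_size)                          -> verity metadata
  # with every boundary falling on a block_size multiple.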
def _ParseHashtreeMetadata(self):
"""Parses the hash_algorithm, root_hash, salt from the metadata block."""
metadata_start = self.filesystem_size + self.hashtree_size
metadata_range = RangeSet(
data=[metadata_start // self.block_size,
(metadata_start + self.metadata_size) // self.block_size])
meta_data = b''.join(self.image.ReadRangeSet(metadata_range))
# More info about the metadata structure available in:
# system/extras/verity/build_verity_metadata.py
META_HEADER_SIZE = 268
header_bin = meta_data[0:META_HEADER_SIZE]
header = struct.unpack("II256sI", header_bin)
# header: magic_number, version, signature, table_len
assert header[0] == 0xb001b001, header[0]
table_len = header[3]
verity_table = meta_data[META_HEADER_SIZE: META_HEADER_SIZE + table_len]
table_entries = verity_table.rstrip().split()
# Expected verity table format: "1 block_device block_device block_size
# block_size data_blocks data_blocks hash_algorithm root_hash salt"
assert len(table_entries) == 10, "Unexpected verity table size {}".format(
len(table_entries))
assert (int(table_entries[3]) == self.block_size and
int(table_entries[4]) == self.block_size)
assert (int(table_entries[5]) * self.block_size == self.filesystem_size and
int(table_entries[6]) * self.block_size == self.filesystem_size)
self.hashtree_info.hash_algorithm = table_entries[7].decode()
self.hashtree_info.root_hash = table_entries[8].decode()
self.hashtree_info.salt = table_entries[9].decode()
def ValidateHashtree(self):
"""Checks that we can reconstruct the verity hash tree."""
# Writes the filesystem section to a temp file; and calls the executable
# build_verity_tree to construct the hash tree.
adjusted_partition = common.MakeTempFile(prefix="adjusted_partition")
with open(adjusted_partition, "wb") as fd:
self.image.WriteRangeDataToFd(self.hashtree_info.filesystem_range, fd)
generated_verity_tree = common.MakeTempFile(prefix="verity")
root_hash, salt = BuildVerityTree(adjusted_partition, generated_verity_tree)
    # The salt should always be identical, as we use a fixed value.
assert salt == self.hashtree_info.salt, \
"Calculated salt {} doesn't match the one in metadata {}".format(
salt, self.hashtree_info.salt)
if root_hash != self.hashtree_info.root_hash:
logger.warning(
"Calculated root hash %s doesn't match the one in metadata %s",
root_hash, self.hashtree_info.root_hash)
return False
# Reads the generated hash tree and checks if it has the exact same bytes
# as the one in the sparse image.
with open(generated_verity_tree, 'rb') as fd:
return fd.read() == b''.join(self.image.ReadRangeSet(
self.hashtree_info.hashtree_range))
def Generate(self, image):
"""Parses and validates the hashtree info in a sparse image.
Returns:
hashtree_info: The information needed to reconstruct the hashtree.
Raises:
HashtreeInfoGenerationError: If we fail to generate the exact bytes of
the hashtree.
"""
self.DecomposeSparseImage(image)
self._ParseHashtreeMetadata()
if not self.ValidateHashtree():
raise HashtreeInfoGenerationError("Failed to reconstruct the verity tree")
return self.hashtree_info
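# Round-trip sketch (illustrative only, not part of the original module):
# builds a fake metadata header in the same "II256sI" layout that
# _ParseHashtreeMetadata() unpacks, then reads it back.  The device paths,
# block counts and hashes below are made up.
def _example_metadata_header_roundtrip():
  fake_table = (b"1 /dev/blk /dev/blk 4096 4096 262144 262144 sha256 " +
                b"00" * 32 + b" " + b"11" * 32)
  header = struct.pack("II256sI", 0xb001b001, 0, b"\x00" * 256, len(fake_table))
  magic, _version, _signature, table_len = struct.unpack("II256sI", header)
  assert magic == 0xb001b001 and table_len == len(fake_table)
  return fake_table[:table_len]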
def CreateCustomImageBuilder(info_dict, partition_name, partition_size,
key_path, algorithm, signing_args):
builder = None
if info_dict.get("avb_enable") == "true":
builder = VerifiedBootVersion2VerityImageBuilder(
partition_name,
partition_size,
VerifiedBootVersion2VerityImageBuilder.AVB_HASHTREE_FOOTER,
info_dict.get("avb_avbtool"),
key_path,
algorithm,
# Salt is None because custom images have no fingerprint property to be
# used as the salt.
None,
signing_args)
return builder
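# Usage sketch (illustrative; the partition name, sizes, key path and
# algorithm below are placeholders rather than values from the original
# source): requesting an AVB hashtree-footer builder for a custom image.
def _example_build_custom_image_builder():
  example_info_dict = {"avb_enable": "true", "avb_avbtool": "avbtool"}
  # Returns a VerifiedBootVersion2VerityImageBuilder configured to add an AVB
  # hashtree footer, or None when AVB is disabled in the info dict.
  return CreateCustomImageBuilder(
      example_info_dict, "oem", 64 * 1024 * 1024,
      "path/to/testkey_rsa4096.pem", "SHA256_RSA4096", "")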
| 34.704324
| 80
| 0.710405
| 3,183
| 24,883
| 5.326107
| 0.138863
| 0.062113
| 0.01805
| 0.009556
| 0.356043
| 0.251106
| 0.190114
| 0.155843
| 0.133605
| 0.121041
| 0
| 0.006529
| 0.206004
| 24,883
| 716
| 81
| 34.752793
| 0.851546
| 0.257324
| 0
| 0.260766
| 0
| 0
| 0.092269
| 0.012794
| 0
| 0
| 0.000554
| 0
| 0.0311
| 1
| 0.095694
| false
| 0.002392
| 0.019139
| 0
| 0.186603
| 0.002392
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc840d0dac6246e96ea6db4d9f0daf705ae65cf7
| 4,091
|
py
|
Python
|
orgviz/dones.py
|
tkf/orgviz
|
81a436265daa1fb8294a0186f50df76d9599ae38
|
[
"MIT"
] | 8
|
2015-02-04T23:03:36.000Z
|
2021-05-02T10:56:24.000Z
|
orgviz/dones.py
|
tkf/orgviz
|
81a436265daa1fb8294a0186f50df76d9599ae38
|
[
"MIT"
] | null | null | null |
orgviz/dones.py
|
tkf/orgviz
|
81a436265daa1fb8294a0186f50df76d9599ae38
|
[
"MIT"
] | 3
|
2018-04-23T08:18:13.000Z
|
2019-10-12T17:32:54.000Z
|
#!/usr/bin/env python
"""org archive to html table converter"""
import os
import datetime
import itertools
from .utils.date import minutestr, total_minutes
def rootname_from_archive_olpath(node):
"""
Find rootname from ARCHIVE_OLPATH property.
Return None if not found.
"""
olpath = node.get_property('ARCHIVE_OLPATH')
if olpath:
olpathlist = olpath.split('/', 1)
if len(olpathlist) > 1:
(rootname, dummy) = olpathlist
else:
rootname = olpath
return rootname
return None
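# Example (illustrative values): an ARCHIVE_OLPATH of "Projects/orgviz/tasks"
# yields the rootname "Projects", a single-component path such as "Projects"
# is returned as-is, and a node without the property yields None.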
def find_rootname(node):
"""
Find rootname given node
"""
rootname = rootname_from_archive_olpath(node)
if not rootname:
n = node
p = node.parent
while not p.is_root():
n = p
p = p.parent
# n is root node
rootname = rootname_from_archive_olpath(n) or n.heading
return rootname
def key_row_from_node(node):
"""
    Return a tuple (key, row) whose elements are a key object for sorting
    the table and a dictionary with the following keywords:
    heading, closed, scheduled, effort, clocksum, rootname.
"""
heading = node.heading
# find rootname
rootname = find_rootname(node)
if heading == rootname:
rootname = ""
# calc clocksum if CLOCK exists
clocksum = ''
clocklist = node.clock
if clocklist:
clocksum = sum([total_minutes(c.duration) for c in clocklist])
closed = node.closed
scheduled = node.scheduled
effort = node.get_property('Effort')
row = dict(
heading=heading,
closed=closed and closed.start.strftime('%a %d %b %H:%M'),
scheduled=scheduled and scheduled.start.strftime('%a %d %b %H:%M'),
effort=effort and minutestr(effort),
clocksum=clocksum and minutestr(clocksum),
rootname=rootname,
)
return (closed.start if closed else None, row)
def unique_name_from_paths(pathlist):
namelist = []
for path in pathlist:
name = os.path.basename(path)
if name in namelist:
name_orig = name
i = 1
            # keep appending a counter until the name no longer collides
            while name in namelist:
name = "%s <%d>" % (name_orig, i)
i += 1
namelist.append(name)
return namelist
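# Example (illustrative): duplicate basenames get a numeric suffix, e.g.
#   unique_name_from_paths(["a/dones.org", "b/dones.org", "c/todo.org"])
#   -> ["dones.org", "dones.org <1>", "todo.org"]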
def sameday(dt1, dt2):
return (isinstance(dt1, datetime.date) and
isinstance(dt2, datetime.date) and
dt1.year == dt2.year and
dt1.month == dt2.month and
dt1.day == dt2.day)
def table_add_oddday(key_table):
"""
    Add an 'oddday' key to each row of key_table *IN PLACE*.
    Note that each key should be a ``datetime.date`` object.
"""
previous = None
odd = True
for (key, row) in key_table:
this = key
if not sameday(this, previous):
odd = not odd
row['oddday'] = odd
previous = this
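# Small self-contained example (not part of the original module) of the
# in-place behaviour described above.
def _example_oddday():
    """Illustrative only: rows closed on the same day share the same 'oddday'
    flag, which a template can use for alternating day-by-day shading."""
    rows = [
        (datetime.date(2021, 1, 2), {'heading': 'task a'}),
        (datetime.date(2021, 1, 2), {'heading': 'task b'}),
        (datetime.date(2021, 1, 1), {'heading': 'task c'}),
    ]
    table_add_oddday(rows)
    return [row['oddday'] for (_key, row) in rows]  # [False, False, True]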
def get_data(orgnodes_list, orgpath_list, done, num=100):
"""
    Get data for rendering the jinja2 template. The data is a dictionary like this:

    table: list of `row`
        rows generated by ``key_row_from_node``
    orgpathname_list: list of `orgpathname`
        orgpathname: dict
            contains `orgpath` and `orgname`;
            `orgname` is a short, unique name for `orgpath`.
    title: str
        a title
"""
key_table = []
orgname_list = unique_name_from_paths(orgpath_list)
for (nodelist, orgname) in zip(orgnodes_list, orgname_list):
for node in nodelist:
if node.todo == done:
(key, row) = key_row_from_node(node)
if key:
row['orgname'] = orgname
key_table.append((key, row))
orgpathname_list = [
dict(orgpath=orgpath, orgname=orgname)
for (orgpath, orgname) in zip(orgpath_list, orgname_list)]
key_table.sort(reverse=True)
table_add_oddday(key_table)
table = list(itertools.islice((row for (key, row) in key_table), num))
return dict(table=table, orgpathname_list=orgpathname_list,
title='Recently archived tasks')
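# Usage sketch (illustrative; loading org files with `orgparse` is assumed and
# not shown in this module):
#   nodes_a = list(orgparse.load("work.org_archive")[1:])
#   nodes_b = list(orgparse.load("home.org_archive")[1:])
#   data = get_data([nodes_a, nodes_b],
#                   ["work.org_archive", "home.org_archive"],
#                   done="DONE", num=50)
#   # data["table"] can then be iterated over in the jinja2 template.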
| 29.014184
| 75
| 0.60792
| 508
| 4,091
| 4.779528
| 0.281496
| 0.019769
| 0.031301
| 0.041186
| 0.107496
| 0.060956
| 0.014827
| 0
| 0
| 0
| 0
| 0.006259
| 0.296993
| 4,091
| 140
| 76
| 29.221429
| 0.837969
| 0.199707
| 0
| 0.022222
| 0
| 0
| 0.02929
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.077778
| false
| 0
| 0.044444
| 0.011111
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc884ea5bc9215fa52b9d78882591e5166747f7f
| 2,927
|
py
|
Python
|
tests/python/unittest/test_lang_tag.py
|
ravikumarvc/incubator-tvm
|
9826947ffce0ed40e9d47a0db2abb033e394279e
|
[
"Apache-2.0"
] | 3
|
2021-02-23T22:06:01.000Z
|
2021-09-30T09:59:17.000Z
|
tests/python/unittest/test_lang_tag.py
|
ravikumarvc/incubator-tvm
|
9826947ffce0ed40e9d47a0db2abb033e394279e
|
[
"Apache-2.0"
] | 4
|
2021-03-30T11:59:59.000Z
|
2022-03-12T00:40:23.000Z
|
tests/python/unittest/test_lang_tag.py
|
ravikumarvc/incubator-tvm
|
9826947ffce0ed40e9d47a0db2abb033e394279e
|
[
"Apache-2.0"
] | 3
|
2021-07-20T07:40:15.000Z
|
2021-08-03T08:39:17.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import tvm
@tvm.tag_scope(tag="conv")
def compute_conv(data, weight):
N, IC, H, W = data.shape
OC, IC, KH, KW = weight.shape
OH = H - KH + 1
OW = W - KW + 1
ic = tvm.reduce_axis((0, IC), name='ic')
dh = tvm.reduce_axis((0, KH), name='dh')
dw = tvm.reduce_axis((0, KW), name='dw')
return tvm.compute((N, OC, OH, OW), lambda i, oc, h, w: \
tvm.sum(data[i, ic, h+dh, w+dw] * weight[oc, ic, dh, dw],
axis=[ic, dh, dw]))
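# Shape check (illustrative numbers): with data of shape (1, 16, 32, 32) and
# weight of shape (32, 16, 3, 3), the compute above produces an output of
# shape (1, 32, 30, 30), i.e. a "valid" convolution with stride 1 and no
# padding (OH = H - KH + 1, OW = W - KW + 1).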
def test_with():
n = tvm.size_var('n')
m = tvm.size_var('m')
l = tvm.size_var('l')
A = tvm.placeholder((n, l), name='A')
B = tvm.placeholder((m, l), name='B')
with tvm.tag_scope(tag="gemm"):
k = tvm.reduce_axis((0, l), name='k')
C = tvm.compute((n, m), lambda i, j: tvm.sum(A[i, k] * B[j, k], axis=k),
attrs={"hello" : 1, "arr": [10, 12]})
assert C.op.tag == 'gemm'
assert "hello" in C.op.attrs
assert "xx" not in C.op.attrs
assert C.op.attrs["hello"].value == 1
CC = tvm.load_json(tvm.save_json(C))
assert CC.op.attrs["hello"].value == 1
assert CC.op.attrs["arr"][0].value == 10
# str format happened to be json compatible
assert json.loads(str(CC.op.attrs))["arr"][1] == 12
def test_decorator():
n = tvm.size_var('n')
c = tvm.size_var('c')
h = tvm.size_var('h')
w = tvm.size_var('w')
kh = tvm.size_var('kh')
kw = tvm.size_var('kw')
A = tvm.placeholder((n, c, h, w), name='A')
B = tvm.placeholder((c, c, kh, kw), name='B')
C = compute_conv(A, B)
assert C.op.tag == 'conv'
assert len(C.op.attrs) == 0
def test_nested():
n = tvm.size_var('n')
c = tvm.size_var('c')
h = tvm.size_var('h')
w = tvm.size_var('w')
kh = tvm.size_var('kh')
kw = tvm.size_var('kw')
A = tvm.placeholder((n, c, h, w), name='A')
B = tvm.placeholder((c, c, kh, kw), name='B')
try:
with tvm.tag_scope(tag='conv'):
C = compute_conv(A, B)
assert False
except ValueError:
pass
if __name__ == "__main__":
test_with()
test_decorator()
test_nested()
| 31.138298
| 80
| 0.60164
| 487
| 2,927
| 3.531828
| 0.287474
| 0.061047
| 0.087209
| 0.032558
| 0.261628
| 0.169767
| 0.146512
| 0.146512
| 0.146512
| 0.146512
| 0
| 0.010743
| 0.236761
| 2,927
| 93
| 81
| 31.473118
| 0.759176
| 0.271268
| 0
| 0.301587
| 0
| 0
| 0.043026
| 0
| 0
| 0
| 0
| 0
| 0.15873
| 1
| 0.063492
| false
| 0.015873
| 0.031746
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc88c81b50b3710bc62276602ff44a775a8cb6eb
| 11,840
|
py
|
Python
|
house_code/tutorials_altered/3D_positioning_and_orientation.py
|
mukobi/Pozyx-Gabe
|
a8b444c2013b1df5043cd25106b72562409b5130
|
[
"MIT"
] | 1
|
2020-06-12T07:21:56.000Z
|
2020-06-12T07:21:56.000Z
|
house_code/tutorials_altered/3D_positioning_and_orientation.py
|
mukobi/Pozyx-Gabe
|
a8b444c2013b1df5043cd25106b72562409b5130
|
[
"MIT"
] | null | null | null |
house_code/tutorials_altered/3D_positioning_and_orientation.py
|
mukobi/Pozyx-Gabe
|
a8b444c2013b1df5043cd25106b72562409b5130
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
The pozyx ranging demo (c) Pozyx Labs
please check out https://www.pozyx.io/Documentation/Tutorials/getting_started/Python
This demo requires one (or two) pozyx shields. It demonstrates the 3D orientation and the functionality
to remotely read register data from a pozyx device. Connect one of the Pozyx devices with USB and run this script.
This demo reads the following sensor data:
- pressure
- acceleration
- magnetic field strength
- angular velocity
- the heading, roll and pitch
- the quaternion rotation describing the 3D orientation of the device. This can be used to transform from the body coordinate system to the world coordinate system.
- the linear acceleration (the acceleration excluding gravity)
- the gravitational vector
The data can be viewed in the Processing sketch orientation_3D.pde
"""
from time import time
from time import sleep
from pypozyx import *
from pypozyx.definitions.bitmasks import POZYX_INT_MASK_IMU
from pythonosc.osc_message_builder import OscMessageBuilder
from pythonosc.udp_client import SimpleUDPClient
from modules.user_input_config_functions import UserInputConfigFunctions as UserInput
from modules.file_writing import SensorAndPositionFileWriting as FileWriting
from modules.console_logging_functions import ConsoleLoggingFunctions as ConsoleLogging
import time as t
class Orientation3D(object):
"""Reads out all sensor data from either a local or remote Pozyx"""
def __init__(self, pozyx, osc_udp_client, anchors, algorithm=POZYX_POS_ALG_UWB_ONLY,
dimension=POZYX_3D, height=1000, remote_id=None):
self.pozyx = pozyx
self.osc_udp_client = osc_udp_client
self.anchors = anchors
self.algorithm = algorithm
self.dimension = dimension
self.height = height
self.remote_id = remote_id
def setup(self):
"""There is no specific setup functionality"""
self.current_time = time()
"""Sets up the Pozyx for positioning by calibrating its anchor list."""
print("------------POZYX POSITIONING V1.0 -------------")
print("NOTES: ")
print("- No parameters required.")
print()
print("- System will auto start configuration")
print()
print("- System will auto start positioning")
print("------------POZYX POSITIONING V1.0 --------------")
print()
print("START Ranging: ")
self.pozyx.clearDevices(self.remote_id)
self.setAnchorsManual()
self.printPublishConfigurationResult()
def loop(self):
"""Gets new IMU sensor data"""
# check sensor data status
sensor_data = SensorData()
calibration_status = SingleRegister()
if self.remote_id is not None or self.pozyx.checkForFlag(POZYX_INT_MASK_IMU, 0.01) == POZYX_SUCCESS:
status = self.pozyx.getAllSensorData(sensor_data, self.remote_id)
status &= self.pozyx.getCalibrationStatus(calibration_status, self.remote_id)
if status == POZYX_SUCCESS:
# check position status
position = Coordinates()
status = self.pozyx.doPositioning(
position, self.dimension, self.height, self.algorithm, remote_id=self.remote_id)
if status == POZYX_SUCCESS:
# self.print_publish_position(position)
self.publishSensorData(sensor_data, calibration_status)
return sensor_data, position
else:
pass
# self.print_publish_error_code("positioning")
return "Error, no data to print for this line"
def publishSensorData(self, sensor_data, calibration_status):
"""Makes the OSC sensor data package and publishes it"""
self.msg_builder = OscMessageBuilder("/sensordata")
self.msg_builder.add_arg(int(1000 * (time() - self.current_time)))
current_time = time()
self.addSensorData(sensor_data)
self.addCalibrationStatus(calibration_status)
self.osc_udp_client.send(self.msg_builder.build())
def addSensorData(self, sensor_data):
"""Adds the sensor data to the OSC message"""
self.msg_builder.add_arg(sensor_data.pressure)
self.addComponentsOSC(sensor_data.acceleration)
self.addComponentsOSC(sensor_data.magnetic)
self.addComponentsOSC(sensor_data.angular_vel)
self.addComponentsOSC(sensor_data.euler_angles)
self.addComponentsOSC(sensor_data.quaternion)
self.addComponentsOSC(sensor_data.linear_acceleration)
self.addComponentsOSC(sensor_data.gravity_vector)
def addComponentsOSC(self, component):
"""Adds a sensor data component to the OSC message"""
for data in component.data:
self.msg_builder.add_arg(float(data))
def addCalibrationStatus(self, calibration_status):
"""Adds the calibration status data to the OSC message"""
self.msg_builder.add_arg(calibration_status[0] & 0x03)
self.msg_builder.add_arg((calibration_status[0] & 0x0C) >> 2)
self.msg_builder.add_arg((calibration_status[0] & 0x30) >> 4)
self.msg_builder.add_arg((calibration_status[0] & 0xC0) >> 6)
def setAnchorsManual(self):
"""Adds the manually measured anchors to the Pozyx's device list one for one."""
status = self.pozyx.clearDevices(self.remote_id)
for anchor in self.anchors:
status &= self.pozyx.addDevice(anchor, self.remote_id)
        if len(self.anchors) > 4:
            status &= self.pozyx.setSelectionOfAnchors(POZYX_ANCHOR_SEL_AUTO, len(self.anchors))
return status
def printPublishConfigurationResult(self):
"""Prints and potentially publishes the anchor configuration result in a human-readable way."""
list_size = SingleRegister()
status = self.pozyx.getDeviceListSize(list_size, self.remote_id)
print("List size: {0}".format(list_size[0]))
if list_size[0] != len(self.anchors):
self.printPublishErrorCode("configuration")
return
device_list = DeviceList(list_size=list_size[0])
status = self.pozyx.getDeviceIds(device_list, self.remote_id)
print("Calibration result:")
print("Anchors found: {0}".format(list_size[0]))
print("Anchor IDs: ", device_list)
for i in range(list_size[0]):
anchor_coordinates = Coordinates()
status = self.pozyx.getDeviceCoordinates(
device_list[i], anchor_coordinates, self.remote_id)
print("ANCHOR,0x%0.4x, %s" % (device_list[i], str(anchor_coordinates)))
if self.osc_udp_client is not None:
self.osc_udp_client.send_message(
"/anchor", [device_list[i], int(anchor_coordinates.x), int(anchor_coordinates.y), int(anchor_coordinates.z)])
sleep(0.025)
def printPublishErrorCode(self, operation):
"""Prints the Pozyx's error and possibly sends it as a OSC packet"""
error_code = SingleRegister()
network_id = self.remote_id
if network_id is None:
self.pozyx.getErrorCode(error_code)
print("ERROR %s, local error code %s" % (operation, str(error_code)))
if self.osc_udp_client is not None:
self.osc_udp_client.send_message("/error", [operation, 0, error_code[0]])
return
status = self.pozyx.getErrorCode(error_code, self.remote_id)
if status == POZYX_SUCCESS:
print("ERROR %s on ID %s, error code %s" %
(operation, "0x%0.4x" % network_id, str(error_code)))
if self.osc_udp_client is not None:
self.osc_udp_client.send_message(
"/error", [operation, network_id, error_code[0]])
else:
self.pozyx.getErrorCode(error_code)
print("ERROR %s, couldn't retrieve remote error code, local error code %s" %
(operation, str(error_code)))
if self.osc_udp_client is not None:
self.osc_udp_client.send_message("/error", [operation, 0, -1])
# should only happen when not being able to communicate with a remote Pozyx.
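# Illustrative helper (not part of the original demo): splits a raw
# calibration-status byte into the four 2-bit fields published by
# addCalibrationStatus() above.  The field naming follows the usual BNO055
# layout and is an assumption here.
def _example_decode_calibration_status(raw_byte):
    return {
        "mag": raw_byte & 0x03,
        "accel": (raw_byte & 0x0C) >> 2,
        "gyro": (raw_byte & 0x30) >> 4,
        "sys": (raw_byte & 0xC0) >> 6,
    }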
if __name__ == '__main__':
# shortcut to not have to find out the port yourself
serial_port = get_serial_ports()[0].device
remote_id = 0x6110 # remote device network ID
remote = True # whether to use a remote device
# if not remote:
# remote_id = None
index = 0
previous_cycle_time = 0
current_cycle_time = 0
attributes_to_log = ["acceleration"]
to_use_file = False
filename = None
"""User input configuration section, comment out to use above settings"""
remote = UserInput.use_remote()
remote_id = UserInput.get_remote_id(remote)
to_use_file = UserInput.use_file()
filename = UserInput.get_filename(to_use_file)
attributes_to_log = UserInput.get_multiple_attributes_to_log()
use_processing = True
ip = "127.0.0.1"
network_port = 8888
anchors = [DeviceCoordinates(0x6863, 1, Coordinates(0, 0, 2000)),
DeviceCoordinates(0x615a, 1, Coordinates(0, 18288, 1000)),
DeviceCoordinates(0x607c, 1, Coordinates(18288, 0, 1000)),
DeviceCoordinates(0x6134, 1, Coordinates(18288, 18288, 2000))]
# algorithm = POZYX_POS_ALG_UWB_ONLY # positioning algorithm to use
algorithm = POZYX_POS_ALG_TRACKING # tracking positioning algorithm
dimension = POZYX_3D # positioning dimension
height = 1000 # height of device, required in 2.5D positioning
pozyx = PozyxSerial(serial_port)
osc_udp_client = SimpleUDPClient(ip, network_port)
o = Orientation3D(pozyx, osc_udp_client, anchors, algorithm, dimension, height, remote_id)
o.setup()
logfile = None
if to_use_file:
logfile = open(filename, 'a')
FileWriting.write_sensor_and_position_header_to_file(logfile)
start = ConsoleLogging.get_time()
try:
while True:
# updates elapsed time and time difference
elapsed = ConsoleLogging.get_elapsed_time(ConsoleLogging, start)
previous_cycle_time = current_cycle_time
current_cycle_time = elapsed
time_difference = current_cycle_time - previous_cycle_time
            # store the loop() result, which is either a tuple or an error message
loop_results = o.loop()
if type(loop_results) == tuple:
one_cycle_sensor_data, one_cycle_position = loop_results
formatted_data_dictionary = ConsoleLogging.format_sensor_data(
one_cycle_sensor_data, attributes_to_log)
if type(formatted_data_dictionary) == dict:
formatted_data_dictionary["Position"] = [
"x:", one_cycle_position.x, "y:", one_cycle_position.y, "z:", one_cycle_position.z]
ConsoleLogging.log_sensor_data_to_console(index, elapsed, formatted_data_dictionary)
if to_use_file:
FileWriting.write_sensor_and_position_data_to_file(
index, elapsed, time_difference,
logfile, one_cycle_sensor_data, one_cycle_position)
            # if loop() didn't return a tuple, it returned an error string
else:
error_string = loop_results
ConsoleLogging.print_data_error_message(index, elapsed, error_string)
index += 1 # increment data index
    # this allows Windows users to exit the while loop by pressing ctrl+c
except KeyboardInterrupt:
pass
if to_use_file:
logfile.close()
| 44.179104
| 164
| 0.661064
| 1,420
| 11,840
| 5.302817
| 0.233803
| 0.035857
| 0.022311
| 0.021248
| 0.193891
| 0.145153
| 0.106242
| 0.084462
| 0.056043
| 0.056043
| 0
| 0.016968
| 0.253378
| 11,840
| 267
| 165
| 44.344569
| 0.834842
| 0.182348
| 0
| 0.129032
| 0
| 0
| 0.05952
| 0
| 0
| 0
| 0.004863
| 0
| 0
| 1
| 0.053763
| false
| 0.010753
| 0.053763
| 0
| 0.139785
| 0.129032
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|