hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
c767a30a144f00ed374a25303b7bc5a67257ead0
205
py
Python
DeepBrainSeg/tumor/models/__init__.py
JasperHG90/DeepBrainSeg
92cf5f758f115e7ac51202966a1287fb58c09d78
[ "MIT" ]
130
2019-04-09T02:35:44.000Z
2022-02-26T15:53:19.000Z
DeepBrainSeg/tumor/models/__init__.py
koriavinash1/DeepMedX
02fcee6d7b21b16e7f1e28089f24be56ef6b9383
[ "MIT" ]
11
2019-09-18T03:55:29.000Z
2021-01-03T13:11:20.000Z
DeepBrainSeg/tumor/models/__init__.py
koriavinash1/DeepMedX
02fcee6d7b21b16e7f1e28089f24be56ef6b9383
[ "MIT" ]
38
2018-11-28T01:34:41.000Z
2022-01-17T03:53:47.000Z
from __future__ import absolute_import from __future__ import division from __future__ import print_function __license__ = 'MIT' __maintainer__ = ['Avinash Kori'] __email__ = ['koriavinash1@gmail.com']
20.5
38
0.804878
23
205
6.043478
0.695652
0.215827
0.345324
0
0
0
0
0
0
0
0
0.005556
0.121951
205
9
39
22.777778
0.766667
0
0
0
0
0
0.180488
0.107317
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0.166667
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
c786fa48ae248c13f410e430635f37e4a4f24376
262
py
Python
src/panel_components/html/widgets/html_widget.py
MarcSkovMadsen/panel-components
ca176f605006165889cbbd3c775d24cdcc4f964d
[ "MIT" ]
5
2020-09-30T03:10:33.000Z
2022-02-04T19:54:58.000Z
src/panel_components/html/widgets/html_widget.py
MarcSkovMadsen/panel-components
ca176f605006165889cbbd3c775d24cdcc4f964d
[ "MIT" ]
null
null
null
src/panel_components/html/widgets/html_widget.py
MarcSkovMadsen/panel-components
ca176f605006165889cbbd3c775d24cdcc4f964d
[ "MIT" ]
1
2021-11-08T19:00:45.000Z
2021-11-08T19:00:45.000Z
"""# HTML Widget Functionality Provides the HTMLWidget and HTMLWidgetGenerator """ from ..html_component import HTMLComponent class HTMLWidget(HTMLComponent): # pylint: disable=too-few-public-methods """Your HTML Widgets should inherits this"""
26.2
75
0.748092
28
262
6.964286
0.857143
0
0
0
0
0
0
0
0
0
0
0
0.160305
262
9
76
29.111111
0.886364
0.591603
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
c7909d919579d52dae1f4fe4462677d9cae0c51a
116
py
Python
chapter2/2.6/bit_operator_test.py
yifengyou/crazy-python
28099bd5011de6981a7c5412783952cc7601ae0c
[ "Unlicense" ]
null
null
null
chapter2/2.6/bit_operator_test.py
yifengyou/crazy-python
28099bd5011de6981a7c5412783952cc7601ae0c
[ "Unlicense" ]
null
null
null
chapter2/2.6/bit_operator_test.py
yifengyou/crazy-python
28099bd5011de6981a7c5412783952cc7601ae0c
[ "Unlicense" ]
null
null
null
# coding:utf-8 # File Name: bit_operator_test # Author : yifengyou # Date : 2021/07/18 print(5|9)
19.333333
34
0.594828
17
116
3.941176
1
0
0
0
0
0
0
0
0
0
0
0.13253
0.284483
116
6
35
19.333333
0.674699
0.827586
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
c7b54019a134bb6e8bc02f338ba886bd180dd55e
11,249
py
Python
src/python/magnum/test/test_primitives.py
hi-ogawa/magnum-bindings
5f324bdcde828d9ffc3bcd8e562480875586c54b
[ "MIT" ]
null
null
null
src/python/magnum/test/test_primitives.py
hi-ogawa/magnum-bindings
5f324bdcde828d9ffc3bcd8e562480875586c54b
[ "MIT" ]
null
null
null
src/python/magnum/test/test_primitives.py
hi-ogawa/magnum-bindings
5f324bdcde828d9ffc3bcd8e562480875586c54b
[ "MIT" ]
null
null
null
# # This file is part of Magnum. # # Copyright © 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019 # Vladimír Vondruš <mosra@centrum.cz> # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. 
# import unittest from magnum import * from magnum import primitives class Axis(unittest.TestCase): def test_2d(self): a = primitives.axis2d() self.assertEqual(a.primitive, MeshPrimitive.LINES) self.assertTrue(a.is_indexed()) self.assertTrue(a.has_colors()) def test_3d(self): a = primitives.axis3d() self.assertEqual(a.primitive, MeshPrimitive.LINES) self.assertTrue(a.is_indexed()) self.assertTrue(a.has_colors()) class Capsule(unittest.TestCase): def test_2d_wireframe(self): a = primitives.capsule2d_wireframe(3, 3, 2.0) self.assertEqual(a.primitive, MeshPrimitive.LINES) self.assertTrue(a.is_indexed()) def test_3d_solid(self): a = primitives.capsule3d_solid(3, 3, 10, 2.0, primitives.CapsuleTextureCoords.GENERATE) self.assertEqual(a.primitive, MeshPrimitive.TRIANGLES) self.assertTrue(a.is_indexed()) self.assertTrue(a.has_texture_coords2d()) b = primitives.capsule3d_solid(3, 3, 10, 2.0) self.assertEqual(b.primitive, MeshPrimitive.TRIANGLES) self.assertTrue(b.is_indexed()) self.assertFalse(b.has_texture_coords2d()) def test_3d_wireframe(self): a = primitives.capsule3d_wireframe(5, 3, 12, 0.3) self.assertEqual(a.primitive, MeshPrimitive.LINES) self.assertTrue(a.is_indexed()) class Circle(unittest.TestCase): def test_2d_solid(self): a = primitives.circle2d_solid(5, primitives.CircleTextureCoords.GENERATE) self.assertEqual(a.primitive, MeshPrimitive.TRIANGLE_FAN) self.assertFalse(a.is_indexed()) self.assertTrue(a.has_texture_coords2d()) b = primitives.circle2d_solid(5) self.assertEqual(b.primitive, MeshPrimitive.TRIANGLE_FAN) self.assertFalse(b.is_indexed()) self.assertFalse(b.has_texture_coords2d()) def test_2d_wireframe(self): a = primitives.circle2d_wireframe(5) self.assertEqual(a.primitive, MeshPrimitive.LINE_LOOP) self.assertFalse(a.is_indexed()) def test_3d_solid(self): a = primitives.circle3d_solid(5, primitives.CircleTextureCoords.GENERATE) self.assertEqual(a.primitive, MeshPrimitive.TRIANGLE_FAN) self.assertFalse(a.is_indexed()) 
self.assertTrue(a.has_texture_coords2d()) b = primitives.circle3d_solid(5) self.assertEqual(b.primitive, MeshPrimitive.TRIANGLE_FAN) self.assertFalse(b.is_indexed()) self.assertFalse(b.has_texture_coords2d()) def test_3d_wireframe(self): a = primitives.circle3d_wireframe(5) self.assertEqual(a.primitive, MeshPrimitive.LINE_LOOP) self.assertFalse(a.is_indexed()) class Cone(unittest.TestCase): def test_solid(self): a = primitives.cone_solid(5, 7, 7.1, primitives.ConeFlags.GENERATE_TEXTURE_COORDS|primitives.ConeFlags.CAP_END) self.assertEqual(a.primitive, MeshPrimitive.TRIANGLES) self.assertTrue(a.is_indexed()) self.assertTrue(a.has_texture_coords2d()) b = primitives.cone_solid(5, 7, 7.1) self.assertEqual(b.primitive, MeshPrimitive.TRIANGLES) self.assertTrue(b.is_indexed()) self.assertFalse(b.has_texture_coords2d()) def test_wireframe(self): a = primitives.cone_wireframe(16, 7.1) self.assertEqual(a.primitive, MeshPrimitive.LINES) self.assertTrue(a.is_indexed()) class Crosshair(unittest.TestCase): def test_2d(self): a = primitives.crosshair2d() self.assertEqual(a.primitive, MeshPrimitive.LINES) self.assertFalse(a.is_indexed()) def test_3d(self): a = primitives.crosshair3d() self.assertEqual(a.primitive, MeshPrimitive.LINES) self.assertFalse(a.is_indexed()) class Cube(unittest.TestCase): def test_solid(self): a = primitives.cube_solid() self.assertEqual(a.primitive, MeshPrimitive.TRIANGLES) self.assertTrue(a.is_indexed()) def test_solid_strip(self): a = primitives.cube_solid_strip() self.assertEqual(a.primitive, MeshPrimitive.TRIANGLE_STRIP) self.assertFalse(a.is_indexed()) def test_wireframe(self): a = primitives.cube_wireframe() self.assertEqual(a.primitive, MeshPrimitive.LINES) self.assertTrue(a.is_indexed()) class Cylinder(unittest.TestCase): def test_solid(self): a = primitives.cylinder_solid(7, 12, 0.2, primitives.CylinderFlags.GENERATE_TEXTURE_COORDS|primitives.CylinderFlags.CAP_ENDS) self.assertEqual(a.primitive, MeshPrimitive.TRIANGLES) 
self.assertTrue(a.is_indexed()) self.assertTrue(a.has_texture_coords2d()) b = primitives.cylinder_solid(7, 12, 0.2) self.assertEqual(b.primitive, MeshPrimitive.TRIANGLES) self.assertTrue(b.is_indexed()) self.assertFalse(b.has_texture_coords2d()) def test_wireframe(self): a = primitives.cylinder_wireframe(8, 16, 1.1) self.assertEqual(a.primitive, MeshPrimitive.LINES) self.assertTrue(a.is_indexed()) class Gradient(unittest.TestCase): def test_gradient2d(self): a = primitives.gradient2d((3.1, 2.0), Color3(), (0.2, 1.1), Color4()) self.assertEqual(a.primitive, MeshPrimitive.TRIANGLE_STRIP) self.assertFalse(a.is_indexed()) self.assertTrue(a.has_colors()) def test_gradient2d_horizontal(self): a = primitives.gradient2d_horizontal(Color4(), Color3()) self.assertEqual(a.primitive, MeshPrimitive.TRIANGLE_STRIP) self.assertFalse(a.is_indexed()) self.assertTrue(a.has_colors()) def test_gradient2d_vertical(self): a = primitives.gradient2d_vertical(Color4(), Color3()) self.assertEqual(a.primitive, MeshPrimitive.TRIANGLE_STRIP) self.assertFalse(a.is_indexed()) self.assertTrue(a.has_colors()) def test_gradient3d(self): a = primitives.gradient3d((3.1, 2.0, 0.1), Color3(), (0.2, 1.1, 1.2), Color4()) self.assertEqual(a.primitive, MeshPrimitive.TRIANGLE_STRIP) self.assertFalse(a.is_indexed()) self.assertTrue(a.has_colors()) def test_gradient3d_horizontal(self): a = primitives.gradient3d_horizontal(Color4(), Color3()) self.assertEqual(a.primitive, MeshPrimitive.TRIANGLE_STRIP) self.assertFalse(a.is_indexed()) self.assertTrue(a.has_colors()) def test_gradient3d_vertical(self): a = primitives.gradient3d_vertical(Color4(), Color3()) self.assertEqual(a.primitive, MeshPrimitive.TRIANGLE_STRIP) self.assertFalse(a.is_indexed()) self.assertTrue(a.has_colors()) class Grid(unittest.TestCase): def test_solid(self): a = primitives.grid3d_solid((4, 5)) self.assertEqual(a.primitive, MeshPrimitive.TRIANGLES) self.assertTrue(a.is_indexed()) def test_wireframe(self): a = primitives.grid3d_wireframe((2, 
7)) self.assertEqual(a.primitive, MeshPrimitive.LINES) self.assertTrue(a.is_indexed()) class Icosphere(unittest.TestCase): def test(self): a = primitives.icosphere_solid(2) self.assertEqual(a.primitive, MeshPrimitive.TRIANGLES) self.assertTrue(a.is_indexed()) class Line(unittest.TestCase): def test_2d(self): a = primitives.line2d((1.0, 2.0), (7.0, 3.2)) self.assertEqual(a.primitive, MeshPrimitive.LINES) self.assertFalse(a.is_indexed()) def test_2d_identity(self): a = primitives.line2d() self.assertEqual(a.primitive, MeshPrimitive.LINES) self.assertFalse(a.is_indexed()) def test_3d(self): a = primitives.line3d((1.0, 2.0, 1.1), (7.0, 3.2, 1.1)) self.assertEqual(a.primitive, MeshPrimitive.LINES) self.assertFalse(a.is_indexed()) def test_3d_identity(self): a = primitives.line3d() self.assertEqual(a.primitive, MeshPrimitive.LINES) self.assertFalse(a.is_indexed()) class Plane(unittest.TestCase): def test_solid(self): a = primitives.plane_solid(primitives.PlaneTextureCoords.GENERATE) self.assertEqual(a.primitive, MeshPrimitive.TRIANGLE_STRIP) self.assertFalse(a.is_indexed()) self.assertTrue(a.has_texture_coords2d()) b = primitives.plane_solid() self.assertEqual(b.primitive, MeshPrimitive.TRIANGLE_STRIP) self.assertFalse(b.is_indexed()) self.assertFalse(b.has_texture_coords2d()) def test_wireframe(self): a = primitives.plane_wireframe() self.assertEqual(a.primitive, MeshPrimitive.LINE_LOOP) self.assertFalse(a.is_indexed()) class Square(unittest.TestCase): def test_solid(self): a = primitives.square_solid(primitives.SquareTextureCoords.GENERATE) self.assertEqual(a.primitive, MeshPrimitive.TRIANGLE_STRIP) self.assertFalse(a.is_indexed()) self.assertTrue(a.has_texture_coords2d()) b = primitives.square_solid() self.assertEqual(b.primitive, MeshPrimitive.TRIANGLE_STRIP) self.assertFalse(b.is_indexed()) self.assertFalse(b.has_texture_coords2d()) def test_wireframe(self): a = primitives.square_wireframe() self.assertEqual(a.primitive, MeshPrimitive.LINE_LOOP) 
self.assertFalse(a.is_indexed()) class UVSphere(unittest.TestCase): def test_solid(self): a = primitives.uv_sphere_solid(3, 7, primitives.UVSphereTextureCoords.GENERATE) self.assertEqual(a.primitive, MeshPrimitive.TRIANGLES) self.assertTrue(a.is_indexed()) self.assertTrue(a.has_texture_coords2d()) b = primitives.uv_sphere_solid(3, 7) self.assertEqual(b.primitive, MeshPrimitive.TRIANGLES) self.assertTrue(b.is_indexed()) self.assertFalse(b.has_texture_coords2d()) def test_wireframe(self): a = primitives.uv_sphere_wireframe(6, 8) self.assertEqual(a.primitive, MeshPrimitive.LINES) self.assertTrue(a.is_indexed())
39.470175
133
0.694195
1,383
11,249
5.503254
0.142444
0.088687
0.072921
0.121535
0.738668
0.725266
0.716857
0.700959
0.640389
0.631454
0
0.024377
0.194062
11,249
284
134
39.609155
0.815023
0.106676
0
0.657277
0
0
0
0
0
0
0
0
0.535211
1
0.173709
false
0
0.014085
0
0.253521
0
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
5
c7e66c47301e10837b405b0fcb38d1cf7669716a
44
py
Python
static/downloads/WunderCan Installer.app/Contents/Resources/WunderCan.app/Contents/Resources/canvasToWunderDaemon/venv27/lib/python2.7/site-packages/mechanize/_version.py
colflah/wunderCan
af4e3211d8d17c2dd786836ac6b634037b86a8fd
[ "CC-BY-3.0" ]
null
null
null
static/downloads/WunderCan Installer.app/Contents/Resources/WunderCan.app/Contents/Resources/canvasToWunderDaemon/venv27/lib/python2.7/site-packages/mechanize/_version.py
colflah/wunderCan
af4e3211d8d17c2dd786836ac6b634037b86a8fd
[ "CC-BY-3.0" ]
null
null
null
static/downloads/WunderCan Installer.app/Contents/Resources/WunderCan.app/Contents/Resources/canvasToWunderDaemon/venv27/lib/python2.7/site-packages/mechanize/_version.py
colflah/wunderCan
af4e3211d8d17c2dd786836ac6b634037b86a8fd
[ "CC-BY-3.0" ]
null
null
null
"0.3.3" __version__ = (0, 3, 3, None, None)
14.666667
35
0.568182
9
44
2.333333
0.444444
0.190476
0.285714
0
0
0
0
0
0
0
0
0.166667
0.181818
44
2
36
22
0.416667
0.113636
0
0
0
0
0.113636
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
c7ec84f893de35cc98066a893bba638791662093
342
py
Python
laa_court_data_api_app/models/ping.py
ministryofjustice/laa-court-data-api
2e79faac7469f0b31ecca0539906d281db08f86c
[ "MIT" ]
1
2022-01-27T14:28:40.000Z
2022-01-27T14:28:40.000Z
laa_court_data_api_app/models/ping.py
ministryofjustice/laa-court-data-api
2e79faac7469f0b31ecca0539906d281db08f86c
[ "MIT" ]
16
2022-01-28T11:01:27.000Z
2022-03-30T14:01:11.000Z
laa_court_data_api_app/models/ping.py
ministryofjustice/laa-court-data-api
2e79faac7469f0b31ecca0539906d281db08f86c
[ "MIT" ]
null
null
null
from typing import Optional from pydantic import BaseModel, Field class Ping(BaseModel): app_branch: Optional[str] = Field(None, example='test_branch') build_date: Optional[str] = Field(None, example='02022022') build_tag: Optional[str] = Field(None, example='test') commit_id: Optional[str] = Field(None, example='123456')
34.2
66
0.72807
45
342
5.422222
0.488889
0.180328
0.262295
0.327869
0.47541
0.254098
0
0
0
0
0
0.047782
0.143275
342
9
67
38
0.784983
0
0
0
0
0
0.084795
0
0
0
0
0
0
1
0
true
0
0.285714
0
1
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
1
0
0
5
4027cc09474dbbcb9b5be9471e8f876319d47b20
149
py
Python
code/Dist.py
asdqwe12011/Medical-Datasets-Classification
1f50326e0cc0c08e7e94a0dede771ebb58f6998f
[ "MIT" ]
null
null
null
code/Dist.py
asdqwe12011/Medical-Datasets-Classification
1f50326e0cc0c08e7e94a0dede771ebb58f6998f
[ "MIT" ]
null
null
null
code/Dist.py
asdqwe12011/Medical-Datasets-Classification
1f50326e0cc0c08e7e94a0dede771ebb58f6998f
[ "MIT" ]
null
null
null
import numpy as np euclidean = lambda x1, x2: np.sqrt(np.sum((x1 - x2) ** 2, axis=-1)) manhattan = lambda x1, x2: np.sum(np.abs(x1 - x2), axis=-1)
24.833333
67
0.624161
29
149
3.206897
0.517241
0.172043
0.215054
0.258065
0
0
0
0
0
0
0
0.089431
0.174497
149
5
68
29.8
0.666667
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
402fa81dd2cde4b44c27243ea592f6ebf9b8ead5
5,876
py
Python
python/test.py
dailymotion/chash
6083ba13b98f2e35cf26ac2ca229cb679b55abde
[ "MIT" ]
9
2015-02-19T16:26:48.000Z
2018-12-27T07:54:46.000Z
python/test.py
dailymotion/chash
6083ba13b98f2e35cf26ac2ca229cb679b55abde
[ "MIT" ]
3
2016-07-11T21:21:49.000Z
2017-03-04T12:44:57.000Z
python/test.py
dailymotion/chash
6083ba13b98f2e35cf26ac2ca229cb679b55abde
[ "MIT" ]
4
2015-06-10T09:25:43.000Z
2019-08-22T11:05:03.000Z
#!/usr/bin/python import unittest import chash import os from hashlib import md5 class TestCHash(unittest.TestCase): def test_add_target(self): c = chash.CHash() self.failUnlessEqual(c.add_target("192.168.0.1"), None) self.failUnlessEqual(c.count_targets(), 1) def test_set_targets(self): c = chash.CHash() self.failUnlessEqual(c.set_targets({"192.168.0.1" : 2, "192.168.0.2" : 2, "192.168.0.3" : 2,} ), 3) self.failUnlessEqual(c.count_targets(), 3) self.failUnlessRaises(TypeError, c.set_targets, "9") self.failUnlessRaises(TypeError, c.set_targets, {3 : 2, "192.168.0.2" : 2, "192.168.0.3" : 2,}) def test_clear_targets(self): c = chash.CHash() c.add_target("192.168.0.1") c.add_target("192.168.0.2") c.add_target("192.168.0.3") self.failUnlessEqual(c.count_targets(), 3) self.failUnlessEqual(c.clear_targets(), None) self.failUnlessEqual(c.count_targets(), 0) def test_remove_target(self): c = chash.CHash() c.add_target("192.168.0.1") c.add_target("192.168.0.2") c.add_target("192.168.0.3") self.failUnlessEqual(c.count_targets(), 3) self.failUnlessEqual(c.remove_target("192.168.0.1"), None) self.failUnlessEqual(c.count_targets(), 2) self.failUnlessRaises(chash.CHashError, c.remove_target, "192.168.0.1") self.failUnlessEqual(c.count_targets(), 2) self.failUnlessEqual(c.remove_target("192.168.0.2"), None) self.failUnlessEqual(c.count_targets(), 1) self.failUnlessRaises(chash.CHashError, c.remove_target, "192.168.0.2") self.failUnlessEqual(c.count_targets(), 1) self.failUnlessEqual(c.remove_target("192.168.0.3"), None) self.failUnlessEqual(c.count_targets(), 0) def test_count_targets(self): c = chash.CHash() self.failUnlessEqual(c.count_targets(), 0) self.failUnlessEqual(c.add_target("192.168.0.1"), None) self.failUnlessEqual(c.count_targets(), 1) self.failUnlessEqual(c.add_target("192.168.0.2"), None) self.failUnlessEqual(c.count_targets(), 2) def test_lookup_list(self): c = chash.CHash() c.add_target("192.168.0.1") c.add_target("192.168.0.2") c.add_target("192.168.0.3") 
c.add_target("192.168.0.4") self.failUnlessEqual(c.lookup_list("1"), ["192.168.0.1"]) self.failUnlessEqual(c.lookup_list("1", 1), ["192.168.0.1"]) self.failUnlessEqual(c.lookup_list("1", 2), ["192.168.0.1", "192.168.0.3"]) self.failUnlessEqual(c.lookup_list("1", 3), ["192.168.0.1", "192.168.0.3", "192.168.0.2"]) self.failUnlessEqual(c.lookup_list("2"), ["192.168.0.1"]) self.failUnlessEqual(c.lookup_list("3"), ["192.168.0.4"]) self.failUnlessEqual(c.lookup_list("4"), ["192.168.0.4"]) def test_lookup_balance(self): c = chash.CHash() c.add_target("192.168.0.1") c.add_target("192.168.0.2") c.add_target("192.168.0.3") c.add_target("192.168.0.4") self.failUnlessEqual(c.lookup_balance("1"), "192.168.0.1") # FIXME # self.failUnlessEqual(c.lookup_balance("1", 1), "192.168.0.1") # self.failUnlessEqual(c.lookup_balance("1", 2), "192.168.0.3") # self.failUnlessEqual(c.lookup_balance("1", 3), "192.168.0.2") self.failUnlessEqual(c.lookup_balance("2"), "192.168.0.1") self.failUnlessEqual(c.lookup_balance("3"), "192.168.0.4") self.failUnlessEqual(c.lookup_balance("4"), "192.168.0.4") def test_serialize(self): c = chash.CHash() c.add_target("192.168.0.1") c.add_target("192.168.0.2") c.add_target("192.168.0.3") c.add_target("192.168.0.4") cs = c.serialize() self.assertEqual(len(cs), 3138) self.assertEqual(md5(cs).hexdigest(), '975639a999ade73bd4fc64f3486ea093') c2 = chash.CHash() c2.unserialize(cs) self.failUnlessEqual(c2.count_targets(), 4) self.failUnlessEqual(c2.lookup_balance("1"), "192.168.0.1") self.failUnlessEqual(c2.lookup_balance("2"), "192.168.0.1") self.failUnlessEqual(c2.lookup_balance("3"), "192.168.0.4") self.failUnlessEqual(c2.lookup_balance("4"), "192.168.0.4") def test_serialize_file(self): csf = "test.cs" c = chash.CHash() c.add_target("192.168.0.1") c.add_target("192.168.0.2") c.add_target("192.168.0.3") c.add_target("192.168.0.4") cs = c.serialize_to_file(csf) self.failUnlessEqual(os.path.exists(csf), True) c2 = chash.CHash() c2.unserialize_from_file(csf) 
os.remove(csf) self.failUnlessEqual(c2.count_targets(), 4) self.failUnlessEqual(c2.lookup_balance("1"), "192.168.0.1") self.failUnlessEqual(c2.lookup_balance("2"), "192.168.0.1") self.failUnlessEqual(c2.lookup_balance("3"), "192.168.0.4") self.failUnlessEqual(c2.lookup_balance("4"), "192.168.0.4") def test_usage(self): c = chash.CHash() c.add_target("192.168.0.1") c.add_target("192.168.0.2") self.failUnlessEqual(c.lookup_balance("1"), "192.168.0.1") c.add_target("192.168.0.3") self.failUnlessEqual(c.lookup_balance("9"), "192.168.0.3") c.remove_target("192.168.0.3") self.failUnlessEqual(c.lookup_balance("9"), "192.168.0.1") c.remove_target("192.168.0.1") self.failUnlessEqual(c.lookup_balance("9"), "192.168.0.2") c.remove_target("192.168.0.2") self.failUnlessRaises(chash.CHashError, c.lookup_balance, "9") c.add_target("192.168.0.2") c.add_target("192.168.0.1") self.failUnlessEqual(c.lookup_balance("9"), "192.168.0.1") if __name__ == '__main__': unittest.main()
37.909677
107
0.614364
878
5,876
3.980638
0.068337
0.125322
0.146209
0.141345
0.880401
0.857797
0.811731
0.79628
0.721888
0.59628
0
0.145629
0.201838
5,876
154
108
38.155844
0.599574
0.038972
0
0.566667
0
0
0.149415
0.005672
0
0
0
0.006494
0.016667
1
0.083333
false
0
0.033333
0
0.125
0
0
0
0
null
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
407165da7ea10b881b66e58e7ae0ee18da673707
130
py
Python
training/admin.py
Atwinenickson/lendsuphumanresourcemanagement
b46df164d59a4e94300376d679e07bd9a60d6343
[ "MIT", "Unlicense" ]
36
2019-11-26T11:46:32.000Z
2022-02-17T13:18:18.000Z
training/admin.py
Atwinenickson/lendsuphumanresourcemanagement
b46df164d59a4e94300376d679e07bd9a60d6343
[ "MIT", "Unlicense" ]
13
2020-02-14T09:30:16.000Z
2022-03-12T00:58:09.000Z
training/admin.py
Atwinenickson/lendsuphumanresourcemanagement
b46df164d59a4e94300376d679e07bd9a60d6343
[ "MIT", "Unlicense" ]
16
2019-06-14T12:11:29.000Z
2022-02-14T15:16:07.000Z
from django.contrib import admin # Register your models here. from training.models import Training admin.site.register(Training)
21.666667
36
0.823077
18
130
5.944444
0.611111
0
0
0
0
0
0
0
0
0
0
0
0.115385
130
6
37
21.666667
0.930435
0.2
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
40724eacd6e5a4fbe49ee8d5d63a0943967aea58
508
py
Python
spec/construct/test_repeat_n_struct.py
DarkShadow44/kaitai_struct_tests
4bb13cef82965cca66dda2eb2b77cd64e9f70a12
[ "MIT" ]
11
2018-04-01T03:58:15.000Z
2021-08-14T09:04:55.000Z
spec/construct/test_repeat_n_struct.py
DarkShadow44/kaitai_struct_tests
4bb13cef82965cca66dda2eb2b77cd64e9f70a12
[ "MIT" ]
73
2016-07-20T10:27:15.000Z
2020-12-17T18:56:46.000Z
spec/construct/test_repeat_n_struct.py
DarkShadow44/kaitai_struct_tests
4bb13cef82965cca66dda2eb2b77cd64e9f70a12
[ "MIT" ]
37
2016-08-15T08:25:56.000Z
2021-08-28T14:48:46.000Z
# Autogenerated from KST: please remove this line if doing any edits by hand! import unittest from repeat_n_struct import _schema class TestRepeatNStruct(unittest.TestCase): def test_repeat_n_struct(self): r = _schema.parse_file('src/repeat_n_struct.bin') self.assertEqual(len(r.chunks), 2) self.assertEqual(r.chunks[0].offset, 16) self.assertEqual(r.chunks[0].len, 8312) self.assertEqual(r.chunks[1].offset, 8328) self.assertEqual(r.chunks[1].len, 15)
33.866667
77
0.706693
74
508
4.716216
0.554054
0.2149
0.183381
0.252149
0.26361
0
0
0
0
0
0
0.040865
0.181102
508
14
78
36.285714
0.798077
0.147638
0
0
1
0
0.053364
0.053364
0
0
0
0
0.5
1
0.1
false
0
0.2
0
0.4
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
5
40941aa3a60935471182d7834353ccf52abbd7b9
31
py
Python
tests/net.py
math2001/nine43
7749dc63b9717a6ee4ddc1723d6c59e16046fc01
[ "MIT" ]
null
null
null
tests/net.py
math2001/nine43
7749dc63b9717a6ee4ddc1723d6c59e16046fc01
[ "MIT" ]
3
2019-04-27T06:34:34.000Z
2019-04-27T21:29:31.000Z
tests/net.py
math2001/nine43
7749dc63b9717a6ee4ddc1723d6c59e16046fc01
[ "MIT" ]
null
null
null
import trio.testing import net
10.333333
19
0.83871
5
31
5.2
0.8
0
0
0
0
0
0
0
0
0
0
0
0.129032
31
2
20
15.5
0.962963
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
40ec28f38697d41a33df2dbaaaba85d981d3d1dc
47
py
Python
lambda filter.py
Srinivassan-Ramamurthy/python_programs
53b390669c7e88532c67d80b758a9199d6fde8cf
[ "bzip2-1.0.6" ]
null
null
null
lambda filter.py
Srinivassan-Ramamurthy/python_programs
53b390669c7e88532c67d80b758a9199d6fde8cf
[ "bzip2-1.0.6" ]
null
null
null
lambda filter.py
Srinivassan-Ramamurthy/python_programs
53b390669c7e88532c67d80b758a9199d6fde8cf
[ "bzip2-1.0.6" ]
null
null
null
l=[1,2,3,4,5] l=(map(lambda x:x*2,l)) print(l)
11.75
23
0.553191
15
47
1.733333
0.666667
0
0
0
0
0
0
0
0
0
0
0.139535
0.085106
47
3
24
15.666667
0.465116
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0.333333
1
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
dc18c8c34b192a88d9b5f04f56217c7362d5fd46
219
py
Python
server/src/voodoo/gen/caller_checker.py
zstars/weblabdeusto
09bd9d93d483671bca67ee5c70a9c412eb5d352f
[ "BSD-2-Clause" ]
null
null
null
server/src/voodoo/gen/caller_checker.py
zstars/weblabdeusto
09bd9d93d483671bca67ee5c70a9c412eb5d352f
[ "BSD-2-Clause" ]
null
null
null
server/src/voodoo/gen/caller_checker.py
zstars/weblabdeusto
09bd9d93d483671bca67ee5c70a9c412eb5d352f
[ "BSD-2-Clause" ]
null
null
null
ALL = 'All servers' def caller_check(servers = ALL): def func_wrapper(func): # TODO: To be implemented. Could get current_app and check it. Useful for anything? return func return func_wrapper
24.333333
91
0.684932
31
219
4.709677
0.677419
0.150685
0
0
0
0
0
0
0
0
0
0
0.246575
219
8
92
27.375
0.884848
0.369863
0
0
0
0
0.081481
0
0
0
0
0.125
0
1
0.4
false
0
0
0.2
0.8
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
1
0
0
0
1
1
0
0
5
90f34dfe2d1a8f7109a4920e53a4f67ebb179193
752
py
Python
test/test_level.py
Rui-Tang/fluid-client-python
cfe6e981ad5092c3ba9ba15760858572e914f6d6
[ "Apache-2.0" ]
1
2021-11-18T11:10:48.000Z
2021-11-18T11:10:48.000Z
test/test_level.py
Rui-Tang/fluid-client-python
cfe6e981ad5092c3ba9ba15760858572e914f6d6
[ "Apache-2.0" ]
null
null
null
test/test_level.py
Rui-Tang/fluid-client-python
cfe6e981ad5092c3ba9ba15760858572e914f6d6
[ "Apache-2.0" ]
2
2021-05-19T06:35:55.000Z
2022-03-16T09:48:59.000Z
# coding: utf-8 """ fluid client for fluid # noqa: E501 OpenAPI spec version: v0.1 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import unittest import Fluid from Fluid.io.fluid-cloudnative.module.level import Level # noqa: E501 from Fluid.rest import ApiException class TestLevel(unittest.TestCase): """Level unit test stubs""" def setUp(self): pass def tearDown(self): pass def testLevel(self): """Test Level""" # FIXME: construct object with mandatory attributes with example values # model = Fluid.models.level.Level() # noqa: E501 pass if __name__ == '__main__': unittest.main()
18.341463
79
0.656915
92
752
5.228261
0.619565
0.049896
0.054054
0
0
0
0
0
0
0
0
0.02109
0.243351
752
40
80
18.8
0.824253
0.18883
0
0.214286
1
0
0.019901
0
0
0
0
0.025
0
0
null
null
0.214286
0.357143
null
null
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
1
0
1
0
0
1
1
0
0
0
0
5
29115f5ac31eda7fea8e9f0149c9565ef45b41b9
22
py
Python
onemoretest.py
jrgreenberg/jrgreenberg_PyNet
7b72a5bff7bbdd3418b0f2a37498c9da55b2e5a1
[ "MIT" ]
null
null
null
onemoretest.py
jrgreenberg/jrgreenberg_PyNet
7b72a5bff7bbdd3418b0f2a37498c9da55b2e5a1
[ "MIT" ]
null
null
null
onemoretest.py
jrgreenberg/jrgreenberg_PyNet
7b72a5bff7bbdd3418b0f2a37498c9da55b2e5a1
[ "MIT" ]
null
null
null
print "one more test"
11
21
0.727273
4
22
4
1
0
0
0
0
0
0
0
0
0
0
0
0.181818
22
1
22
22
0.888889
0
0
0
0
0
0.590909
0
0
0
0
0
0
0
null
null
0
0
null
null
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
1
0
5
292352880ef602961c6a75b43ef8a77e661325c5
38
py
Python
nnio/zoo/__init__.py
hocop/nnio
1354396f4043f1e67b642d8d670f7ad8e642f39e
[ "MIT" ]
6
2021-12-14T13:48:46.000Z
2021-12-17T15:49:22.000Z
nnio/zoo/__init__.py
hocop/nnio
1354396f4043f1e67b642d8d670f7ad8e642f39e
[ "MIT" ]
null
null
null
nnio/zoo/__init__.py
hocop/nnio
1354396f4043f1e67b642d8d670f7ad8e642f39e
[ "MIT" ]
2
2021-02-25T20:55:13.000Z
2021-04-27T06:31:45.000Z
from . import edgetpu, openvino, onnx
19
37
0.763158
5
38
5.8
1
0
0
0
0
0
0
0
0
0
0
0
0.157895
38
1
38
38
0.90625
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
29349811084bf745ea4606cf2f3b45f66b9c7afa
134
py
Python
parser/parser.py
polytechnique-ease/msr4ml
bf4122b8c7cfc4aa7e8e1f24d11dd30b0def2954
[ "MIT" ]
null
null
null
parser/parser.py
polytechnique-ease/msr4ml
bf4122b8c7cfc4aa7e8e1f24d11dd30b0def2954
[ "MIT" ]
null
null
null
parser/parser.py
polytechnique-ease/msr4ml
bf4122b8c7cfc4aa7e8e1f24d11dd30b0def2954
[ "MIT" ]
null
null
null
from astroid import parse def to_ast(py_file): return parse(py_file) def main(): pass if __name__ == "__main__": main()
13.4
26
0.664179
20
134
3.9
0.7
0.153846
0
0
0
0
0
0
0
0
0
0
0.223881
134
10
27
13.4
0.75
0
0
0
0
0
0.059259
0
0
0
0
0
0
1
0.285714
false
0.142857
0.142857
0.142857
0.571429
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
1
1
0
0
5
2945f0f41ee09684721770d6457eec4d7c7e6e92
100
py
Python
enthought/block_canvas/canvas/block_canvas.py
enthought/etsproxy
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
[ "BSD-3-Clause" ]
3
2016-12-09T06:05:18.000Z
2018-03-01T13:00:29.000Z
enthought/block_canvas/canvas/block_canvas.py
enthought/etsproxy
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
[ "BSD-3-Clause" ]
1
2020-12-02T00:51:32.000Z
2020-12-02T08:48:55.000Z
enthought/block_canvas/canvas/block_canvas.py
enthought/etsproxy
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
[ "BSD-3-Clause" ]
null
null
null
# proxy module from __future__ import absolute_import from blockcanvas.canvas.block_canvas import *
25
45
0.85
13
100
6.076923
0.692308
0
0
0
0
0
0
0
0
0
0
0
0.11
100
3
46
33.333333
0.88764
0.12
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
2951e52cf3bb6c5f058077451ab86e379a6692d8
2,217
py
Python
tests/test_nn/test_activation.py
shizuku/cranet
4c86ad16029ed76a74e22b5e5e4c21267d6b9996
[ "MIT" ]
4
2021-10-31T13:31:13.000Z
2021-12-11T08:45:36.000Z
tests/test_nn/test_activation.py
Azathoth1729/cranet
4c86ad16029ed76a74e22b5e5e4c21267d6b9996
[ "MIT" ]
null
null
null
tests/test_nn/test_activation.py
Azathoth1729/cranet
4c86ad16029ed76a74e22b5e5e4c21267d6b9996
[ "MIT" ]
2
2021-10-31T13:34:28.000Z
2021-11-21T09:11:46.000Z
import os import sys import unittest import numpy as np import torch from torch.nn import functional as torch_F from src.cranet.nn import functional as cranet_F from src import cranet from ..utils import teq class TestRelu(unittest.TestCase): def test_relu_0(self): for _ in range(100): shape = [4] a = np.random.uniform(-1, 1, shape) a0 = cranet.Tensor(a, requires_grad=True) a1 = torch.tensor(a, requires_grad=True) c0 = cranet_F.relu(a0) c1 = torch_F.relu(a1) delta = np.random.uniform(-1, 1, shape) delta0 = cranet.Tensor(delta) delta1 = torch.tensor(delta) c0.zero_grad() c0.backward(delta0) c1.backward(delta1) self.assertTrue(teq(c0, c1)) self.assertTrue(teq(a0.grad, a1.grad)) def test_relu_1(self): for _ in range(100): shape = (2, 2) a = np.random.uniform(-1, 1, shape) a0 = cranet.Tensor(a, requires_grad=True) a1 = torch.tensor(a, requires_grad=True) c0 = cranet_F.relu(a0) c1 = torch_F.relu(a1) delta = np.random.uniform(-1, 1, shape) delta0 = cranet.Tensor(delta) delta1 = torch.tensor(delta) c0.zero_grad() c0.backward(delta0) c1.backward(delta1) self.assertTrue(teq(c0, c1)) self.assertTrue(teq(a0.grad, a1.grad)) def test_relu_2(self): for _ in range(100): shape = (2, 3, 4, 5, 7, 9) a = np.random.uniform(-1, 1, shape) a0 = cranet.Tensor(a, requires_grad=True) a1 = torch.tensor(a, requires_grad=True) c0 = cranet_F.relu(a0) c1 = torch_F.relu(a1) delta = np.random.uniform(-1, 1, shape) delta0 = cranet.Tensor(delta) delta1 = torch.tensor(delta) c0.zero_grad() c0.backward(delta0) c1.backward(delta1) self.assertTrue(teq(c0, c1)) self.assertTrue(teq(a0.grad, a1.grad)) if __name__ == '__main__': sys.path.append(os.getcwd()) unittest.main()
30.791667
53
0.54894
291
2,217
4.065292
0.202749
0.040575
0.076078
0.08115
0.778529
0.778529
0.759932
0.721048
0.721048
0.721048
0
0.056949
0.334687
2,217
71
54
31.225352
0.745085
0
0
0.688525
0
0
0.003608
0
0
0
0
0
0.098361
1
0.04918
false
0
0.147541
0
0.213115
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
4625dd79054dcfcc1b53cc939abf68d1931583e6
912
py
Python
src/bindings/python/src/openvino/preprocess/__init__.py
pfinashx/openvino
1d417e888b508415510fb0a92e4a9264cf8bdef7
[ "Apache-2.0" ]
1
2022-02-26T17:33:44.000Z
2022-02-26T17:33:44.000Z
src/bindings/python/src/openvino/preprocess/__init__.py
pfinashx/openvino
1d417e888b508415510fb0a92e4a9264cf8bdef7
[ "Apache-2.0" ]
17
2021-11-25T10:22:17.000Z
2022-03-28T13:19:31.000Z
src/bindings/python/src/openvino/preprocess/__init__.py
AlexRogalskiy/openvino
ac2e639ff8f9a607c3c682a4c4e165c238eb817f
[ "Apache-2.0" ]
1
2020-12-13T22:16:54.000Z
2020-12-13T22:16:54.000Z
# Copyright (C) 2018-2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 """ Package: ngraph Low level wrappers for the PrePostProcessing c++ api. """ # flake8: noqa from openvino.utils import add_openvino_libs_to_path add_openvino_libs_to_path() # main classes from openvino.pyopenvino.preprocess import InputInfo from openvino.pyopenvino.preprocess import OutputInfo from openvino.pyopenvino.preprocess import InputTensorInfo from openvino.pyopenvino.preprocess import OutputTensorInfo from openvino.pyopenvino.preprocess import InputModelInfo from openvino.pyopenvino.preprocess import OutputModelInfo from openvino.pyopenvino.preprocess import PrePostProcessor from openvino.pyopenvino.preprocess import PreProcessSteps from openvino.pyopenvino.preprocess import PostProcessSteps from openvino.pyopenvino.preprocess import ColorFormat from openvino.pyopenvino.preprocess import ResizeAlgorithm
33.777778
59
0.858553
106
912
7.311321
0.433962
0.185806
0.312258
0.454194
0.593548
0
0
0
0
0
0
0.013237
0.088816
912
26
60
35.076923
0.919374
0.190789
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.923077
0
0.923077
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
465d8a3695d2f9c6321c7f2d70b8bb84b9adee5e
461
py
Python
aliyun_exporter/test_utils.py
BoringCat/aliyun-exporter
3c9af5e6ee42c9b3876ce312a3c39db046032740
[ "Apache-2.0" ]
1
2021-12-30T11:42:45.000Z
2021-12-30T11:42:45.000Z
aliyun_exporter/test_utils.py
BoringCat/aliyun-exporter
3c9af5e6ee42c9b3876ce312a3c39db046032740
[ "Apache-2.0" ]
null
null
null
aliyun_exporter/test_utils.py
BoringCat/aliyun-exporter
3c9af5e6ee42c9b3876ce312a3c39db046032740
[ "Apache-2.0" ]
1
2021-12-30T11:42:48.000Z
2021-12-30T11:42:48.000Z
from .utils import format_metric, format_period def test_format_metric(): assert format_metric("") == "" assert format_metric("a.b.c") == "a_b_c" assert format_metric("aBcD") == "aBcD" assert format_metric(".a.b.c.") == "_a_b_c_" def test_format_period(): assert format_period("") == "" assert format_period("3000") == "3000" assert format_period("5,10,25,50,100,300") == "5" assert format_period("300_00,500_00") == "300_00"
30.733333
53
0.652928
68
461
4.088235
0.323529
0.345324
0.258993
0.172662
0.453237
0.172662
0.172662
0.172662
0.172662
0
0
0.096104
0.164859
461
14
54
32.928571
0.625974
0
0
0
0
0
0.169197
0
0
0
0
0
0.727273
1
0.181818
true
0
0.090909
0
0.272727
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
1
0
0
0
0
0
0
5
468d68c559422ff65b4d8e458dcf57470bc871f0
74
py
Python
lib/__init__.py
FDA-ARGOS2/data.argosdb
5f65a2ea3a92c2936f19278a59a9ee0435738620
[ "MIT" ]
null
null
null
lib/__init__.py
FDA-ARGOS2/data.argosdb
5f65a2ea3a92c2936f19278a59a9ee0435738620
[ "MIT" ]
null
null
null
lib/__init__.py
FDA-ARGOS2/data.argosdb
5f65a2ea3a92c2936f19278a59a9ee0435738620
[ "MIT" ]
null
null
null
""" bco app and stuff """ from .bcoutils import * from .validate import *
12.333333
23
0.675676
10
74
5
0.8
0
0
0
0
0
0
0
0
0
0
0
0.189189
74
6
24
12.333333
0.833333
0.22973
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
46994ff8b717ab9cf6b46929624d6725410330e7
10,111
py
Python
client/client.py
pekacheew/ACG-CA2
bfba8e5091a152eed88b850811a843e435753c70
[ "MIT" ]
null
null
null
client/client.py
pekacheew/ACG-CA2
bfba8e5091a152eed88b850811a843e435753c70
[ "MIT" ]
null
null
null
client/client.py
pekacheew/ACG-CA2
bfba8e5091a152eed88b850811a843e435753c70
[ "MIT" ]
null
null
null
import pickle, base64, time, datetime, ftplib, io, random, hashlib, sys, traceback from Cryptodome.Random import get_random_bytes from Cryptodome.Cipher import PKCS1_OAEP, PKCS1_v1_5, AES from Cryptodome.Util.Padding import pad, unpad from Cryptodome.PublicKey import RSA from Cryptodome.Signature import pkcs1_15 from Cryptodome.Hash import SHA256 from pathlib import Path BASE_DIR = Path(__file__).parent.parent PRIVATE_KEY_LOC = BASE_DIR / 'client' / 'private.pem' PUBLIC_KEY_LOC = BASE_DIR / 'client' / 'public.pem' SERVER_PUBLIC_KEY_LOC = BASE_DIR / 'client' / 'serverPub.pem' # These variables are to support the mock camera my_pict = "iVBORw0KGgoAAAANSUhEUgAAAFAAAABQCAMAAAC5zwKfAAADAFBMVEWOjo6JiYmxsbGFhYWfn5+oqKiXl5eRkZGBgYGMjIx9fX0JCQl4eHgWFha6urpwcHBkZGQiIiLBwcFSUlJBQUEwMDDGxsbMzMzU1NTk5OQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAazXNTAAAACXBIWXMAAAsTAAALEwEAmpwYAAALaElEQVRYhW2Y65LcOI6FP1wkSmXZ7d3pef/n28t0uNtZJVEkgf0hZbp6YhWRlRVS8giXgwOQsh6wHGsFpkRDibrGVOSRpEhxtHuHDg6dkvypX8eHncSyvxEazSvrXmpROBwWAEIBjvlgJjCoZnPRHhzzAeDgHd/R7wcIz4UwJQAF2FcHjgVQJqhy/wbp8zodJ07XHqAO/VR3731Hx0g+X+vrP+VCgwm9brBSSfvWPgA6oKj38w
yiH0dH+ZAvjOm4V76uTzd0oilABQ7mIceH3s8iIHowv80zEUTA2X/bkoN4AuyFykLZ9RmL6WV51ZRlV43AvROqAQEdx2fl7W1W1Y9cplyJX0YW2CnOE226bWZlrDtXHvplf+gF2Z9BAI3+FmEaxNMQVqj6hKERAMHga0MjmOlBKO6z4zNHv9zFPSKU9tu1BPZfcVQ4nlY3gCXPtwMCZgdldnfH/cTdXYl4Lpw5NgJo3B7t1NvlVxAX9vJ1f73JPRNCLBwnpaPB7DBfroe//yIMUKjOi6AKyu7rE6/j3pJhkgwnBdy5vnsngHPd6/QLTQP8iQVQ5Zj1yW33GGdjpiYaTUmuRyZyQKAB+3/8q67xjOFOuQCPL3dgmM/fP+73ZTtHYnV3dOSwnjCdJr1MXqSfBMzs//nzxZsKYBPdu5Ogfen6tYKmLtZr242IVJBcz8zMTAvIfM8hpaMy65DcRcdV6qN8TgrALFODoPS+xzwTCJLSebDBgxBJIuW0GOseejjeNF4G1lf0LgrufPlA0bmf+zwnITLGiAEXHpByRbLysRJxxuHzwkq51KbcgqAXCUtXFMV6xA4hjA02tu27w7ZtjBgBkN453gBCctS9XGkuzyxfNFyidCDm3tsSGcKADTbHuwN9gwfDhNCzRNNQwmHda3nivOSnwZHTqQROi0jIwQbujuP3xQYDhLnRCyzzsTwL8Am4PCtxwSHQzFiQkLjR3EWESxWeiJEzvb/RHW9PyrwsnOKqPJPrZWOJTIFt6/hnHviNCBLRNNPP7uO3LPx/LkecgoJjgQTwuAVMIKG741f5DSAjRvpbP0EoUGv9GyCQ9FuCFWBsAPROZgL0ox/03rftCiNG4vR+95fyBCy3crGel+zLQQIPHv2+rgh6p998HLBcls/hAVDKy+X67E6OQ1xBk3G99nGbf2PCY3v6AyLiri9tuWGeSQ4MHO10BeyC23Dc6c9+8ACuvOxXFHrYjVcK4Pt6N9WrJPsV7iTFBmw4cLEacPr2srm0BccPRH6FEL3QLpUcdBSH+SmKjwupf/pcl0EtV8rm9vS4Xi6XQ6Mx0W7bO/mMERsXua8vd3fYNoxrGJEjcB55efxKCjDRflGoR1dRecYKmD7V3lXOCWVPAtwL9fK41EscFGgQJgBh+6nH0uRShct5mUASSBwwRCoyUqOb6BW+Sing7GWiTRMs9wCUFAhtkx6SEWwrcvWm7D8m6cNDRYYFncDJpzaU+lm+bo3V0IFSWderFhgfj20Fkv8x+eaQI9vke1v3+gWQyDvLd0/pbiI0k6B0RZyAty+AmIqIBMfHG3B0m2chUZ1S5zmtr5Hp/OX1ThvgOxBtgoCqofhJfDntp4UMEzvXaUCf8nhondoYTcDn7s6uw7uD5EtrQNdfUmZ3AKpL+UN7pIsc83lgdNShnPvZRDTrz/09bQo6ML5dQawVqP6aPluUfQGHyY7dMr67dH+v6zuLPBa6iQ6T2YXsHyH7xMJyeNf9k2QVm6Z9YlhYpImGqJ5xJCzfjjrKR/3nu5o1k76L9pTvR5v6w7/0Eb2fi6F6jLsPMrwOhVWhodMc0Rbv0AZMnh997fMcSTLJ48FoMcH5huVf/YtlzJJJj2NcQSvrleV90tSYJEkky+nnmFF5DNfzrNGzizQVTbCII99b73PXAQz3ZFSbEyqjl0pROAggGpDvQB0js09Ea9ubtK7eiAFz0SE5vn+fNPO9B0mA/XUJ2Z3o6pQTrRenbRgvZQiMn2PjIxuowL7YGPADywwZlqKaZIqscfO6VPRAiQlYyPOeT5UxkIDBY1e/h9a1VgPMkIxhA5lQ+2l2VWwF6oou9VIbOJiFP+Zx8WhgpJoSmjOAHOqnmkEmZpjglsfin7ZA6/6UrAZ70Vz/Kc5EADZAhMCGn1NAMuVkOcYQGZCZGUJfy8Xpyrqygx63Wjea8RC7ZMIMYAw0W9cvjXWVCaWJmY2VceuJlLZq2z8N2ia4BkYkYfKQRcahSEpmYu
PL6ulCTlTakuMfUpMTVFQQ1/bBKdkLgz4d69T9NdmwnF2cZBKNlMEGLNJ+fqukPID5/ctx+O+98wDimsbzMZ3XnL9S2NFnzwMIs2vbekXWl5X9ry+H7DG275vk9LCPfrD49op8iAn7Wl+J1uc2CoilHmW8drju0v53p4+Rsr2VBWFy9ccPYBsgTJbHiKbsT8laX40pgFnEPEVOuRpU33/qQGcxJ4O0phIdO+52Xx3YvDzrZIfPLQCgnD++kdNRAPpDdVgOE+bjgXnVqevO2Mf2DNLQx9XaSuXaoL2kLKBW1Mjb8MdDtZOWQ0Zdcpqql56SRvoDAy2dLpptusWVlU99GWBGRyKclTRUE1O1E8uj9DpJbT1VHXEDTm30VXTlUwf4+xYfVQbZ5lkYpiGuAhxDc4hJDBhnN/KiPaCPVLtnwStozr78AozihRFBCpmC9HmXLIwQAp1FD5lDZfewgGmsMzlg3Z+NpPpy6RXqcKzDwKiOgGTqHDMx7jFfyGE6IF22XefhptLvjv4sv5uHQL32uQNKQfkeiRmJmKK2fd8sU8Y86zzgqxM0yP7Smv3vtFE41qvrCWDdt50hS0o2DeXdewnHJzwlxY9EdJLMWyc+J6XdrLnIlGl6gh6+ImF8vLu2Yzb+0Wb92H423t9Y5YGOWRiMX9ueFXac41ei6xo6HE2G7F8hx77ybZY3pKT8Dm//vcx/ysNoJqlXsoPleZSxsr569AvVhviimfIDsLN/Pf/1fj3J+K8/vs4/3Gz4jwzENX+p/2u9IJYgI8V7TzNRyUg5vSoip/g8/vhYhfyjlcJfjgwOAz2/SoccQp86MPU+XeM40G6yB8NTPDLmUEiRs2u18WcgHkeppjkMkJRN8hpCgXVfeZXepNdub2FlCXKIT0KopSoZ0UWngOwj0wwsyUw9iyT577WGc52iKeuxwG4OITJlCIokRgR8h/4wzymSoWmJfHspn9zKtX7m4d164wxsuNNjqAU6UlLDfg7MUu0UwbKPKdLvEnuyZv8k/deNBXaWgOy4d5U8OTWHmqurt6YMkUx0TcZX4qXtK+uzUFYTZA7QkcOZjp6GSqjlOeFdNEkBRNwSBFTtnCnzUEbCuM6HOjBxOP7JaPZ1jRMgu39hLxMRGhJC6LjPlQiT8mFvJH87NV352wnn8+blM3J0XRY6omiqpPQISEgFDdmscxl4I7xaver14XmmtrcYTvvZvPTmFRE7kcG2kUiqaLdWfLQnwucTDI7DNGfNRMYC0N1Dig3rx6ouoil11kTzPFNpkyAlZdEa12FI5vOMjaMDi35Gh5XqOtpfNi07rqbUNZ7lkOoJ3Vis1l9b9Ofy6++/8ZzKeIufE/BRKWNq8xhDr/MHAVI33OqYyr+ytGubfnD8SshnwAX24vOBJW2f3yvONwklsQcwUgMTYd9pMv2Iyi2vCy/h11dcD2CfB29ofB0Upr0isvQgU8xUjbDZoaeNILNv1+Z+OY7jhfXJwiXarNv3Hde5FQjvRyK/uUFkQsqYLPPcB2jy3XWte1wn4ctyo/kLch66eW1NHaQZMMyO8xtuNZXI03w2aGmhErxNY2frf+ZyLMcCEBpgimmAmH/J0XPORc9Y3moimYp2pCGoqk0a5McgSLXU+fTR47fpyL4AHRL0/wCh2bfAENQtdQAAAABJRU5ErkJggg==" # System variable of main program camera_id = 102 # This ID is unique for each camera installed server_name = "localhost" # server name or IP address RSA_OVERHEAD = 66 # assume there is a overhead of 66 bytes per RSA encrypted block. 
when using OAEP with sha256 class ENC_payload: # A data class to store a encrypted file content. # The file content has been encrypted using an AES key. # The AES key is encrypted by a public key and stored in the enc_session_key instance attribute. def __init__(self): self.enc_session_key="" self.aes_iv = "" self.encrypted_content="" self.signature = "" self.pub_key = "" def connect_server_send( file_name: str , file_data: bytes ) -> bool: global my_pict """This function send file_data using FTP and save it as file_name in the remote server. It will simulate intermittent transfer. Args: file_name (str): file_name of file save in server as a String file_data (bytes): content of file as byte array Returns: bool: True if send, False otherwise """ try: if random.randrange(1,10) > 8: raise Exception("Generated Random Network Error") # create random failed transfer ftp = ftplib.FTP() # use init will use port 21 , hence use connect() ftp.connect( server_name , 2121) # use high port 2121 instead of 21 #connect to server through ftp ftp.login() # ftp.login(user="anonymous", passwd = 'anonymous@') #login to ftp server fn= file_data # default text file name if len(sys.argv) == 2: fn=sys.argv[1] try: pub_key_content=open(SERVER_PUBLIC_KEY_LOC,"r").read() # PUBLIC RSA KEY pub_key=RSA.import_key(pub_key_content) priv_key = RSA.import_key(open(PRIVATE_KEY_LOC,'r').read()) rsa_cipher = PKCS1_OAEP.new(pub_key) aes_key = get_random_bytes(AES.block_size) aes_cipher = AES.new(aes_key,AES.MODE_CBC) # ciphertext = aes_cipher.encrypt(pad(fn,AES.block_size)) #hash = hashlib.new('SHA256') #hash.update(fn) hash = SHA256.new(fn) signer = pkcs1_15.new(priv_key) signature = signer.sign(hash) #print(signature) enc_payload = ENC_payload() #enc_payload.file_hash = hash.hexdigest() enc_payload.pub_key = pub_key_content enc_payload.signature = signature enc_payload.enc_session_key = rsa_cipher.encrypt(aes_key) enc_payload.aes_iv = aes_cipher.iv # retrieve the randomly generated iv value 
enc_payload.encrypted_content=ciphertext # actually encrypt cipher text with AES encrypted=pickle.dumps(enc_payload) # serialize the enc_payload object into a byte stream. except: print("Opps") traceback.print_exc(file=sys.stdout) ftp.storbinary('STOR ' + file_name, io.BytesIO( encrypted ) ) #send file data and file name to ftp server ftp.quit() #after finish, quit current session return True except Exception as e: print(e, "while sending", file_name ) return False def get_picture() -> bytes: # as good as "don't touch" """This function simulate a motion activated camera unit. It will return 0 byte if no motion is detected. Returns: bytes: a byte array of a photo or 0 byte no motion detected """ time.sleep(1) # simulate slow processor if random.randrange(1,10) > 8: # simulate no motion detected return b'' else: return base64.b64decode(my_pict) while True: # Main function try: my_image = get_picture() # get picture if len(my_image) == 0: time.sleep(10) # sleep for 10 sec if there is no image print( "Random no motion detected") else: f_name = 'encrypted.dat' f_name = str(camera_id) + "_" + str(f_name) + "_" + datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S_%f.jpg" ) # FILE NAME if connect_server_send( f_name , my_image): print(f_name , " sent" ) except KeyboardInterrupt: exit() # gracefully exit if control-C detected
86.418803
5,052
0.806646
798
10,111
10.067669
0.428571
0.013692
0.003734
0.004854
0.013816
0.011451
0
0
0
0
0
0.078835
0.140639
10,111
117
5,053
86.418803
0.845782
0.136287
0
0.064935
0
0.012987
0.625554
0.606394
0
1
0
0
0
1
0.038961
false
0
0.12987
0
0.233766
0.064935
0
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
1
1
null
1
0
0
0
0
0
0
0
0
0
0
0
0
5
469e43ba13afa394185bbcba2977196ffc0e9bfa
98
py
Python
wsgi.py
rwolande/wapi
5e2406f6a5c02840610ac2ef78bdfccc969dc9c6
[ "MIT" ]
null
null
null
wsgi.py
rwolande/wapi
5e2406f6a5c02840610ac2ef78bdfccc969dc9c6
[ "MIT" ]
null
null
null
wsgi.py
rwolande/wapi
5e2406f6a5c02840610ac2ef78bdfccc969dc9c6
[ "MIT" ]
null
null
null
import sys sys.path.insert(0, '/var/www/html/flask_app/wapi') from app import app as application
19.6
50
0.765306
18
98
4.111111
0.777778
0
0
0
0
0
0
0
0
0
0
0.011494
0.112245
98
4
51
24.5
0.83908
0
0
0
0
0
0.285714
0.285714
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
d3bc082e94785f3efa0a174d3ab9083fb05277a4
474
py
Python
IMDSS/login/models.py
CCU-KLDP/IMDSS
b9a3d1adc78822dcf7ea1b59c607461dddf3e8cb
[ "MIT" ]
null
null
null
IMDSS/login/models.py
CCU-KLDP/IMDSS
b9a3d1adc78822dcf7ea1b59c607461dddf3e8cb
[ "MIT" ]
null
null
null
IMDSS/login/models.py
CCU-KLDP/IMDSS
b9a3d1adc78822dcf7ea1b59c607461dddf3e8cb
[ "MIT" ]
1
2021-11-05T13:28:53.000Z
2021-11-05T13:28:53.000Z
from django.db import models # Create your models here. class User_data(models.Model): """ @pony 使用者(醫生)資料 """ name = models.CharField(max_length=20) account = models.CharField(max_length=50) password = models.CharField(max_length=50) email = models.EmailField() department = models.CharField(max_length=50) def __str__(self): return self.account class Meta(): ordering = ["-department"]
21.545455
49
0.624473
55
474
5.218182
0.6
0.209059
0.250871
0.334495
0.271777
0
0
0
0
0
0
0.022923
0.263713
474
21
50
22.571429
0.799427
0.086498
0
0
0
0
0.028061
0
0
0
0
0
0
1
0.090909
false
0.090909
0.090909
0.090909
0.909091
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
1
0
0
5
3103fc750f5ec912ee1ab9fff8bb1c82416827c9
228
py
Python
app/load_config.py
bklim5/gcp-cloud-function-bitbucket-pipelines-serverless
29af1f94edd0b7299406c6b81c317a9264a5483f
[ "MIT" ]
null
null
null
app/load_config.py
bklim5/gcp-cloud-function-bitbucket-pipelines-serverless
29af1f94edd0b7299406c6b81c317a9264a5483f
[ "MIT" ]
null
null
null
app/load_config.py
bklim5/gcp-cloud-function-bitbucket-pipelines-serverless
29af1f94edd0b7299406c6b81c317a9264a5483f
[ "MIT" ]
null
null
null
from importlib import import_module from os import environ environment_name = environ.get('ENVIRONMENT_NAME', 'dev') config = import_module('app.config.{}'.format(environment_name)).CONFIG def get_config(): return config
22.8
71
0.776316
30
228
5.7
0.5
0.263158
0
0
0
0
0
0
0
0
0
0
0.114035
228
9
72
25.333333
0.846535
0
0
0
0
0
0.140351
0
0
0
0
0
0
1
0.166667
false
0
0.5
0.166667
0.833333
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
1
1
0
0
5
31129a46c5ea302227711bb5eafe0bc301fb2283
124
py
Python
pyparsebluray/__init__.py
Ichunjo/pyparsebluray
be77e84a6a782a34e52b6e8d38399f156bf1eec8
[ "MIT" ]
2
2021-07-17T18:01:49.000Z
2022-01-20T18:30:53.000Z
pyparsebluray/__init__.py
Ichunjo/pyparsebluray
be77e84a6a782a34e52b6e8d38399f156bf1eec8
[ "MIT" ]
null
null
null
pyparsebluray/__init__.py
Ichunjo/pyparsebluray
be77e84a6a782a34e52b6e8d38399f156bf1eec8
[ "MIT" ]
null
null
null
"""MPLS or playlist specifying an order in which clips (or parts of them) get played.""" # flake8: noqa from .mpls import *
31
88
0.717742
20
124
4.45
0.9
0
0
0
0
0
0
0
0
0
0
0.009804
0.177419
124
3
89
41.333333
0.862745
0.774194
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
3121dac316b9d3d01be94064d3612456becc2f1e
41
py
Python
tests/__init__.py
rmorison/slack_forms
4f8bfae889941643d43a6564a5d74e52018d7ee8
[ "ISC" ]
null
null
null
tests/__init__.py
rmorison/slack_forms
4f8bfae889941643d43a6564a5d74e52018d7ee8
[ "ISC" ]
null
null
null
tests/__init__.py
rmorison/slack_forms
4f8bfae889941643d43a6564a5d74e52018d7ee8
[ "ISC" ]
2
2021-09-21T15:20:52.000Z
2021-09-21T17:39:14.000Z
"""Unit test package for slack_forms."""
20.5
40
0.707317
6
41
4.666667
1
0
0
0
0
0
0
0
0
0
0
0
0.121951
41
1
41
41
0.777778
0.829268
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
312543f2e3748dcb1431a692ba2a9043975f8914
604
py
Python
swapi_explorer/swapi_explorer/explorer/utils.py
damianfraszczak/swapi_explorer
cc2b48e2d147d616fb6a36a6f3f340a32cb6c27d
[ "MIT" ]
null
null
null
swapi_explorer/swapi_explorer/explorer/utils.py
damianfraszczak/swapi_explorer
cc2b48e2d147d616fb6a36a6f3f340a32cb6c27d
[ "MIT" ]
null
null
null
swapi_explorer/swapi_explorer/explorer/utils.py
damianfraszczak/swapi_explorer
cc2b48e2d147d616fb6a36a6f3f340a32cb6c27d
[ "MIT" ]
null
null
null
import os import uuid from typing import Optional from django.conf import settings def get_random_filename(extension: str = "csv") -> str: return f"{uuid.uuid4().hex}.{extension}" def collection_upload_to(instance, filename: Optional[str] = None) -> str: filename = filename or get_random_filename() return os.path.join("collections", instance.__class__.__name__.lower(), filename) def get_full_collection_upload_to(instance, filename: Optional[str] = None) -> str: return os.path.join( settings.MEDIA_ROOT, collection_upload_to(instance=instance, filename=filename) )
28.761905
87
0.743377
79
604
5.417722
0.443038
0.11215
0.126168
0.182243
0.242991
0.242991
0.242991
0.242991
0.242991
0
0
0.001934
0.14404
604
20
88
30.2
0.825919
0
0
0
0
0
0.072848
0.049669
0
0
0
0
0
1
0.230769
false
0
0.307692
0.153846
0.769231
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
1
1
0
0
5
3136781481eb6f94b965e222ca5751a29d04e9b3
43
py
Python
pylox/__main__.py
otherJL0/pylox
49788f8d3a5a1944f1828e18d05ed51129993a5e
[ "MIT" ]
null
null
null
pylox/__main__.py
otherJL0/pylox
49788f8d3a5a1944f1828e18d05ed51129993a5e
[ "MIT" ]
null
null
null
pylox/__main__.py
otherJL0/pylox
49788f8d3a5a1944f1828e18d05ed51129993a5e
[ "MIT" ]
null
null
null
from .main import entrypoint entrypoint()
10.75
28
0.790698
5
43
6.8
0.8
0
0
0
0
0
0
0
0
0
0
0
0.139535
43
3
29
14.333333
0.918919
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
314a681778e97c2e052e0e7c025eb296d56d5a25
181
py
Python
jduargs/result.py
jeandemeusy/jdu_args
cbaf69d70c4cc25492989787ff97b4642b58078f
[ "MIT" ]
null
null
null
jduargs/result.py
jeandemeusy/jdu_args
cbaf69d70c4cc25492989787ff97b4642b58078f
[ "MIT" ]
null
null
null
jduargs/result.py
jeandemeusy/jdu_args
cbaf69d70c4cc25492989787ff97b4642b58078f
[ "MIT" ]
null
null
null
class Result: """Class description.""" def __init__(self): """ Initialization of the class. """ pass def __str__(self): return
15.083333
36
0.497238
16
181
5.125
0.75
0
0
0
0
0
0
0
0
0
0
0
0.38674
181
11
37
16.454545
0.738739
0.259669
0
0
0
0
0
0
0
0
0
0
0
1
0.4
false
0.2
0
0.2
0.8
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
1
1
0
0
5
31a2fab03fdd752bdcf7509af86a44df6bbea55f
47
py
Python
interact_plataforma.py
ignacio1244/cartera_virtual
9d272dd9fcbedcd2a634ca88f36b0cb2661a42b7
[ "MIT" ]
2
2021-07-24T18:07:11.000Z
2021-08-09T22:23:42.000Z
interact_plataforma.py
ignacio1244/cartera_virtual
9d272dd9fcbedcd2a634ca88f36b0cb2661a42b7
[ "MIT" ]
null
null
null
interact_plataforma.py
ignacio1244/cartera_virtual
9d272dd9fcbedcd2a634ca88f36b0cb2661a42b7
[ "MIT" ]
1
2021-07-18T21:34:13.000Z
2021-07-18T21:34:13.000Z
#Interacting with the plataforma.tcargo website
47
47
0.87234
6
47
6.833333
1
0
0
0
0
0
0
0
0
0
0
0
0.085106
47
1
47
47
0.953488
0.978723
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
31ac18e0f20d5ed77eaf41b160497fd4836f1133
424
py
Python
src/gocept/httpserverlayer/plonetestingzope/tests/test_zope2.py
gocept/gocept.httpserverlayer
5186a43a36bdbb7c3a888df3b1e9f934ab41d131
[ "ZPL-2.1" ]
1
2019-08-29T05:25:44.000Z
2019-08-29T05:25:44.000Z
src/gocept/httpserverlayer/plonetestingzope/tests/test_zope2.py
gocept/gocept.httpserverlayer
5186a43a36bdbb7c3a888df3b1e9f934ab41d131
[ "ZPL-2.1" ]
1
2021-01-13T09:49:56.000Z
2021-01-13T10:32:13.000Z
src/gocept/httpserverlayer/plonetestingzope/tests/test_zope2.py
gocept/gocept.httpserverlayer
5186a43a36bdbb7c3a888df3b1e9f934ab41d131
[ "ZPL-2.1" ]
null
null
null
import gocept.httpserverlayer.plonetestingzope import gocept.httpserverlayer.plonetestingzope.testing import gocept.httpserverlayer.tests.isolation import unittest class Zope2Tests( gocept.httpserverlayer.tests.isolation.IsolationTests, gocept.httpserverlayer.plonetestingzope.testing.IsolationTestHelper, unittest.TestCase): layer = gocept.httpserverlayer.plonetestingzope.testing.HTTP_LAYER
32.615385
76
0.825472
37
424
9.432432
0.405405
0.361032
0.424069
0.378224
0
0
0
0
0
0
0
0.002653
0.110849
424
12
77
35.333333
0.923077
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.444444
0
0.666667
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
31ce7d0cd7c9fef03ef15629550a7c8123c73b56
456
py
Python
dnnlib/__init__.py
icon-lab/FedGIMP
c110c4c12d56bb8fa53546e22330797212e7b197
[ "MIT" ]
45
2020-11-06T02:04:43.000Z
2022-01-12T02:59:26.000Z
dnnlib/__init__.py
icon-lab/FedGIMP
c110c4c12d56bb8fa53546e22330797212e7b197
[ "MIT" ]
1
2020-11-27T03:20:52.000Z
2020-11-30T06:39:16.000Z
dnnlib/__init__.py
icon-lab/FedGIMP
c110c4c12d56bb8fa53546e22330797212e7b197
[ "MIT" ]
8
2020-11-06T02:46:45.000Z
2022-03-24T07:48:23.000Z
 from . import submission from .submission.run_context import RunContext from .submission.submit import SubmitTarget from .submission.submit import PathType from .submission.submit import SubmitConfig from .submission.submit import get_path_from_template from .submission.submit import submit_run from .util import EasyDict submit_config: SubmitConfig = None # Package level variable for SubmitConfig which is only valid when inside the run function.
30.4
126
0.835526
61
456
6.163934
0.491803
0.223404
0.265957
0.345745
0
0
0
0
0
0
0
0
0.122807
456
14
127
32.571429
0.9375
0.195175
0
0
0
0
0
0
0
0
0
0
0
0
null
null
0
0.888889
null
null
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
1
0
0
0
0
5
31d81088cd22916cb0a6d37928520e172773dbe4
1,992
py
Python
data_preparation/scaling.py
RaphaelHippe/Machine-Learning-Project-Template
d8d6df12cba57de6b8a582385294372a6bc163f0
[ "MIT" ]
null
null
null
data_preparation/scaling.py
RaphaelHippe/Machine-Learning-Project-Template
d8d6df12cba57de6b8a582385294372a6bc163f0
[ "MIT" ]
null
null
null
data_preparation/scaling.py
RaphaelHippe/Machine-Learning-Project-Template
d8d6df12cba57de6b8a582385294372a6bc163f0
[ "MIT" ]
null
null
null
import pandas as pd from util.exceptions import DataFrameTypeError ''' NAME: apply_max_abs_scaling PARAMS: - df: the DataFrame containing the data DESCRIPTION: RETURN: the manipulated DataFrame ''' def apply_max_abs_scaling(df): if not isinstance(df, pd.DataFrame): raise DataFrameTypeError('df', df) from sklearn.preprocessing import MaxAbsScaler df[df.columns] = MaxAbsScaler().fit_transform(df[df.columns]) return df ''' NAME: apply_min_max_scaling PARAMS: - df: the DataFrame containing the data DESCRIPTION: RETURN: the manipulated DataFrame ''' def apply_min_max_scaling(df): if not isinstance(df, pd.DataFrame): raise DataFrameTypeError('df', df) from sklearn.preprocessing import MinMaxScaler df[df.columns] = MinMaxScaler().fit_transform(df[df.columns]) return df ''' NAME: apply_normalizer_scaling PARAMS: - df: the DataFrame containing the data DESCRIPTION: RETURN: the manipulated DataFrame ''' def apply_normalizer_scaling(df): if not isinstance(df, pd.DataFrame): raise DataFrameTypeError('df', df) from sklearn.preprocessing import Normalizer df[df.columns] = Normalizer().fit_transform(df[df.columns]) return df ''' NAME: apply_robust_scaler_scaling PARAMS: - df: the DataFrame containing the data DESCRIPTION: RETURN: the manipulated DataFrame ''' def apply_robust_scaler_scaling(df): if not isinstance(df, pd.DataFrame): raise DataFrameTypeError('df', df) from sklearn.preprocessing import RobustScaler df[df.columns] = RobustScaler().fit_transform(df[df.columns]) return df ''' NAME: apply_standard_scaler_scaling PARAMS: - df: the DataFrame containing the data DESCRIPTION: RETURN: the manipulated DataFrame ''' def apply_standard_scaler_scaling(df): if not isinstance(df, pd.DataFrame): raise DataFrameTypeError('df', df) from sklearn.preprocessing import StandardScaler df[df.columns] = StandardScaler().fit_transform(df[df.columns]) return df
26.918919
67
0.748996
252
1,992
5.789683
0.162698
0.041124
0.075394
0.061686
0.784784
0.784784
0.784784
0.763537
0.763537
0.653873
0
0
0.15512
1,992
73
68
27.287671
0.866904
0
0
0.46875
0
0
0.007547
0
0
0
0
0
0
1
0.15625
false
0
0.21875
0
0.53125
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
31d833060e53af8151fccaa659c51a2486596b5e
209
py
Python
src/awkward/forth.py
HenryDayHall/awkward-1.0
4a860e775502f9adb953524c35c5a2de8f7a3181
[ "BSD-3-Clause" ]
null
null
null
src/awkward/forth.py
HenryDayHall/awkward-1.0
4a860e775502f9adb953524c35c5a2de8f7a3181
[ "BSD-3-Clause" ]
null
null
null
src/awkward/forth.py
HenryDayHall/awkward-1.0
4a860e775502f9adb953524c35c5a2de8f7a3181
[ "BSD-3-Clause" ]
null
null
null
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE from __future__ import absolute_import from awkward._ext import ForthMachine32 from awkward._ext import ForthMachine64
29.857143
87
0.822967
31
209
5.322581
0.709677
0.133333
0.169697
0.242424
0
0
0
0
0
0
0
0.037037
0.095694
209
6
88
34.833333
0.835979
0.406699
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
31eca0dcf39039c0235ddbfebf2cb3d8ca6d697c
77
py
Python
reinforcement_learning/rl_network_compression_ray_custom/src/tensorflow_resnet/compressor/core/fake_ops.py
jpmarques19/tensorflwo-test
0ff8b06e0415075c7269820d080284a42595bb2e
[ "Apache-2.0" ]
5
2019-01-19T23:53:35.000Z
2022-01-29T14:04:31.000Z
reinforcement_learning/rl_network_compression_ray_custom/src/tensorflow_resnet/compressor/core/fake_ops.py
jpmarques19/tensorflwo-test
0ff8b06e0415075c7269820d080284a42595bb2e
[ "Apache-2.0" ]
6
2020-01-28T23:08:49.000Z
2022-02-10T00:27:19.000Z
reinforcement_learning/rl_network_compression_ray_custom/src/tensorflow_resnet/compressor/core/fake_ops.py
jpmarques19/tensorflwo-test
0ff8b06e0415075c7269820d080284a42595bb2e
[ "Apache-2.0" ]
8
2020-12-14T15:49:24.000Z
2022-03-23T18:38:36.000Z
class Fake(object): def __init__(self, shape): self.shape = shape
25.666667
30
0.636364
10
77
4.5
0.7
0.4
0
0
0
0
0
0
0
0
0
0
0.246753
77
3
31
25.666667
0.775862
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0
0
0.666667
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
5
9ed27974547591ceccd142b31d81f5cabceb4a39
43
py
Python
tests/__init__.py
TeriForey/sudoku_solver
9a311c1509455b73d7b8fee65909e5fd1fcda009
[ "BSD-3-Clause" ]
null
null
null
tests/__init__.py
TeriForey/sudoku_solver
9a311c1509455b73d7b8fee65909e5fd1fcda009
[ "BSD-3-Clause" ]
null
null
null
tests/__init__.py
TeriForey/sudoku_solver
9a311c1509455b73d7b8fee65909e5fd1fcda009
[ "BSD-3-Clause" ]
null
null
null
"""Unit test package for sudoku_solver."""
21.5
42
0.72093
6
43
5
1
0
0
0
0
0
0
0
0
0
0
0
0.116279
43
1
43
43
0.789474
0.837209
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
9ed606835a823a447068986c5f3cfdc7917856ab
768
py
Python
extra/reforge.py
barczynsky/pytoo
9994cd861d9f491f09dcf66470d4c836dfc2a35e
[ "MIT" ]
1
2018-02-04T13:48:28.000Z
2018-02-04T13:48:28.000Z
extra/reforge.py
barczynsky/pytoo
9994cd861d9f491f09dcf66470d4c836dfc2a35e
[ "MIT" ]
null
null
null
extra/reforge.py
barczynsky/pytoo
9994cd861d9f491f09dcf66470d4c836dfc2a35e
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 import builtins class Print(object): __true_print = builtins.print __sham_print_args = () __sham_print_kwargs = {} @staticmethod def __sham_print(*args, **kwargs): return Print.__true_print(*(*Print.__sham_print_args, *args), **{**Print.__sham_print_kwargs, **kwargs}) @staticmethod def __call__(*args, **kwargs): return builtins.print(*args, *kwargs) @staticmethod def append(*args, **kwargs): Print.__sham_print_args = (*Print.__sham_print_args, *args) Print.__sham_print_kwargs = {**Print.__sham_print_kwargs, **kwargs} builtins.print = Print.__sham_print return Print @staticmethod def clear(): Print.__sham_print_args = () Print.__sham_print_kwargs = {} builtins.print = Print.__true_print return Print
24.774194
106
0.736979
97
768
5.226804
0.195876
0.213018
0.276134
0.177515
0.33925
0.2643
0.2643
0.16568
0.16568
0
0
0.001495
0.128906
768
30
107
25.6
0.756353
0.027344
0
0.26087
0
0
0
0
0
0
0
0
0
1
0.173913
false
0
0.043478
0.086957
0.565217
0.521739
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
1
0
5
73057b9b19f501170bd67bf80f4d48b3631a6976
54
py
Python
flask/database.py
ingochris/justicematch
e37a68965af82f57cd9f6ac524b3c36f6994a4fe
[ "MIT" ]
4
2017-10-21T19:40:59.000Z
2021-04-21T17:06:57.000Z
flask/database.py
ingochris/justicematch
e37a68965af82f57cd9f6ac524b3c36f6994a4fe
[ "MIT" ]
null
null
null
flask/database.py
ingochris/justicematch
e37a68965af82f57cd9f6ac524b3c36f6994a4fe
[ "MIT" ]
3
2017-10-21T17:49:08.000Z
2019-03-04T01:52:44.000Z
# -*- coding:utf-8 -*- from extensions import nosql
10.8
28
0.648148
7
54
5
1
0
0
0
0
0
0
0
0
0
0
0.022727
0.185185
54
4
29
13.5
0.772727
0.37037
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
7317c83c5d01f1b0f521e27add7e70874df923d0
515
py
Python
apps/auth/__init__.py
Deteriorator/Book.Flask.Web
1458a147f2ef314d14d73e62e573cc30a9827903
[ "Apache-2.0" ]
null
null
null
apps/auth/__init__.py
Deteriorator/Book.Flask.Web
1458a147f2ef314d14d73e62e573cc30a9827903
[ "Apache-2.0" ]
1
2021-02-02T22:45:15.000Z
2021-02-02T22:45:15.000Z
apps/auth/__init__.py
Deteriorator/Book.Flask.Web
1458a147f2ef314d14d73e62e573cc30a9827903
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- """ ------------------------------------------------------------------------------- @Name: __init__.py @Desc: @Author: liangz.org@gmail.com @Create: 2020.05.24 15:31 ------------------------------------------------------------------------------- @Change: 2020.05.24 ------------------------------------------------------------------------------- """ from flask import Blueprint auth = Blueprint('auth', __name__) from . import views
24.52381
79
0.314563
37
515
4.162162
0.783784
0.077922
0.103896
0
0
0
0
0
0
0
0
0.047191
0.135922
515
20
80
25.75
0.298876
0.805825
0
0
0
0
0.044944
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0.666667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
1
0
5
7334063e447435d6c232c39c437b5e7252b53e32
44
py
Python
ntcl_build_tools/__init__.py
cheshyre/ntcl-build
133bb44af6c7a2017431ca78fd0e60b91396322a
[ "BSD-3-Clause" ]
null
null
null
ntcl_build_tools/__init__.py
cheshyre/ntcl-build
133bb44af6c7a2017431ca78fd0e60b91396322a
[ "BSD-3-Clause" ]
null
null
null
ntcl_build_tools/__init__.py
cheshyre/ntcl-build
133bb44af6c7a2017431ca78fd0e60b91396322a
[ "BSD-3-Clause" ]
null
null
null
from .build_framework import BuildFramework
22
43
0.886364
5
44
7.6
1
0
0
0
0
0
0
0
0
0
0
0
0.090909
44
1
44
44
0.95
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
73577c35abd4654a6a456f4fb893514d6036b800
181
py
Python
backend/limiter.py
lie-flat/cfps-jupyterhub
a882984310629e9441b76000707e92cf05815e12
[ "BSD-3-Clause" ]
3
2022-02-15T12:42:18.000Z
2022-03-10T13:17:49.000Z
backend/limiter.py
lie-flat/cfps-jupyterhub
a882984310629e9441b76000707e92cf05815e12
[ "BSD-3-Clause" ]
null
null
null
backend/limiter.py
lie-flat/cfps-jupyterhub
a882984310629e9441b76000707e92cf05815e12
[ "BSD-3-Clause" ]
null
null
null
from slowapi import Limiter from slowapi.util import get_remote_address from .config import RATE_LIMIT limiter = Limiter(key_func=get_remote_address, default_limits=[RATE_LIMIT])
25.857143
75
0.845304
27
181
5.37037
0.555556
0.151724
0.22069
0
0
0
0
0
0
0
0
0
0.099448
181
6
76
30.166667
0.889571
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.75
0
0.75
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
b401ef1dd4cf198fb831045103723040ac396127
111
py
Python
kivymt/calendar/__init__.py
inteplus/kivymt
e7d7e701ed568b3bae9bbb2d8a2980bf106dcd05
[ "MIT" ]
null
null
null
kivymt/calendar/__init__.py
inteplus/kivymt
e7d7e701ed568b3bae9bbb2d8a2980bf106dcd05
[ "MIT" ]
null
null
null
kivymt/calendar/__init__.py
inteplus/kivymt
e7d7e701ed568b3bae9bbb2d8a2980bf106dcd05
[ "MIT" ]
null
null
null
#!/usr/bin/python3 # -*- coding: utf-8 -*- from kivymt.calendar.calendar_ui import DatePicker, CalendarWidget
22.2
66
0.738739
14
111
5.785714
0.928571
0
0
0
0
0
0
0
0
0
0
0.020202
0.108108
111
4
67
27.75
0.79798
0.351351
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
b42741f5d2805d45b83e00dae0f4a9673d04b27d
21
py
Python
2-conda/hello.py
davesanjay/reproducible-computational-workflows
e0e873f4354cc51099d0dd503823b9b70af8b825
[ "MIT" ]
32
2019-07-16T20:29:36.000Z
2020-11-20T01:37:42.000Z
2-conda/hello.py
davesanjay/reproducible-computational-workflows
e0e873f4354cc51099d0dd503823b9b70af8b825
[ "MIT" ]
11
2019-07-03T06:48:09.000Z
2021-06-08T17:44:16.000Z
2-conda/hello.py
davesanjay/reproducible-computational-workflows
e0e873f4354cc51099d0dd503823b9b70af8b825
[ "MIT" ]
25
2019-07-02T11:10:52.000Z
2021-05-21T05:21:50.000Z
print "Hello Basel!"
10.5
20
0.714286
3
21
5
1
0
0
0
0
0
0
0
0
0
0
0
0.142857
21
1
21
21
0.833333
0
0
0
0
0
0.571429
0
0
0
0
0
0
0
null
null
0
0
null
null
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
1
0
5
b42b1d907479c45e7aea8aea9ea02445089464b7
50
py
Python
cupy_alias/manipulation/transpose.py
fixstars/clpy
693485f85397cc110fa45803c36c30c24c297df0
[ "BSD-3-Clause" ]
142
2018-06-07T07:43:10.000Z
2021-10-30T21:06:32.000Z
cupy_alias/manipulation/transpose.py
fixstars/clpy
693485f85397cc110fa45803c36c30c24c297df0
[ "BSD-3-Clause" ]
282
2018-06-07T08:35:03.000Z
2021-03-31T03:14:32.000Z
cupy_alias/manipulation/transpose.py
fixstars/clpy
693485f85397cc110fa45803c36c30c24c297df0
[ "BSD-3-Clause" ]
19
2018-06-19T11:07:53.000Z
2021-05-13T20:57:04.000Z
from clpy.manipulation.transpose import * # NOQA
25
49
0.78
6
50
6.5
1
0
0
0
0
0
0
0
0
0
0
0
0.14
50
1
50
50
0.906977
0.08
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
b431d86e902b0ae933e0fb8fe873ab8e6ef20239
8,314
py
Python
test/python/DataTypeDocument.py
cnangel/HyperDex
b272e85b08d232993baf6105a4beba833deadfe3
[ "BSD-3-Clause" ]
1
2016-08-10T07:53:58.000Z
2016-08-10T07:53:58.000Z
test/python/DataTypeDocument.py
cnangel/HyperDex
b272e85b08d232993baf6105a4beba833deadfe3
[ "BSD-3-Clause" ]
null
null
null
test/python/DataTypeDocument.py
cnangel/HyperDex
b272e85b08d232993baf6105a4beba833deadfe3
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python2 import sys import hyperdex.client import json import os from testlib import * from hyperdex.client import * c = hyperdex.client.Client(sys.argv[1], int(sys.argv[2])) def to_objectset(xs): return set([frozenset(x.items()) for x in xs]) # Empty Document assertTrue(c.put('kv', 'k', {})) assertEquals(c.get('kv', 'k')['v'], Document({})) # Basic Stuff assertTrue(c.put('kv', 'k', {'v': Document({})})) assertEquals(c.get('kv', 'k')['v'], Document({})) assertTrue(c.put('kv', 'k', {'v': Document({'a': 'b', 'c': {'d' : 1, 'e': 'f', 'g': -2 }})})) assertEquals(c.get('kv', 'k')['v'], Document({'a': 'b', 'c': {'d' : 1, 'e': 'f', 'g': -2 }})) assertFalse(c.atomic_add('kv', 'k', {'v.a': 1})) assertEquals(c.get('kv', 'k')['v'], Document({'a': 'b', 'c': {'d' : 1, 'e': 'f', 'g': -2 }})) assertTrue(c.atomic_add('kv', 'k', {'v.c.d' : 5})) assertEquals(c.get('kv', 'k')['v'], Document({'a': 'b', 'c': {'d' : 6, 'e': 'f', 'g': -2 }})) assertTrue(c.atomic_add('kv', 'k', {'v.c.d' : 5, 'v.c.g': 5})) assertEquals(c.get('kv', 'k')['v'], Document({'a': 'b', 'c': {'d' : 11, 'e': 'f' , 'g': 3}})) assertTrue(c.string_prepend('kv', 'k', {'v.a' : 'x', 'v.c.e': 'z'})) assertEquals(c.get('kv', 'k')['v'], Document({'a': 'xb', 'c': {'d' : 11, 'e': 'zf', 'g': 3}})) assertTrue(c.string_append('kv', 'k', {'v.a' : 'x', 'v.c.e': 'z'})) assertEquals(c.get('kv', 'k')['v'], Document({'a': 'xbx', 'c': {'d' : 11, 'e': 'zfz', 'g': 3}})) assertTrue(c.string_append('kv', 'k', {'v.k.l': 'm'})) assertEquals(c.get('kv', 'k')['v'], Document({'a': 'xbx', 'c': {'d' : 11, 'e': 'zfz', 'g': 3}, 'k' : {'l' : 'm'}})) assertTrue(c.atomic_add('kv', 'k', {'v.k.a.b.c.d' : 1})) assertEquals(c.get('kv', 'k')['v'], Document({'a': 'xbx', 'c': {'d' : 11, 'e': 'zfz', 'g': 3}, 'k' : {'a': {'b' : {'c' : {'d' : 1}}}, 'l' : 'm'}})) assertTrue(c.atomic_sub('kv', 'k', {'v.k.a.b.c.d' : 5})) assertEquals(c.get('kv', 'k')['v'], Document({'a': 'xbx', 'c': {'d' : 11, 'e': 'zfz', 'g': 3}, 'k' : {'a': {'b' : {'c' : {'d' : 
-4}}}, 'l' : 'm'}})) # Bit operations assertTrue(c.put('kv', 'k3', {'v': Document({'a': 'b', 'c': {'d' : 100, 'e': 'f', 'g': 5 }})})) assertEquals(c.get('kv', 'k3')['v'], Document({'a': 'b', 'c': {'d' : 100, 'e': 'f', 'g': 5 }})) assertTrue(c.atomic_mod('kv', 'k3', {'v.c.d' : 10000})) assertEquals(c.get('kv', 'k3')['v'], Document({'a': 'b', 'c': {'d' : 100, 'e': 'f', 'g': 5 }})) assertTrue(c.atomic_mod('kv', 'k3', {'v.c.d' : 22})) assertEquals(c.get('kv', 'k3')['v'], Document({'a': 'b', 'c': {'d' : 12, 'e': 'f', 'g': 5 }})) assertTrue(c.atomic_xor('kv', 'k3', {'v.c.g' : 4})) assertEquals(c.get('kv', 'k3')['v'], Document({'a': 'b', 'c': {'d' : 12, 'e': 'f', 'g': 1 }})) assertTrue(c.atomic_or('kv', 'k3', {'v.c.g' : 4})) assertEquals(c.get('kv', 'k3')['v'], Document({'a': 'b', 'c': {'d' : 12, 'e': 'f', 'g': 5 }})) # Multiply and divide assertTrue(c.put('kv', 'k4', {'v': Document({'a': 200})})) assertEquals(c.get('kv', 'k4')['v'], Document({ 'a': 200 })) assertTrue(c.atomic_div('kv', 'k4', {'v.a' : 1})) assertEquals(c.get('kv', 'k4')['v'], Document({ 'a': 200 })) assertTrue(c.atomic_div('kv', 'k4', {'v.a' : 2})) assertEquals(c.get('kv', 'k4')['v'], Document({ 'a': 100 })) assertTrue(c.atomic_mul('kv', 'k4', {'v.a' : 4})) assertEquals(c.get('kv', 'k4')['v'], Document({ 'a': 400 })) assertTrue(c.atomic_mul('kv', 'k4', {'v.a' : 1})) assertEquals(c.get('kv', 'k4')['v'], Document({ 'a': 400 })) assertTrue(c.atomic_mul('kv', 'k4', {'v.a' : 0})) assertEquals(c.get('kv', 'k4')['v'], Document({ 'a': 0 })) # Floating point numbers assertTrue(c.put('kv', 'k10', {'v': Document({'a': 200})})) assertTrue(c.atomic_add('kv', 'k10', {'v.a' : 100.0})) assertEquals(c.get('kv', 'k10')['v'], Document({ 'a': 300.0 })) assertTrue(c.atomic_mul('kv', 'k10', {'v.a' : 1.5})) assertEquals(c.get('kv', 'k10')['v'], Document({ 'a': 450.0 })) # Build a new subdocument assertTrue(c.put('kv', 'k6', {'v' : Document({'a' : 100})})) assertEquals(c.get('kv', 'k6')['v'], Document({ 'a': 100 })) 
assertFalse(c.atomic_add('kv', 'k6', {'v.a.b' :1})) assertEquals(c.get('kv', 'k6')['v'], Document({ 'a': 100 })) assertTrue(c.atomic_add('kv', 'k6', {'v.c.b' :1})) assertEquals(c.get('kv', 'k6')['v'], Document({ 'a': 100 , 'c' : {'b' :1}})) assertTrue(c.string_prepend('kv', 'k6', {'v.i.j.k' : 'xyz'})) assertEquals(c.get('kv', 'k6')['v'], Document({ 'a': 100 , 'c' : {'b' :1}, 'i' : {'j' : {'k' : 'xyz'}}})) assertFalse(c.string_prepend('kv', 'k6', {'v.i.j' : 'xyz'})) assertEquals(c.get('kv', 'k6')['v'], Document({ 'a': 100 , 'c' : {'b' :1}, 'i' : {'j' : {'k' : 'xyz'}}})) assertTrue(c.put('kv', 'k6', {'v.d' : Document({'q' : 1})})) assertEquals(c.get('kv', 'k6')['v'], Document({ 'a': 100 , 'c' : {'b' :1}, 'i' : {'j' : {'k' : 'xyz'}}, 'd' : {'q' : 1}})) # Remove a property assertTrue(c.put('kv', 'k7', {'v' : Document({'a' : {'b' : 3}})})) assertEquals(c.get('kv', 'k7')['v'], Document({'a' : {'b' : 3}})) assertTrue(c.document_unset('kv', 'k7', {'v.a.b' : 1})) assertEquals(c.get('kv', 'k7')['v'], Document({'a' : {}})) assertFalse(c.document_unset('kv', 'k7', {'v.a.b' : 1})) # Rename a property assertTrue(c.put('kv', 'k7', {'v' : Document({'a' : {'b' : 3}})})) assertEquals(c.get('kv', 'k7')['v'], Document({'a' : {'b' : 3}})) assertTrue(c.document_rename('kv', 'k7', {'v.a.b' : 'a.c'})) assertEquals(c.get('kv', 'k7')['v'], Document({'a' : {'c' : 3}})) assertFalse(c.document_rename('kv', 'k7', {'v.a.b' : 'c'})) assertFalse(c.document_rename('kv', 'k7', {'v.a.b' : 'b'})) # Set new values (returns false if they already exist) assertTrue(c.put('kv', 'k8', {'v' : Document({'a' : { 'b' : 'c'}})})) assertEquals(c.get('kv', 'k8')['v'], Document({'a' : {'b' : 'c'}})) assertTrue(c.put('kv', 'k8', {'v.a.b' : 'c'})) assertEquals(c.get('kv', 'k8')['v'], Document({'a' : {'b' : 'c'}})) assertTrue(c.put('kv', 'k8', {'v.a.b' : 'c'})) assertEquals(c.get('kv', 'k8')['v'], Document({'a' : {'b' : 'c'}})) assertTrue(c.put('kv', 'k8', {'v.a.c' : 1})) assertEquals(c.get('kv', 'k8')['v'], 
Document({'a' : {'b' : 'c', 'c' : 1}})) assertTrue(c.put('kv', 'k8', {'v.a.c' : 2})) assertEquals(c.get('kv', 'k8')['v'], Document({'a' : {'b' : 'c', 'c' : 2}})) assertTrue(c.put('kv', 'k8', {'v.a.c' : 'c'})) assertEquals(c.get('kv', 'k8')['v'], Document({'a' : {'b' : 'c', 'c' : 'c'}})) assertTrue(c.put('kv', 'k8', {'v.b.a' : 1, 'v.b.b' : 1, 'v.b.c' : 'xyz'})) assertEquals(c.get('kv', 'k8')['v'], Document({'a' : {'b' : 'c', 'c' : 'c'}, 'b' : {'a' : 1, 'b' : 1, 'c' : 'xyz'}})) assertTrue(c.put('kv', 'k8', {'v.c' : Document({'b' : {'a' : 1, 'b' : 1, 'c' : 'xyz'}})})) assertEquals(c.get('kv', 'k8')['v'], Document({'a' : {'b' : 'c', 'c' : 'c'}, 'b' : {'a' : 1, 'b' : 1, 'c' : 'xyz'}, 'c' : {'b' : {'a' : 1, 'b' : 1, 'c' : 'xyz'}}})) assertTrue(c.put('kv', 'k8', {'v.d' : 2.5})) assertEquals(c.get('kv', 'k8')['v'], Document({'a' : {'b' : 'c', 'c' : 'c'}, 'b' : {'a' : 1, 'b' : 1, 'c' : 'xyz'}, 'c' : {'b' : {'a' : 1, 'b' : 1, 'c' : 'xyz'}}, 'd' : 2.5})) # Arrays assertTrue(c.put('kv', 'k11', {'v' : Document({'a' : [1,2,3,0]})})) assertEquals(c.get('kv', 'k11')['v'], Document({'a' : [1,2,3,0]})) assertTrue(c.put('kv', 'k11', {'v.a[3]' : 4})) assertEquals(c.get('kv', 'k11')['v'], Document({'a' : [1,2,3,4]})) assertFalse(c.put('kv', 'k11', {'v.a[3].b' : 4})) assertEquals(c.get('kv', 'k11')['v'], Document({'a' : [1,2,3,4]})) assertTrue(c.list_rpush('kv', 'k11', {'v.a' : "5"})) assertEquals(c.get('kv', 'k11')['v'], Document({'a' : [1,2,3,4,"5"]})) assertTrue(c.list_rpush('kv', 'k11', {'v.a' : Document({'x':'y'})})) assertEquals(c.get('kv', 'k11')['v'], Document({'a' : [1,2,3,4,"5",{'x':'y'}]})) assertTrue(c.list_lpush('kv', 'k11', {'v.a' : 0})) assertEquals(c.get('kv', 'k11')['v'], Document({'a' : [0,1,2,3,4,"5",{'x':'y'}]})) # Search on Documents assertTrue(c.put('kv', 'k9', {'v' : Document({'x' : {'b' : 'c'}})})) res1 = c.search('kv', {'v.x.b' : 'c'}) res2 = c.search('kv', {'v.x' : Document({'b' : 'c'})}) res3 = c.search('kv', {'v' : Document({'x' : {'b' : 'c'}})}) 
assertEquals(res1.next(), {'k' : 'k9', 'v' : Document({'x' : {'b' : 'c'}})}) assertFalse(res1.hasNext()) assertEquals(res2.next(), {'k' : 'k9', 'v' : Document({'x' : {'b' : 'c'}})}) assertFalse(res2.hasNext()) assertEquals(res3.next(), {'k' : 'k9', 'v' : Document({'x' : {'b' : 'c'}})}) assertFalse(res3.hasNext())
55.798658
175
0.487371
1,375
8,314
2.923636
0.090909
0.143284
0.139303
0.219403
0.821144
0.782836
0.737811
0.673632
0.579602
0.497512
0
0.043173
0.12521
8,314
148
176
56.175676
0.509556
0.029589
0
0.223141
0
0
0.131719
0
0
0
0
0
0.900826
1
0.008264
false
0
0.049587
0.008264
0.066116
0
0
0
0
null
0
0
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
5
b43761527777f6e70949fda16bdfbc228fed95f6
55
py
Python
jinahub/encoders/text/TransformerTorchEncoder/__init__.py
vivek2301/executors
8159681d68408ab8f797497bc3374be77e6ca392
[ "Apache-2.0" ]
null
null
null
jinahub/encoders/text/TransformerTorchEncoder/__init__.py
vivek2301/executors
8159681d68408ab8f797497bc3374be77e6ca392
[ "Apache-2.0" ]
null
null
null
jinahub/encoders/text/TransformerTorchEncoder/__init__.py
vivek2301/executors
8159681d68408ab8f797497bc3374be77e6ca392
[ "Apache-2.0" ]
null
null
null
from .transform_encoder import TransformerTorchEncoder
27.5
54
0.909091
5
55
9.8
1
0
0
0
0
0
0
0
0
0
0
0
0.072727
55
1
55
55
0.960784
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
b47218709f2a4c4a12b1247f7e8449204ad8a78e
11,955
py
Python
tests/unit/opera/parser/test_tosca.py
radon-h2020/xopera-opera
8b720d335f6fd6e693796a3bf447a84f557886ac
[ "Apache-2.0" ]
null
null
null
tests/unit/opera/parser/test_tosca.py
radon-h2020/xopera-opera
8b720d335f6fd6e693796a3bf447a84f557886ac
[ "Apache-2.0" ]
16
2020-03-09T14:05:58.000Z
2021-06-30T07:56:37.000Z
tests/unit/opera/parser/test_tosca.py
radon-h2020/xopera-opera
8b720d335f6fd6e693796a3bf447a84f557886ac
[ "Apache-2.0" ]
2
2020-06-15T12:17:44.000Z
2020-09-08T09:28:27.000Z
import pathlib import pytest from opera.error import ParseError from opera.parser import tosca from opera.storage import Storage class TestLoad: def test_load_minimal_document(self, tmp_path): name = pathlib.PurePath("root.yaml") (tmp_path / name).write_text("tosca_definitions_version: tosca_simple_yaml_1_3") doc = tosca.load(tmp_path, name) assert doc.tosca_definitions_version.data == "tosca_simple_yaml_1_3" def test_empty_document_is_invalid(self, tmp_path): name = pathlib.PurePath("empty.yaml") (tmp_path / name).write_text("{}") with pytest.raises(ParseError): tosca.load(tmp_path, name) @pytest.mark.parametrize("typ", [ ("data_types", "tosca.datatypes.xml"), ("artifact_types", "tosca.artifacts.Implementation.Bash"), ("capability_types", "tosca.capabilities.Node"), ("relationship_types", "tosca.relationships.HostedOn"), ("interface_types", "tosca.interfaces.Root"), ("node_types", "tosca.nodes.Root"), ("group_types", "tosca.groups.Root"), ("policy_types", "tosca.policies.Root"), ]) def test_stdlib_is_present(self, tmp_path, typ): name = pathlib.PurePath("stdlib.yaml") (tmp_path / name).write_text( "tosca_definitions_version: tosca_simple_yaml_1_3", ) doc = tosca.load(tmp_path, name) assert doc.dig(*typ) is not None @pytest.mark.parametrize("typ", [ ("data_types", "tosca.datatypes.xml"), ("artifact_types", "tosca.artifacts.Implementation.Bash"), ("capability_types", "tosca.capabilities.Node"), ("relationship_types", "tosca.relationships.HostedOn"), ("interface_types", "tosca.interfaces.Root"), ("node_types", "tosca.nodes.Root"), ("group_types", "tosca.groups.Root"), ("policy_types", "tosca.policies.Root"), ]) def test_custom_type_is_present(self, tmp_path, yaml_text, typ): name = pathlib.PurePath("custom.yaml") (tmp_path / name).write_text(yaml_text( # language=yaml """ tosca_definitions_version: tosca_simple_yaml_1_3 {}: my.custom.Type: derived_from: {} """.format(*typ) )) doc = tosca.load(tmp_path, name) assert doc.dig(typ[0], "my.custom.Type") is not None def 
test_loads_template_part(self, tmp_path, yaml_text): name = pathlib.PurePath("template.yaml") (tmp_path / name).write_text(yaml_text( # language=yaml """ tosca_definitions_version: tosca_simple_yaml_1_3 topology_template: node_templates: my_node: type: tosca.nodes.SoftwareComponent """ )) doc = tosca.load(tmp_path, name) assert doc.topology_template.node_templates["my_node"] is not None def test_load_from_csar_subfolder(self, tmp_path, yaml_text): name = pathlib.PurePath("sub/folder/file.yaml") (tmp_path / name).parent.mkdir(parents=True) (tmp_path / name).write_text(yaml_text( # language=yaml """ tosca_definitions_version: tosca_simple_yaml_1_3 imports: - imp.yaml """ )) (tmp_path / "sub/folder/imp.yaml").write_text(yaml_text( # language=yaml """ tosca_definitions_version: tosca_simple_yaml_1_3 data_types: my_type: derived_from: tosca.datatypes.xml """ )) doc = tosca.load(tmp_path, name) assert doc.data_types["my_type"] def test_duplicate_import(self, tmp_path, yaml_text): name = pathlib.PurePath("template.yaml") (tmp_path / name).write_text(yaml_text( # language=yaml """ tosca_definitions_version: tosca_simple_yaml_1_3 imports: [ template.yaml ] """ )) tosca.load(tmp_path, name) def test_imports_from_multiple_levels(self, tmp_path, yaml_text): name = pathlib.PurePath("template.yaml") (tmp_path / name).write_text(yaml_text( # language=yaml """ tosca_definitions_version: tosca_simple_yaml_1_3 imports: - subfolder/a.yaml - subfolder/b.yaml """ )) (tmp_path / "subfolder").mkdir() (tmp_path / "subfolder/a.yaml").write_text(yaml_text( # language=yaml """ tosca_definitions_version: tosca_simple_yaml_1_3 imports: - b.yaml """ )) (tmp_path / "subfolder/b.yaml").write_text(yaml_text( # language=yaml """ tosca_definitions_version: tosca_simple_yaml_1_3 data_types: my_type: derived_from: tosca.datatypes.xml """ )) tosca.load(tmp_path, name) def test_merge_topology_template(self, tmp_path, yaml_text): name = pathlib.PurePath("template.yaml") (tmp_path / 
name).write_text(yaml_text( # language=yaml """ tosca_definitions_version: tosca_simple_yaml_1_3 imports: - merge.yaml topology_template: inputs: some-input: type: string node_templates: my_node: type: tosca.nodes.SoftwareComponent """ )) (tmp_path / "merge.yaml").write_text(yaml_text( # language=yaml """ tosca_definitions_version: tosca_simple_yaml_1_3 topology_template: inputs: other-input: type: string node_templates: other_node: type: tosca.nodes.SoftwareComponent """ )) tosca.load(tmp_path, name) def test_merge_duplicate_node_templates_invalid(self, tmp_path, yaml_text): name = pathlib.PurePath("template.yaml") (tmp_path / name).write_text(yaml_text( # language=yaml """ tosca_definitions_version: tosca_simple_yaml_1_3 imports: - merge1.yaml - merge2.yaml topology_template: node_templates: my_node: type: tosca.nodes.SoftwareComponent """ )) (tmp_path / "merge1.yaml").write_text(yaml_text( # language=yaml """ tosca_definitions_version: tosca_simple_yaml_1_3 topology_template: node_templates: other_node: type: tosca.nodes.SoftwareComponent """ )) (tmp_path / "merge2.yaml").write_text(yaml_text( # language=yaml """ tosca_definitions_version: tosca_simple_yaml_1_3 topology_template: node_templates: other_node: type: tosca.nodes.SoftwareComponent """ )) with pytest.raises(ParseError): tosca.load(tmp_path, name) class TestExecute: def test_undefined_required_properties1(self, tmp_path, yaml_text): name = pathlib.PurePath("template.yaml") (tmp_path / name).write_text(yaml_text( # language=yaml """ tosca_definitions_version: tosca_simple_yaml_1_3 node_types: my_node_type: derived_from: tosca.nodes.Root attributes: test_attribute: type: boolean properties: test_property1: type: integer default: 42 required: false test_property2: type: float default: 42.0 required: true test_property3: type: string required: true topology_template: node_templates: my_node_template: type: my_node_type """ )) storage = Storage(tmp_path / pathlib.Path(".opera")) 
storage.write("template.yaml", "root_file") ast = tosca.load(tmp_path, name) with pytest.raises(ParseError, match="Missing a required property: test_property3"): ast.get_template({}) def test_undefined_required_properties2(self, tmp_path, yaml_text): name = pathlib.PurePath("template.yaml") (tmp_path / name).write_text(yaml_text( # language=yaml """ tosca_definitions_version: tosca_simple_yaml_1_3 node_types: my_node_type: derived_from: tosca.nodes.Root properties: test_prop1: type: integer required: false test_prop2: type: float default: 42.0 test_prop3: type: string topology_template: node_templates: my_node_template: type: my_node_type """ )) storage = Storage(tmp_path / pathlib.Path(".opera")) storage.write("template.yaml", "root_file") ast = tosca.load(tmp_path, name) with pytest.raises(ParseError, match="Missing a required property: test_prop3"): ast.get_template({}) def test_undefined_required_properties3(self, tmp_path, yaml_text): name = pathlib.PurePath("template.yaml") (tmp_path / name).write_text(yaml_text( # language=yaml """ tosca_definitions_version: tosca_simple_yaml_1_3 node_types: my_node_type: derived_from: tosca.nodes.Root properties: property1: type: integer property2: type: float property3: type: string topology_template: node_templates: my_node_template: type: my_node_type properties: property1: 42 property2: 42.0 """ )) storage = Storage(tmp_path / pathlib.Path(".opera")) storage.write("template.yaml", "root_file") ast = tosca.load(tmp_path, name) with pytest.raises(ParseError, match="Missing a required property: property3"): ast.get_template({}) def test_undeclared_requirements(self, tmp_path, yaml_text): name = pathlib.PurePath("template.yaml") (tmp_path / name).write_text(yaml_text( # language=yaml """ tosca_definitions_version: tosca_simple_yaml_1_3 topology_template: node_templates: node_1: type: tosca.nodes.SoftwareComponent node_2: type: tosca.nodes.SoftwareComponent requirements: - dependency: node_1 node_3: type: 
tosca.nodes.SoftwareComponent requirements: - dependency_not_defined1: node_1 """ )) storage = Storage(tmp_path / pathlib.Path(".opera")) storage.write("template.yaml", "root_file") ast = tosca.load(tmp_path, name) with pytest.raises(ParseError, match="Undeclared requirements: dependency_not_defined1"): ast.get_template({})
34.652174
97
0.542116
1,182
11,955
5.168359
0.114213
0.061876
0.055819
0.052382
0.802423
0.768538
0.725323
0.707317
0.671468
0.656409
0
0.010782
0.363864
11,955
344
98
34.752907
0.792505
0.019824
0
0.634328
0
0
0.196354
0.05438
0
0
0
0
0.037313
1
0.104478
false
0
0.052239
0
0.171642
0
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
81ebfe59202563857aae9394ebfbec9e50e030b9
179
py
Python
key.py
crlna08/Project_2_La_restaurants
b9ebb0e14e6a682116b861c6de6eeb0fd68fdb23
[ "Apache-2.0" ]
null
null
null
key.py
crlna08/Project_2_La_restaurants
b9ebb0e14e6a682116b861c6de6eeb0fd68fdb23
[ "Apache-2.0" ]
null
null
null
key.py
crlna08/Project_2_La_restaurants
b9ebb0e14e6a682116b861c6de6eeb0fd68fdb23
[ "Apache-2.0" ]
4
2021-03-21T21:21:52.000Z
2021-03-24T04:48:47.000Z
somthing = 'F5fjDxitafeZwPdwsmBL-Q' key_api = 'MiT75-jn5D_6MENzkehT5EReeVdYhp_86hQKYQp-3o10wIVXAbOakIT6khg8Y-_PBiy2fKPuAQFp9x7W80Ubn3xdiS8eCfy-nc7qtLNvs6oyAzdU2CsCRHEniyJUYHYx'
35.8
140
0.893855
12
179
13
1
0
0
0
0
0
0
0
0
0
0
0.134503
0.044693
179
5
140
35.8
0.777778
0
0
0
0
0
0.837989
0.837989
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
c31581c462a2841b2518e31e3af6fc2ac301a6e4
67
py
Python
mysql_clickhouse_replication/__init__.py
NoSkillGuy/mysql-clickhouse-replication
5970cd2d7bfa916c310dbb95c1548a380ef68181
[ "MIT" ]
3
2019-04-11T07:02:36.000Z
2019-07-01T12:45:15.000Z
mysql_clickhouse_replication/__init__.py
NoSkillGuy/mysql-clickhouse-replication
5970cd2d7bfa916c310dbb95c1548a380ef68181
[ "MIT" ]
null
null
null
mysql_clickhouse_replication/__init__.py
NoSkillGuy/mysql-clickhouse-replication
5970cd2d7bfa916c310dbb95c1548a380ef68181
[ "MIT" ]
5
2019-06-13T16:50:52.000Z
2020-01-14T13:06:06.000Z
from .mysql_clickhouse_replication import Mysql2clickhousesql, main
67
67
0.910448
7
67
8.428571
1
0
0
0
0
0
0
0
0
0
0
0.015873
0.059701
67
1
67
67
0.920635
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
c37dc82e888de93cc4ebf26350410095213c1be1
7,372
py
Python
controlm_client/__init__.py
apsinha-equinix/controlm-client
f24e0f935c82306074f4e4025cf62c217348dc3f
[ "MIT" ]
1
2021-12-02T08:49:25.000Z
2021-12-02T08:49:25.000Z
controlm_client/__init__.py
apsinha-equinix/controlm-client
f24e0f935c82306074f4e4025cf62c217348dc3f
[ "MIT" ]
null
null
null
controlm_client/__init__.py
apsinha-equinix/controlm-client
f24e0f935c82306074f4e4025cf62c217348dc3f
[ "MIT" ]
null
null
null
# coding: utf-8 # flake8: noqa """ Control-M Services Provides access to BMC Control-M Services # noqa: E501 OpenAPI spec version: 9.18.3 Contact: support@bmc.com Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import # import apis into sdk package from controlm_client.api.build_api import BuildApi from controlm_client.api.config_api import ConfigApi from controlm_client.api.deploy_api import DeployApi from controlm_client.api.provision_api import ProvisionApi from controlm_client.api.reporting_api import ReportingApi from controlm_client.api.run_api import RunApi from controlm_client.api.samples_api import SamplesApi from controlm_client.api.session_api import SessionApi # import ApiClient from controlm_client.api_client import ApiClient from controlm_client.configuration import Configuration # import models into sdk package from controlm_client.models.add_agent_params import AddAgentParams from controlm_client.models.add_remote_host_params import AddRemoteHostParams from controlm_client.models.add_remove_success_data import AddRemoveSuccessData from controlm_client.models.add_server_params import AddServerParams from controlm_client.models.agent_details import AgentDetails from controlm_client.models.agent_details_list import AgentDetailsList from controlm_client.models.agent_in_group_params import AgentInGroupParams from controlm_client.models.agents_in_group_list_result import AgentsInGroupListResult from controlm_client.models.agents_in_group_success_data import AgentsInGroupSuccessData from controlm_client.models.allowed_job_actions import AllowedJobActions from controlm_client.models.allowed_jobs import AllowedJobs from controlm_client.models.archive_jobs_list import ArchiveJobsList from controlm_client.models.archive_rules_list import ArchiveRulesList from controlm_client.models.associate_data import AssociateData from controlm_client.models.authentication_data import AuthenticationData from 
controlm_client.models.client_access_privilege_category import ClientAccessPrivilegeCategory from controlm_client.models.configuration_manager_privilege_category import ConfigurationManagerPrivilegeCategory from controlm_client.models.control_m_authentication_data import ControlMAuthenticationData from controlm_client.models.ctm_details import CtmDetails from controlm_client.models.ctm_details_list import CtmDetailsList from controlm_client.models.deployment_file_error import DeploymentFileError from controlm_client.models.deployment_file_results import DeploymentFileResults from controlm_client.models.error_data import ErrorData from controlm_client.models.error_list import ErrorList from controlm_client.models.event import Event from controlm_client.models.event_param import EventParam from controlm_client.models.event_set import EventSet from controlm_client.models.external_provider_authentication_data import ExternalProviderAuthenticationData from controlm_client.models.folder_auth import FolderAuth from controlm_client.models.hostgroup_agent_participation import HostgroupAgentParticipation from controlm_client.models.job import Job from controlm_client.models.job_level_auth import JobLevelAuth from controlm_client.models.job_run_status import JobRunStatus from controlm_client.models.job_status_result import JobStatusResult from controlm_client.models.key_value import KeyValue from controlm_client.models.key_value_list_result import KeyValueListResult from controlm_client.models.login_credentials import LoginCredentials from controlm_client.models.login_result import LoginResult from controlm_client.models.matching import Matching from controlm_client.models.monitoring_privilege_category import MonitoringPrivilegeCategory from controlm_client.models.new_sample import NewSample from controlm_client.models.order_folder_parameters import OrderFolderParameters from controlm_client.models.passwords_object import PasswordsObject from 
controlm_client.models.ping_agent_params import PingAgentParams from controlm_client.models.planning_privilege_category import PlanningPrivilegeCategory from controlm_client.models.privilege_name import PrivilegeName from controlm_client.models.privilege_name_controlm import PrivilegeNameControlm from controlm_client.models.privileges import Privileges from controlm_client.models.provision_advance_parameters import ProvisionAdvanceParameters from controlm_client.models.report_result import ReportResult from controlm_client.models.resource_max import ResourceMax from controlm_client.models.resource_obj import ResourceObj from controlm_client.models.resource_param import ResourceParam from controlm_client.models.resource_set import ResourceSet from controlm_client.models.role_data import RoleData from controlm_client.models.role_header import RoleHeader from controlm_client.models.role_header_list import RoleHeaderList from controlm_client.models.rule import Rule from controlm_client.models.rule_criteria import RuleCriteria from controlm_client.models.run_as_user_data import RunAsUserData from controlm_client.models.run_as_user_details_data import RunAsUserDetailsData from controlm_client.models.run_as_user_key_data import RunAsUserKeyData from controlm_client.models.run_as_users_list import RunAsUsersList from controlm_client.models.run_report import RunReport from controlm_client.models.run_report_info import RunReportInfo from controlm_client.models.run_result import RunResult from controlm_client.models.runas_user_auth import RunasUserAuth from controlm_client.models.sample import Sample from controlm_client.models.samples_load_data import SamplesLoadData from controlm_client.models.search_params import SearchParams from controlm_client.models.secret_key_value import SecretKeyValue from controlm_client.models.secret_value import SecretValue from controlm_client.models.service_auth import ServiceAuth from controlm_client.models.service_auth_action import 
ServiceAuthAction from controlm_client.models.string_list_result import StringListResult from controlm_client.models.success_data import SuccessData from controlm_client.models.term_group import TermGroup from controlm_client.models.tools_privilege_category import ToolsPrivilegeCategory from controlm_client.models.upgrade_agent_info import UpgradeAgentInfo from controlm_client.models.upgrade_agent_info_list import UpgradeAgentInfoList from controlm_client.models.upgrade_info import UpgradeInfo from controlm_client.models.upgrade_record import UpgradeRecord from controlm_client.models.upgrade_record_list import UpgradeRecordList from controlm_client.models.upgrade_request import UpgradeRequest from controlm_client.models.upgrade_response import UpgradeResponse from controlm_client.models.user_data import UserData from controlm_client.models.user_header import UserHeader from controlm_client.models.user_password import UserPassword from controlm_client.models.value import Value from controlm_client.models.viewpoint_manager_privilege_category import ViewpointManagerPrivilegeCategory from controlm_client.models.workload_policy import WorkloadPolicy from controlm_client.models.workload_policy_list import WorkloadPolicyList from controlm_client.models.workload_policy_state import WorkloadPolicyState from controlm_client.models.workload_policy_state_list import WorkloadPolicyStateList
58.507937
113
0.900705
945
7,372
6.736508
0.248677
0.196041
0.294062
0.354383
0.351241
0.179076
0.053252
0
0
0
0
0.001308
0.066739
7,372
125
114
58.976
0.923983
0.040695
0
0
1
0
0
0
0
0
0
0
0
1
0
true
0.019048
1
0
1
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
5ef2e5c2eedd0fb3ba87c64f6c5ee159cea58d39
204
py
Python
src/voting/utils/user_model.py
PiDelport/django-voting
6c772a6985ec9eb293520dc996fed72714fa181e
[ "BSD-3-Clause" ]
28
2019-05-19T03:34:19.000Z
2022-03-29T15:13:18.000Z
src/voting/utils/user_model.py
jazzband/django-voting
6c772a6985ec9eb293520dc996fed72714fa181e
[ "BSD-3-Clause" ]
19
2020-02-22T19:13:44.000Z
2021-12-28T22:43:49.000Z
src/voting/utils/user_model.py
PiDelport/django-voting
6c772a6985ec9eb293520dc996fed72714fa181e
[ "BSD-3-Clause" ]
6
2019-07-10T21:26:43.000Z
2021-11-27T14:08:33.000Z
from django.conf import settings def get_user_model_name(): """ Returns the app_label.object_name string for the user model. """ return getattr(settings, "AUTH_USER_MODEL", "auth.User")
22.666667
64
0.710784
29
204
4.758621
0.689655
0.195652
0
0
0
0
0
0
0
0
0
0
0.186275
204
8
65
25.5
0.831325
0.294118
0
0
0
0
0.1875
0
0
0
0
0
0
1
0.333333
true
0
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
1
0
0
0
0
5
5ef507363f57dd2d9e6dcbaf3a47b6f667752ae3
64
py
Python
src/spaceone/notification/connector/__init__.py
spaceone-dev/plugin-jira-noti-protocol
a9472f3f553aa5a5c0817efbb0e0e508768dad38
[ "Apache-2.0" ]
1
2021-12-18T02:59:03.000Z
2021-12-18T02:59:03.000Z
src/spaceone/notification/connector/__init__.py
choonho/plugin-jira-noti-protocol
c78ebb081dabecb827a3be50847d5bf345717b13
[ "Apache-2.0" ]
null
null
null
src/spaceone/notification/connector/__init__.py
choonho/plugin-jira-noti-protocol
c78ebb081dabecb827a3be50847d5bf345717b13
[ "Apache-2.0" ]
2
2021-12-18T02:59:06.000Z
2022-03-17T08:47:43.000Z
from spaceone.notification.connector.jira import JiraConnector
21.333333
62
0.875
7
64
8
1
0
0
0
0
0
0
0
0
0
0
0
0.078125
64
2
63
32
0.949153
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
6f009238099ab17882868ed2692945c3724f9a2b
6,302
py
Python
RecoBTag/PerformanceDB/python/measure/Pool_btagMuJetsWpNoTtbar.py
ckamtsikis/cmssw
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
[ "Apache-2.0" ]
852
2015-01-11T21:03:51.000Z
2022-03-25T21:14:00.000Z
RecoBTag/PerformanceDB/python/measure/Pool_btagMuJetsWpNoTtbar.py
ckamtsikis/cmssw
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
[ "Apache-2.0" ]
30,371
2015-01-02T00:14:40.000Z
2022-03-31T23:26:05.000Z
RecoBTag/PerformanceDB/python/measure/Pool_btagMuJetsWpNoTtbar.py
ckamtsikis/cmssw
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
[ "Apache-2.0" ]
3,240
2015-01-02T05:53:18.000Z
2022-03-31T17:24:21.000Z
import FWCore.ParameterSet.Config as cms from CondCore.DBCommon.CondDBCommon_cfi import * PoolDBESSourcebtagMuJetsWpNoTtbar = cms.ESSource("PoolDBESSource", CondDBCommon, toGet = cms.VPSet( # # working points # cms.PSet( record = cms.string('PerformancePayloadRecord'), tag = cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARCSVL_v10_offline'), label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVL_T') ), cms.PSet( record = cms.string('PerformanceWPRecord'), tag = cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARCSVL_v10_offline'), label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVL_WP') ), cms.PSet( record = cms.string('PerformancePayloadRecord'), tag = cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARCSVM_v10_offline'), label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVM_T') ), cms.PSet( record = cms.string('PerformanceWPRecord'), tag = cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARCSVM_v10_offline'), label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVM_WP') ), cms.PSet( record = cms.string('PerformancePayloadRecord'), tag = cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARCSVT_v10_offline'), label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVT_T') ), cms.PSet( record = cms.string('PerformanceWPRecord'), tag = cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARCSVT_v10_offline'), label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVT_WP') ), cms.PSet( record = cms.string('PerformancePayloadRecord'), tag = cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARCSVV1L_v10_offline'), label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVV1L_T') ), cms.PSet( record = cms.string('PerformanceWPRecord'), tag = cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARCSVV1L_v10_offline'), label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVV1L_WP') ), cms.PSet( record = cms.string('PerformancePayloadRecord'), tag = 
cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARCSVV1M_v10_offline'), label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVV1M_T') ), cms.PSet( record = cms.string('PerformanceWPRecord'), tag = cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARCSVV1M_v10_offline'), label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVV1M_WP') ), cms.PSet( record = cms.string('PerformancePayloadRecord'), tag = cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARCSVV1T_v10_offline'), label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVV1T_T') ), cms.PSet( record = cms.string('PerformanceWPRecord'), tag = cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARCSVV1T_v10_offline'), label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVV1T_WP') ), cms.PSet( record = cms.string('PerformancePayloadRecord'), tag = cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARCSVSLV1L_v10_offline'), label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVSLV1L_T') ), cms.PSet( record = cms.string('PerformanceWPRecord'), tag = cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARCSVSLV1L_v10_offline'), label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVSLV1L_WP') ), cms.PSet( record = cms.string('PerformancePayloadRecord'), tag = cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARCSVSLV1M_v10_offline'), label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVSLV1M_T') ), cms.PSet( record = cms.string('PerformanceWPRecord'), tag = cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARCSVSLV1M_v10_offline'), label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVSLV1M_WP') ), cms.PSet( record = cms.string('PerformancePayloadRecord'), tag = cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARCSVSLV1T_v10_offline'), label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVSLV1T_T') ), cms.PSet( record = cms.string('PerformanceWPRecord'), tag = cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARCSVSLV1T_v10_offline'), label = 
cms.untracked.string('MUJETSWPBTAGNOTTBARCSVSLV1T_WP') ), cms.PSet( record = cms.string('PerformancePayloadRecord'), tag = cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARJPL_v10_offline'), label = cms.untracked.string('MUJETSWPBTAGNOTTBARJPL_T') ), cms.PSet( record = cms.string('PerformanceWPRecord'), tag = cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARJPL_v10_offline'), label = cms.untracked.string('MUJETSWPBTAGNOTTBARJPL_WP') ), cms.PSet( record = cms.string('PerformancePayloadRecord'), tag = cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARJPM_v10_offline'), label = cms.untracked.string('MUJETSWPBTAGNOTTBARJPM_T') ), cms.PSet( record = cms.string('PerformanceWPRecord'), tag = cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARJPM_v10_offline'), label = cms.untracked.string('MUJETSWPBTAGNOTTBARJPM_WP') ), cms.PSet( record = cms.string('PerformancePayloadRecord'), tag = cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARJPT_v10_offline'), label = cms.untracked.string('MUJETSWPBTAGNOTTBARJPT_T') ), cms.PSet( record = cms.string('PerformanceWPRecord'), tag = cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARJPT_v10_offline'), label = cms.untracked.string('MUJETSWPBTAGNOTTBARJPT_WP') ), cms.PSet( record = cms.string('PerformancePayloadRecord'), tag = cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARTCHPT_v10_offline'), label = cms.untracked.string('MUJETSWPBTAGNOTTBARTCHPT_T') ), cms.PSet( record = cms.string('PerformanceWPRecord'), tag = cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARTCHPT_v10_offline'), label = cms.untracked.string('MUJETSWPBTAGNOTTBARTCHPT_WP') ), )) PoolDBESSourcebtagMuJetsWpNoTtbar.connect = 'frontier://FrontierProd/CMS_COND_PAT_000'
44.06993
101
0.748969
524
6,302
8.801527
0.097328
0.101474
0.073287
0.090199
0.946444
0.946444
0.946444
0.946444
0.487207
0.487207
0
0.014635
0.143447
6,302
142
102
44.380282
0.839755
0.002222
0
0.569343
0
0
0.484246
0.442712
0
0
0
0
0
1
0
false
0
0.014599
0
0.014599
0
0
0
0
null
0
0
0
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
6f299bf18ac397b7c07422fd33665100cdc33ac7
115
py
Python
order/admin.py
Farzan-ul-haq/POS
607418f90a13b66118fdab47cc07aa6518b7cca7
[ "MIT" ]
null
null
null
order/admin.py
Farzan-ul-haq/POS
607418f90a13b66118fdab47cc07aa6518b7cca7
[ "MIT" ]
null
null
null
order/admin.py
Farzan-ul-haq/POS
607418f90a13b66118fdab47cc07aa6518b7cca7
[ "MIT" ]
null
null
null
from django.contrib import admin from .models import Order # Register your models here. admin.site.register(Order)
23
32
0.808696
17
115
5.470588
0.647059
0
0
0
0
0
0
0
0
0
0
0
0.121739
115
5
33
23
0.920792
0.226087
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
6f508d665f8d8851e92ae87baddb4863f6b9ceb2
98
py
Python
btrack/__init__.py
dstansby/BayesianTracker
82d1d69cddb0b5e27ba50bf7a1c3c6bbdab99aef
[ "MIT" ]
196
2017-11-27T03:05:19.000Z
2022-03-23T20:04:28.000Z
btrack/__init__.py
dstansby/BayesianTracker
82d1d69cddb0b5e27ba50bf7a1c3c6bbdab99aef
[ "MIT" ]
78
2018-07-14T14:30:02.000Z
2022-03-30T15:11:02.000Z
btrack/__init__.py
dstansby/BayesianTracker
82d1d69cddb0b5e27ba50bf7a1c3c6bbdab99aef
[ "MIT" ]
45
2017-11-27T03:05:20.000Z
2022-03-15T05:57:18.000Z
# __all__ = ['core','utils','constants','render'] from .core import BayesianTracker, __version__
24.5
49
0.72449
10
98
6.3
0.9
0
0
0
0
0
0
0
0
0
0
0
0.102041
98
3
50
32.666667
0.715909
0.479592
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
4893984c75f31e98130f067a48dc00ca36667e67
82
py
Python
src/agents/__init__.py
dawsonbooth/pong-ai
d7451aa0e0ac1a08d3b143b9d2df9bfc86e4877a
[ "MIT" ]
null
null
null
src/agents/__init__.py
dawsonbooth/pong-ai
d7451aa0e0ac1a08d3b143b9d2df9bfc86e4877a
[ "MIT" ]
null
null
null
src/agents/__init__.py
dawsonbooth/pong-ai
d7451aa0e0ac1a08d3b143b9d2df9bfc86e4877a
[ "MIT" ]
null
null
null
from . import ai as AI from . import player as Player __all__ = ["Player", "AI"]
16.4
30
0.670732
13
82
3.923077
0.461538
0.392157
0
0
0
0
0
0
0
0
0
0
0.207317
82
4
31
20.5
0.784615
0
0
0
0
0
0.097561
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
48b68c3e1a4fea69c6306a403787a3fa34bd68be
23,413
py
Python
agent/src/tests/acl_tests.py
gonicus/clacks
da579f0acc4e48cf2e9451417ac6792282cf7ab6
[ "ZPL-2.1" ]
2
2015-01-26T07:15:19.000Z
2015-11-09T13:42:11.000Z
agent/src/tests/acl_tests.py
gonicus/clacks
da579f0acc4e48cf2e9451417ac6792282cf7ab6
[ "ZPL-2.1" ]
null
null
null
agent/src/tests/acl_tests.py
gonicus/clacks
da579f0acc4e48cf2e9451417ac6792282cf7ab6
[ "ZPL-2.1" ]
null
null
null
# This file is part of the clacks framework. # # http://clacks-project.org # # Copyright: # (C) 2010-2012 GONICUS GmbH, Germany, http://www.gonicus.de # # License: # GPL-2: http://www.gnu.org/licenses/gpl-2.0.html # # See the LICENSE file in the project's top-level directory for details. import unittest import os from clacks.agent.acl import ACL, ACLSet, ACLRole, ACLRoleEntry, ACLResolver, ACLException from clacks.common import Environment Environment.reset() Environment.config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "test.conf") Environment.noargs = True class TestACLResolver(unittest.TestCase): env = None ldap_base = None def setUp(self): """ Stuff to be run before every test """ self.env = Environment.getInstance() self.resolver = ACLResolver() self.resolver.clear() self.ldap_base = self.resolver.base def test_simple_exported_command(self): # Ensure that we've got the right permissions to perform this tests. acls = ACLSet() acl = ACL(scope=ACL.SUB) acl.add_action('%s.acl' % self.env.domain, 'rw') acl.set_members(['acl_tester']) acls.add(acl) self.resolver.add_acl_set(acls) # ------------- # Create first role with some acls self.resolver.addACLRole('acl_tester', 'rolle1') self.resolver.addACLToRole('acl_tester', 'rolle1', 0, [{'topic': 'com.wurstpelle.de', 'acls': 'rwcds'}], 'sub') # Create another role which uses the above defined role self.resolver.addACLRole('acl_tester', 'rolle2') self.resolver.addACLToRole('acl_tester', 'rolle2', 0, None, None, 'rolle1') # Now use the role 'rolle1' and check if it is resolved correclty lid = self.resolver.addACL('acl_tester', 'dc=gonicus,dc=de', 0, ['peter'], None, None, 'rolle2') self.assertTrue(self.resolver.check('peter', 'com.wurstpelle.de', 'r', {}, 'dc=1,dc=gonicus,dc=de'), "Resolving acl-roles using the exported clacks.agent commands does not work! 
The user should be able to read, but he cannot!") # Set the currently added acl-rule to a non-role based acl and defined some actions self.resolver.updateACL('acl_tester', lid, members=['peter', 'cajus'], actions=[{'topic': 'com.*', 'acls': 'rwcds'}], scope='sub') self.assertTrue(self.resolver.check('peter', 'com.wurstpelle.de', 'r', {}, 'dc=1,dc=gonicus,dc=de'), "Resolving acl-roles using the exported clacks.agent commands does not work! The user should be able to read, but he cannot!") self.resolver.updateACL('acl_tester', lid, actions=[{'topic': 'com.nope', 'acls': 'rwcds'}]) self.assertFalse(self.resolver.check('peter', 'com.wurstpelle.de', 'r', {}, 'dc=1,dc=gonicus,dc=de'), "Resolving acl-roles using the exported clacks.agent commands does not work! The user should not be able to read, but he can!") # Drop the actions and fall back to use a role. self.resolver.updateACL('acl_tester', lid, rolename='rolle2') self.assertTrue(self.resolver.check('peter', 'com.wurstpelle.de', 'r', {}, 'dc=1,dc=gonicus,dc=de'), "Resolving acl-roles using the exported clacks.agent commands does not work! The user should be able to read, but he cannot!") # ----------------- # Now update the role-acl 1 to use another role. self.resolver.addACLRole('acl_tester', 'dummy') self.resolver.updateACLRole('acl_tester', 2, use_role='dummy') self.assertFalse(self.resolver.check('peter', 'com.wurstpelle.de', 'r', {}, 'dc=1,dc=gonicus,dc=de'), "Resolving acl-roles using the exported clacks.agent commands does not work! The user should not be able to read, but he can!") # Now switch back to an action-based acl. self.resolver.updateACLRole('acl_tester', 2, actions=[{'topic': 'com.wurstpelle.de', 'acls': 'rwcds'}], scope='sub') self.assertTrue(self.resolver.check('peter', 'com.wurstpelle.de', 'r', {}, 'dc=1,dc=gonicus,dc=de'), "Resolving acl-roles using the exported clacks.agent commands does not work! 
The user should be able to read, but he cannot!") #------------------ # Now remove the role-acl with id 1 from the resolver. self.resolver.removeRoleACL('acl_tester', 2) self.assertFalse(self.resolver.check('peter', 'com.wurstpelle.de', 'r', {}, 'dc=1,dc=gonicus,dc=de'), "Resolving acl-roles using the exported clacks.agent commands does not work! The user should not be able to read, but he can!") # ----------------- # Try to remove role 'roll2' self.assertRaises(ACLException, self.resolver.removeRole, 'acl_tester', 'rolle2') def test_role_removal(self): """ This test checks if an ACLRole objects can be removed! """ # Create an ACLRole role = ACLRole('role1') acl = ACLRoleEntry(scope=ACL.ONE) acl.add_action('org.clacks.factory', 'rwx') role.add(acl) self.resolver.add_acl_role(role) # Use the recently created role. base = self.ldap_base aclset = ACLSet(base) acl = ACL(role='role1') acl.set_members([u'tester1']) aclset.add(acl) self.resolver.add_acl_set(aclset) # Check the permissions to be sure that they are set correctly self.assertTrue(self.resolver.check('tester1', 'org.clacks.factory', 'r', base=base), "ACLRoles are not resolved correctly! The user should be able to read, but he cannot!") self.assertRaises(ACLException, self.resolver.remove_role, 'role1') self.assertTrue(self.resolver.check('tester1', 'org.clacks.factory', 'r', base=base), "ACLRoles are not resolved correctly! The user should be able to read, but he cannot!") self.resolver.remove_aclset_by_base(base) self.assertFalse(self.resolver.check('tester1', 'org.clacks.factory', 'r', base=base), "Role removal failed! The user should not be able to read, but he can!") self.assertTrue(self.resolver.remove_role('role1'), "Role removal failed! The expected return code was True!") self.assertTrue(len(self.resolver.list_roles()) == 0, "Role removal failed! 
The role still exists despite removal!") def test_remove_acls_for_user(self): # Create acls with scope SUB aclset = ACLSet() acl = ACL(scope=ACL.SUB) acl.set_members([u'tester1', u'tester2']) acl.add_action('org.clacks.factory', 'rwx') aclset.add(acl) self.resolver.add_acl_set(aclset) # Now remove all ACLs for user 'tester1' but keep those for 'tester2' self.resolver.remove_acls_for_user('tester1') # No check the permissions 'tester1' should not be able to read anymore, where 'tester2' should. self.assertFalse(self.resolver.check('tester1', 'org.clacks.factory', 'r'), "Removing ACLs for a specific user does not work correctly! The user should not be able to read, but he can!") self.assertTrue(self.resolver.check('tester2', 'org.clacks.factory', 'r'), "Removing ACLs for a specific user does not work correctly! The user should still be able to read, but he cannot!") def test_role_endless_recursion(self): """ A test which ensures that roles do not refer to each other, creating an endless-recursion. role1 -> role2 -> role1 """ # Create an ACLRole role1 = ACLRole('role1') role2 = ACLRole('role2') role3 = ACLRole('role3') self.resolver.add_acl_role(role1) self.resolver.add_acl_role(role2) self.resolver.add_acl_role(role3) acl1 = ACLRoleEntry(role='role2') acl2 = ACLRoleEntry(role='role3') acl3 = ACLRoleEntry(role='role1') role1.add(acl1) role2.add(acl2) role3.add(acl3) # Use the recently created role. base = self.ldap_base aclset = ACLSet(base) acl = ACL(role='role1') acl.set_members([u'tester1']) aclset.add(acl) self.resolver.add_acl_set(aclset) # Check the permissions to be sure that they are set correctly self.assertRaises(Exception, self.resolver.check, 'tester1', 'org.clacks.factory', 'r', base=base) def test_user_wildcards(self): """ checks if wildcards/regular expressions can be used for ACL member names i.e. 
to match all users starting with 'clacks_' and ending with '_test' acl.set_members([u'^clacks_.*_test$']) """ # Create acls with wildcard # in actions base = self.ldap_base aclset = ACLSet(base) acl = ACL(scope=ACL.ONE) acl.set_members([u'^clacks_.*_test$']) acl.add_action('org.clacks.factory', 'rwx') acl.set_priority(100) aclset.add(acl) self.resolver.add_acl_set(aclset) # Check the permissions to be sure that they are set correctly self.assertTrue(self.resolver.check('clacks_user_test', 'org.clacks.factory', 'r', base=base), "Wildcards in ACL members are not resolved correctly! The user was not able to read, but he should!") # Check the permissions to be sure that they are set correctly self.assertTrue(self.resolver.check('clacks__test', 'org.clacks.factory', 'r', base=base), "Wildcards in ACL members are not resolved correctly! The user was not able to read, but he should!") # Check the permissions to be sure that they are set correctly self.assertFalse(self.resolver.check('clacks_test_testWrong', 'org.clacks.factory', 'r', base=base), "Wildcards in ACL members are not resolved correctly! The was able to read, but he shouldn't!") def test_action_wildcards(self): """ This test checks if ACLs containing wildcard actions are processed correctly. e.g. To match all actions for 'com.' that ends with '.fatcory' acl.add_action('com.#.factory', 'rwx') """ # Create acls with wildcard # in actions base = self.ldap_base aclset = ACLSet(base) acl = ACL(scope=ACL.ONE) acl.set_members([u'tester1']) acl.add_action('com.#.factory', 'rwx') acl.set_priority(100) aclset.add(acl) self.resolver.add_acl_set(aclset) # Check the permissions to be sure that they are set correctly self.assertTrue(self.resolver.check('tester1', 'org.clacks.factory', 'r', base=base), "Wildcards (#) in actions are not resolved correctly! 
The user should be able to read, but he cannot!") self.assertTrue(self.resolver.check('tester1', 'com.gonicus.factory', 'r', base=base), "Wildcards (#) in actions are not resolved correctly! The user should be able to read, but he cannot!") self.assertFalse(self.resolver.check('tester1', 'org.clacks.factory', 'r', base=base), "Wildcards (#) in actions are not resolved correctly! The user should not be able to read, but he can!") # Clear created ACL defintions self.resolver.clear() # Create acls with wildcard * in actions base = self.ldap_base aclset = ACLSet(base) acl = ACL(scope=ACL.ONE) acl.set_members([u'tester1']) acl.add_action('com.*.factory', 'rwx') acl.set_priority(100) aclset.add(acl) self.resolver.add_acl_set(aclset) # Check the permissions to be sure that they are set correctly self.assertTrue(self.resolver.check('tester1', 'org.clacks.factory', 'r', base=base), "Wildcards (*) in actions are not resolved correctly! The user should be able to read, but he cannot!") self.assertTrue(self.resolver.check('tester1', 'com.gonicus.factory', 'r', base=base), "Wildcards (*) in actions are not resolved correctly! The user should be able to read, but he cannot!") self.assertTrue(self.resolver.check('tester1', 'org.clacks.factory', 'r', base=base), "Wildcards (*) in actions are not resolved correctly! The user should be able to read, but he cannot!") def test_roles(self): """ This test checks if ACLRole objects are resolved correctly. """ # Create an ACLRole role = ACLRole('role1') acl = ACLRoleEntry(scope=ACL.ONE) acl.add_action('org.clacks.factory', 'rwx') role.add(acl) self.resolver.add_acl_role(role) # Use the recently created role. base = self.ldap_base aclset = ACLSet(base) acl = ACL(role='role1') acl.set_members([u'tester1']) aclset.add(acl) self.resolver.add_acl_set(aclset) # Check the permissions to be sure that they are set correctly self.assertTrue(self.resolver.check('tester1', 'org.clacks.factory', 'r', base=base), "ACLRoles are not resolved correctly! 
The user should be able to read, but he cannot!") def test_role_recursion(self): """ This test checks if ACLRoles that contain ACLRoles are resolved correctly. e.g. ACLSet -> Acl -> points to role1 role1 -> AclRoleEntry -> points to role 2 role 2 contains the effective acls. """ # Create an ACLRole role1 = ACLRole('role1') acl = ACLRoleEntry(scope=ACL.ONE) acl.add_action('org.clacks.factory', 'rwx') role1.add(acl) self.resolver.add_acl_role(role1) # Create another ACLRole wich refers to first one role2 = ACLRole('role2') acl = ACLRoleEntry(role='role1') role2.add(acl) self.resolver.add_acl_role(role2) # Use the recently created role. base = self.ldap_base aclset = ACLSet(base) acl = ACL(role='role2') acl.set_members([u'tester1']) aclset.add(acl) self.resolver.add_acl_set(aclset) # Check the permissions to be sure that they are set correctly self.assertTrue(self.resolver.check('tester1', 'org.clacks.factory', 'r', base=base), "Stacked ACLRoles are not resolved correctly! The user should be able to read, but he cannot!") def test_acl_priorities(self): # Set up a RESET and a ONE or SUB scoped acl for the same base # and check which wins. # Create acls with scope SUB base = self.ldap_base aclset = ACLSet(base) acl = ACL(scope=ACL.ONE) acl.set_members([u'tester1']) acl.add_action('org.clacks.factory', 'rwx') acl.set_priority(100) aclset.add(acl) self.resolver.add_acl_set(aclset) # Check the permissions to be sure that they are set correctly self.assertTrue(self.resolver.check('tester1', 'org.clacks.factory', 'r', base=base), "Acl priorities are not handled correctly! The user should be able to read, but he cannot!") # Now add the RESET acl acl = ACL(scope=ACL.RESET) acl.set_members([u'tester1']) acl.add_action('org.clacks.factory', 'rwx') acl.set_priority(99) aclset.add(acl) # Check the permissions to be sure that they are set correctly self.assertFalse(self.resolver.check('tester1', 'org.clacks.factory', 'r', base=base), "Acl priorities are not handled correctly! 
The user should not be able to read, but he can!") def test_acls_scope_reset(self): """ This test checks if an ACL entry containing the RESET scope revokes permission correctly. """ # Create acls with scope SUB base = "dc=a," + self.ldap_base aclset = ACLSet(base) acl = ACL(scope=ACL.SUB) acl.set_members([u'tester1']) acl.add_action('org.clacks.factory', 'rwx') aclset.add(acl) self.resolver.add_acl_set(aclset) # Check for acls for the base, should return False base = self.ldap_base self.assertFalse(self.resolver.check('tester1', 'org.clacks.factory', 'r', base=base), "ACL scope RESET is not resolved correclty! The user should not be able to read, but he can!") # Check for acls for the tree we've created acls for. base = "dc=a," + self.ldap_base self.assertTrue(self.resolver.check('tester1', 'org.clacks.factory', 'r', base=base), "ACL scope RESET is not resolved correclty! The user should be able to read, but he cannot!") # Check for acls for one level above the acl definition base = "dc=b,dc=a," + self.ldap_base self.assertTrue(self.resolver.check('tester1', 'org.clacks.factory', 'r', base=base), "ACL scope RESET is not resolved correclty! The user should be able to read, but he cannot!") # Check for acls for two levels above the acl definition base = "dc=c,dc=b,dc=a," + self.ldap_base self.assertTrue(self.resolver.check('tester1', 'org.clacks.factory', 'r', base=base), "ACL scope RESET is not resolved correclty! The user should be able to read, but he cannot!") # ------ # Now add the ACL.RESET # ------ base = "dc=b,dc=a," + self.ldap_base aclset = ACLSet(base) acl = ACL(scope=ACL.RESET) acl.set_members([u'tester1']) acl.add_action('org.clacks.factory', 'rwx') aclset.add(acl) self.resolver.add_acl_set(aclset) # Check for acls for the tree we've created acls for. # Should return True base = "dc=a," + self.ldap_base self.assertTrue(self.resolver.check('tester1', 'org.clacks.factory', 'r', base=base), "ACL scope RESET is not resolved correclty! 
The user should be able to read, but he cannot!") # Check for acls for one level above the acl definition # Should return False base = "dc=b,dc=a," + self.ldap_base self.assertFalse(self.resolver.check('tester1', 'org.clacks.factory', 'r', base=base), "ACL scope RESET is not resolved correclty! The user should not be able to read, but he can!") # Check for acls for two levels above the acl definition # Should return False base = "dc=c,dc=b,dc=a," + self.ldap_base self.assertFalse(self.resolver.check('tester1', 'org.clacks.factory', 'r', base=base), "ACL scope RESET is not resolved correclty! The user should not be able to read, but he can!") def test_acls_scope_sub(self): """ This test checks if permissions with scope SUB are spreed over the subtree correctly. A ACL.SUB scope will effect the complete subtree of the base. (In case that no ACL.RESET is used.) """ # Create acls with scope SUB base = "dc=a," + self.ldap_base aclset = ACLSet(base) acl = ACL(scope=ACL.SUB) acl.set_members([u'tester1']) acl.add_action('org.clacks.factory', 'rwx') aclset.add(acl) self.resolver.add_acl_set(aclset) # Check for read, write, create, execute permisions base = "dc=a," + self.ldap_base self.assertTrue(self.resolver.check('tester1', 'org.clacks.factory', 'r', base=base), "ACL scope SUB is not resolved correclty! The user should be able to read, but he cannot!") self.assertTrue(self.resolver.check('tester1', 'org.clacks.factory', 'w', base=base), "ACL scope SUB is not resolved correclty! The user should be able to read, but he cannot!") self.assertTrue(self.resolver.check('tester1', 'org.clacks.factory', 'x', base=base), "ACL scope SUB is not resolved correclty! The user should be able to read, but he cannot!") self.assertFalse(self.resolver.check('tester1', 'org.clacks.factory', 'd', base=base), "ACL scope SUB is not resolved correclty! The user should not be able to read, but he can!") # Check for permissions one level above the base we've created acls for. 
# This should return True. base = "dc=b,dc=a," + self.ldap_base self.assertTrue(self.resolver.check('tester1', 'org.clacks.factory', 'r', base=base), "ACL scope SUB is not resolved correclty! The user should be able to read, but he cannot!") # Check for permissions tow levels above the base we've created acls for. # This should return True too. base = "dc=c,dc=b,dc=a," + self.ldap_base self.assertTrue(self.resolver.check('tester1', 'org.clacks.factory', 'r', base=base), "ACL scope SUB is not resolved correclty! The user should be able to read, but he cannot!") # Check for permissions one level below the base we've created acls for. # This should return False. base = self.ldap_base self.assertFalse(self.resolver.check('tester1', 'org.clacks.factory', 'r', base=base), "ACL scope SUB is not resolved correclty! The user should not be able to read, but he can!") def test_acls_scope_one(self): """ This test check if the scope ACL.ONE is populated correclty. """ # Create acls with scope ONE base = "dc=a," + self.ldap_base aclset = ACLSet(base) acl = ACL(scope=ACL.ONE) acl.set_members([u'tester1']) acl.add_action('org.clacks.factory', 'rwx') aclset.add(acl) self.resolver.add_acl_set(aclset) # Check for read, write, create, execute permisions base = "dc=a," + self.ldap_base self.assertTrue(self.resolver.check('tester1', 'org.clacks.factory', 'r', base=base), "ACL scope ONE is not resolved correclty! The user should be able to read, but he cannot!") self.assertTrue(self.resolver.check('tester1', 'org.clacks.factory', 'w', base=base), "ACL scope ONE is not resolved correclty! The user should be able to read, but he cannot!") self.assertTrue(self.resolver.check('tester1', 'org.clacks.factory', 'x', base=base), "ACL scope ONE is not resolved correclty! The user should be able to read, but he cannot!") self.assertFalse(self.resolver.check('tester1', 'org.clacks.factory', 'd', base=base), "ACL scope ONE is not resolved correclty! 
The user should not be able to read, but he can!") # Check for permissions one level above the base we've created acls for. base = "dc=b,dc=a," + self.ldap_base self.assertFalse(self.resolver.check('tester1', 'org.clacks.factory', 'r', base=base), "ACL scope ONE is not resolved correclty! The user should not be able to read, but he can!") # Check for permissions one level below the base we've created acls for. base = self.ldap_base self.assertFalse(self.resolver.check('tester1', 'org.clacks.factory', 'r', base=base), "ACL scope ONE is not resolved correclty! The user should not be able to read, but he can!") if __name__ == '__main__': unittest.main()
46.454365
143
0.628283
3,205
23,413
4.532293
0.091108
0.073523
0.052871
0.040273
0.788517
0.762839
0.725802
0.717885
0.717403
0.715063
0
0.00864
0.253534
23,413
503
144
46.54672
0.82251
0.194123
0
0.695502
0
0.089965
0.366503
0.009086
0
0
0
0
0.17301
1
0.044983
false
0
0.013841
0
0.069204
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
48dfb9719cb2941797960f3a8912e3a587343bf9
57
py
Python
integration-open-weather-background/integration_open_weather/exceptions/weather_forecast.py
dalmarcogd/weather-forecast
f0987009c5691e46d9b8b6ba6f4408688ebec944
[ "Apache-2.0" ]
null
null
null
integration-open-weather-background/integration_open_weather/exceptions/weather_forecast.py
dalmarcogd/weather-forecast
f0987009c5691e46d9b8b6ba6f4408688ebec944
[ "Apache-2.0" ]
null
null
null
integration-open-weather-background/integration_open_weather/exceptions/weather_forecast.py
dalmarcogd/weather-forecast
f0987009c5691e46d9b8b6ba6f4408688ebec944
[ "Apache-2.0" ]
null
null
null
class WeatherForecastRequiredParams(Exception): pass
19
47
0.824561
4
57
11.75
1
0
0
0
0
0
0
0
0
0
0
0
0.122807
57
2
48
28.5
0.94
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
5
48e2c9e094884c3bcf6f34c77267c13330591692
24
py
Python
__init__.py
LuoxinY/gateway
a17ecfafee1e8ace609c3da06a0111b5c266c00e
[ "MIT" ]
4
2020-04-11T12:52:13.000Z
2020-07-08T06:38:08.000Z
__init__.py
LuoxinY/v2ray_subscribe
438863ec944f30fae29d159ebc0d1c14021c0887
[ "MIT" ]
null
null
null
__init__.py
LuoxinY/v2ray_subscribe
438863ec944f30fae29d159ebc0d1c14021c0887
[ "MIT" ]
2
2020-07-27T13:27:24.000Z
2020-10-01T04:11:31.000Z
__version__ = "0.0.0.3"
12
23
0.625
5
24
2.2
0.6
0.363636
0
0
0
0
0
0
0
0
0
0.190476
0.125
24
1
24
24
0.333333
0
0
0
0
0
0.291667
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
48e6bf8ab4bc23d25b2b7d07874b2242a18c383e
225
py
Python
escalate/core/views/__init__.py
darkreactions/ESCALATE
0020da00b81a2dd80d1c9fd72d2edf92b519e605
[ "MIT" ]
11
2020-09-29T13:59:02.000Z
2022-03-23T04:57:52.000Z
escalate/core/views/__init__.py
darkreactions/ESCALATE
0020da00b81a2dd80d1c9fd72d2edf92b519e605
[ "MIT" ]
95
2019-11-18T20:10:49.000Z
2022-03-31T17:09:49.000Z
escalate/core/views/__init__.py
darkreactions/ESCALATE
0020da00b81a2dd80d1c9fd72d2edf92b519e605
[ "MIT" ]
2
2021-11-26T18:22:08.000Z
2022-03-31T11:57:10.000Z
from .login import * from .menu import * from .crud_views import * from .export_views import * from .model_tag import * from .workflow import * from .user_views import * # from .edocument import * #from .misc_views import *
20.454545
27
0.742222
32
225
5.0625
0.40625
0.493827
0.277778
0
0
0
0
0
0
0
0
0
0.168889
225
10
28
22.5
0.86631
0.217778
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
5b02462bf47906dfb578f4df5f1f9ba0b281332b
191
py
Python
LeetCode/easy - Array/1431. Kids With the Greatest Number of Candies/solution.py
vincent507cpu/Comprehensive-Algorithm-Solution
04e01e49622457f09af2e1133954f043c0c92cb9
[ "MIT" ]
4
2020-06-26T00:45:53.000Z
2021-04-19T12:23:32.000Z
LeetCode/easy - Array/1431. Kids With the Greatest Number of Candies/solution.py
vincent507cpu/LeetCode-Comprehensive-Solution
04e01e49622457f09af2e1133954f043c0c92cb9
[ "MIT" ]
null
null
null
LeetCode/easy - Array/1431. Kids With the Greatest Number of Candies/solution.py
vincent507cpu/LeetCode-Comprehensive-Solution
04e01e49622457f09af2e1133954f043c0c92cb9
[ "MIT" ]
null
null
null
class Solution: # list comprehension def kidsWithCandies(self, candies: List[int], extraCandies: int) -> List[bool]: return [x + extraCandies >= max(candies) for x in candies]
47.75
83
0.685864
23
191
5.695652
0.695652
0
0
0
0
0
0
0
0
0
0
0
0.198953
191
4
84
47.75
0.856209
0.094241
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0
0.333333
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
d297efc24967c92b1bbd79b53c06ce6dc4dd21bd
99
py
Python
service/__init__.py
GudniNatan/GSKI-PA6
a0f9a38bc0d2f6710f803a77276e6a76cd6f4471
[ "MIT" ]
null
null
null
service/__init__.py
GudniNatan/GSKI-PA6
a0f9a38bc0d2f6710f803a77276e6a76cd6f4471
[ "MIT" ]
null
null
null
service/__init__.py
GudniNatan/GSKI-PA6
a0f9a38bc0d2f6710f803a77276e6a76cd6f4471
[ "MIT" ]
null
null
null
# from service.member_service import MemberService # from service.sport_service import SportService
49.5
50
0.868687
12
99
7
0.583333
0.261905
0
0
0
0
0
0
0
0
0
0
0.090909
99
2
51
49.5
0.933333
0.959596
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
d2d78bffce92c410baf05ecfbd8618dff5cb7ddb
144
py
Python
src/main.py
tylerkramlich/readme-analyzer
f5424678a7204c524979d451099fa2d660cbe0c3
[ "MIT" ]
null
null
null
src/main.py
tylerkramlich/readme-analyzer
f5424678a7204c524979d451099fa2d660cbe0c3
[ "MIT" ]
null
null
null
src/main.py
tylerkramlich/readme-analyzer
f5424678a7204c524979d451099fa2d660cbe0c3
[ "MIT" ]
null
null
null
# TODO: Argparse main from readme_analyzer import ReadmeAnalyzer analyzer = ReadmeAnalyzer('src/readme_analyzer_config.cfg') analyzer.runAll()
24
59
0.826389
17
144
6.823529
0.705882
0.241379
0
0
0
0
0
0
0
0
0
0
0.090278
144
6
60
24
0.885496
0.131944
0
0
0
0
0.241935
0.241935
0
0
0
0.166667
0
1
0
false
0
0.333333
0
0.333333
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
1
0
0
0
0
5
9623ca53acb0a2870420581f1b0f26ecffaa79db
68
py
Python
CodeWars/Python/7 kyu/Find the stray number/main.py
opastushkov/codewars-solutions
0132a24259a4e87f926048318332dcb4d94858ca
[ "MIT" ]
null
null
null
CodeWars/Python/7 kyu/Find the stray number/main.py
opastushkov/codewars-solutions
0132a24259a4e87f926048318332dcb4d94858ca
[ "MIT" ]
null
null
null
CodeWars/Python/7 kyu/Find the stray number/main.py
opastushkov/codewars-solutions
0132a24259a4e87f926048318332dcb4d94858ca
[ "MIT" ]
null
null
null
def stray(arr): return [x for x in arr if arr.count(x) == 1][0]
34
52
0.588235
15
68
2.666667
0.733333
0
0
0
0
0
0
0
0
0
0
0.038462
0.235294
68
2
52
34
0.730769
0
0
0
0
0
0
0
0
0
0
0
0
1
0.5
false
0
0
0.5
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
82693d324fb9971ceba57f7eae5aa7909289a111
82
py
Python
ngm.py
GMNaumov/sg-github-training
9682c7126deb7c7947093f90894d7663a398d3b2
[ "Apache-2.0" ]
null
null
null
ngm.py
GMNaumov/sg-github-training
9682c7126deb7c7947093f90894d7663a398d3b2
[ "Apache-2.0" ]
null
null
null
ngm.py
GMNaumov/sg-github-training
9682c7126deb7c7947093f90894d7663a398d3b2
[ "Apache-2.0" ]
1
2021-07-12T18:41:20.000Z
2021-07-12T18:41:20.000Z
print('Dratuti!') print('Hello!') print('Hello, Georgios!') print('Hello, Pupa!')
16.4
25
0.658537
10
82
5.4
0.5
0.555556
0
0
0
0
0
0
0
0
0
0
0.073171
82
4
26
20.5
0.710526
0
0
0
0
0
0.512195
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
827be058cd35d906b7a728d6e018afa9eaffd1c0
62
py
Python
gru/contrib/auth/backends/__init__.py
similarweb/gru
49ef70c2b5e58302c84dbe7d984a7d49aebc0384
[ "BSD-2-Clause-FreeBSD" ]
7
2016-12-11T19:58:33.000Z
2020-07-11T08:55:34.000Z
gru/contrib/auth/backends/__init__.py
similarweb/gru
49ef70c2b5e58302c84dbe7d984a7d49aebc0384
[ "BSD-2-Clause-FreeBSD" ]
null
null
null
gru/contrib/auth/backends/__init__.py
similarweb/gru
49ef70c2b5e58302c84dbe7d984a7d49aebc0384
[ "BSD-2-Clause-FreeBSD" ]
1
2019-12-09T19:31:50.000Z
2019-12-09T19:31:50.000Z
from .ldap import LdapBackend from .dummy import DummyBackend
20.666667
31
0.83871
8
62
6.5
0.75
0
0
0
0
0
0
0
0
0
0
0
0.129032
62
2
32
31
0.962963
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
8281813687879c14230310a3d75a6d81d4878a59
53
py
Python
PageDownloader.py
fede-da/PageDownloader
2344e2307ea374690ba05923056fff9e59c9ad12
[ "MIT" ]
null
null
null
PageDownloader.py
fede-da/PageDownloader
2344e2307ea374690ba05923056fff9e59c9ad12
[ "MIT" ]
null
null
null
PageDownloader.py
fede-da/PageDownloader
2344e2307ea374690ba05923056fff9e59c9ad12
[ "MIT" ]
null
null
null
from gui import Gui myGui: Gui = Gui() myGui.run()
8.833333
19
0.660377
9
53
3.888889
0.555556
0.457143
0
0
0
0
0
0
0
0
0
0
0.207547
53
5
20
10.6
0.833333
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
82b091dab135afd3bc6f0d1854b995d778943199
17,458
py
Python
loghub/tests/test_changelog.py
xlotlu/loghub
17691793e358af6c59f3b41d3b98e9ae27d93a2d
[ "MIT" ]
34
2016-08-08T09:58:28.000Z
2022-03-27T20:16:51.000Z
loghub/tests/test_changelog.py
xlotlu/loghub
17691793e358af6c59f3b41d3b98e9ae27d93a2d
[ "MIT" ]
82
2016-11-03T04:44:59.000Z
2020-07-05T02:03:34.000Z
loghub/tests/test_changelog.py
xlotlu/loghub
17691793e358af6c59f3b41d3b98e9ae27d93a2d
[ "MIT" ]
13
2016-08-08T09:58:30.000Z
2020-05-27T09:10:46.000Z
# -*- coding: utf-8 -*- # ----------------------------------------------------------------------------- # Copyright (c) The Spyder Development Team # # Licensed under the terms of the MIT License # (See LICENSE.txt for details) # ----------------------------------------------------------------------------- """Tests changelog output.""" # Standard library imports import os import sys import tempfile # Third party imports from mock import MagicMock, patch import pytest # Local imports from loghub.cli.main import create_changelog, parse_arguments from loghub.external.github import JsonObject REPO = 'spyder-ide/loghub' TEST_TOKEN = os.environ.get('TEST_TOKEN', '').replace('x', '') TEST_USER = os.environ.get('TEST_USER', '').replace('x', '') TEST_PASS = os.environ.get('TEST_CODE', '').replace('x', '') TEST_MILESTONE = 'test-milestone' TEST_MILESTONE_REAL = 'v0.2' TEST_TAG = 'v0.1.2' NOT_ON_CI = os.environ.get('CIRCLECI') != 'true' # --- Tests # ----------------------------------------------------------------------------- @pytest.mark.skipif(NOT_ON_CI, reason='test on ci server only') def test_changelog_loghub_real_milestone(): log = create_changelog( repo=REPO, token=TEST_TOKEN, milestone=TEST_MILESTONE_REAL, branch='master', output_format='changelog') expected = '''## Version 0.2 (2017-02-01) ### Issues Closed * [Issue 64](https://github.com/spyder-ide/loghub/issues/64) - Create release for v0.2 * [Issue 57](https://github.com/spyder-ide/loghub/issues/57) - Allow Enable/Disable of PR output ([PR 65](https://github.com/spyder-ide/loghub/pull/65)) * [Issue 56](https://github.com/spyder-ide/loghub/issues/56) - Fix output for grouped labels if no items on that group ([PR 58](https://github.com/spyder-ide/loghub/pull/58)) * [Issue 55](https://github.com/spyder-ide/loghub/issues/55) - Fix date on generation based on milestone ([PR 58](https://github.com/spyder-ide/loghub/pull/58)) * [Issue 53](https://github.com/spyder-ide/loghub/issues/53) - Normalize CI files using 
requirements.txt ([PR 60](https://github.com/spyder-ide/loghub/pull/60)) * [Issue 52](https://github.com/spyder-ide/loghub/issues/52) - Update README to use full commands ([PR 58](https://github.com/spyder-ide/loghub/pull/58)) * [Issue 47](https://github.com/spyder-ide/loghub/issues/47) - Update Appveyor Badge for new username spyder-ide ([PR 48](https://github.com/spyder-ide/loghub/pull/48)) * [Issue 46](https://github.com/spyder-ide/loghub/issues/46) - Create labels automagically on github based on template ([PR 59](https://github.com/spyder-ide/loghub/pull/59)) * [Issue 42](https://github.com/spyder-ide/loghub/issues/42) - Generate all changelog based on milestones * [Issue 41](https://github.com/spyder-ide/loghub/issues/41) - Add default template for release with issue label grouping ([PR 43](https://github.com/spyder-ide/loghub/pull/43)) * [Issue 40](https://github.com/spyder-ide/loghub/issues/40) - Provide available milestones to user when provided not found ([PR 44](https://github.com/spyder-ide/loghub/pull/44)) * [Issue 39](https://github.com/spyder-ide/loghub/issues/39) - Add checkignore to quantified code * [Issue 30](https://github.com/spyder-ide/loghub/issues/30) - Add label groupings ([PR 34](https://github.com/spyder-ide/loghub/pull/34)) * [Issue 28](https://github.com/spyder-ide/loghub/issues/28) - Generate all changelog based on tags ([PR 61](https://github.com/spyder-ide/loghub/pull/61)) * [Issue 27](https://github.com/spyder-ide/loghub/issues/27) - Generate all changelog based on milestones ([PR 61](https://github.com/spyder-ide/loghub/pull/61)) * [Issue 23](https://github.com/spyder-ide/loghub/issues/23) - Update Readme with new CLI parameters ([PR 36](https://github.com/spyder-ide/loghub/pull/36)) * [Issue 21](https://github.com/spyder-ide/loghub/issues/21) - Add branch parameter to select PRs to display based on the merging branch ([PR 32](https://github.com/spyder-ide/loghub/pull/32)) * [Issue 
16](https://github.com/spyder-ide/loghub/issues/16) - Better authorization handling ([PR 44](https://github.com/spyder-ide/loghub/pull/44)) * [Issue 15](https://github.com/spyder-ide/loghub/issues/15) - Since tag not working properly ([PR 13](https://github.com/spyder-ide/loghub/pull/13)) * [Issue 11](https://github.com/spyder-ide/loghub/issues/11) - Support hidden password ([PR 14](https://github.com/spyder-ide/loghub/pull/14)) * [Issue 10](https://github.com/spyder-ide/loghub/issues/10) - Support custom line formats [templates] ([PR 20](https://github.com/spyder-ide/loghub/pull/20)) * [Issue 8](https://github.com/spyder-ide/loghub/issues/8) - Support "all issues" ([PR 19](https://github.com/spyder-ide/loghub/pull/19)) * [Issue 4](https://github.com/spyder-ide/loghub/issues/4) - Expose labels for additional filtering ([PR 17](https://github.com/spyder-ide/loghub/pull/17)) * [Issue 3](https://github.com/spyder-ide/loghub/issues/3) - Add access token interface ([PR 18](https://github.com/spyder-ide/loghub/pull/18)) * [Issue 2](https://github.com/spyder-ide/loghub/issues/2) - Add some basic tests ([PR 29](https://github.com/spyder-ide/loghub/pull/29)) In this release 25 issues were closed. 
### Pull Requests Merged * [PR 65](https://github.com/spyder-ide/loghub/pull/65) - PR: Add support for no-prs on output ([57](https://github.com/spyder-ide/loghub/issues/57)) * [PR 61](https://github.com/spyder-ide/loghub/pull/61) - PR: Add batch mode for all tags for all milestones ([28](https://github.com/spyder-ide/loghub/issues/28), [27](https://github.com/spyder-ide/loghub/issues/27)) * [PR 60](https://github.com/spyder-ide/loghub/pull/60) - PR: Add requirements file and update CI process ([53](https://github.com/spyder-ide/loghub/issues/53)) * [PR 59](https://github.com/spyder-ide/loghub/pull/59) - PR: Add a label creator utility ([46](https://github.com/spyder-ide/loghub/issues/46)) * [PR 58](https://github.com/spyder-ide/loghub/pull/58) - PR: Refactor code and simplify group handling ([56](https://github.com/spyder-ide/loghub/issues/56), [55](https://github.com/spyder-ide/loghub/issues/55), [52](https://github.com/spyder-ide/loghub/issues/52)) * [PR 54](https://github.com/spyder-ide/loghub/pull/54) - PR: Remove versioneer * [PR 51](https://github.com/spyder-ide/loghub/pull/51) - PR: Update ignore file to ignore versioneer files * [PR 50](https://github.com/spyder-ide/loghub/pull/50) - PR: Add versioneer for version string control based on git tags not on manual editing ([49](https://github.com/spyder-ide/loghub/issues/49)) * [PR 48](https://github.com/spyder-ide/loghub/pull/48) - PR: Update AppVeyor badge because of move to org account ([47](https://github.com/spyder-ide/loghub/issues/47)) * [PR 45](https://github.com/spyder-ide/loghub/pull/45) - PR: Break into smaller funcs * [PR 44](https://github.com/spyder-ide/loghub/pull/44) - PR: Improve error handling ([40](https://github.com/spyder-ide/loghub/issues/40), [16](https://github.com/spyder-ide/loghub/issues/16)) * [PR 43](https://github.com/spyder-ide/loghub/pull/43) - PR: Add template for release grouped issue labels ([41](https://github.com/spyder-ide/loghub/issues/41)) * [PR 
36](https://github.com/spyder-ide/loghub/pull/36) - PR: Update readme with latest CLI commands ([23](https://github.com/spyder-ide/loghub/issues/23)) * [PR 34](https://github.com/spyder-ide/loghub/pull/34) - PR: Add label grouping for issues ([30](https://github.com/spyder-ide/loghub/issues/30)) * [PR 32](https://github.com/spyder-ide/loghub/pull/32) - PR: Feature/branch ([21](https://github.com/spyder-ide/loghub/issues/21)) * [PR 31](https://github.com/spyder-ide/loghub/pull/31) - PR: Refactor code * [PR 29](https://github.com/spyder-ide/loghub/pull/29) - PR: Maintenance/tests ([2](https://github.com/spyder-ide/loghub/issues/2)) * [PR 20](https://github.com/spyder-ide/loghub/pull/20) - PR: Feature/templates ([10](https://github.com/spyder-ide/loghub/issues/10)) * [PR 19](https://github.com/spyder-ide/loghub/pull/19) - PR: Add support for all issues when no tag or no milestone is provided ([8](https://github.com/spyder-ide/loghub/issues/8)) * [PR 18](https://github.com/spyder-ide/loghub/pull/18) - PR: Add support for github tokens ([3](https://github.com/spyder-ide/loghub/issues/3)) * [PR 17](https://github.com/spyder-ide/loghub/pull/17) - PR: Add regex label filtering for issues and prs ([4](https://github.com/spyder-ide/loghub/issues/4)) * [PR 14](https://github.com/spyder-ide/loghub/pull/14) - add password prompt when user is specified and password is blank ([11](https://github.com/spyder-ide/loghub/issues/11)) * [PR 13](https://github.com/spyder-ide/loghub/pull/13) - PR: Fix bug on since keyword returning incorrect issues ([15](https://github.com/spyder-ide/loghub/issues/15)) * [PR 12](https://github.com/spyder-ide/loghub/pull/12) - Don't test macOS in Travis * [PR 9](https://github.com/spyder-ide/loghub/pull/9) - Don't show "Issues" or "Pull requests" sections if there are no issues or PRs to show * [PR 7](https://github.com/spyder-ide/loghub/pull/7) - Fix contents of temporary file * [PR 6](https://github.com/spyder-ide/loghub/pull/6) - Remove extra empty 
lines * [PR 5](https://github.com/spyder-ide/loghub/pull/5) - Fix PR_SHORT formatting. * [PR 1](https://github.com/spyder-ide/loghub/pull/1) - Several fixes In this release 29 pull requests were closed. ''' print([log]) print([expected]) assert log == expected @pytest.mark.skipif(NOT_ON_CI, reason='test on ci server only') def test_changelog(): log = create_changelog( repo=REPO, token=TEST_TOKEN, milestone=TEST_MILESTONE, branch='master', output_format='changelog') expected = '''## Version test-milestone (2016-12-05) ### Issues Closed * [Issue 77](https://github.com/spyder-ide/loghub/issues/77) - Test empty body * [Issue 26](https://github.com/spyder-ide/loghub/issues/26) - Test number 2 * [Issue 24](https://github.com/spyder-ide/loghub/issues/24) - Issue test In this release 3 issues were closed. ### Pull Requests Merged * [PR 25](https://github.com/spyder-ide/loghub/pull/25) - PR: Add tests folder In this release 1 pull request was closed. ''' print([log]) print([expected]) assert log == expected @pytest.mark.skipif(NOT_ON_CI, reason='test on ci server only') def test_changelog_release(): log = create_changelog( repo=REPO, token=TEST_TOKEN, milestone=TEST_MILESTONE, branch='master', output_format='release') expected = '''## Version test-milestone (2016-12-05) ### Issues Closed * Issue #77 - Test empty body * Issue #26 - Test number 2 * Issue #24 - Issue test In this release 3 issues were closed. ### Pull Requests Merged * PR #25 - PR: Add tests folder In this release 1 pull request was closed. ''' print([log]) print([expected]) assert log == expected @pytest.mark.skipif(NOT_ON_CI, reason='test on ci server only') def test_changelog_release_branch(): log = create_changelog( repo=REPO, token=TEST_TOKEN, milestone=TEST_MILESTONE, branch='test-branch', output_format='release') expected = '''## Version test-milestone (2016-12-05) ### Issues Closed * Issue #77 - Test empty body * Issue #26 - Test number 2 * Issue #24 - Issue test In this release 3 issues were closed. 
### Pull Requests Merged * PR #85 - Test no repo in body * PR #84 - Test no issue number in PR's body * PR #33 - PR: Test change In this release 3 pull requests were closed. ''' print([log]) print([expected]) assert log == expected @pytest.mark.skipif(NOT_ON_CI, reason='test on ci server only') @patch('loghub.core.formatter.GitHubRepo') def test_changelog_label_groups(gh_mock): gh_mock.return_value = gh_obj = MagicMock() gh_obj.milestone.return_value = {'closed_at': '2016-12-05'} issues = [{ 'loghub_label_names': ['type:bug'], 'number': 1, 'title': 'first issue', 'html_url': 'a_url', }] prs = [{ 'loghub_label_names': ['type:bug'], 'number': 2, 'title': 'first pull request', 'html_url': 'a_url', 'pull_request': True, 'body': '', }] gh_obj.issues.return_value = [JsonObject(x) for x in issues + prs] issue_label_groups = [{'label': 'type:bug', 'name': 'Bugs fixed'}] # group issues and PRs log = create_changelog( repo=REPO, token=TEST_TOKEN, milestone=TEST_MILESTONE, branch='test-branch', issue_label_groups=issue_label_groups, pr_label_groups=issue_label_groups) expected = '''## Version test-milestone (2016-12-05) #### Bugs fixed * [Issue 1](https://github.com/spyder-ide/loghub/issues/1) - first issue * [PR 2](https://github.com/spyder-ide/loghub/pull/2) - first pull request In this release 1 issue and 1 pull request were closed. ''' print([log]) print([expected]) assert log == expected # group issues only, list PRs log = create_changelog( repo=REPO, token=TEST_TOKEN, milestone=TEST_MILESTONE, branch='test-branch', issue_label_groups=issue_label_groups) expected = '''## Version test-milestone (2016-12-05) ### Issues Closed #### Bugs fixed * [Issue 1](https://github.com/spyder-ide/loghub/issues/1) - first issue In this release 1 issue was closed. ### Pull Requests Merged * [PR 2](https://github.com/spyder-ide/loghub/pull/2) - first pull request In this release 1 pull request was closed. 
''' print([log]) print([expected]) assert log == expected # group PRs only, list issues log = create_changelog( repo=REPO, token=TEST_TOKEN, milestone=TEST_MILESTONE, branch='test-branch', pr_label_groups=issue_label_groups) expected = '''## Version test-milestone (2016-12-05) ### Issues Closed * [Issue 1](https://github.com/spyder-ide/loghub/issues/1) - first issue In this release 1 issue was closed. ### Pull Requests Merged #### Bugs fixed * [PR 2](https://github.com/spyder-ide/loghub/pull/2) - first pull request In this release 1 pull request was closed. ''' print([log]) print([expected]) assert log == expected @pytest.mark.skipif(NOT_ON_CI, reason='test on ci server only') def test_changelog_issue_groups(): issue_label_groups = [{'label': 'type:bug', 'name': 'Bugs fixed'}] log = create_changelog( repo=REPO, token=TEST_TOKEN, milestone=TEST_MILESTONE, branch='test-branch', issue_label_groups=issue_label_groups) expected = '''## Version test-milestone (2016-12-05) ### Issues Closed #### Bugs fixed * [Issue 26](https://github.com/spyder-ide/loghub/issues/26) - Test number 2 In this release 1 issue was closed. ### Pull Requests Merged * [PR 85](https://github.com/spyder-ide/loghub/pull/85) - Test no repo in body * [PR 84](https://github.com/spyder-ide/loghub/pull/84) - Test no issue number in PR's body * [PR 33](https://github.com/spyder-ide/loghub/pull/33) - PR: Test change In this release 3 pull requests were closed. 
''' print([log]) print([expected]) assert log == expected @pytest.mark.skipif(NOT_ON_CI, reason='test on ci server only') @patch('loghub.core.formatter.GitHubRepo') def test_no_comments(gh_mock): gh_mock.return_value = gh_obj = MagicMock() gh_obj.milestone.return_value = {'closed_at': '2016-12-05'} prs = [{ 'loghub_label_names': [], 'number': 1, 'title': 'pull request', 'html_url': 'spyder-ide/loghub', 'pull_request': True, 'body': '<!--- Fixes #777 --->\nFixes #12', }] gh_obj.issues.return_value = [JsonObject(x) for x in prs] # group issues and PRs log = create_changelog( repo=REPO, token=TEST_TOKEN, milestone=TEST_MILESTONE, branch='test-branch') expected = '''## Version test-milestone (2016-12-05) ### Pull Requests Merged * [PR 1](https://github.com/spyder-ide/loghub/pull/1) - pull request ([12](https://github.com/spyder-ide/loghub/issues/12)) In this release 1 pull request was closed. ''' print([log]) print([expected]) assert log == expected @pytest.mark.skipif(NOT_ON_CI, reason='test on ci server only') def test_changelog_template(): template = '''{% for i in issues -%} * Issue #{{ i['number'] }} - {{ i['title'] }} {% endfor %}''' desc, path = tempfile.mkstemp() with open(path, 'w') as f: f.write(template) log = create_changelog( repo=REPO, token=TEST_TOKEN, milestone=TEST_MILESTONE, template_file=path) expected = '* Issue #77 - Test empty body\n* Issue #26 - Test number 2\n* Issue #24 - Issue test\n' print([log]) print([expected]) assert log == expected @pytest.mark.skipif(NOT_ON_CI, reason='test on ci server only') def test_argument_parser_invalid(): args = ['prog'] with pytest.raises(SystemExit): with patch.object(sys, 'argv', args): parse_arguments(skip=True) @pytest.mark.skipif(NOT_ON_CI, reason='test on ci server only') def test_argument_parser_valid(): args = [ 'prog', 'spyder-ide/loghub', '-ilg', 'type:bug', 'Bugs fixed', '-t', TEST_TOKEN ] with patch.object(sys, 'argv', args): options = parse_arguments() assert options.issue_label_groups == 
[['type:bug', 'Bugs fixed']]
43.106173
266
0.676423
2,603
17,458
4.476373
0.132539
0.091916
0.151905
0.197391
0.73172
0.725712
0.707861
0.635942
0.632338
0.474425
0
0.038671
0.146809
17,458
404
267
43.212871
0.743605
0.032249
0
0.591503
0
0.228758
0.706294
0.003793
0
0
0
0
0.035948
1
0.03268
false
0.009804
0.022876
0
0.055556
0.065359
0
0
0
null
0
0
1
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
7d6786f15e1862e2186e9d4c2113dbe55478325a
146
py
Python
django_cookicutter/django_cookie_app/apps.py
gregadc/django-cookiecutter
6d4031fb2016f57810d7da2414d40a9145c2aebe
[ "MIT" ]
null
null
null
django_cookicutter/django_cookie_app/apps.py
gregadc/django-cookiecutter
6d4031fb2016f57810d7da2414d40a9145c2aebe
[ "MIT" ]
null
null
null
django_cookicutter/django_cookie_app/apps.py
gregadc/django-cookiecutter
6d4031fb2016f57810d7da2414d40a9145c2aebe
[ "MIT" ]
null
null
null
from django.apps import AppConfig class DjangoCookieAppConfig(AppConfig): name = 'django_cookie_app' verbose_name = "Django cookie app"
20.857143
39
0.767123
17
146
6.411765
0.647059
0.183486
0.293578
0.348624
0
0
0
0
0
0
0
0
0.164384
146
6
40
24.333333
0.893443
0
0
0
0
0
0.232877
0
0
0
0
0
0
1
0
false
0
0.25
0
1
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
5
7dad4891c4f9222c395b9878a8dc56d85f9a6852
57
py
Python
enthought/pyface/ui/qt4/wizard/wizard.py
enthought/etsproxy
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
[ "BSD-3-Clause" ]
3
2016-12-09T06:05:18.000Z
2018-03-01T13:00:29.000Z
enthought/pyface/ui/qt4/wizard/wizard.py
enthought/etsproxy
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
[ "BSD-3-Clause" ]
1
2020-12-02T00:51:32.000Z
2020-12-02T08:48:55.000Z
enthought/pyface/ui/qt4/wizard/wizard.py
enthought/etsproxy
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
[ "BSD-3-Clause" ]
null
null
null
# proxy module from pyface.ui.qt4.wizard.wizard import *
19
41
0.77193
9
57
4.888889
0.888889
0
0
0
0
0
0
0
0
0
0
0.02
0.122807
57
2
42
28.5
0.86
0.210526
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
815d2f78267ae0346957e19680537e6f7e37606d
199
py
Python
sharpy/plans/acts/terran/__init__.py
raspersc2/sharpy-sc2
ec8f5870eab233b1d09a54a09bd8b76ea2585735
[ "MIT" ]
2
2020-08-13T01:25:20.000Z
2020-11-22T19:00:06.000Z
sharpy/plans/acts/terran/__init__.py
raspersc2/sharpy-sc2
ec8f5870eab233b1d09a54a09bd8b76ea2585735
[ "MIT" ]
null
null
null
sharpy/plans/acts/terran/__init__.py
raspersc2/sharpy-sc2
ec8f5870eab233b1d09a54a09bd8b76ea2585735
[ "MIT" ]
null
null
null
from .act_build_addon import ActBuildAddon from .morph_orbitals import MorphOrbitals from .morph_planetary import MorphPlanetary from .terran_unit import TerranUnit from .auto_depot import AutoDepot
33.166667
43
0.874372
26
199
6.461538
0.653846
0.107143
0
0
0
0
0
0
0
0
0
0
0.100503
199
5
44
39.8
0.938547
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
818d2a86d8a517de8d12b7dac92fd960e4b0d1b5
1,047
py
Python
JioSaavn/__baseApiUrl.py
vidyasagar1432/jiosaavn
b5a39cfdcbb1a5a0abf38aefa11fd9989ff386a8
[ "MIT" ]
null
null
null
JioSaavn/__baseApiUrl.py
vidyasagar1432/jiosaavn
b5a39cfdcbb1a5a0abf38aefa11fd9989ff386a8
[ "MIT" ]
null
null
null
JioSaavn/__baseApiUrl.py
vidyasagar1432/jiosaavn
b5a39cfdcbb1a5a0abf38aefa11fd9989ff386a8
[ "MIT" ]
null
null
null
def albumFromID(id:int): return f'https://www.jiosaavn.com/api.php?__call=content.getAlbumDetails&_format=json&cc=in&_marker=0%3F_marker=0&albumid={id}' def albumsearchFromSTRING(query:str): return f'https://www.jiosaavn.com/api.php?__call=autocomplete.get&_format=json&_marker=0&cc=in&includeMetaTags=1&query={"+".join(query.split(" "))}' def songFromID(id:str): return f'https://www.jiosaavn.com/api.php?__call=song.getDetails&cc=in&_marker=0%3F_marker%3D0&_format=json&pids={id}' def songsearchFromSTRING(query:str,p:int,n:int): return f'https://www.jiosaavn.com/api.php?p={p}&_format=json&_marker=0&api_version=4&ctx=wap6dot0&n={n}&__call=search.getResults&q={"+".join(query.split(" "))}' def lyricsFromID(id:str): return f'https://www.jiosaavn.com/api.php?__call=lyrics.getLyrics&ctx=web6dot0&api_version=4&_format=json&_marker=0%3F_marker=0&lyrics_id={id}' def playlistFromID(id:str): return f'https://www.jiosaavn.com/api.php?__call=playlist.getDetails&_format=json&cc=in&_marker=0%3F_marker%3D0&listid={id}'
52.35
164
0.750716
171
1,047
4.415205
0.315789
0.074172
0.095364
0.119205
0.445033
0.422517
0.422517
0.389404
0.312583
0.21457
0
0.023232
0.054441
1,047
19
165
55.105263
0.739394
0
0
0
0
0.5
0.727273
0
0
0
0
0
0
1
0.5
false
0
0
0.5
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
819c2a2552741ecdf882c4fa411bce5eb8fade30
172
py
Python
util.py
chariothy/githook
08e2703113b4b76e2c66eca1eb2b2a755fb817c3
[ "MIT" ]
1
2021-01-15T10:25:25.000Z
2021-01-15T10:25:25.000Z
util.py
chariothy/githook
08e2703113b4b76e2c66eca1eb2b2a755fb817c3
[ "MIT" ]
null
null
null
util.py
chariothy/githook
08e2703113b4b76e2c66eca1eb2b2a755fb817c3
[ "MIT" ]
null
null
null
from pybeans import AppTool import os, sys, shutil, time APP = AppTool('githook', os.getcwd()) def now(): return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
21.5
63
0.656977
27
172
4.185185
0.777778
0
0
0
0
0
0
0
0
0
0
0
0.145349
172
8
63
21.5
0.768707
0
0
0
0
0
0.138728
0
0
0
0
0
0
1
0.2
false
0
0.4
0.2
0.8
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
1
1
0
0
5
81b2faf106925d974749af3149c5b40d10d49e99
2,093
py
Python
tensorflow/python/keras/_impl/keras/layers/__init__.py
M155K4R4/Tensorflow
e5e03ef3148303b3dfed89a1492dedf92b45be25
[ "Apache-2.0" ]
522
2016-06-08T02:15:50.000Z
2022-03-02T05:30:36.000Z
tensorflow/python/keras/_impl/keras/layers/__init__.py
M155K4R4/Tensorflow
e5e03ef3148303b3dfed89a1492dedf92b45be25
[ "Apache-2.0" ]
48
2016-07-26T00:11:55.000Z
2022-02-23T13:36:33.000Z
tensorflow/python/keras/_impl/keras/layers/__init__.py
M155K4R4/Tensorflow
e5e03ef3148303b3dfed89a1492dedf92b45be25
[ "Apache-2.0" ]
108
2016-06-16T15:34:05.000Z
2022-03-12T13:23:11.000Z
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Keras layers module. """ # pylint: disable=wildcard-import from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.keras._impl.keras.engine import Input from tensorflow.python.keras._impl.keras.engine import InputLayer from tensorflow.python.keras._impl.keras.engine import InputSpec from tensorflow.python.keras._impl.keras.engine import Layer from tensorflow.python.keras._impl.keras.layers.advanced_activations import * from tensorflow.python.keras._impl.keras.layers.convolutional import * from tensorflow.python.keras._impl.keras.layers.convolutional_recurrent import * from tensorflow.python.keras._impl.keras.layers.core import * from tensorflow.python.keras._impl.keras.layers.embeddings import * from tensorflow.python.keras._impl.keras.layers.local import * from tensorflow.python.keras._impl.keras.layers.merge import * from tensorflow.python.keras._impl.keras.layers.noise import * from tensorflow.python.keras._impl.keras.layers.normalization import * from tensorflow.python.keras._impl.keras.layers.pooling import * from tensorflow.python.keras._impl.keras.layers.recurrent import * from tensorflow.python.keras._impl.keras.layers.serialization import deserialize from 
tensorflow.python.keras._impl.keras.layers.serialization import serialize from tensorflow.python.keras._impl.keras.layers.wrappers import *
51.04878
80
0.788342
281
2,093
5.75089
0.359431
0.155941
0.222772
0.278465
0.55198
0.55198
0.55198
0.502475
0.189356
0
0
0.004222
0.094601
2,093
40
81
52.325
0.848549
0.341137
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0.047619
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
81cad6ca5d194769155fcf621b7c97e37c8f1a7a
694
py
Python
chicken_quote.py
WetDude/chicken_quote
bc1d3f88c963364178047993d1a07fe18b026083
[ "MIT" ]
1
2016-06-05T16:50:34.000Z
2016-06-05T16:50:34.000Z
chicken_quote.py
WetDude/chicken_quote
bc1d3f88c963364178047993d1a07fe18b026083
[ "MIT" ]
null
null
null
chicken_quote.py
WetDude/chicken_quote
bc1d3f88c963364178047993d1a07fe18b026083
[ "MIT" ]
null
null
null
#! /usr/bin/python import sys g_quote = "------------------------------------------------" g_chicken = ( " \\\n" " \\ /\\__/\\\n" " \\ \\ /\n" " | 0 >>\n" " | |\n" " |____|\n" " ____((__<| |\n" " ( |\n" " ( |\n" " (_____________/\n" " | |\n" " | |\n" " /\ /\\\n" ) if len(sys.argv) < 2: print("Please pass a quote") sys.exit(0) print(g_quote) print(" >> " + sys.argv[1]) print(g_quote) print(g_chicken)
23.133333
60
0.243516
49
694
2.836735
0.408163
0.158273
0.194245
0.201439
0.071942
0.071942
0.071942
0.071942
0.071942
0
0
0.011869
0.514409
694
29
61
23.931034
0.400593
0.024496
0
0.291667
0
0
0.683432
0.071006
0
0
0
0
0
1
0
false
0.041667
0.041667
0
0.041667
0.208333
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
1
0
0
0
0
1
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
c48eb82f65cafbdcf19c173188fa154499456055
15,780
py
Python
fireant/tests/queries/test_build_sets.py
gl3nn/fireant
f172c3f4117ece1240d62575b57e7485484764a8
[ "Apache-2.0" ]
122
2016-08-05T13:34:52.000Z
2022-03-15T13:21:13.000Z
fireant/tests/queries/test_build_sets.py
gl3nn/fireant
f172c3f4117ece1240d62575b57e7485484764a8
[ "Apache-2.0" ]
321
2016-08-10T08:48:15.000Z
2021-07-28T13:08:18.000Z
fireant/tests/queries/test_build_sets.py
gl3nn/fireant
f172c3f4117ece1240d62575b57e7485484764a8
[ "Apache-2.0" ]
27
2016-08-10T08:11:08.000Z
2021-08-23T08:14:37.000Z
from unittest import TestCase from pypika import ( Table, functions as fn, ) import fireant as f from fireant.tests.dataset.mocks import test_database test_table = Table("test") ds = f.DataSet( table=test_table, database=test_database, fields=[ f.Field("date", definition=test_table.date, data_type=f.DataType.date), f.Field("text", definition=test_table.text, data_type=f.DataType.text), f.Field("number", definition=test_table.number, data_type=f.DataType.number), f.Field("boolean", definition=test_table.boolean, data_type=f.DataType.boolean), f.Field( "aggr_number", definition=fn.Sum(test_table.number), data_type=f.DataType.number, ), ], ) # noinspection SqlDialectInspection,SqlNoDataSourceInspection class ResultSetTests(TestCase): maxDiff = None def test_no_metric_is_removed_when_result_set_metric_filter_is_present(self): queries = ds.query.widget(f.Pandas(ds.fields.aggr_number)).filter(f.ResultSet(ds.fields.aggr_number > 10)).sql self.assertEqual(len(queries), 1) self.assertEqual( "SELECT " "CASE WHEN SUM(\"number\")>10 THEN 'set(SUM(number)>10)' " "ELSE 'complement(SUM(number)>10)' END \"$set(SUM(number)>10)\"," 'SUM("number") "$aggr_number" ' 'FROM "test" ' 'ORDER BY 1 ' 'LIMIT 200000', str(queries[0]), ) def test_dimension_is_replaced_by_default_when_result_set_filter_is_present(self): queries = ( ds.query.widget(f.Pandas(ds.fields.aggr_number)) .dimension(ds.fields.text) .filter(f.ResultSet(ds.fields.text == "abc")) .sql ) self.assertEqual(len(queries), 1) self.assertEqual( "SELECT " "CASE WHEN \"text\"='abc' THEN 'set(text=''abc'')' ELSE 'complement(text=''abc'')' END \"$text\"," 'SUM("number") "$aggr_number" ' 'FROM "test" ' "GROUP BY \"$text\" " "ORDER BY \"$text\" " "LIMIT 200000", str(queries[0]), ) def test_dimension_is_replaced_by_default_in_the_target_dimension_place_when_result_set_filter_is_present( self, ): queries = ( ds.query.widget(f.Pandas(ds.fields.aggr_number)) .dimension(ds.fields.date) .dimension(ds.fields.text) .dimension(ds.fields.boolean) 
.filter(f.ResultSet(ds.fields.text == "abc")) .sql ) self.assertEqual(len(queries), 1) self.assertEqual( "SELECT " '"date" "$date",' "CASE WHEN \"text\"='abc' THEN 'set(text=''abc'')' ELSE 'complement(text=''abc'')' END \"$text\"," '"boolean" "$boolean",' 'SUM("number") "$aggr_number" ' 'FROM "test" ' 'GROUP BY "$date","$text","$boolean" ' 'ORDER BY "$date","$text","$boolean" ' 'LIMIT 200000', str(queries[0]), ) def test_dimension_with_dimension_modifier_is_replaced_by_default_when_result_set_filter_is_present( self, ): queries = ( ds.query.widget(f.Pandas(ds.fields.aggr_number)) .dimension(ds.fields.date) .dimension(f.Rollup(ds.fields.boolean)) .filter(f.ResultSet(ds.fields.boolean == True)) .sql ) self.assertEqual(len(queries), 2) with self.subTest('base query is the same as without totals'): self.assertEqual( "SELECT " '"date" "$date",' "CASE WHEN \"boolean\"=true THEN 'set(boolean=true)' ELSE 'complement(boolean=true)' END \"$boolean\"," 'SUM("number") "$aggr_number" ' 'FROM "test" ' 'GROUP BY "$date","$boolean" ' 'ORDER BY "$date","$boolean" ' 'LIMIT 200000', str(queries[0]), ) with self.subTest('totals dimension is replaced with _FIREANT_ROLLUP_VALUE_'): self.assertEqual( "SELECT " '"date" "$date",' '\'_FIREANT_ROLLUP_VALUE_\' "$boolean",' 'SUM("number") "$aggr_number" ' 'FROM "test" ' 'GROUP BY "$date" ' 'ORDER BY "$date","$boolean" ' 'LIMIT 200000', str(queries[1]), ) def test_dimension_is_inserted_before_conditional_dimension_when_result_set_filter_wont_ignore_dimensions( self, ): queries = ( ds.query.widget(f.Pandas(ds.fields.aggr_number)) .dimension(ds.fields.text) .filter(f.ResultSet(ds.fields.text == "abc", will_replace_referenced_dimension=False)) .sql ) self.assertEqual(len(queries), 1) self.assertEqual( "SELECT " "CASE WHEN \"text\"='abc' THEN 'set(text=''abc'')' ELSE 'complement(text=''abc'')' END \"$set(text='abc')\"," '"text" "$text",' 'SUM("number") "$aggr_number" ' 'FROM "test" ' 'GROUP BY "$set(text=\'abc\')","$text" ' 'ORDER BY 
"$set(text=\'abc\')","$text" ' 'LIMIT 200000', str(queries[0]), ) def test_dimension_breaks_complement_down_when_result_set_filter_wont_group_complement( self, ): queries = ( ds.query.widget(f.Pandas(ds.fields.aggr_number)) .dimension(ds.fields.text) .filter(f.ResultSet(ds.fields.text == "abc", will_group_complement=False)) .sql ) self.assertEqual(len(queries), 1) self.assertEqual( "SELECT " "CASE WHEN \"text\"='abc' THEN 'set(text=''abc'')' ELSE \"text\" END \"$text\"," 'SUM("number") "$aggr_number" ' 'FROM "test" ' "GROUP BY \"$text\" " "ORDER BY \"$text\" " "LIMIT 200000", str(queries[0]), ) def test_dimension_is_inserted_in_dimensions_even_when_not_selected(self): queries = ds.query.widget(f.Pandas(ds.fields.aggr_number)).filter(f.ResultSet(ds.fields.text == "abc")).sql self.assertEqual(len(queries), 1) self.assertEqual( "SELECT " "CASE WHEN \"text\"='abc' THEN 'set(text=''abc'')' ELSE 'complement(text=''abc'')' END \"$text\"," 'SUM("number") "$aggr_number" ' 'FROM "test" ' "GROUP BY \"$text\" " "ORDER BY \"$text\" " "LIMIT 200000", str(queries[0]), ) def test_dimension_is_inserted_as_last_dimension_when_not_selected(self): queries = ( ds.query.widget(f.Pandas(ds.fields.aggr_number)) .dimension(ds.fields.date) .dimension(ds.fields.boolean) .filter(f.ResultSet(ds.fields.text == "abc")) .sql ) self.assertEqual(len(queries), 1) self.assertEqual( "SELECT " '"date" "$date",' '"boolean" "$boolean",' "CASE WHEN \"text\"='abc' THEN 'set(text=''abc'')' ELSE 'complement(text=''abc'')' END \"$text\"," 'SUM("number") "$aggr_number" ' 'FROM "test" ' 'GROUP BY "$date","$boolean","$text" ' 'ORDER BY "$date","$boolean","$text" ' 'LIMIT 200000', str(queries[0]), ) def test_dimension_uses_set_label_kwarg_and_None_for_complement(self): queries = ( ds.query.widget(f.Pandas(ds.fields.aggr_number)) .dimension(ds.fields.text) .filter(f.ResultSet(ds.fields.text == "abc", set_label="Text is ABC")) .sql ) self.assertEqual(len(queries), 1) self.assertEqual( "SELECT " "CASE WHEN 
\"text\"='abc' THEN 'Text is ABC' ELSE NULL END " "\"$text\"," 'SUM("number") "$aggr_number" ' 'FROM "test" ' "GROUP BY \"$text\" " "ORDER BY \"$text\" " "LIMIT 200000", str(queries[0]), ) def test_dimension_breaks_complement_down_even_when_set_label_is_set_when_result_set_filter_wont_group_complement( self, ): queries = ( ds.query.widget(f.Pandas(ds.fields.aggr_number)) .dimension(ds.fields.text) .filter( f.ResultSet( ds.fields.text == "abc", set_label="IS ABC", will_group_complement=False, ) ) .sql ) self.assertEqual(len(queries), 1) self.assertEqual( "SELECT " "CASE WHEN \"text\"='abc' THEN 'IS ABC' ELSE \"text\" END \"$text\"," 'SUM("number") "$aggr_number" ' 'FROM "test" ' "GROUP BY \"$text\" " "ORDER BY \"$text\" " "LIMIT 200000", str(queries[0]), ) def test_dimension_breaks_complement_down_even_when_both_labels_are_set_but_wont_group_complement( self, ): queries = ( ds.query.widget(f.Pandas(ds.fields.aggr_number)) .dimension(ds.fields.text) .filter( f.ResultSet( ds.fields.text == "abc", set_label="IS ABC", complement_label="OTHERS", will_group_complement=False, ) ) .sql ) self.assertEqual(len(queries), 1) self.assertEqual( "SELECT " "CASE WHEN \"text\"='abc' THEN 'IS ABC' ELSE \"text\" END \"$text\"," 'SUM("number") "$aggr_number" ' 'FROM "test" ' "GROUP BY \"$text\" " "ORDER BY \"$text\" " "LIMIT 200000", str(queries[0]), ) def test_dimension_uses_complement_label_kwarg_and_None_for_set(self): queries = ( ds.query.widget(f.Pandas(ds.fields.aggr_number)) .dimension(ds.fields.text) .filter(f.ResultSet(ds.fields.text == "abc", complement_label="Text is NOT ABC")) .sql ) self.assertEqual(len(queries), 1) self.assertEqual( "SELECT " "CASE WHEN \"text\"='abc' THEN NULL ELSE 'Text is NOT ABC' END " "\"$text\"," 'SUM("number") "$aggr_number" ' 'FROM "test" ' "GROUP BY \"$text\" " "ORDER BY \"$text\" " "LIMIT 200000", str(queries[0]), ) def test_dimension_uses_both_set_and_complement_label_kwargs_when_available(self): queries = ( 
ds.query.widget(f.Pandas(ds.fields.aggr_number)) .dimension(ds.fields.text) .filter( f.ResultSet( ds.fields.text == "abc", set_label="Text is ABC", complement_label="Text is NOT ABC", ) ) .sql ) self.assertEqual(len(queries), 1) self.assertEqual( "SELECT " "CASE WHEN \"text\"='abc' THEN 'Text is ABC' ELSE 'Text is NOT ABC' END " "\"$text\"," 'SUM("number") "$aggr_number" ' 'FROM "test" ' "GROUP BY \"$text\" " "ORDER BY \"$text\" " "LIMIT 200000", str(queries[0]), ) def test_dimension_is_replaced_when_references_are_present(self): queries = ( ds.query.widget(f.Pandas(ds.fields.aggr_number)) .dimension(ds.fields.date) .dimension(ds.fields.boolean) .reference(f.WeekOverWeek(ds.fields.date)) .filter(f.ResultSet(ds.fields.text == "abc")) .sql ) self.assertEqual(len(queries), 2) with self.subTest("base query"): self.assertEqual( "SELECT " '"date" "$date",' '"boolean" "$boolean",' "CASE WHEN \"text\"='abc' THEN 'set(text=''abc'')' ELSE 'complement(text=''abc'')' END \"$text\"," 'SUM("number") "$aggr_number" ' 'FROM "test" ' 'GROUP BY "$date","$boolean","$text" ' 'ORDER BY "$date","$boolean","$text" ' 'LIMIT 200000', str(queries[0]), ) with self.subTest("ref query"): self.assertEqual( "SELECT " 'TIMESTAMPADD(week,1,"date") "$date",' '"boolean" "$boolean",' "CASE WHEN \"text\"='abc' THEN 'set(text=''abc'')' ELSE 'complement(text=''abc'')' END \"$text\"," 'SUM("number") "$aggr_number_wow" ' 'FROM "test" ' 'GROUP BY "$date","$boolean","$text" ' 'ORDER BY "$date","$boolean","$text" ' 'LIMIT 200000', str(queries[1]), ) def test_dimension_filter_variations_with_sets(self): for field_alias, fltr in [ ('text', ds.fields.text.like("%abc%")), ('text', ds.fields.text.not_like("%abc%")), ('text', ds.fields.text.like("%abc%", "%cde%")), ('text', ds.fields.text.not_like("%abc%", "%cde%")), ('text', ds.fields.text.isin(["abc"])), ('text', ds.fields.text.notin(["abc"])), ('date', ds.fields.date.between('date1', 'date2')), ('number', ds.fields.number.between(5, 15)), ('number', 
ds.fields.number.isin([1, 2, 3])), ('number', ds.fields.number.notin([1, 2, 3])), ]: fltr_sql = fltr.definition.get_sql(quote_char="") with self.subTest(fltr_sql): queries = ( ds.query.widget(f.Pandas(ds.fields.aggr_number)) .dimension(ds.fields[field_alias]) .filter(f.ResultSet(fltr, set_label='set_A', complement_label='set_B')) .sql ) self.assertEqual(len(queries), 1) self.assertEqual( "SELECT " f"CASE WHEN {fltr} THEN 'set_A' ELSE 'set_B' END \"${field_alias}\"," 'SUM("number") "$aggr_number" ' 'FROM "test" ' f"GROUP BY \"${field_alias}\" " f"ORDER BY \"${field_alias}\" " "LIMIT 200000", str(queries[0]), ) def test_deeply_nested_dimension_filter_with_sets(self): field_alias = 'text' fltr = ds.fields.text.like( fn.Concat( fn.Upper(fn.Trim(fn.Concat('%ab', ds.fields.number))), ds.fields.aggr_number, fn.Concat(ds.fields.date.between('date1', 'date2'), 'c%'), ) ) queries = ( ds.query.widget(f.Pandas(ds.fields.aggr_number)) .dimension(ds.fields[field_alias]) .filter(f.ResultSet(fltr, set_label='set_A', complement_label='set_B')) .sql ) self.assertEqual(len(queries), 1) self.assertEqual( "SELECT " f"CASE WHEN {fltr} THEN 'set_A' ELSE 'set_B' END \"${field_alias}\"," 'SUM("number") "$aggr_number" ' 'FROM "test" ' f"GROUP BY \"${field_alias}\" " f"ORDER BY \"${field_alias}\" " "LIMIT 200000", str(queries[0]), )
35.540541
121
0.500951
1,649
15,780
4.627047
0.091571
0.067104
0.044037
0.042464
0.792923
0.773657
0.756488
0.740105
0.701311
0.69633
0
0.016234
0.348099
15,780
443
122
35.620767
0.725479
0.003739
0
0.66
0
0.0175
0.224951
0.032699
0
0
0
0
0.085
1
0.04
false
0
0.01
0
0.055
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
c4ba7b88a4b5847d0b3a6b70d501c32cccb1dcc6
1,465
py
Python
tatsu/tokenizing.py
bookofproofs/TatSu
501875416c8c802bb518f35f1ae08d9ebf437af2
[ "BSD-2-Clause" ]
259
2017-05-22T04:33:21.000Z
2022-03-29T00:20:35.000Z
tatsu/tokenizing.py
bookofproofs/TatSu
501875416c8c802bb518f35f1ae08d9ebf437af2
[ "BSD-2-Clause" ]
160
2017-05-30T01:28:58.000Z
2022-03-31T02:45:52.000Z
tatsu/tokenizing.py
bookofproofs/TatSu
501875416c8c802bb518f35f1ae08d9ebf437af2
[ "BSD-2-Clause" ]
53
2017-05-22T05:00:58.000Z
2022-01-04T16:06:17.000Z
from __future__ import annotations

from .util._common import _prints
from .exceptions import ParseError  # noqa


class Tokenizer:
    """Abstract interface for the token streams consumed by TatSu parsers.

    Every method that raises ``NotImplementedError`` is part of the contract
    a concrete tokenizer must fulfil; this base class only contributes error
    reporting (:meth:`error`) and position pretty-printing
    (:meth:`lookahead_pos`).
    """

    def error(self, *args, **kwargs):
        """Abort parsing with a :class:`ParseError` built from the arguments."""
        # _prints renders the arguments exactly like print() would.
        raise ParseError(_prints(*args, **kwargs))

    @property
    def filename(self):
        """Name of the source being tokenized."""
        raise NotImplementedError

    @property
    def ignorecase(self):
        """Whether matching is case-insensitive."""
        raise NotImplementedError

    @property
    def pos(self):
        """Current position within the stream."""
        raise NotImplementedError

    def goto(self, pos):
        """Move the stream position to ``pos``."""
        raise NotImplementedError

    def atend(self):
        """True when the stream is exhausted."""
        raise NotImplementedError

    def ateol(self):
        """True when positioned at an end of line."""
        raise NotImplementedError

    @property
    def token(self):
        """The token at the current position."""
        raise NotImplementedError

    @property
    def current(self):
        """Alias for :attr:`token`."""
        return self.token

    def next(self):
        """Advance the stream by one unit."""
        raise NotImplementedError

    def next_token(self):
        """Advance to the next token."""
        raise NotImplementedError

    def match(self, token, ignorecase=False):
        """Try to match ``token`` at the current position."""
        raise NotImplementedError

    def matchre(self, pattern, ignorecase=False):
        """Try to match the regular expression ``pattern``."""
        raise NotImplementedError

    def posline(self, pos):
        """Return the line corresponding to ``pos``."""
        raise NotImplementedError

    def line_info(self, pos=None):
        """Return line/column information for ``pos`` (default: current)."""
        raise NotImplementedError

    def get_lines(self, start=None, end=None):
        """Return the source lines in the ``start``..``end`` range."""
        raise NotImplementedError

    def lookahead(self):
        """Return a peek at upcoming input without consuming it."""
        raise NotImplementedError

    def lookahead_pos(self):
        """Render the current position as ``'~line:col'``, or ``''`` at EOF."""
        if self.atend():
            return ''
        li = self.line_info()
        # One-based line/column for human-readable diagnostics.
        return '~%d:%d' % (li.line + 1, li.col + 1)
21.231884
55
0.645051
150
1,465
6.22
0.313333
0.385852
0.318328
0.166131
0.330118
0
0
0
0
0
0
0.001885
0.275768
1,465
68
56
21.544118
0.877474
0.00273
0
0.416667
0
0
0.004112
0
0
0
0
0
0
1
0.375
false
0
0.0625
0.020833
0.520833
0.041667
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
5
c4c6bea4fdec63109559e20197c623fad9bc637e
105,170
py
Python
thermo/eos_alpha_functions.py
RoryKurek/thermo
985279467faa028234ab422a19b69385e5100149
[ "MIT" ]
380
2016-07-04T09:45:20.000Z
2022-03-20T18:09:45.000Z
thermo/eos_alpha_functions.py
simonmb/thermo
9abbb0ea71abe8677155e029d01aebe74cce137f
[ "MIT" ]
104
2016-07-10T20:47:12.000Z
2022-03-22T20:43:39.000Z
thermo/eos_alpha_functions.py
simonmb/thermo
9abbb0ea71abe8677155e029d01aebe74cce137f
[ "MIT" ]
96
2016-07-05T20:54:05.000Z
2022-02-23T03:06:02.000Z
# -*- coding: utf-8 -*- # pylint: disable=E1101 r'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling. Copyright (C) 2018, 2019, 2020 Caleb Bell <Caleb.Andrew.Bell@gmail.com> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. This module contains implementations of the calculation of pure-component EOS :math:`a \alpha` parameters in a vectorized way. Functions for calculating their temperature derivatives as may be necessary are included as well. For certain alpha functions, a class is available to provide these functions to and class that inherits from it. A mixing rule must be used on the `a_alphas` to get the overall `a_alpha` term. .. contents:: :local: Vectorized Alpha Functions -------------------------- .. autofunction:: thermo.eos_alpha_functions.PR_a_alphas_vectorized .. autofunction:: thermo.eos_alpha_functions.SRK_a_alphas_vectorized .. autofunction:: thermo.eos_alpha_functions.PRSV_a_alphas_vectorized .. autofunction:: thermo.eos_alpha_functions.PRSV2_a_alphas_vectorized .. 
autofunction:: thermo.eos_alpha_functions.APISRK_a_alphas_vectorized .. autofunction:: thermo.eos_alpha_functions.RK_a_alphas_vectorized Vectorized Alpha Functions With Derivatives ------------------------------------------- .. autofunction:: thermo.eos_alpha_functions.PR_a_alpha_and_derivatives_vectorized .. autofunction:: thermo.eos_alpha_functions.SRK_a_alpha_and_derivatives_vectorized .. autofunction:: thermo.eos_alpha_functions.PRSV_a_alpha_and_derivatives_vectorized .. autofunction:: thermo.eos_alpha_functions.PRSV2_a_alpha_and_derivatives_vectorized .. autofunction:: thermo.eos_alpha_functions.APISRK_a_alpha_and_derivatives_vectorized .. autofunction:: thermo.eos_alpha_functions.RK_a_alpha_and_derivatives_vectorized Class With Alpha Functions -------------------------- The class-based ones van save a little code when implementing a new EOS. If there is not a standalone function available for an alpha function, it has not yet been accelerated in a nice vectorized way. .. autoclass:: thermo.eos_alpha_functions.a_alpha_base :members: :undoc-members: :show-inheritance: .. autoclass:: thermo.eos_alpha_functions.Almeida_a_alpha :members: :undoc-members: :show-inheritance: .. autoclass:: thermo.eos_alpha_functions.Androulakis_a_alpha :members: :undoc-members: :show-inheritance: .. autoclass:: thermo.eos_alpha_functions.Chen_Yang_a_alpha :members: :undoc-members: :show-inheritance: .. autoclass:: thermo.eos_alpha_functions.Coquelet_a_alpha :members: :undoc-members: :show-inheritance: .. autoclass:: thermo.eos_alpha_functions.Gasem_a_alpha :members: :undoc-members: :show-inheritance: .. autoclass:: thermo.eos_alpha_functions.Gibbons_Laughton_a_alpha :members: :undoc-members: :show-inheritance: .. autoclass:: thermo.eos_alpha_functions.Haghtalab_a_alpha :members: :undoc-members: :show-inheritance: .. autoclass:: thermo.eos_alpha_functions.Harmens_Knapp_a_alpha :members: :undoc-members: :show-inheritance: .. 
autoclass:: thermo.eos_alpha_functions.Heyen_a_alpha :members: :undoc-members: :show-inheritance: .. autoclass:: thermo.eos_alpha_functions.Mathias_1983_a_alpha :members: :undoc-members: :show-inheritance: .. autoclass:: thermo.eos_alpha_functions.Mathias_Copeman_a_alpha :members: :undoc-members: :show-inheritance: .. autoclass:: thermo.eos_alpha_functions.Mathias_Copeman_poly_a_alpha :members: :undoc-members: :show-inheritance: .. autoclass:: thermo.eos_alpha_functions.Mathias_Copeman_untruncated_a_alpha :members: :undoc-members: :show-inheritance: .. autoclass:: thermo.eos_alpha_functions.Melhem_a_alpha :members: :undoc-members: :show-inheritance: .. autoclass:: thermo.eos_alpha_functions.Poly_a_alpha :members: :undoc-members: :show-inheritance: .. autoclass:: thermo.eos_alpha_functions.Saffari_a_alpha :members: :undoc-members: :show-inheritance: .. autoclass:: thermo.eos_alpha_functions.Schwartzentruber_a_alpha :members: :undoc-members: :show-inheritance: .. autoclass:: thermo.eos_alpha_functions.Soave_1972_a_alpha :members: :undoc-members: :show-inheritance: .. autoclass:: thermo.eos_alpha_functions.Soave_1984_a_alpha :members: :undoc-members: :show-inheritance: .. autoclass:: thermo.eos_alpha_functions.Soave_1979_a_alpha :members: :undoc-members: :show-inheritance: .. autoclass:: thermo.eos_alpha_functions.Soave_1993_a_alpha :members: :undoc-members: :show-inheritance: .. autoclass:: thermo.eos_alpha_functions.Trebble_Bishnoi_a_alpha :members: :undoc-members: :show-inheritance: .. autoclass:: thermo.eos_alpha_functions.Twu91_a_alpha :members: :undoc-members: :show-inheritance: .. autoclass:: thermo.eos_alpha_functions.TwuPR95_a_alpha :members: :undoc-members: :show-inheritance: .. autoclass:: thermo.eos_alpha_functions.TwuSRK95_a_alpha :members: :undoc-members: :show-inheritance: .. autoclass:: thermo.eos_alpha_functions.Yu_Lu_a_alpha :members: :undoc-members: :show-inheritance: Pure Alpha Functions -------------------- .. 
autofunction:: thermo.eos_alpha_functions.Twu91_alpha_pure .. autofunction:: thermo.eos_alpha_functions.Soave_1972_alpha_pure .. autofunction:: thermo.eos_alpha_functions.Soave_1979_alpha_pure .. autofunction:: thermo.eos_alpha_functions.Heyen_alpha_pure .. autofunction:: thermo.eos_alpha_functions.Harmens_Knapp_alpha_pure .. autofunction:: thermo.eos_alpha_functions.Mathias_1983_alpha_pure .. autofunction:: thermo.eos_alpha_functions.Mathias_Copeman_untruncated_alpha_pure .. autofunction:: thermo.eos_alpha_functions.Gibbons_Laughton_alpha_pure .. autofunction:: thermo.eos_alpha_functions.Soave_1984_alpha_pure .. autofunction:: thermo.eos_alpha_functions.Yu_Lu_alpha_pure .. autofunction:: thermo.eos_alpha_functions.Trebble_Bishnoi_alpha_pure .. autofunction:: thermo.eos_alpha_functions.Melhem_alpha_pure .. autofunction:: thermo.eos_alpha_functions.Androulakis_alpha_pure .. autofunction:: thermo.eos_alpha_functions.Schwartzentruber_alpha_pure .. autofunction:: thermo.eos_alpha_functions.Almeida_alpha_pure .. autofunction:: thermo.eos_alpha_functions.Soave_1993_alpha_pure .. autofunction:: thermo.eos_alpha_functions.Gasem_alpha_pure .. autofunction:: thermo.eos_alpha_functions.Coquelet_alpha_pure .. autofunction:: thermo.eos_alpha_functions.Haghtalab_alpha_pure .. autofunction:: thermo.eos_alpha_functions.Saffari_alpha_pure .. 
autofunction:: thermo.eos_alpha_functions.Chen_Yang_alpha_pure ''' from __future__ import division, print_function __all__ = [ 'PR_a_alphas_vectorized', 'PR_a_alpha_and_derivatives_vectorized', 'RK_a_alphas_vectorized', 'RK_a_alpha_and_derivatives_vectorized', 'SRK_a_alphas_vectorized', 'SRK_a_alpha_and_derivatives_vectorized', 'PRSV_a_alphas_vectorized', 'PRSV_a_alpha_and_derivatives_vectorized', 'PRSV2_a_alphas_vectorized', 'PRSV2_a_alpha_and_derivatives_vectorized', 'APISRK_a_alphas_vectorized', 'APISRK_a_alpha_and_derivatives_vectorized', 'a_alpha_base', 'Poly_a_alpha', 'Soave_1972_a_alpha', 'Heyen_a_alpha', 'Harmens_Knapp_a_alpha', 'Mathias_1983_a_alpha', 'Mathias_Copeman_untruncated_a_alpha', 'Mathias_Copeman_poly_a_alpha', 'Gibbons_Laughton_a_alpha', 'Soave_1984_a_alpha', 'Yu_Lu_a_alpha', 'Trebble_Bishnoi_a_alpha', 'Melhem_a_alpha', 'Androulakis_a_alpha', 'Schwartzentruber_a_alpha', 'Almeida_a_alpha', 'Twu91_a_alpha', 'Soave_1993_a_alpha', 'Gasem_a_alpha', 'Coquelet_a_alpha', 'Haghtalab_a_alpha', 'Saffari_a_alpha', 'Chen_Yang_a_alpha', 'TwuSRK95_a_alpha', 'TwuPR95_a_alpha', 'Soave_1979_a_alpha', 'Twu91_alpha_pure', 'Soave_1972_alpha_pure', 'Soave_1979_alpha_pure', 'Heyen_alpha_pure', 'Harmens_Knapp_alpha_pure', 'Mathias_1983_alpha_pure', 'Mathias_Copeman_untruncated_alpha_pure', 'Gibbons_Laughton_alpha_pure', 'Soave_1984_alpha_pure', 'Yu_Lu_alpha_pure', 'Trebble_Bishnoi_alpha_pure', 'Melhem_alpha_pure', 'Androulakis_alpha_pure', 'Schwartzentruber_alpha_pure', 'Almeida_alpha_pure', 'Soave_1993_alpha_pure', 'Gasem_alpha_pure', 'Coquelet_alpha_pure', 'Haghtalab_alpha_pure', 'Saffari_alpha_pure', 'Chen_Yang_alpha_pure', 'Mathias_Copeman_a_alpha'] from fluids.numerics import (horner, horner_and_der2, numpy as np) from chemicals.utils import log, exp, sqrt, copysign try: array = np.array except: pass def PR_a_alphas_vectorized(T, Tcs, ais, kappas, a_alphas=None): r'''Calculates the `a_alpha` terms for the Peng-Robinson equation of state given the critical 
temperatures `Tcs`, constants `ais`, and `kappas`. .. math:: a_i\alpha(T)_i=a_i [1+\kappa_i(1-\sqrt{T_{r,i}})]^2 Parameters ---------- T : float Temperature, [K] Tcs : list[float] Critical temperatures of components, [K] ais : list[float] `a` parameters of cubic EOS, :math:`a_i=0.45724\frac{R^2T_{c,i}^2}{P_{c,i}}`, [Pa*m^6/mol^2] kappas : list[float] `kappa` parameters of Peng-Robinson EOS; formulas vary, but the original form uses :math:`\kappa_i=0.37464+1.54226\omega_i-0.26992\omega^2_i`, [-] a_alphas : list[float], optional Vector for pure component `a_alpha` terms in the cubic EOS to be calculated and stored in, [Pa*m^6/mol^2] Returns ------- a_alphas : list[float] Pure component `a_alpha` terms in the cubic EOS, [Pa*m^6/mol^2] Notes ----- Examples -------- >>> Tcs = [469.7, 507.4, 540.3] >>> ais = [2.0698956357716662, 2.7018068455659545, 3.3725793885832323] >>> kappas = [0.74192743008, 0.819919992, 0.8800122140799999] >>> PR_a_alphas_vectorized(322.29, Tcs=Tcs, ais=ais, kappas=kappas) [2.6306811679, 3.6761503348, 4.8593286234] ''' N = len(Tcs) x0_inv = 1.0/sqrt(T) x0 = T*x0_inv if a_alphas is None: a_alphas = [0.0]*N for i in range(N): x1 = 1.0/sqrt(Tcs[i]) x2 = kappas[i]*(x0*x1 - 1.) - 1. a_alphas[i] = ais[i]*x2*x2 return a_alphas def PR_a_alpha_and_derivatives_vectorized(T, Tcs, ais, kappas, a_alphas=None, da_alpha_dTs=None, d2a_alpha_dT2s=None): r'''Calculates the `a_alpha` terms and their first two temperature derivatives for the Peng-Robinson equation of state given the critical temperatures `Tcs`, constants `ais`, and `kappas`. .. math:: a_i\alpha(T)_i=a_i[1+\kappa_i(1-\sqrt{T_{r,i}})]^2 .. math:: \frac{d a_i\alpha_i}{dT} = - \frac{a_i \kappa_i}{T^{0.5} {T_c}_i^{0.5}} \left(\kappa_i \left(- \frac{T^{0.5}}{{T_c}_i^{0.5}} + 1\right) + 1\right) .. 
math:: \frac{d^2 a_i\alpha_i}{dT^2} = 0.5 a_i \kappa_i \left(- \frac{1}{T^{1.5} {T_c}_i^{0.5}} \left(\kappa_i \left(\frac{T^{0.5}}{{T_c}_i^{0.5}} - 1\right) - 1\right) + \frac{\kappa_i}{T {T_c}_i}\right) Parameters ---------- T : float Temperature, [K] Tcs : list[float] Critical temperatures of components, [K] ais : list[float] `a` parameters of cubic EOS, :math:`a_i=0.45724\frac{R^2T_{c,i}^2}{P_{c,i}}` [Pa*m^6/mol^2] kappas : list[float] `kappa` parameters of Peng-Robinson EOS; formulas vary, but the original form uses :math:`\kappa_i=0.37464+1.54226\omega_i-0.26992\omega^2_i`, [-] Returns ------- a_alphas : list[float] Pure component `a_alpha` terms in the cubic EOS, [Pa*m^6/mol^2] da_alpha_dTs : list[float] First temperature derivative of pure component `a_alpha`, [Pa*m^6/(mol^2*K)] d2a_alpha_dT2s : list[float] Second temperature derivative of pure component `a_alpha`, [Pa*m^6/(mol^2*K^2)] Notes ----- Examples -------- >>> Tcs = [469.7, 507.4, 540.3] >>> ais = [2.0698956357716662, 2.7018068455659545, 3.3725793885832323] >>> kappas = [0.74192743008, 0.819919992, 0.8800122140799999] >>> PR_a_alpha_and_derivatives_vectorized(322.29, Tcs=Tcs, ais=ais, kappas=kappas) ([2.63068116797, 3.67615033489, 4.859328623453], [-0.0044497546430, -0.00638993749167, -0.0085372308846], [1.066668360e-05, 1.546687574587e-05, 2.07440632117e-05]) ''' N = len(Tcs) x0_inv = 1.0/sqrt(T) x0 = T*x0_inv T_inv = x0_inv*x0_inv x0T_inv = x0_inv*T_inv x5, x6 = 0.5*T_inv, 0.5*x0T_inv if a_alphas is None: a_alphas = [0.0]*N if da_alpha_dTs is None: da_alpha_dTs = [0.0]*N if d2a_alpha_dT2s is None: d2a_alpha_dT2s = [0.0]*N for i in range(N): x1 = 1.0/sqrt(Tcs[i]) x2 = kappas[i]*(x0*x1 - 1.) - 1. 
x3 = ais[i]*kappas[i] x4 = x1*x2 a_alphas[i] = ais[i]*x2*x2 da_alpha_dTs[i] = x4*x3*x0_inv d2a_alpha_dT2s[i] = x3*(x5*x1*x1*kappas[i] - x4*x6) return a_alphas, da_alpha_dTs, d2a_alpha_dT2s def SRK_a_alphas_vectorized(T, Tcs, ais, ms, a_alphas=None): r'''Calculates the `a_alpha` terms for the SRK equation of state given the critical temperatures `Tcs`, constants `ais`, and `kappas`. .. math:: a_i\alpha(T)_i = \left[1 + m_i\left(1 - \sqrt{\frac{T}{T_{c,i}}} \right)\right]^2 Parameters ---------- T : float Temperature, [K] Tcs : list[float] Critical temperatures of components, [K] ais : list[float] `a` parameters of cubic EOS, :math:`a_i=\frac{0.42748\cdot R^2(T_{c,i})^{2}}{P_{c,i}}`, [Pa*m^6/mol^2] ms : list[float] `m` parameters of SRK EOS; formulas vary, but the original form uses :math:`m_i = 0.480 + 1.574\omega_i - 0.176\omega_i^2`, [-] Returns ------- a_alphas : list[float] Pure component `a_alpha` terms in the cubic EOS, [Pa*m^6/mol^2] Notes ----- Examples -------- >>> Tcs = [469.7, 507.4, 540.3] >>> ais = [1.9351940385541342, 2.525982668162287, 3.1531036708059315] >>> ms = [0.8610138239999999, 0.9436976, 1.007889024] >>> SRK_a_alphas_vectorized(322.29, Tcs=Tcs, ais=ais, ms=ms) [2.549485814512, 3.586598245260, 4.76614806648] ''' sqrtT = sqrt(T) N = len(Tcs) if a_alphas is None: a_alphas = [0.0]*N for i in range(N): x0 = ms[i]*(1. - sqrtT/sqrt(Tcs[i])) + 1.0 a_alphas[i] = ais[i]*x0*x0 return a_alphas def SRK_a_alpha_and_derivatives_vectorized(T, Tcs, ais, ms, a_alphas=None, da_alpha_dTs=None, d2a_alpha_dT2s=None): r'''Calculates the `a_alpha` terms and their first and second temperature derivatives for the SRK equation of state given the critical temperatures `Tcs`, constants `ais`, and `kappas`. .. math:: a_i\alpha(T)_i = \left[1 + m_i\left(1 - \sqrt{\frac{T}{T_{c,i}}} \right)\right]^2 .. math:: \frac{d a_i\alpha_i}{dT} = \frac{a_i m_i}{T} \sqrt{\frac{T}{T_{c,i}}} \left(m_i \left(\sqrt{\frac{T}{T{c,i}}} - 1\right) - 1\right) .. 
math:: \frac{d^2 a_i\alpha_i}{dT^2} = \frac{a_i m_i \sqrt{\frac{T}{T_{c,i}}}} {2 T^{2}} \left(m_i + 1\right) Parameters ---------- T : float Temperature, [K] Tcs : list[float] Critical temperatures of components, [K] ais : list[float] `a` parameters of cubic EOS, :math:`a_i=\frac{0.42748\cdot R^2(T_{c,i})^{2}}{P_{c,i}}`, [Pa*m^6/mol^2] ms : list[float] `m` parameters of SRK EOS; formulas vary, but the original form uses :math:`m_i = 0.480 + 1.574\omega_i - 0.176\omega_i^2`, [-] Returns ------- a_alphas : list[float] Pure component `a_alpha` terms in the cubic EOS, [Pa*m^6/mol^2] da_alpha_dTs : list[float] First temperature derivative of pure component `a_alpha`, [Pa*m^6/(mol^2*K)] d2a_alpha_dT2s : list[float] Second temperature derivative of pure component `a_alpha`, [Pa*m^6/(mol^2*K^2)] Notes ----- Examples -------- >>> Tcs = [469.7, 507.4, 540.3] >>> ais = [1.9351940385541342, 2.525982668162287, 3.1531036708059315] >>> ms = [0.8610138239999999, 0.9436976, 1.007889024] >>> SRK_a_alpha_and_derivatives_vectorized(322.29, Tcs=Tcs, ais=ais, ms=ms) ([2.549485814512, 3.586598245260, 4.76614806648], [-0.004915469296196, -0.00702410108423, -0.00936320876945], [1.236441916324e-05, 1.77752796719e-05, 2.37231823137e-05]) ''' N = len(Tcs) sqrtnT = 1.0/sqrt(T) sqrtT = T*sqrtnT T_inv = sqrtnT*sqrtnT x10 = 0.5*T_inv*T_inv nT_inv = -T_inv if a_alphas is None: a_alphas = [0.0]*N if da_alpha_dTs is None: da_alpha_dTs = [0.0]*N if d2a_alpha_dT2s is None: d2a_alpha_dT2s = [0.0]*N for i in range(N): x1 = sqrtT/sqrt(Tcs[i]) x2 = ais[i]*ms[i]*x1 x3 = ms[i]*(1.0 - x1) + 1. a_alphas[i] = ais[i]*x3*x3 da_alpha_dTs[i] = x2*nT_inv*x3 d2a_alpha_dT2s[i] = x2*x10*(ms[i] + 1.) return a_alphas, da_alpha_dTs, d2a_alpha_dT2s def RK_a_alphas_vectorized(T, Tcs, ais, a_alphas=None): r'''Calculates the `a_alpha` terms for the RK equation of state given the critical temperatures `Tcs`, and `a` parameters `ais`. .. 
math:: a_i\alpha_i = \frac{a_i}{\sqrt{\frac{T}{T_{c,i}}}} Parameters ---------- T : float Temperature, [K] Tcs : list[float] Critical temperatures of components, [K] ais : list[float] `a` parameters of cubic EOS, :math:`a_i=\frac{0.42748\cdot R^2(T_{c,i})^{2}}{P_{c,i}}`, [Pa*m^6/mol^2] Returns ------- a_alphas : list[float] Pure component `a_alpha` terms in the cubic EOS, [Pa*m^6/mol^2] Notes ----- Examples -------- >>> Tcs = [469.7, 507.4, 540.3] >>> ais = [1.9351940385541342, 2.525982668162287, 3.1531036708059315] >>> RK_a_alphas_vectorized(322.29, Tcs=Tcs, ais=ais) [2.3362073307, 3.16943743055, 4.0825575798] ''' N = len(ais) if a_alphas is None: a_alphas = [0.0]*N T_root_inv = 1.0/sqrt(T) for i in range(N): a_alphas[i] = ais[i]*sqrt(Tcs[i])*T_root_inv return a_alphas def RK_a_alpha_and_derivatives_vectorized(T, Tcs, ais, a_alphas=None, da_alpha_dTs=None, d2a_alpha_dT2s=None): r'''Calculates the `a_alpha` terms and their first and second temperature derivatives for the RK equation of state given the critical temperatures `Tcs`, and `a` parameters `ais`. .. math:: a_i\alpha_i = \frac{a_i}{\sqrt{\frac{T}{T_{c,i}}}} .. math:: \frac{d a_i\alpha_i}{dT} = - \frac{a_i}{2 T\sqrt{\frac{T}{T_{c,i}}}} .. 
math:: \frac{d^2 a_i\alpha_i}{dT^2} = \frac{3 a_i}{4 T^{2}\sqrt{\frac{T}{T_{c,i}}}} Parameters ---------- T : float Temperature, [K] Tcs : list[float] Critical temperatures of components, [K] ais : list[float] `a` parameters of cubic EOS, :math:`a_i=\frac{0.42748\cdot R^2(T_{c,i})^{2}}{P_{c,i}}`, [Pa*m^6/mol^2] Returns ------- a_alphas : list[float] Pure component `a_alpha` terms in the cubic EOS, [Pa*m^6/mol^2] da_alpha_dTs : list[float] First temperature derivative of pure component `a_alpha`, [Pa*m^6/(mol^2*K)] d2a_alpha_dT2s : list[float] Second temperature derivative of pure component `a_alpha`, [Pa*m^6/(mol^2*K^2)] Notes ----- Examples -------- >>> Tcs = [469.7, 507.4, 540.3] >>> ais = [1.9351940385541342, 2.525982668162287, 3.1531036708059315] >>> RK_a_alpha_and_derivatives_vectorized(322.29, Tcs=Tcs, ais=ais) ([2.3362073307, 3.16943743055, 4.08255757984], [-0.00362438693525, -0.0049170582868, -0.00633367088622], [1.6868597855e-05, 2.28849403652e-05, 2.94781294155e-05]) ''' N = len(ais) if a_alphas is None: a_alphas = [0.0]*N if da_alpha_dTs is None: da_alpha_dTs = [0.0]*N if d2a_alpha_dT2s is None: d2a_alpha_dT2s = [0.0]*N T_root_inv = 1.0/sqrt(T) T_inv = T_root_inv*T_root_inv T_15_inv = T_inv*T_root_inv T_25_inv = T_inv*T_15_inv x0 = -0.5*T_15_inv x1 = 0.75*T_25_inv for i in range(N): Tc_05 = sqrt(Tcs[i]) aiTc_05 = ais[i]*Tc_05 a_alphas[i] = aiTc_05*T_root_inv da_alpha_dTs[i] = aiTc_05*x0 d2a_alpha_dT2s[i] = aiTc_05*x1 return a_alphas, da_alpha_dTs, d2a_alpha_dT2s def PRSV_a_alphas_vectorized(T, Tcs, ais, kappa0s, kappa1s, a_alphas=None): r'''Calculates the `a_alpha` terms for the Peng-Robinson-Stryjek-Vera equation of state given the critical temperatures `Tcs`, constants `ais`, PRSV parameters `kappa0s` and `kappa1s`. .. 
math:: a_i\alpha_i = a_i \left(\left(\kappa_{0} + \kappa_{1} \left(\sqrt{\frac{ T}{T_{c,i}}} + 1\right) \left(- \frac{T}{T_{c,i}} + \frac{7}{10}\right) \right) \left(- \sqrt{\frac{T}{T_{c,i}}} + 1\right) + 1\right)^{2} Parameters ---------- T : float Temperature, [K] Tcs : list[float] Critical temperatures of components, [K] ais : list[float] `a` parameters of cubic EOS, :math:`a_i=0.45724\frac{R^2T_{c,i}^2}{P_{c,i}}`, [Pa*m^6/mol^2] kappa0s : list[float] `kappa0` parameters of PRSV EOS; the original form uses :math:`\kappa_{0,i} = 0.378893 + 1.4897153\omega_i - 0.17131848\omega_i^2 + 0.0196554\omega_i^3`, [-] kappa1s : list[float] Fit parameters, can be set to 0 if unknown [-] Returns ------- a_alphas : list[float] Pure component `a_alpha` terms in the cubic EOS, [Pa*m^6/mol^2] Notes ----- Examples -------- >>> Tcs = [507.6] >>> ais = [2.6923169620277805] >>> kappa0s = [0.8074380841890093] >>> kappa1s = [0.05104] >>> PRSV_a_alphas_vectorized(299.0, Tcs=Tcs, ais=ais, kappa0s=kappa0s, kappa1s=kappa1s) [3.81298569831] ''' sqrtT = sqrt(T) N = len(Tcs) if a_alphas is None: a_alphas = [0.0]*N for i in range(N): Tc_inv_root = 1.0/sqrt(Tcs[i]) Tc_inv = Tc_inv_root*Tc_inv_root x0 = Tc_inv_root*sqrtT x2 = (1.0 + (kappa0s[i] + kappa1s[i]*(x0 + 1.0)*(0.7 - T*Tc_inv))*(1.0 - x0)) a_alphas[i] = ais[i]*x2*x2 return a_alphas def PRSV_a_alpha_and_derivatives_vectorized(T, Tcs, ais, kappa0s, kappa1s, a_alphas=None, da_alpha_dTs=None, d2a_alpha_dT2s=None): r'''Calculates the `a_alpha` terms and their first and second derivative for the Peng-Robinson-Stryjek-Vera equation of state given the critical temperatures `Tcs`, constants `ais`, PRSV parameters `kappa0s` and `kappa1s`. .. math:: a_i\alpha_i = a_i \left(\left(\kappa_{0} + \kappa_{1} \left(\sqrt{\frac{ T}{T_{c,i}}} + 1\right) \left(- \frac{T}{T_{c,i}} + \frac{7}{10}\right) \right) \left(- \sqrt{\frac{T}{T_{c,i}}} + 1\right) + 1\right)^{2} .. 
math:: \frac{d a_i\alpha_i}{dT} =a_{i} \left(\left(1 - \sqrt{\frac{T}{T_{c,i}}} \right) \left(\kappa_{0,i} + \kappa_{1,i} \left(\sqrt{\frac{T}{T_{c,i}}} + 1\right) \left(- \frac{T}{T_{c,i}} + \frac{7}{10}\right)\right) + 1\right) \left(2 \left(1 - \sqrt{\frac{T}{T_{c,i}}}\right) \left( - \frac{\kappa_{1,i} \left(\sqrt{\frac{T}{T_{c,i}}} + 1\right)}{T_{c,i}} + \frac{\kappa_{1,i} \sqrt{\frac{T}{T_{c,i}}} \left(- \frac{T}{T_{c,i}} + \frac{7}{10}\right)}{2 T}\right) - \frac{\sqrt{\frac{T}{T_{c,i}}} \left(\kappa_{0,i} + \kappa_{1,i} \left(\sqrt{\frac{T}{T_{c,i}}} + 1\right) \left(- \frac{T}{T_{c,i}} + \frac{7}{10}\right)\right)}{T} \right) .. math:: \frac{d^2 a_i\alpha_i}{dT^2} = \frac{a_{i} \left(\left(\kappa_{1,i} \left(\sqrt{\frac{T}{T_{c,i}}} - 1\right) \left(\frac{20 \left(\sqrt{ \frac{T}{T_{c,i}}} + 1\right)}{T_{c,i}} + \frac{\sqrt{\frac{T}{T_{c,i}}} \left(\frac{10 T}{T_{c,i}} - 7\right)}{T}\right) - \frac{\sqrt{\frac{T} {T_{c,i}}} \left(10 \kappa_{0,i} - \kappa_{1,i} \left(\sqrt{\frac{T} {T_{c,i}}} + 1\right) \left(\frac{10 T}{T_{c,i}} - 7\right)\right)}{T} \right)^{2} - \frac{\sqrt{\frac{T}{T_{c,i}}} \left(\left(10 \kappa_{0,i} - \kappa_{1,i} \left(\sqrt{\frac{T}{T_{c,i}}} + 1\right) \left(\frac{10 T}{T_{c,i}} - 7\right)\right) \left(\sqrt{\frac{T} {T_{c,i}}} - 1\right) - 10\right) \left(\kappa_{1,i} \left(\frac{40} {T_{c,i}} - \frac{\frac{10 T}{T_{c,i}} - 7}{T}\right) \left(\sqrt{ \frac{T}{T_{c,i}}} - 1\right) + 2 \kappa_{1,i} \left(\frac{20 \left( \sqrt{\frac{T}{T_{c,i}}} + 1\right)}{T_{c,i}} + \frac{\sqrt{\frac{T} {T_{c,i}}} \left(\frac{10 T}{T_{c,i}} - 7\right)}{T}\right) + \frac{10 \kappa_{0,i} - \kappa_{1,i} \left(\sqrt{\frac{T}{T_{c,i}}} + 1\right) \left(\frac{10 T}{T_{c,i}} - 7\right)}{T}\right)}{T}\right)}{200} Parameters ---------- T : float Temperature, [K] Tcs : list[float] Critical temperatures of components, [K] ais : list[float] `a` parameters of cubic EOS, :math:`a_i=0.45724\frac{R^2T_{c,i}^2}{P_{c,i}}`, [Pa*m^6/mol^2] kappa0s : list[float] 
`kappa0` parameters of PRSV EOS; the original form uses :math:`\kappa_{0,i} = 0.378893 + 1.4897153\omega_i - 0.17131848\omega_i^2 + 0.0196554\omega_i^3`, [-] kappa1s : list[float] Fit parameters, can be set to 0 if unknown [-] Returns ------- a_alphas : list[float] Pure component `a_alpha` terms in the cubic EOS, [Pa*m^6/mol^2] da_alpha_dTs : list[float] First temperature derivative of pure component `a_alpha`, [Pa*m^6/(mol^2*K)] d2a_alpha_dT2s : list[float] Second temperature derivative of pure component `a_alpha`, [Pa*m^6/(mol^2*K^2)] Notes ----- Examples -------- >>> Tcs = [507.6] >>> ais = [2.6923169620277805] >>> kappa0s = [0.8074380841890093] >>> kappa1s = [0.05104] >>> PRSV_a_alpha_and_derivatives_vectorized(299.0, Tcs=Tcs, ais=ais, kappa0s=kappa0s, kappa1s=kappa1s) ([3.8129856983], [-0.0069769034748], [2.00265608110e-05]) ''' r''' Formula derived with: from sympy import * Tc = symbols('T_{c\,i}') T, a, kappa0, kappa1 = symbols('T, a_i, \kappa_{0\,i}, \kappa_{1\,i}') kappa = kappa0 + kappa1*(1 + sqrt(T/Tc))*(Rational(7, 10)-T/Tc) a_alpha = a*(1 + kappa*(1-sqrt(T/Tc)))**2 diff(a_alpha, T, 2) ''' sqrtT = sqrt(T) T_inv = 1.0/T N = len(Tcs) if a_alphas is None: a_alphas = [0.0]*N if da_alpha_dTs is None: da_alpha_dTs = [0.0]*N if d2a_alpha_dT2s is None: d2a_alpha_dT2s = [0.0]*N for i in range(N): Tc_inv_root = 1.0/sqrt(Tcs[i]) Tc_inv = Tc_inv_root*Tc_inv_root x1 = T*Tc_inv x2 = sqrtT*Tc_inv_root x3 = x2 - 1. x4 = 10.*x1 - 7. x5 = x2 + 1. x6 = 10.*kappa0s[i] - kappa1s[i]*x4*x5 x7 = x3*x6 x8 = x7*0.1 - 1. 
x10 = x6*T_inv x11 = kappa1s[i]*x3 x12 = x4*T_inv x13 = 20.*Tc_inv*x5 + x12*x2 x14 = -x10*x2 + x11*x13 a_alpha = ais[i]*x8*x8 da_alpha_dT = -ais[i]*x14*x8*0.1 d2a_alpha_dT2 = ais[i]*0.005*(x14*x14 - x2*T_inv*(x7 - 10.)*(2.*kappa1s[i]*x13 + x10 + x11*(40.*Tc_inv - x12))) a_alphas[i] = a_alpha da_alpha_dTs[i] = da_alpha_dT d2a_alpha_dT2s[i] = d2a_alpha_dT2 return a_alphas, da_alpha_dTs, d2a_alpha_dT2s def PRSV2_a_alphas_vectorized(T, Tcs, ais, kappa0s, kappa1s, kappa2s, kappa3s, a_alphas=None): r'''Calculates the `a_alpha` terms for the Peng-Robinson-Stryjek-Vera 2 equation of state given the critical temperatures `Tcs`, constants `ais`, PRSV2 parameters `kappa0s, `kappa1s`, `kappa2s`, and `kappa3s`. .. math:: a_i\alpha_i = a_{i} \left(\left(1 - \sqrt{\frac{T}{T_{c,i}}}\right) \left(\kappa_{0,i} + \left(\kappa_{1,i} + \kappa_{2,i} \left(1 - \sqrt{\frac{T}{T_{c,i}}}\right) \left(- \frac{T}{T_{c,i}} + \kappa_{3,i}\right)\right) \left(\sqrt{\frac{T}{T_{c,i}}} + 1\right) \left(- \frac{T}{T_{c,i}} + \frac{7}{10}\right)\right) + 1\right)^{2} Parameters ---------- T : float Temperature, [K] Tcs : list[float] Critical temperatures of components, [K] ais : list[float] `a` parameters of cubic EOS, :math:`a_i=0.45724\frac{R^2T_{c,i}^2}{P_{c,i}}`, [Pa*m^6/mol^2] kappa0s : list[float] `kappa0` parameters of PRSV EOS; the original form uses :math:`\kappa_{0,i} = 0.378893 + 1.4897153\omega_i - 0.17131848\omega_i^2 + 0.0196554\omega_i^3`, [-] kappa1s : list[float] Fit parameters, can be set to 0 if unknown [-] kappa2s : list[float] Fit parameters, can be set to 0 if unknown [-] kappa3s : list[float] Fit parameters, can be set to 0 if unknown [-] Returns ------- a_alphas : list[float] Pure component `a_alpha` terms in the cubic EOS, [Pa*m^6/mol^2] Notes ----- Examples -------- >>> PRSV2_a_alphas_vectorized(400.0, Tcs=[507.6], ais=[2.6923169620277805], kappa0s=[0.8074380841890093], kappa1s=[0.05104], kappa2s=[0.8634], kappa3s=[0.460]) [3.2005700986984] ''' sqrtT = sqrt(T) N = len(Tcs) 
if a_alphas is None: a_alphas = [0.0]*N for i in range(N): Tc_inv_root = 1.0/sqrt(Tcs[i]) Tr_sqrt = sqrtT*Tc_inv_root Tr = T*Tc_inv_root*Tc_inv_root kappa = (kappa0s[i] + ((kappa1s[i] + kappa2s[i]*(kappa3s[i] - Tr) *(1.0 - Tr_sqrt))*(1.0 + Tr_sqrt)*(0.7 - Tr))) x0 = (1.0 + kappa*(1.0 - Tr_sqrt)) a_alphas[i] = ais[i]*x0*x0 return a_alphas def PRSV2_a_alpha_and_derivatives_vectorized(T, Tcs, ais, kappa0s, kappa1s, kappa2s, kappa3s, a_alphas=None, da_alpha_dTs=None, d2a_alpha_dT2s=None): r'''Calculates the `a_alpha` terms and their first and second derivatives for the Peng-Robinson-Stryjek-Vera 2 equation of state given the critical temperatures `Tcs`, constants `ais`, PRSV2 parameters `kappa0s, `kappa1s`, `kappa2s`, and `kappa3s`. .. math:: a_i\alpha_i = a_{i} \left(\left(1 - \sqrt{\frac{T}{T_{c,i}}}\right) \left(\kappa_{0,i} + \left(\kappa_{1,i} + \kappa_{2,i} \left(1 - \sqrt{\frac{T}{T_{c,i}}}\right) \left(- \frac{T}{T_{c,i}} + \kappa_{3,i}\right)\right) \left(\sqrt{\frac{T}{T_{c,i}}} + 1\right) \left(- \frac{T}{T_{c,i}} + \frac{7}{10}\right)\right) + 1\right)^{2} .. 
math:: \frac{d a_i\alpha_i}{dT} = a_{i} \left(\left(1 - \sqrt{\frac{T}{T_{c,i} }}\right) \left(\kappa_{0,i} + \left(\kappa_{1,i} + \kappa_{2,i} \left( 1 - \sqrt{\frac{T}{T_{c,i}}}\right) \left(- \frac{T}{T_{c,i}} + \kappa_{3,i}\right)\right) \left(\sqrt{\frac{T}{T_{c,i}}} + 1\right) \left(- \frac{T}{T_{c,i}} + \frac{7}{10}\right)\right) + 1\right) \left(2 \left(1 - \sqrt{\frac{T}{T_{c,i}}}\right) \left(\left(\sqrt{ \frac{T}{T_{c,i}}} + 1\right) \left(- \frac{T}{T_{c,i}} + \frac{7}{10} \right) \left(- \frac{\kappa_{2,i} \left(1 - \sqrt{\frac{T}{T_{c,i}}} \right)}{T_{c,i}} - \frac{\kappa_{2,i} \sqrt{\frac{T}{T_{c,i}}} \left( - \frac{T}{T_{c,i}} + \kappa_{3,i}\right)}{2 T}\right) - \frac{\left( \kappa_{1,i} + \kappa_{2,i} \left(1 - \sqrt{\frac{T}{T_{c,i}}}\right) \left(- \frac{T}{T_{c,i}} + \kappa_{3,i}\right)\right) \left(\sqrt{ \frac{T}{T_{c,i}}} + 1\right)}{T_{c,i}} + \frac{\sqrt{\frac{T}{T_{c,i} }} \left(\kappa_{1,i} + \kappa_{2,i} \left(1 - \sqrt{\frac{T}{T_{c,i}}} \right) \left(- \frac{T}{T_{c,i}} + \kappa_{3,i}\right)\right) \left( - \frac{T}{T_{c,i}} + \frac{7}{10}\right)}{2 T}\right) - \frac{\sqrt{ \frac{T}{T_{c,i}}} \left(\kappa_{0,i} + \left(\kappa_{1,i} + \kappa_{2,i} \left(1 - \sqrt{\frac{T}{T_{c,i}}}\right) \left( - \frac{T}{T_{c,i}} + \kappa_{3,i}\right)\right) \left(\sqrt{\frac{T} {T_{c,i}}} + 1\right) \left(- \frac{T}{T_{c,i}} + \frac{7}{10}\right) \right)}{T}\right) .. 
math:: \frac{d^2 a_i\alpha_i}{dT^2} = - \frac{a_{i} \left(\left(\left(10 \kappa_{0,i} - \left(\kappa_{1,i} + \kappa_{2,i} \left(\sqrt{\frac{T} {T_{c,i}}} - 1\right) \left(\frac{T}{T_{c,i}} - \kappa_{3,i}\right) \right) \left(\sqrt{\frac{T}{T_{c,i}}} + 1\right) \left(\frac{10 T} {T_{c,i}} - 7\right)\right) \left(\sqrt{\frac{T}{T_{c,i}}} - 1\right) - 10\right) \left(\left(\sqrt{\frac{T}{T_{c,i}}} - 1\right) \left( \frac{40 \kappa_{2,i} \left(\sqrt{\frac{T}{T_{c,i}}} + 1\right) \left( \frac{2 \left(\sqrt{\frac{T}{T_{c,i}}} - 1\right)}{T_{c,i}} + \frac{ \sqrt{\frac{T}{T_{c,i}}} \left(\frac{T}{T_{c,i}} - \kappa_{3,i}\right)} {T}\right)}{T_{c,i}} + \frac{\kappa_{2,i} \sqrt{\frac{T}{T_{c,i}}} \left(\frac{4}{T_{c,i}} - \frac{\frac{T}{T_{c,i}} - \kappa_{3,i}}{T} \right) \left(\sqrt{\frac{T}{T_{c,i}}} + 1\right) \left(\frac{10 T} {T_{c,i}} - 7\right)}{T} + \frac{2 \kappa_{2,i} \sqrt{\frac{T}{T_{c,i}}} \left(\frac{10 T}{T_{c,i}} - 7\right) \left(\frac{2 \left(\sqrt{\frac {T}{T_{c,i}}} - 1\right)}{T_{c,i}} + \frac{\sqrt{\frac{T}{T_{c,i}}} \left(\frac{T}{T_{c,i}} - \kappa_{3,i}\right)}{T}\right)}{T} + \frac{40 \sqrt{\frac{T}{T_{c,i}}} \left(\kappa_{1,i} + \kappa_{2,i} \left(\sqrt{ \frac{T}{T_{c,i}}} - 1\right) \left(\frac{T}{T_{c,i}} - \kappa_{3,i} \right)\right)}{T T_{c,i}} - \frac{\sqrt{\frac{T}{T_{c,i}}} \left( \kappa_{1,i} + \kappa_{2,i} \left(\sqrt{\frac{T}{T_{c,i}}} - 1\right) \left(\frac{T}{T_{c,i}} - \kappa_{3,i}\right)\right) \left(\frac{10 T} {T_{c,i}} - 7\right)}{T^{2}}\right) + \frac{2 \sqrt{\frac{T}{T_{c,i}}} \left(\kappa_{2,i} \left(\sqrt{\frac{T}{T_{c,i}}} + 1\right) \left(\frac{10 T}{T_{c,i}} - 7\right) \left(\frac{2 \left(\sqrt{ \frac{T}{T_{c,i}}} - 1\right)}{T_{c,i}} + \frac{\sqrt{\frac{T} {T_{c,i}}} \left(\frac{T}{T_{c,i}} - \kappa_{3,i}\right)}{T}\right) + \frac{20 \left(\kappa_{1,i} + \kappa_{2,i} \left(\sqrt{\frac{T} {T_{c,i}}} - 1\right) \left(\frac{T}{T_{c,i}} - \kappa_{3,i}\right) \right) \left(\sqrt{\frac{T}{T_{c,i}}} + 1\right)}{T_{c,i}} + 
\frac{\sqrt{\frac{T}{T_{c,i}}} \left(\kappa_{1,i} + \kappa_{2,i} \left(\sqrt{\frac{T}{T_{c,i}}} - 1\right) \left(\frac{T}{T_{c,i}} - \kappa_{3,i}\right)\right) \left(\frac{10 T}{T_{c,i}} - 7\right)} {T}\right)}{T} + \frac{\sqrt{\frac{T}{T_{c,i}}} \left(10 \kappa_{0,i} - \left(\kappa_{1,i} + \kappa_{2,i} \left(\sqrt{\frac{T}{T_{c,i}}} - 1\right) \left(\frac{T}{T_{c,i}} - \kappa_{3,i}\right)\right) \left(\sqrt{\frac{T}{T_{c,i}}} + 1\right) \left(\frac{10 T}{T_{c,i}} - 7\right)\right)}{T^{2}}\right) - \left(\left(\sqrt{\frac{T}{T_{c,i}}} - 1\right) \left(\kappa_{2,i} \left(\sqrt{\frac{T}{T_{c,i}}} + 1\right) \left(\frac{10 T}{T_{c,i}} - 7\right) \left(\frac{2 \left(\sqrt{ \frac{T}{T_{c,i}}} - 1\right)}{T_{c,i}} + \frac{\sqrt{\frac{T}{T_{c,i}}} \left(\frac{T}{T_{c,i}} - \kappa_{3,i}\right)}{T}\right) + \frac{20 \left(\kappa_{1,i} + \kappa_{2,i} \left(\sqrt{\frac{T}{T_{c,i}}} - 1\right) \left(\frac{T}{T_{c,i}} - \kappa_{3,i}\right)\right) \left( \sqrt{\frac{T}{T_{c,i}}} + 1\right)}{T_{c,i}} + \frac{\sqrt{\frac{T} {T_{c,i}}} \left(\kappa_{1,i} + \kappa_{2,i} \left(\sqrt{\frac{T} {T_{c,i}}} - 1\right) \left(\frac{T}{T_{c,i}} - \kappa_{3,i}\right) \right) \left(\frac{10 T}{T_{c,i}} - 7\right)}{T}\right) - \frac{ \sqrt{\frac{T}{T_{c,i}}} \left(10 \kappa_{0,i} - \left(\kappa_{1,i} + \kappa_{2,i} \left(\sqrt{\frac{T}{T_{c,i}}} - 1\right) \left(\frac{T} {T_{c,i}} - \kappa_{3,i}\right)\right) \left(\sqrt{\frac{T}{T_{c,i}}} + 1\right) \left(\frac{10 T}{T_{c,i}} - 7\right)\right)}{T}\right)^{2} \right)}{200} Parameters ---------- T : float Temperature, [K] Tcs : list[float] Critical temperatures of components, [K] ais : list[float] `a` parameters of cubic EOS, :math:`a_i=0.45724\frac{R^2T_{c,i}^2}{P_{c,i}}`, [Pa*m^6/mol^2] kappa0s : list[float] `kappa0` parameters of PRSV EOS; the original form uses :math:`\kappa_{0,i} = 0.378893 + 1.4897153\omega_i - 0.17131848\omega_i^2 + 0.0196554\omega_i^3`, [-] kappa1s : list[float] Fit parameters, can be set to 0 if unknown [-] kappa2s : 
list[float]
        Fit parameters, can be set to 0 if unknown [-]
    kappa3s : list[float]
        Fit parameters, can be set to 0 if unknown [-]

    Returns
    -------
    a_alphas : list[float]
        Pure component `a_alpha` terms in the cubic EOS, [Pa*m^6/mol^2]
    da_alpha_dTs : list[float]
        First temperature derivative of pure component `a_alpha`,
        [Pa*m^6/(mol^2*K)]
    d2a_alpha_dT2s : list[float]
        Second temperature derivative of pure component `a_alpha`,
        [Pa*m^6/(mol^2*K^2)]

    Notes
    -----

    Examples
    --------
    >>> PRSV2_a_alpha_and_derivatives_vectorized(400.0, Tcs=[507.6], ais=[2.6923169620277805], kappa0s=[0.8074380841890093], kappa1s=[0.05104], kappa2s=[0.8634], kappa3s=[0.460])
    ([3.2005700986], [-0.005301195971], [1.11181477576e-05])
    '''
    sqrtT = sqrt(T)
    T_inv = 1.0/T
    N = len(Tcs)
    # Reuse caller-provided output lists when given; otherwise allocate fresh.
    if a_alphas is None:
        a_alphas = [0.0]*N
    if da_alpha_dTs is None:
        da_alpha_dTs = [0.0]*N
    if d2a_alpha_dT2s is None:
        d2a_alpha_dT2s = [0.0]*N
    for i in range(N):
        Tc_inv_root = 1.0/sqrt(Tcs[i])
        Tc_inv = Tc_inv_root*Tc_inv_root
        # x1..x20 are SymPy-CSE intermediates for the PRSV2 alpha function and
        # its first two temperature derivatives; order is significant.
        x1 = T*Tc_inv
        x2 = sqrtT*Tc_inv_root  # sqrt(Tr)
        x3 = x2 - 1.
        x4 = x2 + 1.
        x5 = 10.*x1 - 7.
        x6 = -kappa3s[i] + x1
        x7 = kappa1s[i] + kappa2s[i]*x3*x6
        x8 = x5*x7
        x9 = 10.*kappa0s[i] - x4*x8
        x10 = x3*x9
        x11 = x10*0.1 - 1.0
        x13 = x2*T_inv
        x14 = x7*Tc_inv
        x15 = kappa2s[i]*x4*x5
        x16 = 2.*(-x2 + 1.)*Tc_inv + x13*(kappa3s[i] - x1)
        x17 = -x13*x8 - x14*(20.*x2 + 20.) + x15*x16
        x18 = x13*x9 + x17*x3
        x19 = x2*T_inv*T_inv
        x20 = 2.*x2*T_inv
        a_alpha = ais[i]*x11*x11
        da_alpha_dT = ais[i]*x11*x18*0.1
        d2a_alpha_dT2 = ais[i]*(x18*x18 + (x10 - 10.)*(x17*x20 - x19*x9 + x3*(40.*kappa2s[i]*Tc_inv*x16*x4 + kappa2s[i]*x16*x20*x5 - 40.*T_inv*x14*x2 - x15*T_inv*x2*(4.0*Tc_inv - x6*T_inv) + x19*x8)))*0.005
        a_alphas[i] = a_alpha
        da_alpha_dTs[i] = da_alpha_dT
        d2a_alpha_dT2s[i] = d2a_alpha_dT2
    return a_alphas, da_alpha_dTs, d2a_alpha_dT2s

def APISRK_a_alphas_vectorized(T, Tcs, ais, S1s, S2s, a_alphas=None):
    r'''Calculates the `a_alpha` terms for the API SRK equation of state
    given the critical temperatures `Tcs`, constants `ais`, and API
    parameters `S1s` and `S2s`.

    .. math::
        a_i\alpha(T)_i = a_i \left[1 + S_{1,i}\left(1-\sqrt{T_{r,i}}\right)
        + S_{2,i} \frac{1- \sqrt{T_{r,i}}}{\sqrt{T_{r,i}}}\right]^2

    Parameters
    ----------
    T : float
        Temperature, [K]
    Tcs : list[float]
        Critical temperatures of components, [K]
    ais : list[float]
        `a` parameters of cubic EOS,
        :math:`a_i=\frac{0.42748\cdot R^2(T_{c,i})^{2}}{P_{c,i}}`,
        [Pa*m^6/mol^2]
    S1s : list[float]
        `S1` parameters of API SRK EOS; regressed or estimated with
        :math:`S_{1,i} = 0.48508 + 1.55171\omega_i - 0.15613\omega_i^2`, [-]
    S2s : list[float]
        `S2` parameters of API SRK EOS; regressed or set to zero, [-]

    Returns
    -------
    a_alphas : list[float]
        Pure component `a_alpha` terms in the cubic EOS, [Pa*m^6/mol^2]

    Notes
    -----

    Examples
    --------
    >>> APISRK_a_alphas_vectorized(T=430.0, Tcs=[514.0], ais=[1.2721974560809934], S1s=[1.678665], S2s=[-0.216396])
    [1.60465652994097]
    '''
    N = len(Tcs)
    sqrtT = sqrt(T)
    if a_alphas is None:
        a_alphas = [0.0]*N
    for i in range(N):
        rtTr = 1.0/sqrt(Tcs[i])  # 1/sqrt(Tc); rtTr*sqrtT is sqrt(Tr)
        x0 = (-rtTr*sqrtT + 1.)  # 1 - sqrt(Tr)
        x1 = 1.0/(rtTr*sqrtT)    # 1/sqrt(Tr)
        x2 = (S1s[i]*x0 + S2s[i]*(x0)*x1 + 1.0)
        a_alphas[i] = ais[i]*x2*x2
    return a_alphas

def APISRK_a_alpha_and_derivatives_vectorized(T, Tcs, ais, S1s, S2s, a_alphas=None, da_alpha_dTs=None, d2a_alpha_dT2s=None):
    r'''Calculates the `a_alpha` terms and their first two temperature
    derivatives for the API SRK equation of state given the critical
    temperatures `Tcs`, constants `ais`, and API parameters `S1s` and `S2s`.

    .. math::
        a_i\alpha(T)_i = a_i \left[1 + S_{1,i}\left(1-\sqrt{T_{r,i}}\right)
        + S_{2,i} \frac{1- \sqrt{T_{r,i}}}{\sqrt{T_{r,i}}}\right]^2

    .. math::
        \frac{d a_i\alpha_i}{dT} = a_i\frac{T_{c,i}}{T^{2}} \left(- S_{2,i}
        \left(\sqrt{\frac{T}{T_{c,i}}} - 1\right) + \sqrt{\frac{T}{T_{c,i}}}
        \left(S_{1,i} \sqrt{\frac{T}{T_{c,i}}} + S_{2,i}\right)\right)
        \left(S_{2,i} \left(\sqrt{\frac{T}{T_{c,i}}} - 1\right)
        + \sqrt{\frac{T}{T_{c,i}}} \left(S_{1,i} \left(\sqrt{
        \frac{T}{T_{c,i}}} - 1\right) - 1\right)\right)

    .. math::
        \frac{d^2 a_i\alpha_i}{dT^2} = a_i\frac{1}{2 T^{3}} \left(S_{1,i}^{2} T
        \sqrt{\frac{T}{T_{c,i}}} - S_{1,i} S_{2,i} T \sqrt{\frac{T}{T_{c,i}}}
        + 3 S_{1,i} S_{2,i} T_{c,i} \sqrt{\frac{T}{T_{c,i}}} + S_{1,i} T
        \sqrt{\frac{T}{T_{c,i}}} - 3 S_{2,i}^{2} T_{c,i}
        \sqrt{\frac{T}{T_{c,i}}} + 4 S_{2,i}^{2} T_{c,i} + 3 S_{2,i}
        T_{c,i} \sqrt{\frac{T}{T_{c,i}}}\right)

    Parameters
    ----------
    T : float
        Temperature, [K]
    Tcs : list[float]
        Critical temperatures of components, [K]
    ais : list[float]
        `a` parameters of cubic EOS,
        :math:`a_i=\frac{0.42748\cdot R^2(T_{c,i})^{2}}{P_{c,i}}`,
        [Pa*m^6/mol^2]
    S1s : list[float]
        `S1` parameters of API SRK EOS; regressed or estimated with
        :math:`S_{1,i} = 0.48508 + 1.55171\omega_i - 0.15613\omega_i^2`, [-]
    S2s : list[float]
        `S2` parameters of API SRK EOS; regressed or set to zero, [-]

    Returns
    -------
    a_alphas : list[float]
        Pure component `a_alpha` terms in the cubic EOS, [Pa*m^6/mol^2]
    da_alpha_dTs : list[float]
        First temperature derivative of pure component `a_alpha`,
        [Pa*m^6/(mol^2*K)]
    d2a_alpha_dT2s :
list[float]
        Second temperature derivative of pure component `a_alpha`,
        [Pa*m^6/(mol^2*K^2)]

    Notes
    -----

    Examples
    --------
    >>> APISRK_a_alpha_and_derivatives_vectorized(T=430.0, Tcs=[514.0], ais=[1.2721974560809934], S1s=[1.678665], S2s=[-0.216396])
    ([1.60465652994], [-0.0043155855337], [8.9931026263e-06])
    '''
    N = len(Tcs)
    T_inv = 1.0/T
    c0 = T_inv*T_inv*0.5  # 1/(2 T^2), common factor of the second derivative
    # Reuse caller-provided output lists when given; otherwise allocate fresh.
    if a_alphas is None:
        a_alphas = [0.0]*N
    if da_alpha_dTs is None:
        da_alpha_dTs = [0.0]*N
    if d2a_alpha_dT2s is None:
        d2a_alpha_dT2s = [0.0]*N
    for i in range(N):
        # SymPy-CSE intermediates for alpha and its two T derivatives.
        x0 = sqrt(T/Tcs[i])  # sqrt(Tr)
        x1 = x0 - 1.
        x2 = x1/x0
        x3 = S2s[i]*x2
        x4 = S1s[i]*x1 + x3 - 1.
        x5 = S1s[i]*x0
        x6 = S2s[i] - x3 + x5
        x7 = 3.*S2s[i]
        a_alphas[i] = ais[i]*x4*x4
        da_alpha_dTs[i] = ais[i]*x4*x6*T_inv
        d2a_alpha_dT2s[i] = ais[i]*(-x4*(-x2*x7 + x5 + x7) + x6*x6)*c0
    return a_alphas, da_alpha_dTs, d2a_alpha_dT2s

def TWU_a_alpha_common(T, Tc, omega, a, full=True, method='PR'):
    r'''Function to calculate `a_alpha` and optionally its first and second
    derivatives for the TWUPR or TWUSRK EOS. Returns 'a_alpha', and
    optionally 'da_alpha_dT' and 'd2a_alpha_dT2'. Used by `TWUPR` and
    `TWUSRK`; has little purpose on its own. See either class for the
    correct reference, and examples of using the EOS.

    Parameters
    ----------
    T : float
        Temperature, [K]
    Tc : float
        Critical temperature, [K]
    omega : float
        Acentric factor, [-]
    a : float
        Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]
    full : bool
        Whether or not to return its first and second derivatives
    method : str
        Either 'PR' or 'SRK'

    Notes
    -----
    The derivatives are somewhat long and are not described here for
    brevity; they are obtainable from the following SymPy expression.

    >>> from sympy import *
    >>> T, Tc, omega, N1, N0, M1, M0, L1, L0 = symbols('T, Tc, omega, N1, N0, M1, M0, L1, L0')
    >>> Tr = T/Tc
    >>> alpha0 = Tr**(N0*(M0-1))*exp(L0*(1-Tr**(N0*M0)))
    >>> alpha1 = Tr**(N1*(M1-1))*exp(L1*(1-Tr**(N1*M1)))
    >>> alpha = alpha0 + omega*(alpha1-alpha0)
    >>> # diff(alpha, T)
    >>> # diff(alpha, T, T)
    '''
    # e-10 works
    min_a_alpha = 1e-3 # There are a LOT of formulas, and they do not like having zeros
    Tr = T/Tc
    if Tr < 5e-3:
        # Remap very low reduced temperatures into a safe range; the Twu
        # power/exponential forms misbehave as Tr -> 0.
        # not enough: Tr from (x) 0 to 2e-4 to (y) 1e-4 2e-4
        # trying: Tr from (x) 0 to 1e-3 to (y) 5e-4 1e-3
        # Tr = 1e-3 + (Tr - 0.0)*(1e-3 - 5e-4)/1e-3
        # Tr = 5e-4 + (Tr - 0.0)*(5e-4)/1e-3
        Tr = 4e-3 + (Tr - 0.0)*(1e-3)/5e-3
        T = Tc*Tr
    # L/M/N constants are the published Twu fits; sub/supercritical sets differ.
    if method == 'PR':
        if Tr < 1.0:
            L0, M0, N0 = 0.125283, 0.911807, 1.948150
            L1, M1, N1 = 0.511614, 0.784054, 2.812520
        else:
            L0, M0, N0 = 0.401219, 4.963070, -0.2
            L1, M1, N1 = 0.024955, 1.248089, -8.
    elif method == 'SRK':
        if Tr < 1.0:
            L0, M0, N0 = 0.141599, 0.919422, 2.496441
            L1, M1, N1 = 0.500315, 0.799457, 3.291790
        else:
            L0, M0, N0 = 0.441411, 6.500018, -0.20
            L1, M1, N1 = 0.032580, 1.289098, -8.0
    else:
        raise ValueError('Only `PR` and `SRK` are accepted as method')
    if not full:
        alpha0 = Tr**(N0*(M0-1.))*exp(L0*(1.-Tr**(N0*M0)))
        alpha1 = Tr**(N1*(M1-1.))*exp(L1*(1.-Tr**(N1*M1)))
        alpha = alpha0 + omega*(alpha1 - alpha0)
        a_alpha = a*alpha
        if a_alpha < min_a_alpha:
            a_alpha = min_a_alpha
        return a_alpha
    else:
        # SymPy-CSE intermediates for alpha and its two T derivatives.
        x0 = Tr
        x1 = M0 - 1
        x2 = N0*x1
        x3 = x0**x2
        x4 = M0*N0
        x5 = x0**x4
        x6 = exp(-L0*(x5 - 1.))
        x7 = x3*x6
        x8 = M1 - 1.
        x9 = N1*x8
        x10 = x0**x9
        x11 = M1*N1
        x12 = x0**x11
        x13 = x2*x7
        x14 = L0*M0*N0*x3*x5*x6
        x15 = x13 - x14
        x16 = exp(-L1*(x12 - 1))
        x17 = -L1*M1*N1*x10*x12*x16 + x10*x16*x9 - x13 + x14
        x18 = N0*N0
        x19 = x18*x3*x6
        x20 = x1**2*x19
        x21 = M0**2
        x22 = L0*x18*x3*x5*x6
        x23 = x21*x22
        x24 = 2*M0*x1*x22
        x25 = L0**2*x0**(2*x4)*x19*x21
        x26 = N1**2
        x27 = x10*x16*x26
        x28 = M1**2
        x29 = L1*x10*x12*x16*x26
        a_alpha = a*(-omega*(-x10*exp(L1*(-x12 + 1)) + x3*exp(L0*(-x5 + 1))) + x7)
        da_alpha_dT = a*(omega*x17 + x15)/T
        d2a_alpha_dT2 = a*(-(omega*(-L1**2*x0**(2.*x11)*x27*x28 + 2.*M1*x29*x8 + x17 + x20 - x23 - x24 + x25 - x27*x8**2 + x28*x29) + x15 - x20 + x23 + x24 - x25)/T**2)
        if a_alpha < min_a_alpha:
            # Clamp tiny a_alpha and zero the derivatives for consistency.
            a_alpha = min_a_alpha
            da_alpha_dT = d2a_alpha_dT2 = 0.0
            # Hydrogen at low T
            # a_alpha = da_alpha_dT = d2a_alpha_dT2 = 0.0
        return a_alpha, da_alpha_dT, d2a_alpha_dT2

# The following *_alpha_pure functions each evaluate one published
# temperature-dependence (alpha) correlation at a single T; coefficients
# `c*`/`M`/`N` are correlation-specific fit parameters.

def Twu91_alpha_pure(T, Tc, c0, c1, c2):
    Tr = T/Tc
    return (Tr**(c2*(c1 - 1.0))*exp(c0*(1.0 - (Tr)**(c1*c2))))

def Soave_1979_alpha_pure(T, Tc, M, N):
    Tr = T/Tc
    return (1.0 + (1.0 - Tr)*(M + N/Tr))

def Soave_1972_alpha_pure(T, Tc, c0):
    Tr = T/Tc
    return (c0*(-sqrt(T/Tc) + 1) + 1)**2

def Heyen_alpha_pure(T, Tc, c1, c2):
    return exp(c1*(1.0 -(T/Tc)**c2))

def Harmens_Knapp_alpha_pure(T, Tc, c1, c2):
    return (c1*(-sqrt(T/Tc) + 1) - c2*(1 - Tc/T) + 1)**2

def Mathias_1983_alpha_pure(T, Tc, c1, c2):
    Tr = T/Tc
    return (1 + c1*(1-sqrt(Tr)) -c2*(1-Tr)*(0.7-Tr))**2

def Mathias_Copeman_untruncated_alpha_pure(T, Tc, c1, c2, c3):
    return (c1*(-sqrt(T/Tc) + 1) + c2*(-sqrt(T/Tc) + 1)**2 + c3*(-sqrt(T/Tc) + 1)**3 + 1)**2

def Mathias_Copeman_original_alpha_pure(T, Tc, c1, c2, c3):
    # Above Tc the cubic form is truncated to the linear-in-tau term only.
    if T < Tc:
        return (c1*(-sqrt(T/Tc) + 1) + c2*(-sqrt(T/Tc) + 1)**2 + c3*(-sqrt(T/Tc) + 1)**3 + 1)**2
    rt = sqrt(T/Tc)
    tau = 1.0 - rt
    x = (1.0 + c1*tau)
    return x*x

def Mathias_Copeman_alpha_pure(T, Tc, *alpha_coeffs):
    # alpha_coeffs is a full polynomial in tau (highest order first, ending
    # in the constant 1); [-2] is the linear coefficient used above Tc.
    rt = sqrt(T/Tc)
    tau = 1.0 - rt
    if T < Tc:
        x0 = horner(alpha_coeffs, tau)
        return x0*x0
    else:
        x = (1.0 + alpha_coeffs[-2]*tau)
        return x*x

def Gibbons_Laughton_alpha_pure(T, Tc, c1, c2):
    return (c1*(T/Tc - 1) + c2*(sqrt(T/Tc) - 1) + 1)

def Soave_1984_alpha_pure(T, Tc, c1, c2):
    return (c1*(-T/Tc + 1) + c2*(-1 + Tc/T) + 1)

def Yu_Lu_alpha_pure(T, Tc, c1, c2, c3, c4):
    return 10**(c4*(-T/Tc + 1)*(T**2*c3/Tc**2 + T*c2/Tc + c1))

def Trebble_Bishnoi_alpha_pure(T, Tc, c1):
    return exp(c1*(-T/Tc + 1))

def Melhem_alpha_pure(T, Tc, c1, c2):
    return exp(c1*(-T/Tc + 1) + c2*(-sqrt(T/Tc) + 1)**2)

def Androulakis_alpha_pure(T, Tc, c1, c2, c3):
    return (c1*(-(T/Tc)**(2/3) + 1) + c2*(-(T/Tc)**(2/3) + 1)**2 + c3*(-(T/Tc)**(2/3) + 1)**3 + 1)

def Schwartzentruber_alpha_pure(T, Tc, c1, c2, c3, c4):
    return ((c4*(-sqrt(T/Tc) + 1) - (-sqrt(T/Tc) + 1)*(T**2*c3/Tc**2 + T*c2/Tc + c1) + 1)**2)

def Almeida_alpha_pure(T, Tc, c1, c2, c3):
    return exp(c1*(-T/Tc + 1)*abs(T/Tc - 1)**(c2 - 1) + c3*(-1 + Tc/T))

def Soave_1993_alpha_pure(T, Tc, c1, c2):
    return (c1*(-T/Tc + 1) + c2*(-sqrt(T/Tc) + 1)**2 + 1)

def Gasem_alpha_pure(T, Tc, c1, c2, c3):
    return (exp((-(T/Tc)**c3 + 1)*(T*c2/Tc + c1)))

def Coquelet_alpha_pure(T, Tc, c1, c2, c3):
    return (exp(c1*(-T/Tc + 1)*(c2*(-sqrt(T/Tc) + 1)**2 + c3*(-sqrt(T/Tc) + 1)**3 + 1)**2))

def Haghtalab_alpha_pure(T, Tc, c1, c2, c3):
    return exp((-c3**log(T/Tc) + 1)*(-T*c2/Tc + c1))

def Saffari_alpha_pure(T, Tc, c1, c2, c3):
    return (exp(T*c1/Tc + c2*log(T/Tc) + c3*(-sqrt(T/Tc) + 1)))

def Chen_Yang_alpha_pure(T, Tc, omega, c1, c2, c3, c4, c5, c6, c7):
    return exp(c4*log((-sqrt(T/Tc) + 1)*(c5 + c6*omega + c7*omega**2) + 1)**2 + (-T/Tc + 1)*(c1 + c2*omega + c3*omega**2))

class a_alpha_base(object):
    # Shared base for the alpha-function mixin classes below; provides a
    # test-only initializer that stores the attributes the mixins read.
    def _init_test(self, Tc, a, alpha_coeffs, **kwargs):
        self.Tc = Tc
        self.a = a
        self.alpha_coeffs = alpha_coeffs
        self.__dict__.update(kwargs)

class Poly_a_alpha(object):
    def a_alpha_and_derivatives_pure(self, T):
        r'''Method to calculate `a_alpha` and its first and second
        derivatives given that there is a polynomial equation for
        :math:`\alpha`.

        ..
math:: a \alpha = a\cdot \text{poly}(T)

        Parameters
        ----------
        T : float
            Temperature, [K]

        Returns
        -------
        a_alphas : list[float]
            Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]
        da_alpha_dTs : list[float]
            Temperature derivative of coefficient calculated by
            EOS-specific method, [J^2/mol^2/Pa/K]
        d2a_alpha_dT2s : list[float]
            Second temperature derivative of coefficient calculated by
            EOS-specific method, [J^2/mol^2/Pa/K**2]
        '''
        # horner_and_der2 evaluates the polynomial and its first two
        # derivatives in one pass.
        res = horner_and_der2(self.alpha_coeffs, T)
        a = self.a
        return (a*res[0], a*res[1], a*res[2])

    def a_alpha_pure(self, T):
        r'''Method to calculate `a_alpha` given that there is a
        polynomial equation for :math:`\alpha`.

        .. math:: a \alpha = a\cdot \text{poly}(T)

        Parameters
        ----------
        T : float
            Temperature, [K]

        Returns
        -------
        a_alpha : float
            Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]
        '''
        return self.a*horner(self.alpha_coeffs, T)

class Soave_1972_a_alpha(a_alpha_base):
    def a_alpha_and_derivatives_pure(self, T):
        r'''Method to calculate `a_alpha` and its first and second
        derivatives according to Soave (1972) [1]_. Returns `a_alpha`,
        `da_alpha_dT`, and `d2a_alpha_dT2`. See
        `GCEOS.a_alpha_and_derivatives` for more documentation. Same as
        `SRK.a_alpha_and_derivatives` but slower and requiring
        `alpha_coeffs` to be set. One coefficient needed.

        .. math::
            \alpha = \left(c_{0} \left(- \sqrt{\frac{T}{T_{c,i}}}
            + 1\right) + 1\right)^{2}

        References
        ----------
        .. [1] Soave, Giorgio. "Equilibrium Constants from a Modified
           Redlich-Kwong Equation of State." Chemical Engineering Science 27,
           no. 6 (June 1972): 1197-1203. doi:10.1016/0009-2509(72)80096-4.
        .. [2] Young, André F., Fernando L. P. Pessoa, and Victor R. R. Ahón.
           "Comparison of 20 Alpha Functions Applied in the Peng–Robinson
           Equation of State for Vapor Pressure Estimation." Industrial &
           Engineering Chemistry Research 55, no. 22 (June 8, 2016): 6506-16.
           doi:10.1021/acs.iecr.6b00721.
        '''
        c0 = self.alpha_coeffs[0]
        Tc, a = self.Tc, self.a
        a_alpha = a*(c0*(-sqrt(T/Tc) + 1) + 1)**2
        da_alpha_dT = -a*c0*sqrt(T/Tc)*(c0*(-sqrt(T/Tc) + 1) + 1)/T
        d2a_alpha_dT2 = a*c0*(c0/Tc - sqrt(T/Tc)*(c0*(sqrt(T/Tc) - 1) - 1)/T)/(2*T)
        return a_alpha, da_alpha_dT, d2a_alpha_dT2

    def a_alpha_pure(self, T):
        c0 = self.alpha_coeffs[0]
        Tc, a = self.Tc, self.a
        return a*Soave_1972_alpha_pure(T, Tc, c0)

class Heyen_a_alpha(a_alpha_base):
    def a_alpha_and_derivatives_pure(self, T):
        r'''Method to calculate `a_alpha` and its first and second
        derivatives according to Heyen (1980) [1]_. Returns `a_alpha`,
        `da_alpha_dT`, and `d2a_alpha_dT2`. See
        `GCEOS.a_alpha_and_derivatives` for more documentation. Two
        coefficients needed.

        .. math::
            \alpha = e^{c_{1} \left(- \left(\frac{T}{T_{c,i}}\right)^{c_{2}}
            + 1\right)}

        References
        ----------
        .. [1] Heyen, G. Liquid and Vapor Properties from a Cubic Equation of
           State. In "Proceedings of the 2nd International Conference on Phase
           Equilibria and Fluid Properties in the Chemical Industry".
           DECHEMA: Frankfurt, 1980; p 9-13.
        '''
        c1, c2 = self.alpha_coeffs
        Tc, a = self.Tc, self.a
        a_alpha = a*exp(c1*(1 -(T/Tc)**c2))
        da_alpha_dT = -a*c1*c2*(T/Tc)**c2*exp(c1*(-(T/Tc)**c2 + 1))/T
        d2a_alpha_dT2 = a*c1*c2*(T/Tc)**c2*(c1*c2*(T/Tc)**c2 - c2 + 1)*exp(-c1*((T/Tc)**c2 - 1))/T**2
        return a_alpha, da_alpha_dT, d2a_alpha_dT2

    def a_alpha_pure(self, T):
        c1, c2 = self.alpha_coeffs
        Tc, a = self.Tc, self.a
        return a*Heyen_alpha_pure(T, Tc, c1, c2)

class Harmens_Knapp_a_alpha(a_alpha_base):
    def a_alpha_and_derivatives_pure(self, T):
        r'''Method to calculate `a_alpha` and its first and second
        derivatives according to Harmens and Knapp (1980) [1]_. Returns
        `a_alpha`, `da_alpha_dT`, and `d2a_alpha_dT2`. See
        `GCEOS.a_alpha_and_derivatives` for more documentation. Two
        coefficients needed.

        .. math::
            \alpha = \left(c_{1} \left(- \sqrt{\frac{T}{T_{c,i}}} + 1\right)
            - c_{2} \left(1 - \frac{T_{c,i}}{T}\right) + 1\right)^{2}

        References
        ----------
        .. [1] Harmens, A., and H. Knapp.
"Three-Parameter Cubic Equation of State for Normal Substances."
           Industrial & Engineering Chemistry Fundamentals 19, no. 3
           (August 1, 1980): 291-94. doi:10.1021/i160075a010.
        '''
        c1, c2 = self.alpha_coeffs
        Tc, a = self.Tc, self.a
        a_alpha = a*(c1*(-sqrt(T/Tc) + 1) - c2*(1 - Tc/T) + 1)**2
        da_alpha_dT = a*(-c1*sqrt(T/Tc)/T - 2*Tc*c2/T**2)*(c1*(-sqrt(T/Tc) + 1) - c2*(1 - Tc/T) + 1)
        d2a_alpha_dT2 = a*((c1*sqrt(T/Tc) + 2*Tc*c2/T)**2 - (c1*sqrt(T/Tc) + 8*Tc*c2/T)*(c1*(sqrt(T/Tc) - 1) + c2*(1 - Tc/T) - 1))/(2*T**2)
        return a_alpha, da_alpha_dT, d2a_alpha_dT2

    def a_alpha_pure(self, T):
        c1, c2 = self.alpha_coeffs
        Tc, a = self.Tc, self.a
        return a*Harmens_Knapp_alpha_pure(T, Tc, c1, c2)

class Mathias_1983_a_alpha(a_alpha_base):
    def a_alpha_and_derivatives_pure(self, T):
        r'''Method to calculate `a_alpha` and its first and second
        derivatives according to Mathias (1983) [1]_. Returns `a_alpha`,
        `da_alpha_dT`, and `d2a_alpha_dT2`. See
        `GCEOS.a_alpha_and_derivatives` for more documentation. Two
        coefficients needed.

        .. math::
            \alpha = \left(c_{1} \left(- \sqrt{\frac{T}{T_{c,i}}} + 1\right)
            - c_{2} \left(- \frac{T}{T_{c,i}} + 0.7\right)
            \left(- \frac{T}{T_{c,i}} + 1\right) + 1\right)^{2}

        References
        ----------
        .. [1] Mathias, Paul M. "A Versatile Phase Equilibrium Equation of
           State." Industrial & Engineering Chemistry Process Design and
           Development 22, no. 3 (July 1, 1983): 385-91.
           doi:10.1021/i200022a008.
        '''
        c1, c2 = self.alpha_coeffs
        Tc, a = self.Tc, self.a
        Tr = T/Tc
        a_alpha = a*(1 + c1*(1-sqrt(Tr)) -c2*(1-Tr)*(0.7-Tr))**2
        da_alpha_dT = a*(c1*(-sqrt(T/Tc) + 1) - c2*(-T/Tc + 0.7)*(-T/Tc + 1) + 1)*(2*c2*(-T/Tc + 0.7)/Tc + 2*c2*(-T/Tc + 1)/Tc - c1*sqrt(T/Tc)/T)
        d2a_alpha_dT2 = a*((8*c2/Tc**2 - c1*sqrt(T/Tc)/T**2)*(c1*(sqrt(T/Tc) - 1) + c2*(T/Tc - 1)*(T/Tc - 0.7) - 1) + (2*c2*(T/Tc - 1)/Tc + 2*c2*(T/Tc - 0.7)/Tc + c1*sqrt(T/Tc)/T)**2)/2
        return a_alpha, da_alpha_dT, d2a_alpha_dT2

    def a_alpha_pure(self, T):
        c1, c2 = self.alpha_coeffs
        Tc, a = self.Tc, self.a
        Tr = T/Tc  # NOTE(review): computed but unused; Mathias_1983_alpha_pure recomputes it
        return a*Mathias_1983_alpha_pure(T, Tc, c1, c2)

class Mathias_Copeman_untruncated_a_alpha(a_alpha_base):
    def a_alpha_and_derivatives_pure(self, T):
        r'''Method to calculate `a_alpha` and its first and second
        derivatives according to Mathias and Copeman (1983) [1]_. Returns
        `a_alpha`, `da_alpha_dT`, and `d2a_alpha_dT2`. See
        `GCEOS.a_alpha_and_derivatives` for more documentation. Three
        coefficients needed.

        .. math::
            \alpha = \left(c_{1} \left(- \sqrt{\frac{T}{T_{c,i}}} + 1\right)
            + c_{2} \left(- \sqrt{\frac{T}{T_{c,i}}} + 1\right)^{2} + c_{3}
            \left(- \sqrt{\frac{T}{T_{c,i}}} + 1\right)^{3} + 1\right)^{2}

        References
        ----------
        .. [1] Mathias, Paul M., and Thomas W. Copeman. "Extension of the
           Peng-Robinson Equation of State to Complex Mixtures: Evaluation of
           the Various Forms of the Local Composition Concept." Fluid Phase
           Equilibria 13 (January 1, 1983): 91-108.
           doi:10.1016/0378-3812(83)80084-3.
        '''
        c1, c2, c3 = self.alpha_coeffs
        Tc, a = self.Tc, self.a
        a_alpha = a*(c1*(-sqrt(T/Tc) + 1) + c2*(-sqrt(T/Tc) + 1)**2 + c3*(-sqrt(T/Tc) + 1)**3 + 1)**2
        da_alpha_dT = a*(-c1*sqrt(T/Tc)/T - 2*c2*sqrt(T/Tc)*(-sqrt(T/Tc) + 1)/T - 3*c3*sqrt(T/Tc)*(-sqrt(T/Tc) + 1)**2/T)*(c1*(-sqrt(T/Tc) + 1) + c2*(-sqrt(T/Tc) + 1)**2 + c3*(-sqrt(T/Tc) + 1)**3 + 1)
        d2a_alpha_dT2 = a*(T*(c1 - 2*c2*(sqrt(T/Tc) - 1) + 3*c3*(sqrt(T/Tc) - 1)**2)**2 - (2*T*(c2 - 3*c3*(sqrt(T/Tc) - 1)) + Tc*sqrt(T/Tc)*(c1 - 2*c2*(sqrt(T/Tc) - 1) + 3*c3*(sqrt(T/Tc) - 1)**2))*(c1*(sqrt(T/Tc) - 1) - c2*(sqrt(T/Tc) - 1)**2 + c3*(sqrt(T/Tc) - 1)**3 - 1))/(2*T**2*Tc)
        return a_alpha, da_alpha_dT, d2a_alpha_dT2

    def a_alpha_pure(self, T):
        c1, c2, c3 = self.alpha_coeffs
        Tc, a = self.Tc, self.a
        return a*Mathias_Copeman_untruncated_alpha_pure(T, Tc, c1, c2, c3)

class Mathias_Copeman_a_alpha(a_alpha_base):
    def a_alpha_and_derivatives_pure(self, T):
        r'''Method to calculate `a_alpha` and its first and second
        derivatives according to Mathias and Copeman (1983) [1]_. Returns
        `a_alpha`, `da_alpha_dT`, and `d2a_alpha_dT2`. See
        `GCEOS.a_alpha_and_derivatives` for more documentation. Three
        coefficients needed.

        .. math::
            \alpha = \left(c_{1} \left(- \sqrt{\frac{T}{T_{c,i}}} + 1\right)
            + c_{2} \left(- \sqrt{\frac{T}{T_{c,i}}} + 1\right)^{2} + c_{3}
            \left(- \sqrt{\frac{T}{T_{c,i}}} + 1\right)^{3} + 1\right)^{2}

        References
        ----------
        .. [1] Mathias, Paul M., and Thomas W. Copeman. "Extension of the
           Peng-Robinson Equation of State to Complex Mixtures: Evaluation of
           the Various Forms of the Local Composition Concept." Fluid Phase
           Equilibria 13 (January 1, 1983): 91-108.
           doi:10.1016/0378-3812(83)80084-3.
        '''
        c1, c2, c3 = self.alpha_coeffs
        Tc, a = self.Tc, self.a
        Tr = T/Tc
        if Tr > 1:
            # Supercritical: truncated (linear-in-tau) form, CSE'd.
            x0 = 1.0/T
            x1 = 1.0/Tc
            x2 = sqrt(T*x1)
            x3 = c1*(x2 - 1.0) - 1.0
            x4 = x0*x2*x3
            a_alpha = a*x3*x3
            da_alpha_dT = a*c1*x4
            d2a_alpha_dT2 = 0.5*a*c1*x0*(c1*x1 - x4)
            return a_alpha, da_alpha_dT, d2a_alpha_dT2
        else:
            a_alpha = a*(c1*(-sqrt(T/Tc) + 1) + c2*(-sqrt(T/Tc) + 1)**2 + c3*(-sqrt(T/Tc) + 1)**3 + 1)**2
            da_alpha_dT = a*(-c1*sqrt(T/Tc)/T - 2*c2*sqrt(T/Tc)*(-sqrt(T/Tc) + 1)/T - 3*c3*sqrt(T/Tc)*(-sqrt(T/Tc) + 1)**2/T)*(c1*(-sqrt(T/Tc) + 1) + c2*(-sqrt(T/Tc) + 1)**2 + c3*(-sqrt(T/Tc) + 1)**3 + 1)
            d2a_alpha_dT2 = a*(T*(c1 - 2*c2*(sqrt(T/Tc) - 1) + 3*c3*(sqrt(T/Tc) - 1)**2)**2 - (2*T*(c2 - 3*c3*(sqrt(T/Tc) - 1)) + Tc*sqrt(T/Tc)*(c1 - 2*c2*(sqrt(T/Tc) - 1) + 3*c3*(sqrt(T/Tc) - 1)**2))*(c1*(sqrt(T/Tc) - 1) - c2*(sqrt(T/Tc) - 1)**2 + c3*(sqrt(T/Tc) - 1)**3 - 1))/(2*T**2*Tc)
            return a_alpha, da_alpha_dT, d2a_alpha_dT2

    def a_alpha_pure(self, T):
        return self.a*Mathias_Copeman_original_alpha_pure(T, self.Tc, *self.alpha_coeffs)

class Mathias_Copeman_poly_a_alpha(a_alpha_base):
    # Multi-component Mathias-Copeman variant: alpha is a general polynomial
    # in tau = 1 - sqrt(Tr), stored highest order first and ending in 1.

    def a_alphas_vectorized(self, T):
        # Evaluate a_alpha for every component at a single T.
        ais, alpha_coeffs, Tcs = self.ais, self.alpha_coeffs, self.Tcs
        a_alphas = []
        for i in range(self.N):
            tau = 1.0 - (T/Tcs[i])**0.5
            if T < Tcs[i]:
                x0 = horner(alpha_coeffs[i], tau)
                a_alpha = x0*x0*ais[i]
            else:
                # Supercritical: only the linear coefficient is used.
                x = (1.0 + alpha_coeffs[i][-2]*tau)
                a_alpha = ais[i]*x*x
            a_alphas.append(a_alpha)
        return a_alphas

    def a_alpha_and_derivatives_vectorized(self, T):
        # Evaluate a_alpha and its first two T derivatives per component.
        ais, alpha_coeffs, Tcs = self.ais, self.alpha_coeffs, self.Tcs
        a_alphas, da_alpha_dTs, d2a_alpha_dT2s = [], [], []
        for i in range(self.N):
            a = ais[i]
            Tc = Tcs[i]
            rt = (T/Tc)**0.5
            tau = 1.0 - rt
            if T < Tc:
                x0, x1, x2 = horner_and_der2(alpha_coeffs[i], tau)
                a_alpha = x0*x0*a
                da_alpha_dT = -a*(rt*x0*x1/T)
                d2a_alpha_dT2 = a*((x0*x2/Tc + x1*x1/Tc + rt*x0*x1/T)/(2.0*T))
            else:
                # [-2] is the index to get the second-last coefficient (c1)! Wasted time on this before.
                c1 = alpha_coeffs[i][-2]
                x0 = 1.0/T
                x1 = 1.0/Tc
                x2 = rt#sqrt(T*x1)
                x3 = c1*(x2 - 1.0) - 1.0
                x4 = x0*x2*x3
                a_alpha = a*x3*x3
                da_alpha_dT = a*c1*x4
                d2a_alpha_dT2 = a*0.5*c1*x0*(c1*x1 - x4)
            a_alphas.append(a_alpha)
            da_alpha_dTs.append(da_alpha_dT)
            d2a_alpha_dT2s.append(d2a_alpha_dT2)
        return a_alphas, da_alpha_dTs, d2a_alpha_dT2s

    def a_alpha_pure(self, T):
        # alpha_coeffs [c3, c2, c1, 1] always
        # [-2] is the index to get the second-last coefficient (c1)! Wasted time on this before.
        # return self.a*Mathias_Copeman_alpha_pure(T, self.Tc, self.alpha_coeffs)
        Tc = self.Tc
        a = self.a
        rt = sqrt(T/Tc)
        tau = 1.0 - rt
        alpha_coeffs = self.alpha_coeffs
        if T < Tc:
            x0 = horner(alpha_coeffs, tau)
            a_alpha = x0*x0*a
            return a_alpha
        else:
            x = (1.0 + alpha_coeffs[-2]*tau)
            return a*x*x

    def a_alpha_and_derivatives_pure(self, T):
        Tc = self.Tc
        a = self.a
        rt = (T/Tc)**0.5
        tau = 1.0 - rt
        alpha_coeffs = self.alpha_coeffs
        if T < Tc:
            # Do not optimize until unit tests are in place
            x0, x1, x2 = horner_and_der2(alpha_coeffs, tau)
            a_alpha = x0*x0*a
            da_alpha_dT = -a*(rt*x0*x1/T)
            d2a_alpha_dT2 = a*((x0*x2/Tc + x1*x1/Tc + rt*x0*x1/T)/(2.0*T))
            return a_alpha, da_alpha_dT, d2a_alpha_dT2
        else:
            '''
            from sympy import *
            T, Tc, c1 = symbols('T, Tc, c1')
            tau = 1 - sqrt(T/Tc)
            alpha = (1 + c1*tau)**2
            cse([alpha, diff(alpha, T), diff(alpha, T, T)], optimizations='basic')
            '''
            # [-2] is the index to get the second-last coefficient (c1)! Wasted time on this before.
            c1 = alpha_coeffs[-2]
            x0 = 1.0/T
            x1 = 1.0/Tc
            x2 = rt#sqrt(T*x1)
            x3 = c1*(x2 - 1.0) - 1.0
            x4 = x0*x2*x3
            a_alpha = a*x3*x3
            da_alpha_dT = a*c1*x4
            d2a_alpha_dT2 = 0.5*a*c1*x0*(c1*x1 - x4)
            return a_alpha, da_alpha_dT, d2a_alpha_dT2

class Gibbons_Laughton_a_alpha(a_alpha_base):
    def a_alpha_and_derivatives_pure(self, T):
        r'''Method to calculate `a_alpha` and its first and second
        derivatives according to Gibbons and Laughton (1984) [1]_. Returns
        `a_alpha`, `da_alpha_dT`, and `d2a_alpha_dT2`. See
        `GCEOS.a_alpha_and_derivatives` for more documentation.
        Two coefficients needed.

        .. math::
            \alpha = c_{1} \left(\frac{T}{T_{c,i}} - 1\right) + c_{2}
            \left(\sqrt{\frac{T}{T_{c,i}}} - 1\right) + 1

        References
        ----------
        .. [1] Gibbons, Richard M., and Andrew P. Laughton. "An Equation of
           State for Polar and Non-Polar Substances and Mixtures" 80, no. 9
           (January 1, 1984): 1019-38. doi:10.1039/F29848001019.
        '''
        c1, c2 = self.alpha_coeffs
        Tc, a = self.Tc, self.a
        a_alpha = a*(c1*(T/Tc - 1) + c2*(sqrt(T/Tc) - 1) + 1)
        da_alpha_dT = a*(c1/Tc + c2*sqrt(T/Tc)/(2*T))
        d2a_alpha_dT2 = a*(-c2*sqrt(T/Tc)/(4*T**2))
        return a_alpha, da_alpha_dT, d2a_alpha_dT2

    def a_alpha_pure(self, T):
        c1, c2 = self.alpha_coeffs
        Tc, a = self.Tc, self.a
        return a*Gibbons_Laughton_alpha_pure(T, Tc, c1, c2)

class Soave_1984_a_alpha(a_alpha_base):
    def a_alpha_and_derivatives_pure(self, T):
        r'''Method to calculate `a_alpha` and its first and second
        derivatives according to Soave (1984) [1]_. Returns `a_alpha`,
        `da_alpha_dT`, and `d2a_alpha_dT2`. See
        `GCEOS.a_alpha_and_derivatives` for more documentation. Two
        coefficients needed.

        .. math::
            \alpha = c_{1} \left(- \frac{T}{T_{c,i}} + 1\right) + c_{2}
            \left(-1 + \frac{T_{c,i}}{T}\right) + 1

        References
        ----------
        .. [1] Soave, G. "Improvement of the Van Der Waals Equation of
           State." Chemical Engineering Science 39, no. 2 (January 1, 1984):
           357-69. doi:10.1016/0009-2509(84)80034-2.
        '''
        c1, c2 = self.alpha_coeffs
        Tc, a = self.Tc, self.a
        a_alpha = a*(c1*(-T/Tc + 1) + c2*(-1 + Tc/T) + 1)
        da_alpha_dT = a*(-c1/Tc - Tc*c2/T**2)
        d2a_alpha_dT2 = a*(2*Tc*c2/T**3)
        return a_alpha, da_alpha_dT, d2a_alpha_dT2

    def a_alpha_pure(self, T):
        c1, c2 = self.alpha_coeffs
        Tc, a = self.Tc, self.a
        return a*Soave_1984_alpha_pure(T, Tc, c1, c2)

class Yu_Lu_a_alpha(a_alpha_base):
    def a_alpha_and_derivatives_pure(self, T):
        r'''Method to calculate `a_alpha` and its first and second
        derivatives according to Yu and Lu (1987) [1]_. Returns `a_alpha`,
        `da_alpha_dT`, and `d2a_alpha_dT2`. See
        `GCEOS.a_alpha_and_derivatives` for more documentation.
        Four coefficients needed.

        .. math::
            \alpha = 10^{c_{4} \left(- \frac{T}{T_{c,i}} + 1\right) \left(
            \frac{T^{2} c_{3}}{Tc^{2}} + \frac{T c_{2}}{T_{c,i}}
            + c_{1}\right)}

        References
        ----------
        .. [1] Yu, Jin-Min, and Benjamin C. -Y. Lu. "A Three-Parameter Cubic
           Equation of State for Asymmetric Mixture Density Calculations."
           Fluid Phase Equilibria 34, no. 1 (January 1, 1987): 1-19.
           doi:10.1016/0378-3812(87)85047-1.
        '''
        c1, c2, c3, c4 = self.alpha_coeffs
        Tc, a = self.Tc, self.a
        a_alpha = a*10**(c4*(-T/Tc + 1)*(T**2*c3/Tc**2 + T*c2/Tc + c1))
        # log(10) factors arise from differentiating the base-10 power.
        da_alpha_dT = a*(10**(c4*(-T/Tc + 1)*(T**2*c3/Tc**2 + T*c2/Tc + c1))*(c4*(-T/Tc + 1)*(2*T*c3/Tc**2 + c2/Tc) - c4*(T**2*c3/Tc**2 + T*c2/Tc + c1)/Tc)*log(10))
        d2a_alpha_dT2 = a*(10**(-c4*(T/Tc - 1)*(T**2*c3/Tc**2 + T*c2/Tc + c1))*c4*(-4*T*c3/Tc - 2*c2 - 2*c3*(T/Tc - 1) + c4*(T**2*c3/Tc**2 + T*c2/Tc + c1 + (T/Tc - 1)*(2*T*c3/Tc + c2))**2*log(10))*log(10)/Tc**2)
        return a_alpha, da_alpha_dT, d2a_alpha_dT2

    def a_alpha_pure(self, T):
        return self.a*Yu_Lu_alpha_pure(T, self.Tc, *self.alpha_coeffs)

class Trebble_Bishnoi_a_alpha(a_alpha_base):
    def a_alpha_and_derivatives_pure(self, T):
        r'''Method to calculate `a_alpha` and its first and second
        derivatives according to Trebble and Bishnoi (1987) [1]_. Returns
        `a_alpha`, `da_alpha_dT`, and `d2a_alpha_dT2`. See
        `GCEOS.a_alpha_and_derivatives` for more documentation. One
        coefficient needed.

        .. math::
            \alpha = e^{c_{1} \left(- \frac{T}{T_{c,i}} + 1\right)}

        References
        ----------
        .. [1] Trebble, M. A., and P. R. Bishnoi. "Development of a New Four-
           Parameter Cubic Equation of State." Fluid Phase Equilibria 35,
           no. 1 (September 1, 1987): 1-18.
           doi:10.1016/0378-3812(87)80001-8.
        '''
        c1 = self.alpha_coeffs[0]
        Tc, a = self.Tc, self.a
        a_alpha = a*exp(c1*(-T/Tc + 1))
        da_alpha_dT = a*-c1*exp(c1*(-T/Tc + 1))/Tc
        d2a_alpha_dT2 = a*c1**2*exp(-c1*(T/Tc - 1))/Tc**2
        return a_alpha, da_alpha_dT, d2a_alpha_dT2

    def a_alpha_pure(self, T):
        return self.a*Trebble_Bishnoi_alpha_pure(T, self.Tc, *self.alpha_coeffs)

class Melhem_a_alpha(a_alpha_base):
    def a_alpha_and_derivatives_pure(self, T):
        r'''Method to calculate `a_alpha` and its first and second
        derivatives according to Melhem et al. (1989) [1]_. Returns
        `a_alpha`, `da_alpha_dT`, and `d2a_alpha_dT2`. See
        `GCEOS.a_alpha_and_derivatives` for more documentation. Two
        coefficients needed.

        .. math::
            \alpha = e^{c_{1} \left(- \frac{T}{T_{c,i}} + 1\right) + c_{2}
            \left(- \sqrt{\frac{T}{T_{c,i}}} + 1\right)^{2}}

        References
        ----------
        .. [1] Melhem, Georges A., Riju Saini, and Bernard M. Goodwin. "A
           Modified Peng-Robinson Equation of State." Fluid Phase Equilibria
           47, no. 2 (August 1, 1989): 189-237.
           doi:10.1016/0378-3812(89)80176-1.
        '''
        c1, c2 = self.alpha_coeffs
        Tc, a = self.Tc, self.a
        a_alpha = a*exp(c1*(-T/Tc + 1) + c2*(-sqrt(T/Tc) + 1)**2)
        da_alpha_dT = a*((-c1/Tc - c2*sqrt(T/Tc)*(-sqrt(T/Tc) + 1)/T)*exp(c1*(-T/Tc + 1) + c2*(-sqrt(T/Tc) + 1)**2))
        d2a_alpha_dT2 = a*(((c1/Tc - c2*sqrt(T/Tc)*(sqrt(T/Tc) - 1)/T)**2 + c2*(1/Tc - sqrt(T/Tc)*(sqrt(T/Tc) - 1)/T)/(2*T))*exp(-c1*(T/Tc - 1) + c2*(sqrt(T/Tc) - 1)**2))
        return a_alpha, da_alpha_dT, d2a_alpha_dT2

    def a_alpha_pure(self, T):
        return self.a*Melhem_alpha_pure(T, self.Tc, *self.alpha_coeffs)

class Androulakis_a_alpha(a_alpha_base):
    def a_alpha_and_derivatives_pure(self, T):
        r'''Method to calculate `a_alpha` and its first and second
        derivatives according to Androulakis et al. (1989) [1]_. Returns
        `a_alpha`, `da_alpha_dT`, and `d2a_alpha_dT2`. See
        `GCEOS.a_alpha_and_derivatives` for more documentation. Three
        coefficients needed.

        ..
        math:: \alpha = c_{1} \left(- \left(\frac{T}{T_{c,i}}
            \right)^{\frac{2}{3}} + 1\right) + c_{2} \left(-
            \left(\frac{T}{T_{c,i}}\right)^{\frac{2}{3}} + 1\right)^{2}
            + c_{3} \left(- \left(\frac{T}{T_{c,i}}\right)^{
            \frac{2}{3}} + 1\right)^{3} + 1

        References
        ----------
        .. [1] Androulakis, I. P., N. S. Kalospiros, and D. P. Tassios.
           "Thermophysical Properties of Pure Polar and Nonpolar Compounds
           with a Modified VdW-711 Equation of State." Fluid Phase
           Equilibria 45, no. 2 (April 1, 1989): 135-63.
           doi:10.1016/0378-3812(89)80254-7.
        '''
        c1, c2, c3 = self.alpha_coeffs
        Tc, a = self.Tc, self.a
        a_alpha = a*(c1*(-(T/Tc)**(2/3) + 1) + c2*(-(T/Tc)**(2/3) + 1)**2 + c3*(-(T/Tc)**(2/3) + 1)**3 + 1)
        da_alpha_dT = a*(-2*c1*(T/Tc)**(2/3)/(3*T) - 4*c2*(T/Tc)**(2/3)*(-(T/Tc)**(2/3) + 1)/(3*T) - 2*c3*(T/Tc)**(2/3)*(-(T/Tc)**(2/3) + 1)**2/T)
        d2a_alpha_dT2 = a*(2*(T/Tc)**(2/3)*(c1 + 4*c2*(T/Tc)**(2/3) - 2*c2*((T/Tc)**(2/3) - 1) - 12*c3*(T/Tc)**(2/3)*((T/Tc)**(2/3) - 1) + 3*c3*((T/Tc)**(2/3) - 1)**2)/(9*T**2))
        return a_alpha, da_alpha_dT, d2a_alpha_dT2

    def a_alpha_pure(self, T):
        return self.a*Androulakis_alpha_pure(T, self.Tc, *self.alpha_coeffs)

class Schwartzentruber_a_alpha(a_alpha_base):
    def a_alpha_and_derivatives_pure(self, T):
        r'''Method to calculate `a_alpha` and its first and second
        derivatives according to Schwartzentruber et al. (1990) [1]_.
        Returns `a_alpha`, `da_alpha_dT`, and `d2a_alpha_dT2`. See
        `GCEOS.a_alpha_and_derivatives` for more documentation. Three
        coefficients needed.

        .. math::
            \alpha = \left(c_{4} \left(- \sqrt{\frac{T}{T_{c,i}}} + 1\right)
            - \left(- \sqrt{\frac{T}{T_{c,i}}} + 1\right) \left(\frac{T^{2}
            c_{3}}{Tc^{2}} + \frac{T c_{2}}{T_{c,i}} + c_{1}\right)
            + 1\right)^{2}

        References
        ----------
        .. [1] J. Schwartzentruber, H. Renon, and S. Watanasiri, "K-values
           for Non-Ideal Systems:An Easier Way," Chem. Eng., March 1990,
           118-124.
        '''
        c1, c2, c3, c4 = self.alpha_coeffs
        Tc, a = self.Tc, self.a
        a_alpha = a*((c4*(-sqrt(T/Tc) + 1) - (-sqrt(T/Tc) + 1)*(T**2*c3/Tc**2 + T*c2/Tc + c1) + 1)**2)
        da_alpha_dT = a*((c4*(-sqrt(T/Tc) + 1) - (-sqrt(T/Tc) + 1)*(T**2*c3/Tc**2 + T*c2/Tc + c1) + 1)*(-2*(-sqrt(T/Tc) + 1)*(2*T*c3/Tc**2 + c2/Tc) - c4*sqrt(T/Tc)/T + sqrt(T/Tc)*(T**2*c3/Tc**2 + T*c2/Tc + c1)/T))
        d2a_alpha_dT2 = a*(((-c4*(sqrt(T/Tc) - 1) + (sqrt(T/Tc) - 1)*(T**2*c3/Tc**2 + T*c2/Tc + c1) + 1)*(8*c3*(sqrt(T/Tc) - 1)/Tc**2 + 4*sqrt(T/Tc)*(2*T*c3/Tc + c2)/(T*Tc) + c4*sqrt(T/Tc)/T**2 - sqrt(T/Tc)*(T**2*c3/Tc**2 + T*c2/Tc + c1)/T**2) + (2*(sqrt(T/Tc) - 1)*(2*T*c3/Tc + c2)/Tc - c4*sqrt(T/Tc)/T + sqrt(T/Tc)*(T**2*c3/Tc**2 + T*c2/Tc + c1)/T)**2)/2)
        return a_alpha, da_alpha_dT, d2a_alpha_dT2

    def a_alpha_pure(self, T):
        return self.a*Schwartzentruber_alpha_pure(T, self.Tc, *self.alpha_coeffs)

class Almeida_a_alpha(a_alpha_base):
    def a_alpha_and_derivatives_pure(self, T):
        r'''Method to calculate `a_alpha` and its first and second
        derivatives according to Almeida et al. (1991) [1]_. Returns
        `a_alpha`, `da_alpha_dT`, and `d2a_alpha_dT2`. See
        `GCEOS.a_alpha_and_derivatives` for more documentation. Three
        coefficients needed.

        .. math::
            \alpha = e^{c_{1} \left(- \frac{T}{T_{c,i}} + 1\right) \left|{
            \frac{T}{T_{c,i}} - 1}\right|^{c_{2} - 1} + c_{3} \left(-1
            + \frac{T_{c,i}}{T}\right)}

        References
        ----------
        .. [1] Almeida, G. S., M. Aznar, and A. S. Telles. "Uma Nova Forma de
           Dependência Com a Temperatura Do Termo Atrativo de Equações de
           Estado Cúbicas." RBE, Rev. Bras. Eng., Cad. Eng. Quim 8 (1991):
           95.
        '''
        # Note: Sympy didn't handle the derivative of the absolute value for
        # the second derivative, requires the use a CAS which can
        # handle the assumption that Tr-1 != 0.
        # A second pass on this function resulted in writting two functions:
        # one which works on Tr < 1, one which works on Tr > 1.
c1, c2, c3 = self.alpha_coeffs Tc, a = self.Tc, self.a Tr = T/Tc if Tr > 1: x0 = c3*(1 - Tc/T) x1 = 1/Tc x2 = T*x1 - 1 x3 = c2 - 1 x4 = c1*x2**x3 x5 = x2*x4 x6 = exp(-x0 - x5) x7 = Tc*c3 x8 = x1*x4 x9 = x3*x8 + x8 + x7/T**2 x10 = x4/(Tc**2*x2) alpha, d_alpha_dT, d2_alpha_dT2 = exp(-x0 - x5), -x6*x9, x6*(-x10*x3**2 - x10*x3 + x9**2 + 2*x7/T**3) else: x0 = c3*(1 - Tc/T) x1 = 1/Tc x2 = T*x1 x3 = x2 - 1 x4 = c2 - 1 x5 = c1*(1 - x2)**x4 x6 = x3*x5 x7 = exp(-x0 - x6) x8 = Tc*c3 x9 = x1*x5 x10 = x4*x9 + x9 + x8/T**2 x11 = x5/(Tc**2*x3) alpha, d_alpha_dT, d2_alpha_dT2 = exp(-x0 - x6), -x10*x7, x7*(x10**2 - x11*x4**2 - x11*x4 + 2*x8/T**3) return a*alpha, a*d_alpha_dT, a*d2_alpha_dT2 def a_alpha_pure(self, T): return self.a*Almeida_alpha_pure(T, self.Tc, *self.alpha_coeffs) class Twu91_a_alpha(a_alpha_base): def a_alpha_and_derivatives_pure(self, T): r'''Method to calculate `a_alpha` and its first and second derivatives according to Twu et al. (1991) [1]_. Returns `a_alpha`, `da_alpha_dT`, and `d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives` for more documentation. Three coefficients needed. .. math:: \alpha = \left(\frac{T}{T_{c,i}}\right)^{c_{3} \left(c_{2} - 1\right)} e^{c_{1} \left(- \left(\frac{T}{T_{c,i}} \right)^{c_{2} c_{3}} + 1\right)} References ---------- .. [1] Twu, Chorng H., David Bluck, John R. Cunningham, and John E. Coon. "A Cubic Equation of State with a New Alpha Function and a New Mixing Rule." Fluid Phase Equilibria 69 (December 10, 1991): 33-50. doi:10.1016/0378-3812(91)90024-2. 
''' c0, c1, c2 = self.alpha_coeffs Tc, a = self.Tc, self.a Tr = T/Tc T_inv = 1.0/T x1 = c1 - 1.0 x2 = c2*x1 x3 = c1*c2 x4 = Tr**x3 x5 = a*Tr**x2*exp(-c0*(x4 - 1.0)) x6 = c0*x4 x7 = c1*x6 x8 = c2*x5 x9 = c1*c1*c2 d2a_alpha_dT2 = (x8*(c0*c0*x4*x4*x9 - c1 + c2*x1*x1 - 2.0*x2*x7 - x6*x9 + x7 + 1.0)*T_inv*T_inv) return x5, x8*(x1 - x7)*T_inv, d2a_alpha_dT2 def a_alpha_pure(self, T): c0, c1, c2 = self.alpha_coeffs Tc, a = self.Tc, self.a return a*Twu91_alpha_pure(T, Tc, c0, c1, c2) def a_alphas_vectorized(self, T): ais, alpha_coeffs, Tcs = self.ais, self.alpha_coeffs, self.Tcs a_alphas = [] for i in range(self.N): coeffs = alpha_coeffs[i] Tr = T/Tcs[i] a_alpha = ais[i]*(Tr**(coeffs[2]*(coeffs[1] - 1.0))*exp(coeffs[0]*(1.0 - (Tr)**(coeffs[1]*coeffs[2])))) a_alphas.append(a_alpha) if self.scalar: return a_alphas return array(a_alphas) def a_alpha_and_derivatives_vectorized(self, T): r'''Method to calculate the pure-component `a_alphas` and their first and second derivatives for TWU91 alpha function EOS. This vectorized implementation is added for extra speed. .. 
math:: \alpha = \left(\frac{T}{T_{c,i}}\right)^{c_{3} \left(c_{2} - 1\right)} e^{c_{1} \left(- \left(\frac{T}{T_{c,i}} \right)^{c_{2} c_{3}} + 1\right)} Parameters ---------- T : float Temperature, [K] Returns ------- a_alphas : list[float] Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa] da_alpha_dTs : list[float] Temperature derivative of coefficient calculated by EOS-specific method, [J^2/mol^2/Pa/K] d2a_alpha_dT2s : list[float] Second temperature derivative of coefficient calculated by EOS-specific method, [J^2/mol^2/Pa/K**2] ''' ais, alpha_coeffs, Tcs = self.ais, self.alpha_coeffs, self.Tcs N = len(ais) a_alphas = [0.0]*N da_alpha_dTs = [0.0]*N d2a_alpha_dT2s = [0.0]*N T_inv = 1.0/T for i in range(N): coeffs = alpha_coeffs[i] c0, c1, c2 = coeffs[0], coeffs[1], coeffs[2] Tr = T/Tcs[i] x1 = c1 - 1.0 x2 = c2*x1 x3 = c1*c2 x4 = Tr**x3 x5 = ais[i]*Tr**x2*exp(-c0*(x4 - 1.0)) x6 = c0*x4 x7 = c1*x6 x8 = c2*x5 x9 = c1*c1*c2 d2a_alpha_dT2 = (x8*(c0*c0*x4*x4*x9 - c1 + c2*x1*x1 - 2.0*x2*x7 - x6*x9 + x7 + 1.0)*T_inv*T_inv) a_alphas[i] = x5 da_alpha_dTs[i] = x8*(x1 - x7)*T_inv d2a_alpha_dT2s[i] = d2a_alpha_dT2 if self.scalar: return a_alphas, da_alpha_dTs, d2a_alpha_dT2s return array(a_alphas), array(da_alpha_dTs), array(d2a_alpha_dT2s) class Soave_1993_a_alpha(a_alpha_base): def a_alpha_and_derivatives_pure(self, T): r'''Method to calculate `a_alpha` and its first and second derivatives according to Soave (1983) [1]_. Returns `a_alpha`, `da_alpha_dT`, and `d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives` for more documentation. Two coefficient needed. .. math:: \alpha = c_{1} \left(- \frac{T}{T_{c,i}} + 1\right) + c_{2} \left(- \sqrt{\frac{T}{T_{c,i}}} + 1\right)^{2} + 1 References ---------- .. [1] Soave, G. "Improving the Treatment of Heavy Hydrocarbons by the SRK EOS." Fluid Phase Equilibria 84 (April 1, 1993): 339-42. doi:10.1016/0378-3812(93)85131-5. 
''' c1, c2 = self.alpha_coeffs Tc, a = self.Tc, self.a a_alpha = a*(c1*(-T/Tc + 1) + c2*(-sqrt(T/Tc) + 1)**2 + 1) da_alpha_dT = a*(-c1/Tc - c2*sqrt(T/Tc)*(-sqrt(T/Tc) + 1)/T) d2a_alpha_dT2 = a*(c2*(1/Tc - sqrt(T/Tc)*(sqrt(T/Tc) - 1)/T)/(2*T)) return a_alpha, da_alpha_dT, d2a_alpha_dT2 def a_alpha_pure(self, T): return self.a*Soave_1993_alpha_pure(T, self.Tc, *self.alpha_coeffs) class Gasem_a_alpha(a_alpha_base): def a_alpha_and_derivatives_pure(self, T): r'''Method to calculate `a_alpha` and its first and second derivatives according to Gasem (2001) [1]_. Returns `a_alpha`, `da_alpha_dT`, and `d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives` for more documentation. Three coefficients needed. .. math:: \alpha = e^{\left(- \left(\frac{T}{T_{c,i}}\right)^{c_{3}} + 1\right) \left(\frac{T c_{2}}{T_{c,i}} + c_{1}\right)} References ---------- .. [1] Gasem, K. A. M, W Gao, Z Pan, and R. L Robinson Jr. "A Modified Temperature Dependence for the Peng-Robinson Equation of State." Fluid Phase Equilibria 181, no. 1–2 (May 25, 2001): 113-25. doi:10.1016/S0378-3812(01)00488-5. ''' c1, c2, c3 = self.alpha_coeffs Tc, a = self.Tc, self.a a_alpha = a*(exp((-(T/Tc)**c3 + 1)*(T*c2/Tc + c1))) da_alpha_dT = a*((c2*(-(T/Tc)**c3 + 1)/Tc - c3*(T/Tc)**c3*(T*c2/Tc + c1)/T)*exp((-(T/Tc)**c3 + 1)*(T*c2/Tc + c1))) d2a_alpha_dT2 = a*(((c2*((T/Tc)**c3 - 1)/Tc + c3*(T/Tc)**c3*(T*c2/Tc + c1)/T)**2 - c3*(T/Tc)**c3*(2*c2/Tc + c3*(T*c2/Tc + c1)/T - (T*c2/Tc + c1)/T)/T)*exp(-((T/Tc)**c3 - 1)*(T*c2/Tc + c1))) return a_alpha, da_alpha_dT, d2a_alpha_dT2 def a_alpha_pure(self, T): return self.a*Gasem_alpha_pure(T, self.Tc, *self.alpha_coeffs) class Coquelet_a_alpha(a_alpha_base): def a_alpha_and_derivatives_pure(self, T): r'''Method to calculate `a_alpha` and its first and second derivatives according to Coquelet et al. (2004) [1]_. Returns `a_alpha`, `da_alpha_dT`, and `d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives` for more documentation. Three coefficients needed. .. 
math:: \alpha = e^{c_{1} \left(- \frac{T}{T_{c,i}} + 1\right) \left(c_{2} \left(- \sqrt{\frac{T}{T_{c,i}}} + 1\right)^{2} + c_{3} \left(- \sqrt{\frac{T}{T_{c,i}}} + 1\right)^{3} + 1\right)^{2}} References ---------- .. [1] Coquelet, C., A. Chapoy, and D. Richon. "Development of a New Alpha Function for the Peng–Robinson Equation of State: Comparative Study of Alpha Function Models for Pure Gases (Natural Gas Components) and Water-Gas Systems." International Journal of Thermophysics 25, no. 1 (January 1, 2004): 133-58. doi:10.1023/B:IJOT.0000022331.46865.2f. ''' c1, c2, c3 = self.alpha_coeffs Tc, a = self.Tc, self.a a_alpha = a*(exp(c1*(-T/Tc + 1)*(c2*(-sqrt(T/Tc) + 1)**2 + c3*(-sqrt(T/Tc) + 1)**3 + 1)**2)) da_alpha_dT = a*((c1*(-T/Tc + 1)*(-2*c2*sqrt(T/Tc)*(-sqrt(T/Tc) + 1)/T - 3*c3*sqrt(T/Tc)*(-sqrt(T/Tc) + 1)**2/T)*(c2*(-sqrt(T/Tc) + 1)**2 + c3*(-sqrt(T/Tc) + 1)**3 + 1) - c1*(c2*(-sqrt(T/Tc) + 1)**2 + c3*(-sqrt(T/Tc) + 1)**3 + 1)**2/Tc)*exp(c1*(-T/Tc + 1)*(c2*(-sqrt(T/Tc) + 1)**2 + c3*(-sqrt(T/Tc) + 1)**3 + 1)**2)) d2a_alpha_dT2 = a*(c1*(c1*(-(c2*(sqrt(T/Tc) - 1)**2 - c3*(sqrt(T/Tc) - 1)**3 + 1)/Tc + sqrt(T/Tc)*(-2*c2 + 3*c3*(sqrt(T/Tc) - 1))*(sqrt(T/Tc) - 1)*(T/Tc - 1)/T)**2*(c2*(sqrt(T/Tc) - 1)**2 - c3*(sqrt(T/Tc) - 1)**3 + 1)**2 - ((T/Tc - 1)*(c2*(sqrt(T/Tc) - 1)**2 - c3*(sqrt(T/Tc) - 1)**3 + 1)*(2*c2/Tc - 6*c3*(sqrt(T/Tc) - 1)/Tc - 2*c2*sqrt(T/Tc)*(sqrt(T/Tc) - 1)/T + 3*c3*sqrt(T/Tc)*(sqrt(T/Tc) - 1)**2/T) + 4*sqrt(T/Tc)*(2*c2 - 3*c3*(sqrt(T/Tc) - 1))*(sqrt(T/Tc) - 1)*(c2*(sqrt(T/Tc) - 1)**2 - c3*(sqrt(T/Tc) - 1)**3 + 1)/Tc + (2*c2 - 3*c3*(sqrt(T/Tc) - 1))**2*(sqrt(T/Tc) - 1)**2*(T/Tc - 1)/Tc)/(2*T))*exp(-c1*(T/Tc - 1)*(c2*(sqrt(T/Tc) - 1)**2 - c3*(sqrt(T/Tc) - 1)**3 + 1)**2)) return a_alpha, da_alpha_dT, d2a_alpha_dT2 def a_alpha_pure(self, T): return self.a*Coquelet_alpha_pure(T, self.Tc, *self.alpha_coeffs) class Haghtalab_a_alpha(a_alpha_base): def a_alpha_and_derivatives_pure(self, T): r'''Method to calculate `a_alpha` and its first and second 
derivatives according to Haghtalab et al. (2010) [1]_. Returns `a_alpha`, `da_alpha_dT`, and `d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives` for more documentation. Three coefficients needed. .. math:: \alpha = e^{\left(- c_{3}^{\ln{\left (\frac{T}{T_{c,i}} \right )}} + 1\right) \left(- \frac{T c_{2}}{T_{c,i}} + c_{1}\right)} References ---------- .. [1] Haghtalab, A., M. J. Kamali, S. H. Mazloumi, and P. Mahmoodi. "A New Three-Parameter Cubic Equation of State for Calculation Physical Properties and Vapor-liquid Equilibria." Fluid Phase Equilibria 293, no. 2 (June 25, 2010): 209-18. doi:10.1016/j.fluid.2010.03.029. ''' c1, c2, c3 = self.alpha_coeffs Tc, a = self.Tc, self.a a_alpha = a*exp((-c3**log(T/Tc) + 1)*(-T*c2/Tc + c1)) da_alpha_dT = a*((-c2*(-c3**log(T/Tc) + 1)/Tc - c3**log(T/Tc)*(-T*c2/Tc + c1)*log(c3)/T)*exp((-c3**log(T/Tc) + 1)*(-T*c2/Tc + c1))) d2a_alpha_dT2 = a*(((c2*(c3**log(T/Tc) - 1)/Tc + c3**log(T/Tc)*(T*c2/Tc - c1)*log(c3)/T)**2 + c3**log(T/Tc)*(2*c2/Tc + (T*c2/Tc - c1)*log(c3)/T - (T*c2/Tc - c1)/T)*log(c3)/T)*exp((c3**log(T/Tc) - 1)*(T*c2/Tc - c1))) return a_alpha, da_alpha_dT, d2a_alpha_dT2 def a_alpha_pure(self, T): return self.a*Haghtalab_alpha_pure(T, self.Tc, *self.alpha_coeffs) class Saffari_a_alpha(a_alpha_base): def a_alpha_and_derivatives_pure(self, T): r'''Method to calculate `a_alpha` and its first and second derivatives according to Saffari and Zahedi (2013) [1]_. Returns `a_alpha`, `da_alpha_dT`, and `d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives` for more documentation. Three coefficients needed. .. math:: \alpha = e^{\frac{T c_{1}}{T_{c,i}} + c_{2} \ln{\left (\frac{T}{T_{c,i}} \right )} + c_{3} \left(- \sqrt{\frac{T}{T_{c,i}}} + 1\right)} References ---------- .. [1] Saffari, Hamid, and Alireza Zahedi. "A New Alpha-Function for the Peng-Robinson Equation of State: Application to Natural Gas." Chinese Journal of Chemical Engineering 21, no. 10 (October 1, 2013): 1155-61. doi:10.1016/S1004-9541(13)60581-9. 
''' c1, c2, c3 = self.alpha_coeffs Tc, a = self.Tc, self.a a_alpha = a*(exp(T*c1/Tc + c2*log(T/Tc) + c3*(-sqrt(T/Tc) + 1))) da_alpha_dT = a*((c1/Tc + c2/T - c3*sqrt(T/Tc)/(2*T))*exp(T*c1/Tc + c2*log(T/Tc) + c3*(-sqrt(T/Tc) + 1))) d2a_alpha_dT2 = a*(((2*c1/Tc + 2*c2/T - c3*sqrt(T/Tc)/T)**2 - (4*c2 - c3*sqrt(T/Tc))/T**2)*exp(T*c1/Tc + c2*log(T/Tc) - c3*(sqrt(T/Tc) - 1))/4) return a_alpha, da_alpha_dT, d2a_alpha_dT2 def a_alpha_pure(self, T): return self.a*Saffari_alpha_pure(T, self.Tc, *self.alpha_coeffs) class Chen_Yang_a_alpha(a_alpha_base): def a_alpha_and_derivatives_pure(self, T): r'''Method to calculate `a_alpha` and its first and second derivatives according to Hamid and Yang (2017) [1]_. Returns `a_alpha`, `da_alpha_dT`, and `d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives` for more documentation. Seven coefficients needed. .. math:: \alpha = e^{\left(- c_{3}^{\ln{\left (\frac{T}{T_{c,i}} \right )}} + 1\right) \left(- \frac{T c_{2}}{T_{c,i}} + c_{1}\right)} References ---------- .. [1] Chen, Zehua, and Daoyong Yang. "Optimization of the Reduced Temperature Associated with Peng–Robinson Equation of State and Soave-Redlich-Kwong Equation of State To Improve Vapor Pressure Prediction for Heavy Hydrocarbon Compounds." Journal of Chemical & Engineering Data, August 31, 2017. doi:10.1021/acs.jced.7b00496. 
''' c1, c2, c3, c4, c5, c6, c7 = self.alpha_coeffs Tc, a, omega = self.Tc, self.a, self.omega a_alpha = a*exp(c4*log((-sqrt(T/Tc) + 1)*(c5 + c6*omega + c7*omega**2) + 1)**2 + (-T/Tc + 1)*(c1 + c2*omega + c3*omega**2)) da_alpha_dT = a*(-(c1 + c2*omega + c3*omega**2)/Tc - c4*sqrt(T/Tc)*(c5 + c6*omega + c7*omega**2)*log((-sqrt(T/Tc) + 1)*(c5 + c6*omega + c7*omega**2) + 1)/(T*((-sqrt(T/Tc) + 1)*(c5 + c6*omega + c7*omega**2) + 1)))*exp(c4*log((-sqrt(T/Tc) + 1)*(c5 + c6*omega + c7*omega**2) + 1)**2 + (-T/Tc + 1)*(c1 + c2*omega + c3*omega**2)) d2a_alpha_dT2 = a*(((c1 + c2*omega + c3*omega**2)/Tc - c4*sqrt(T/Tc)*(c5 + c6*omega + c7*omega**2)*log(-(sqrt(T/Tc) - 1)*(c5 + c6*omega + c7*omega**2) + 1)/(T*((sqrt(T/Tc) - 1)*(c5 + c6*omega + c7*omega**2) - 1)))**2 - c4*(c5 + c6*omega + c7*omega**2)*((c5 + c6*omega + c7*omega**2)*log(-(sqrt(T/Tc) - 1)*(c5 + c6*omega + c7*omega**2) + 1)/(Tc*((sqrt(T/Tc) - 1)*(c5 + c6*omega + c7*omega**2) - 1)) - (c5 + c6*omega + c7*omega**2)/(Tc*((sqrt(T/Tc) - 1)*(c5 + c6*omega + c7*omega**2) - 1)) + sqrt(T/Tc)*log(-(sqrt(T/Tc) - 1)*(c5 + c6*omega + c7*omega**2) + 1)/T)/(2*T*((sqrt(T/Tc) - 1)*(c5 + c6*omega + c7*omega**2) - 1)))*exp(c4*log(-(sqrt(T/Tc) - 1)*(c5 + c6*omega + c7*omega**2) + 1)**2 - (T/Tc - 1)*(c1 + c2*omega + c3*omega**2)) return a_alpha, da_alpha_dT, d2a_alpha_dT2 def a_alpha_pure(self, T): return self.a*Chen_Yang_alpha_pure(T, self.Tc, self.omega, *self.alpha_coeffs) class TwuSRK95_a_alpha(a_alpha_base): def a_alpha_and_derivatives_pure(self, T): r'''Method to calculate :math:`a \alpha` and its first and second derivatives for the Twu alpha function. Uses the set values of `Tc`, `omega` and `a`. .. math:: \alpha = \alpha^{(0)} + \omega(\alpha^{(1)}-\alpha^{(0)}) .. 
math:: \alpha^{(i)} = T_r^{N(M-1)}\exp[L(1-T_r^{NM})] For sub-critical conditions: L0, M0, N0 = 0.141599, 0.919422, 2.496441 L1, M1, N1 = 0.500315, 0.799457, 3.291790 For supercritical conditions: L0, M0, N0 = 0.441411, 6.500018, -0.20 L1, M1, N1 = 0.032580, 1.289098, -8.0 Parameters ---------- T : float Temperature at which to calculate the values, [-] Returns ------- a_alpha : float Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa] da_alpha_dT : float Temperature derivative of coefficient calculated by EOS-specific method, [J^2/mol^2/Pa/K] d2a_alpha_dT2 : float Second temperature derivative of coefficient calculated by EOS-specific method, [J^2/mol^2/Pa/K^2] Notes ----- This method does not alter the object's state and the temperature provided can be a different than that of the object. The derivatives are somewhat long and are not described here for brevity; they are obtainable from the following SymPy expression. >>> from sympy import * # doctest:+SKIP >>> T, Tc, omega, N1, N0, M1, M0, L1, L0 = symbols('T, Tc, omega, N1, N0, M1, M0, L1, L0') # doctest:+SKIP >>> Tr = T/Tc # doctest:+SKIP >>> alpha0 = Tr**(N0*(M0-1))*exp(L0*(1-Tr**(N0*M0))) # doctest:+SKIP >>> alpha1 = Tr**(N1*(M1-1))*exp(L1*(1-Tr**(N1*M1))) # doctest:+SKIP >>> alpha = alpha0 + omega*(alpha1-alpha0) # doctest:+SKIP >>> diff(alpha, T) # doctest:+SKIP >>> diff(alpha, T, T) # doctest:+SKIP ''' return TWU_a_alpha_common(T, self.Tc, self.omega, self.a, full=True, method='SRK') def a_alpha_pure(self, T): r'''Method to calculate :math:`a \alpha` for the Twu alpha function. Uses the set values of `Tc`, `omega` and `a`. .. math:: \alpha = \alpha^{(0)} + \omega(\alpha^{(1)}-\alpha^{(0)}) .. 
math:: \alpha^{(i)} = T_r^{N(M-1)}\exp[L(1-T_r^{NM})] For sub-critical conditions: L0, M0, N0 = 0.141599, 0.919422, 2.496441 L1, M1, N1 = 0.500315, 0.799457, 3.291790 For supercritical conditions: L0, M0, N0 = 0.441411, 6.500018, -0.20 L1, M1, N1 = 0.032580, 1.289098, -8.0 Parameters ---------- T : float Temperature at which to calculate the value, [-] Returns ------- a_alpha : float Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa] Notes ----- This method does not alter the object's state and the temperature provided can be a different than that of the object. ''' return TWU_a_alpha_common(T, self.Tc, self.omega, self.a, full=False, method='SRK') def a_alphas_vectorized(self, T): Tcs, omegas, ais = self.Tcs, self.omegas, self.ais a_alphas = [TWU_a_alpha_common(T, Tcs[i], omegas[i], ais[i], full=False, method='SRK') for i in range(self.N)] if self.scalar: return a_alphas return array(a_alphas) def a_alpha_and_derivatives_vectorized(self, T): Tcs, omegas, ais = self.Tcs, self.omegas, self.ais r0, r1, r2 = [], [], [] for i in range(self.N): v0, v1, v2 = TWU_a_alpha_common(T, Tcs[i], omegas[i], ais[i], full=True, method='SRK') r0.append(v0) r1.append(v1) r2.append(v2) if self.scalar: return r0, r1, r2 return array(r0), array(r1), array(r2) class TwuPR95_a_alpha(a_alpha_base): def a_alpha_and_derivatives_pure(self, T): r'''Method to calculate :math:`a \alpha` and its first and second derivatives for the Twu alpha function. Uses the set values of `Tc`, `omega` and `a`. .. math:: \alpha = \alpha^{(0)} + \omega(\alpha^{(1)}-\alpha^{(0)}) .. math:: \alpha^{(i)} = T_r^{N(M-1)}\exp[L(1-T_r^{NM})] For sub-critical conditions: L0, M0, N0 = 0.125283, 0.911807, 1.948150; L1, M1, N1 = 0.511614, 0.784054, 2.812520 For supercritical conditions: L0, M0, N0 = 0.401219, 4.963070, -0.2; L1, M1, N1 = 0.024955, 1.248089, -8. 
Parameters ---------- T : float Temperature at which to calculate the values, [-] Returns ------- a_alpha : float Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa] da_alpha_dT : float Temperature derivative of coefficient calculated by EOS-specific method, [J^2/mol^2/Pa/K] d2a_alpha_dT2 : float Second temperature derivative of coefficient calculated by EOS-specific method, [J^2/mol^2/Pa/K^2] Notes ----- This method does not alter the object's state and the temperature provided can be a different than that of the object. The derivatives are somewhat long and are not described here for brevity; they are obtainable from the following SymPy expression. >>> from sympy import * # doctest:+SKIP >>> T, Tc, omega, N1, N0, M1, M0, L1, L0 = symbols('T, Tc, omega, N1, N0, M1, M0, L1, L0') # doctest:+SKIP >>> Tr = T/Tc # doctest:+SKIP >>> alpha0 = Tr**(N0*(M0-1))*exp(L0*(1-Tr**(N0*M0))) # doctest:+SKIP >>> alpha1 = Tr**(N1*(M1-1))*exp(L1*(1-Tr**(N1*M1))) # doctest:+SKIP >>> alpha = alpha0 + omega*(alpha1-alpha0) # doctest:+SKIP >>> diff(alpha, T) # doctest:+SKIP >>> diff(alpha, T, T) # doctest:+SKIP ''' return TWU_a_alpha_common(T, self.Tc, self.omega, self.a, full=True, method='PR') def a_alpha_pure(self, T): r'''Method to calculate :math:`a \alpha` for the Twu alpha function. Uses the set values of `Tc`, `omega` and `a`. .. math:: \alpha = \alpha^{(0)} + \omega(\alpha^{(1)}-\alpha^{(0)}) .. math:: \alpha^{(i)} = T_r^{N(M-1)}\exp[L(1-T_r^{NM})] For sub-critical conditions: L0, M0, N0 = 0.125283, 0.911807, 1.948150; L1, M1, N1 = 0.511614, 0.784054, 2.812520 For supercritical conditions: L0, M0, N0 = 0.401219, 4.963070, -0.2; L1, M1, N1 = 0.024955, 1.248089, -8. Parameters ---------- T : float Temperature at which to calculate the value, [-] Returns ------- a_alpha : float Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa] Notes ----- This method does not alter the object's state and the temperature provided can be a different than that of the object. 
''' return TWU_a_alpha_common(T, self.Tc, self.omega, self.a, full=False, method='PR') def a_alphas_vectorized(self, T): Tcs, omegas, ais = self.Tcs, self.omegas, self.ais a_alphas = [TWU_a_alpha_common(T, Tcs[i], omegas[i], ais[i], full=False, method='PR') for i in range(self.N)] if self.scalar: return a_alphas return array(a_alphas) def a_alpha_and_derivatives_vectorized(self, T): Tcs, omegas, ais = self.Tcs, self.omegas, self.ais r0, r1, r2 = [], [], [] for i in range(self.N): v0, v1, v2 = TWU_a_alpha_common(T, Tcs[i], omegas[i], ais[i], full=True, method='PR') r0.append(v0) r1.append(v1) r2.append(v2) if self.scalar: return r0, r1, r2 return array(r0), array(r1), array(r2) class Soave_1979_a_alpha(a_alpha_base): def a_alpha_and_derivatives_pure(self, T): r'''Method to calculate `a_alpha` and its first and second derivatives according to Soave (1979) [1]_. Returns `a_alpha`, `da_alpha_dT`, and `d2a_alpha_dT2`. Three coefficients are needed. .. math:: \alpha = 1 + (1 - T_r)(M + \frac{N}{T_r}) References ---------- .. [1] Soave, G. "Rigorous and Simplified Procedures for Determining the Pure-Component Parameters in the Redlich—Kwong—Soave Equation of State." Chemical Engineering Science 35, no. 8 (January 1, 1980): 1725-30. https://doi.org/10.1016/0009-2509(80)85007-X. 
''' M, N = self.alpha_coeffs#self.M, self.N Tc, a = self.Tc, self.a T_inv = 1.0/T x0 = 1.0/Tc x1 = T*x0 - 1.0 x2 = Tc*T_inv x3 = M + N*x2 x4 = N*T_inv*T_inv return (a*(1.0 - x1*x3), a*(Tc*x1*x4 - x0*x3), a*(2.0*x4*(1.0 - x1*x2))) def a_alpha_pure(self, T): M, N = self.alpha_coeffs Tc, a = self.Tc, self.a return a*Soave_1979_alpha_pure(T, self.Tc, M, N) def a_alphas_vectorized(self, T): ais, alpha_coeffs, Tcs = self.ais, self.alpha_coeffs, self.Tcs a_alphas = [] for i in range(self.N): Tr = T/Tcs[i] M, N = alpha_coeffs[i] a_alphas.append(ais[i]*(1.0 + (1.0 - Tr)*(M + N/Tr))) return a_alphas def a_alpha_and_derivatives_vectorized(self, T): ais, alpha_coeffs, Tcs = self.ais, self.alpha_coeffs, self.Tcs T_inv = 1.0/T a_alphas, da_alpha_dTs, d2a_alpha_dT2s = [], [], [] for i in range(self.N): a = ais[i] M, N = alpha_coeffs[i] x0 = 1.0/Tcs[i] x1 = T*x0 - 1.0 x2 = Tcs[i]*T_inv x3 = M + N*x2 x4 = N*T_inv*T_inv a_alphas.append(a*(1.0 - x1*x3)) da_alpha_dTs.append(a*(Tcs[i]*x1*x4 - x0*x3)) d2a_alpha_dT2s.append(a*(2.0*x4*(1.0 - x1*x2))) return a_alphas, da_alpha_dTs, d2a_alpha_dT2s a_alpha_bases = [Soave_1972_a_alpha, Heyen_a_alpha, Harmens_Knapp_a_alpha, Mathias_1983_a_alpha, Mathias_Copeman_untruncated_a_alpha, Gibbons_Laughton_a_alpha, Soave_1984_a_alpha, Yu_Lu_a_alpha, Trebble_Bishnoi_a_alpha, Melhem_a_alpha, Androulakis_a_alpha, Schwartzentruber_a_alpha, Almeida_a_alpha, Twu91_a_alpha, Soave_1993_a_alpha, Gasem_a_alpha, Coquelet_a_alpha, Haghtalab_a_alpha, Saffari_a_alpha, Chen_Yang_a_alpha, Mathias_Copeman_poly_a_alpha, TwuSRK95_a_alpha, TwuPR95_a_alpha, Soave_1979_a_alpha]
41.049961
739
0.566892
17,066
105,170
3.331361
0.053498
0.044114
0.012189
0.013509
0.816984
0.789774
0.761402
0.729161
0.699875
0.679771
0
0.089082
0.254226
105,170
2,561
740
41.06599
0.635702
0.530275
0
0.512273
0
0
0.032559
0.019417
0
0
0
0
0
1
0.105656
false
0.001067
0.003202
0.032017
0.255069
0.001067
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
c4ef799eef0d73458455dbd5bc7741eb3dcaaf42
335
py
Python
helper.py
mwaijohn/ToastyPlugin
dbc24eb3d2d987b2e84f59567901338a64e7ef03
[ "MIT" ]
null
null
null
helper.py
mwaijohn/ToastyPlugin
dbc24eb3d2d987b2e84f59567901338a64e7ef03
[ "MIT" ]
null
null
null
helper.py
mwaijohn/ToastyPlugin
dbc24eb3d2d987b2e84f59567901338a64e7ef03
[ "MIT" ]
1
2022-01-24T08:44:45.000Z
2022-01-24T08:44:45.000Z
import glob import os txtfiles = [] for file in glob.glob("/home/kingsman/Desktop/TelpoDemoSDK/app/src/main/jniLibs/arm64-v8a/*.so"): head, tail = os.path.split(file) # txtfiles.append(tail) print('<resource-file src="src/android/jnilibs/arm64-v8a/' + tail + '" target="jniLibs/arm64-v8a/' +tail + '"/>') # print(txtfiles)
37.222222
117
0.683582
47
335
4.87234
0.574468
0.157205
0.196507
0.165939
0
0
0
0
0
0
0
0.030612
0.122388
335
9
118
37.222222
0.748299
0.110448
0
0
0
0.166667
0.513514
0.445946
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0.166667
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
1
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
f2102a429204792867ca1e605016645764dd54d7
45,339
py
Python
com/vmware/nsx/cluster_client.py
vishal-12/vsphere-automation-sdk-python
9cf363971db77ea5a12928eecd5cf5170a7fcd8a
[ "MIT" ]
null
null
null
com/vmware/nsx/cluster_client.py
vishal-12/vsphere-automation-sdk-python
9cf363971db77ea5a12928eecd5cf5170a7fcd8a
[ "MIT" ]
null
null
null
com/vmware/nsx/cluster_client.py
vishal-12/vsphere-automation-sdk-python
9cf363971db77ea5a12928eecd5cf5170a7fcd8a
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- #--------------------------------------------------------------------------- # Copyright 2019 VMware, Inc. All rights reserved. # AUTO GENERATED FILE -- DO NOT MODIFY! # # vAPI stub file for package com.vmware.nsx.cluster. #--------------------------------------------------------------------------- """ """ __author__ = 'VMware, Inc.' __docformat__ = 'restructuredtext en' import sys from vmware.vapi.bindings import type from vmware.vapi.bindings.converter import TypeConverter from vmware.vapi.bindings.enum import Enum from vmware.vapi.bindings.error import VapiError from vmware.vapi.bindings.struct import VapiStruct from vmware.vapi.bindings.stub import ( ApiInterfaceStub, StubFactoryBase, VapiInterface) from vmware.vapi.bindings.common import raise_core_exception from vmware.vapi.data.validator import (UnionValidator, HasFieldsOfValidator) from vmware.vapi.exception import CoreException from vmware.vapi.lib.constants import TaskType from vmware.vapi.lib.rest import OperationRestMetadata class Backups(VapiInterface): """ """ _VAPI_SERVICE_ID = 'com.vmware.nsx.cluster.backups' """ Identifier of the service in canonical form. """ def __init__(self, config): """ :type config: :class:`vmware.vapi.bindings.stub.StubConfiguration` :param config: Configuration to be used for creating the stub. """ VapiInterface.__init__(self, config, _BackupsStub) def retrievesshfingerprint(self, remote_server_fingerprint_request, ): """ Get SHA256 fingerprint of ECDSA key of remote server. The caller should independently verify that the key is trusted. 
:type remote_server_fingerprint_request: :class:`com.vmware.nsx.model_client.RemoteServerFingerprintRequest` :param remote_server_fingerprint_request: (required) :rtype: :class:`com.vmware.nsx.model_client.RemoteServerFingerprint` :return: com.vmware.nsx.model.RemoteServerFingerprint :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable` Service Unavailable :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest` Bad Request, Precondition Failed :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError` Internal Server Error :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized` Forbidden :raise: :class:`com.vmware.vapi.std.errors_client.NotFound` Not Found """ return self._invoke('retrievesshfingerprint', { 'remote_server_fingerprint_request': remote_server_fingerprint_request, }) class Node(VapiInterface): """ """ _VAPI_SERVICE_ID = 'com.vmware.nsx.cluster.node' """ Identifier of the service in canonical form. """ def __init__(self, config): """ :type config: :class:`vmware.vapi.bindings.stub.StubConfiguration` :param config: Configuration to be used for creating the stub. """ VapiInterface.__init__(self, config, _NodeStub) def reposync(self): """ Attempts to synchronize the repository partition on nsx manager. Repository partition contains packages required for the install and upgrade of nsx components.Normally there is no need to call this API explicitely by the user. 
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable` Service Unavailable :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest` Bad Request, Precondition Failed :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError` Internal Server Error :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized` Forbidden :raise: :class:`com.vmware.vapi.std.errors_client.NotFound` Not Found """ return self._invoke('reposync', None) class Nodes(VapiInterface): """ """ CREATE_ACTION_NODE = "add_cluster_node" """ Possible value for ``action`` of method :func:`Nodes.create`. """ _VAPI_SERVICE_ID = 'com.vmware.nsx.cluster.nodes' """ Identifier of the service in canonical form. """ def __init__(self, config): """ :type config: :class:`vmware.vapi.bindings.stub.StubConfiguration` :param config: Configuration to be used for creating the stub. """ VapiInterface.__init__(self, config, _NodesStub) def create(self, add_cluster_node_spec, action, ): """ Adds a new controller node to the NSX cluster. :type add_cluster_node_spec: :class:`com.vmware.nsx.model_client.AddClusterNodeSpec` :param add_cluster_node_spec: (required) :type action: :class:`str` :param action: (required) :rtype: :class:`com.vmware.nsx.model_client.ClusterNodeConfig` :return: com.vmware.nsx.model.ClusterNodeConfig :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable` Service Unavailable :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest` Bad Request, Precondition Failed :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError` Internal Server Error :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized` Forbidden :raise: :class:`com.vmware.vapi.std.errors_client.NotFound` Not Found """ return self._invoke('create', { 'add_cluster_node_spec': add_cluster_node_spec, 'action': action, }) def delete(self, node_id, ): """ Removes the specified controller node from the NSX cluster. 
Before you can remove a controller node from the cluster, you must shut down the controller service with the \"stop service controller\" command. :type node_id: :class:`str` :param node_id: (required) :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable` Service Unavailable :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest` Bad Request, Precondition Failed :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError` Internal Server Error :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized` Forbidden :raise: :class:`com.vmware.vapi.std.errors_client.NotFound` Not Found """ return self._invoke('delete', { 'node_id': node_id, }) def get(self, node_id, ): """ Returns information about the specified NSX cluster node. :type node_id: :class:`str` :param node_id: (required) :rtype: :class:`com.vmware.nsx.model_client.ClusterNodeConfig` :return: com.vmware.nsx.model.ClusterNodeConfig :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable` Service Unavailable :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest` Bad Request, Precondition Failed :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError` Internal Server Error :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized` Forbidden :raise: :class:`com.vmware.vapi.std.errors_client.NotFound` Not Found """ return self._invoke('get', { 'node_id': node_id, }) def list(self, cursor=None, included_fields=None, page_size=None, sort_ascending=None, sort_by=None, ): """ Returns information about all NSX cluster nodes. 
:type cursor: :class:`str` or ``None`` :param cursor: Opaque cursor to be used for getting next page of records (supplied by current result page) (optional) :type included_fields: :class:`str` or ``None`` :param included_fields: Comma separated list of fields that should be included in query result (optional) :type page_size: :class:`long` or ``None`` :param page_size: Maximum number of results to return in this page (server may return fewer) (optional, default to 1000) :type sort_ascending: :class:`bool` or ``None`` :param sort_ascending: (optional) :type sort_by: :class:`str` or ``None`` :param sort_by: Field by which records are sorted (optional) :rtype: :class:`com.vmware.nsx.model_client.ClusterNodeConfigListResult` :return: com.vmware.nsx.model.ClusterNodeConfigListResult :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable` Service Unavailable :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest` Bad Request, Precondition Failed :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError` Internal Server Error :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized` Forbidden :raise: :class:`com.vmware.vapi.std.errors_client.NotFound` Not Found """ return self._invoke('list', { 'cursor': cursor, 'included_fields': included_fields, 'page_size': page_size, 'sort_ascending': sort_ascending, 'sort_by': sort_by, }) class Restore(VapiInterface): """ """ _VAPI_SERVICE_ID = 'com.vmware.nsx.cluster.restore' """ Identifier of the service in canonical form. """ def __init__(self, config): """ :type config: :class:`vmware.vapi.bindings.stub.StubConfiguration` :param config: Configuration to be used for creating the stub. """ VapiInterface.__init__(self, config, _RestoreStub) def advance(self, advance_cluster_restore_request, ): """ Advance any currently suspended restore operation. 
The operation might have been suspended because (1) the user had suspended it previously, or (2) the operation is waiting for user input, to be provided as a part of the POST request body. This operation is only valid when a GET cluster/restore/status returns a status with value SUSPENDED. Otherwise, a 409 response is returned. :type advance_cluster_restore_request: :class:`com.vmware.nsx.model_client.AdvanceClusterRestoreRequest` :param advance_cluster_restore_request: (required) :rtype: :class:`com.vmware.nsx.model_client.ClusterRestoreStatus` :return: com.vmware.nsx.model.ClusterRestoreStatus :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable` Service Unavailable :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest` Bad Request, Precondition Failed :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError` Internal Server Error :raise: :class:`com.vmware.vapi.std.errors_client.ConcurrentChange` Conflict :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized` Forbidden :raise: :class:`com.vmware.vapi.std.errors_client.NotFound` Not Found """ return self._invoke('advance', { 'advance_cluster_restore_request': advance_cluster_restore_request, }) def cancel(self): """ Cancel any currently running restore operation. If there exists a currently running step, it is allowed to finish. The system is not rolled back to the pre-restore state. This operation is only valid when a GET cluster/restore/status returns a status with value RUNNING or SUSPENDED. Otherwise, a 409 response is returned. 
:rtype: :class:`com.vmware.nsx.model_client.ClusterRestoreStatus` :return: com.vmware.nsx.model.ClusterRestoreStatus :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable` Service Unavailable :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest` Bad Request, Precondition Failed :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError` Internal Server Error :raise: :class:`com.vmware.vapi.std.errors_client.ConcurrentChange` Conflict :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized` Forbidden :raise: :class:`com.vmware.vapi.std.errors_client.NotFound` Not Found """ return self._invoke('cancel', None) def retry(self): """ Retry any currently in-progress, failed restore operation. Only the last step of the multi-step restore operation would have failed,and only that step is retried. This operation is only valid when a GET cluster/restore/status returns a status with value FAILED. Otherwise, a 409 response is returned. :rtype: :class:`com.vmware.nsx.model_client.ClusterRestoreStatus` :return: com.vmware.nsx.model.ClusterRestoreStatus :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable` Service Unavailable :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest` Bad Request, Precondition Failed :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError` Internal Server Error :raise: :class:`com.vmware.vapi.std.errors_client.ConcurrentChange` Conflict :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized` Forbidden :raise: :class:`com.vmware.vapi.std.errors_client.NotFound` Not Found """ return self._invoke('retry', None) def start(self, initiate_cluster_restore_request, ): """ Start the restore of an NSX cluster, from some previously backed-up configuration. This operation is only valid when a GET cluster/restore/status returns a status with value NOT_STARTED. Otherwise, a 409 response is returned. 
:type initiate_cluster_restore_request: :class:`com.vmware.nsx.model_client.InitiateClusterRestoreRequest` :param initiate_cluster_restore_request: (required) :rtype: :class:`com.vmware.nsx.model_client.ClusterRestoreStatus` :return: com.vmware.nsx.model.ClusterRestoreStatus :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable` Service Unavailable :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest` Bad Request, Precondition Failed :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError` Internal Server Error :raise: :class:`com.vmware.vapi.std.errors_client.ConcurrentChange` Conflict :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized` Forbidden :raise: :class:`com.vmware.vapi.std.errors_client.NotFound` Not Found """ return self._invoke('start', { 'initiate_cluster_restore_request': initiate_cluster_restore_request, }) def suspend(self): """ Suspend any currently running restore operation. The restore operation is made up of a number of steps. When this call is issued, any currently running step is allowed to finish (successfully or with errors), and the next step (and therefore the entire restore operation) is suspended until a subsequent resume or cancel call is issued. This operation is only valid when a GET cluster/restore/status returns a status with value RUNNING. Otherwise, a 409 response is returned. 
:rtype: :class:`com.vmware.nsx.model_client.ClusterRestoreStatus` :return: com.vmware.nsx.model.ClusterRestoreStatus :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable` Service Unavailable :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest` Bad Request, Precondition Failed :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError` Internal Server Error :raise: :class:`com.vmware.vapi.std.errors_client.ConcurrentChange` Conflict :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized` Forbidden :raise: :class:`com.vmware.vapi.std.errors_client.NotFound` Not Found """ return self._invoke('suspend', None) class Status(VapiInterface): """ """ GET_SOURCE_REALTIME = "realtime" """ Possible value for ``source`` of method :func:`Status.get`. """ GET_SOURCE_CACHED = "cached" """ Possible value for ``source`` of method :func:`Status.get`. """ _VAPI_SERVICE_ID = 'com.vmware.nsx.cluster.status' """ Identifier of the service in canonical form. """ def __init__(self, config): """ :type config: :class:`vmware.vapi.bindings.stub.StubConfiguration` :param config: Configuration to be used for creating the stub. """ VapiInterface.__init__(self, config, _StatusStub) def get(self, source=None, ): """ Returns status information for the NSX cluster control role and management role. :type source: :class:`str` or ``None`` :param source: Data source type. 
(optional) :rtype: :class:`com.vmware.nsx.model_client.ClusterStatus` :return: com.vmware.nsx.model.ClusterStatus :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable` Service Unavailable :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest` Bad Request, Precondition Failed :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError` Internal Server Error :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized` Forbidden :raise: :class:`com.vmware.vapi.std.errors_client.NotFound` Not Found """ return self._invoke('get', { 'source': source, }) class _BackupsStub(ApiInterfaceStub): def __init__(self, config): # properties for retrievesshfingerprint operation retrievesshfingerprint_input_type = type.StructType('operation-input', { 'remote_server_fingerprint_request': type.ReferenceType('com.vmware.nsx.model_client', 'RemoteServerFingerprintRequest'), }) retrievesshfingerprint_error_dict = { 'com.vmware.vapi.std.errors.service_unavailable': type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'), 'com.vmware.vapi.std.errors.invalid_request': type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'), 'com.vmware.vapi.std.errors.internal_server_error': type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'), 'com.vmware.vapi.std.errors.unauthorized': type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'), 'com.vmware.vapi.std.errors.not_found': type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'), } retrievesshfingerprint_input_value_validator_list = [ ] retrievesshfingerprint_output_validator_list = [ ] retrievesshfingerprint_rest_metadata = OperationRestMetadata( http_method='POST', url_template='/api/v1/cluster/backups?action=retrieve_ssh_fingerprint', request_body_parameter='remote_server_fingerprint_request', path_variables={ }, query_parameters={ }, content_type='application/json' ) operations = { 'retrievesshfingerprint': { 
'input_type': retrievesshfingerprint_input_type, 'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'RemoteServerFingerprint'), 'errors': retrievesshfingerprint_error_dict, 'input_value_validator_list': retrievesshfingerprint_input_value_validator_list, 'output_validator_list': retrievesshfingerprint_output_validator_list, 'task_type': TaskType.NONE, }, } rest_metadata = { 'retrievesshfingerprint': retrievesshfingerprint_rest_metadata, } ApiInterfaceStub.__init__( self, iface_name='com.vmware.nsx.cluster.backups', config=config, operations=operations, rest_metadata=rest_metadata, is_vapi_rest=False) class _NodeStub(ApiInterfaceStub): def __init__(self, config): # properties for reposync operation reposync_input_type = type.StructType('operation-input', {}) reposync_error_dict = { 'com.vmware.vapi.std.errors.service_unavailable': type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'), 'com.vmware.vapi.std.errors.invalid_request': type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'), 'com.vmware.vapi.std.errors.internal_server_error': type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'), 'com.vmware.vapi.std.errors.unauthorized': type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'), 'com.vmware.vapi.std.errors.not_found': type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'), } reposync_input_value_validator_list = [ ] reposync_output_validator_list = [ ] reposync_rest_metadata = OperationRestMetadata( http_method='POST', url_template='/api/v1/cluster/node?action=repo_sync', path_variables={ }, query_parameters={ }, content_type='application/json' ) operations = { 'reposync': { 'input_type': reposync_input_type, 'output_type': type.VoidType(), 'errors': reposync_error_dict, 'input_value_validator_list': reposync_input_value_validator_list, 'output_validator_list': reposync_output_validator_list, 'task_type': TaskType.NONE, }, } rest_metadata = { 
'reposync': reposync_rest_metadata, } ApiInterfaceStub.__init__( self, iface_name='com.vmware.nsx.cluster.node', config=config, operations=operations, rest_metadata=rest_metadata, is_vapi_rest=False) class _NodesStub(ApiInterfaceStub): def __init__(self, config): # properties for create operation create_input_type = type.StructType('operation-input', { 'add_cluster_node_spec': type.ReferenceType('com.vmware.nsx.model_client', 'AddClusterNodeSpec'), 'action': type.StringType(), }) create_error_dict = { 'com.vmware.vapi.std.errors.service_unavailable': type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'), 'com.vmware.vapi.std.errors.invalid_request': type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'), 'com.vmware.vapi.std.errors.internal_server_error': type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'), 'com.vmware.vapi.std.errors.unauthorized': type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'), 'com.vmware.vapi.std.errors.not_found': type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'), } create_input_value_validator_list = [ ] create_output_validator_list = [ ] create_rest_metadata = OperationRestMetadata( http_method='POST', url_template='/api/v1/cluster/nodes', request_body_parameter='add_cluster_node_spec', path_variables={ }, query_parameters={ 'action': 'action', }, content_type='application/json' ) # properties for delete operation delete_input_type = type.StructType('operation-input', { 'node_id': type.StringType(), }) delete_error_dict = { 'com.vmware.vapi.std.errors.service_unavailable': type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'), 'com.vmware.vapi.std.errors.invalid_request': type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'), 'com.vmware.vapi.std.errors.internal_server_error': type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'), 
'com.vmware.vapi.std.errors.unauthorized': type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'), 'com.vmware.vapi.std.errors.not_found': type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'), } delete_input_value_validator_list = [ ] delete_output_validator_list = [ ] delete_rest_metadata = OperationRestMetadata( http_method='DELETE', url_template='/api/v1/cluster/nodes/{node-id}', path_variables={ 'node_id': 'node-id', }, query_parameters={ }, content_type='application/json' ) # properties for get operation get_input_type = type.StructType('operation-input', { 'node_id': type.StringType(), }) get_error_dict = { 'com.vmware.vapi.std.errors.service_unavailable': type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'), 'com.vmware.vapi.std.errors.invalid_request': type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'), 'com.vmware.vapi.std.errors.internal_server_error': type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'), 'com.vmware.vapi.std.errors.unauthorized': type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'), 'com.vmware.vapi.std.errors.not_found': type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'), } get_input_value_validator_list = [ ] get_output_validator_list = [ ] get_rest_metadata = OperationRestMetadata( http_method='GET', url_template='/api/v1/cluster/nodes/{node-id}', path_variables={ 'node_id': 'node-id', }, query_parameters={ }, content_type='application/json' ) # properties for list operation list_input_type = type.StructType('operation-input', { 'cursor': type.OptionalType(type.StringType()), 'included_fields': type.OptionalType(type.StringType()), 'page_size': type.OptionalType(type.IntegerType()), 'sort_ascending': type.OptionalType(type.BooleanType()), 'sort_by': type.OptionalType(type.StringType()), }) list_error_dict = { 'com.vmware.vapi.std.errors.service_unavailable': 
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'), 'com.vmware.vapi.std.errors.invalid_request': type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'), 'com.vmware.vapi.std.errors.internal_server_error': type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'), 'com.vmware.vapi.std.errors.unauthorized': type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'), 'com.vmware.vapi.std.errors.not_found': type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'), } list_input_value_validator_list = [ ] list_output_validator_list = [ ] list_rest_metadata = OperationRestMetadata( http_method='GET', url_template='/api/v1/cluster/nodes', path_variables={ }, query_parameters={ 'cursor': 'cursor', 'included_fields': 'included_fields', 'page_size': 'page_size', 'sort_ascending': 'sort_ascending', 'sort_by': 'sort_by', }, content_type='application/json' ) operations = { 'create': { 'input_type': create_input_type, 'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'ClusterNodeConfig'), 'errors': create_error_dict, 'input_value_validator_list': create_input_value_validator_list, 'output_validator_list': create_output_validator_list, 'task_type': TaskType.NONE, }, 'delete': { 'input_type': delete_input_type, 'output_type': type.VoidType(), 'errors': delete_error_dict, 'input_value_validator_list': delete_input_value_validator_list, 'output_validator_list': delete_output_validator_list, 'task_type': TaskType.NONE, }, 'get': { 'input_type': get_input_type, 'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'ClusterNodeConfig'), 'errors': get_error_dict, 'input_value_validator_list': get_input_value_validator_list, 'output_validator_list': get_output_validator_list, 'task_type': TaskType.NONE, }, 'list': { 'input_type': list_input_type, 'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'ClusterNodeConfigListResult'), 'errors': list_error_dict, 
'input_value_validator_list': list_input_value_validator_list, 'output_validator_list': list_output_validator_list, 'task_type': TaskType.NONE, }, } rest_metadata = { 'create': create_rest_metadata, 'delete': delete_rest_metadata, 'get': get_rest_metadata, 'list': list_rest_metadata, } ApiInterfaceStub.__init__( self, iface_name='com.vmware.nsx.cluster.nodes', config=config, operations=operations, rest_metadata=rest_metadata, is_vapi_rest=False) class _RestoreStub(ApiInterfaceStub): def __init__(self, config): # properties for advance operation advance_input_type = type.StructType('operation-input', { 'advance_cluster_restore_request': type.ReferenceType('com.vmware.nsx.model_client', 'AdvanceClusterRestoreRequest'), }) advance_error_dict = { 'com.vmware.vapi.std.errors.service_unavailable': type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'), 'com.vmware.vapi.std.errors.invalid_request': type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'), 'com.vmware.vapi.std.errors.internal_server_error': type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'), 'com.vmware.vapi.std.errors.concurrent_change': type.ReferenceType('com.vmware.vapi.std.errors_client', 'ConcurrentChange'), 'com.vmware.vapi.std.errors.unauthorized': type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'), 'com.vmware.vapi.std.errors.not_found': type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'), } advance_input_value_validator_list = [ ] advance_output_validator_list = [ ] advance_rest_metadata = OperationRestMetadata( http_method='POST', url_template='/api/v1/cluster/restore?action=advance', request_body_parameter='advance_cluster_restore_request', path_variables={ }, query_parameters={ }, content_type='application/json' ) # properties for cancel operation cancel_input_type = type.StructType('operation-input', {}) cancel_error_dict = { 'com.vmware.vapi.std.errors.service_unavailable': 
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'), 'com.vmware.vapi.std.errors.invalid_request': type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'), 'com.vmware.vapi.std.errors.internal_server_error': type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'), 'com.vmware.vapi.std.errors.concurrent_change': type.ReferenceType('com.vmware.vapi.std.errors_client', 'ConcurrentChange'), 'com.vmware.vapi.std.errors.unauthorized': type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'), 'com.vmware.vapi.std.errors.not_found': type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'), } cancel_input_value_validator_list = [ ] cancel_output_validator_list = [ ] cancel_rest_metadata = OperationRestMetadata( http_method='POST', url_template='/api/v1/cluster/restore?action=cancel', path_variables={ }, query_parameters={ }, content_type='application/json' ) # properties for retry operation retry_input_type = type.StructType('operation-input', {}) retry_error_dict = { 'com.vmware.vapi.std.errors.service_unavailable': type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'), 'com.vmware.vapi.std.errors.invalid_request': type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'), 'com.vmware.vapi.std.errors.internal_server_error': type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'), 'com.vmware.vapi.std.errors.concurrent_change': type.ReferenceType('com.vmware.vapi.std.errors_client', 'ConcurrentChange'), 'com.vmware.vapi.std.errors.unauthorized': type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'), 'com.vmware.vapi.std.errors.not_found': type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'), } retry_input_value_validator_list = [ ] retry_output_validator_list = [ ] retry_rest_metadata = OperationRestMetadata( http_method='POST', url_template='/api/v1/cluster/restore?action=retry', 
path_variables={ }, query_parameters={ }, content_type='application/json' ) # properties for start operation start_input_type = type.StructType('operation-input', { 'initiate_cluster_restore_request': type.ReferenceType('com.vmware.nsx.model_client', 'InitiateClusterRestoreRequest'), }) start_error_dict = { 'com.vmware.vapi.std.errors.service_unavailable': type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'), 'com.vmware.vapi.std.errors.invalid_request': type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'), 'com.vmware.vapi.std.errors.internal_server_error': type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'), 'com.vmware.vapi.std.errors.concurrent_change': type.ReferenceType('com.vmware.vapi.std.errors_client', 'ConcurrentChange'), 'com.vmware.vapi.std.errors.unauthorized': type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'), 'com.vmware.vapi.std.errors.not_found': type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'), } start_input_value_validator_list = [ ] start_output_validator_list = [ ] start_rest_metadata = OperationRestMetadata( http_method='POST', url_template='/api/v1/cluster/restore?action=start', request_body_parameter='initiate_cluster_restore_request', path_variables={ }, query_parameters={ }, content_type='application/json' ) # properties for suspend operation suspend_input_type = type.StructType('operation-input', {}) suspend_error_dict = { 'com.vmware.vapi.std.errors.service_unavailable': type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'), 'com.vmware.vapi.std.errors.invalid_request': type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'), 'com.vmware.vapi.std.errors.internal_server_error': type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'), 'com.vmware.vapi.std.errors.concurrent_change': type.ReferenceType('com.vmware.vapi.std.errors_client', 'ConcurrentChange'), 
'com.vmware.vapi.std.errors.unauthorized': type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'), 'com.vmware.vapi.std.errors.not_found': type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'), } suspend_input_value_validator_list = [ ] suspend_output_validator_list = [ ] suspend_rest_metadata = OperationRestMetadata( http_method='POST', url_template='/api/v1/cluster/restore?action=suspend', path_variables={ }, query_parameters={ }, content_type='application/json' ) operations = { 'advance': { 'input_type': advance_input_type, 'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'ClusterRestoreStatus'), 'errors': advance_error_dict, 'input_value_validator_list': advance_input_value_validator_list, 'output_validator_list': advance_output_validator_list, 'task_type': TaskType.NONE, }, 'cancel': { 'input_type': cancel_input_type, 'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'ClusterRestoreStatus'), 'errors': cancel_error_dict, 'input_value_validator_list': cancel_input_value_validator_list, 'output_validator_list': cancel_output_validator_list, 'task_type': TaskType.NONE, }, 'retry': { 'input_type': retry_input_type, 'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'ClusterRestoreStatus'), 'errors': retry_error_dict, 'input_value_validator_list': retry_input_value_validator_list, 'output_validator_list': retry_output_validator_list, 'task_type': TaskType.NONE, }, 'start': { 'input_type': start_input_type, 'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'ClusterRestoreStatus'), 'errors': start_error_dict, 'input_value_validator_list': start_input_value_validator_list, 'output_validator_list': start_output_validator_list, 'task_type': TaskType.NONE, }, 'suspend': { 'input_type': suspend_input_type, 'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'ClusterRestoreStatus'), 'errors': suspend_error_dict, 'input_value_validator_list': 
suspend_input_value_validator_list, 'output_validator_list': suspend_output_validator_list, 'task_type': TaskType.NONE, }, } rest_metadata = { 'advance': advance_rest_metadata, 'cancel': cancel_rest_metadata, 'retry': retry_rest_metadata, 'start': start_rest_metadata, 'suspend': suspend_rest_metadata, } ApiInterfaceStub.__init__( self, iface_name='com.vmware.nsx.cluster.restore', config=config, operations=operations, rest_metadata=rest_metadata, is_vapi_rest=False) class _StatusStub(ApiInterfaceStub): def __init__(self, config): # properties for get operation get_input_type = type.StructType('operation-input', { 'source': type.OptionalType(type.StringType()), }) get_error_dict = { 'com.vmware.vapi.std.errors.service_unavailable': type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'), 'com.vmware.vapi.std.errors.invalid_request': type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'), 'com.vmware.vapi.std.errors.internal_server_error': type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'), 'com.vmware.vapi.std.errors.unauthorized': type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'), 'com.vmware.vapi.std.errors.not_found': type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'), } get_input_value_validator_list = [ ] get_output_validator_list = [ ] get_rest_metadata = OperationRestMetadata( http_method='GET', url_template='/api/v1/cluster/status', path_variables={ }, query_parameters={ 'source': 'source', }, content_type='application/json' ) operations = { 'get': { 'input_type': get_input_type, 'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'ClusterStatus'), 'errors': get_error_dict, 'input_value_validator_list': get_input_value_validator_list, 'output_validator_list': get_output_validator_list, 'task_type': TaskType.NONE, }, } rest_metadata = { 'get': get_rest_metadata, } ApiInterfaceStub.__init__( self, iface_name='com.vmware.nsx.cluster.status', 
config=config, operations=operations, rest_metadata=rest_metadata, is_vapi_rest=False) class StubFactory(StubFactoryBase): _attrs = { 'Backups': Backups, 'Node': Node, 'Nodes': Nodes, 'Restore': Restore, 'Status': Status, 'backups': 'com.vmware.nsx.cluster.backups_client.StubFactory', 'nodes': 'com.vmware.nsx.cluster.nodes_client.StubFactory', 'restore': 'com.vmware.nsx.cluster.restore_client.StubFactory', }
43.469799
133
0.612938
4,510
45,339
5.93969
0.07184
0.082985
0.094632
0.11647
0.80103
0.766463
0.707892
0.664141
0.654286
0.627669
0
0.001255
0.279693
45,339
1,042
134
43.511516
0.819003
0.274598
0
0.463108
1
0
0.324147
0.239744
0
0
0
0
0
1
0.034537
false
0
0.018838
0
0.103611
0.029827
0
0
0
null
0
0
0
1
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
f21df7a5999609266596fa1f6733e207b1d490da
530
py
Python
__init__.py
Zadigo/Emails
c8577c56749fedc87c72cb19d1a764b2684da94a
[ "MIT" ]
null
null
null
__init__.py
Zadigo/Emails
c8577c56749fedc87c72cb19d1a764b2684da94a
[ "MIT" ]
null
null
null
__init__.py
Zadigo/Emails
c8577c56749fedc87c72cb19d1a764b2684da94a
[ "MIT" ]
null
null
null
# from zemailer.app.core import Settings, initialized_settings # from zemailer.app.core.mixins.patterns import PatternsMixin # from zemailer.app.core.sender import SendEmail, SendEmailWithAttachment # from zemailer.app.core.servers import Gmail, Outlook # from zemailer.app.patterns import schools # # from zemailer.app.patterns.constructor import NameConstructor # from zemailer.app.patterns.patterns import BasicNamePatterns, NamePatterns # from zemailer.core import settings # from zemailer.core.settings import configuration
48.181818
76
0.833962
64
530
6.890625
0.34375
0.244898
0.238095
0.172336
0
0
0
0
0
0
0
0
0.096226
530
10
77
53
0.920668
0.960377
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
1ef0a37a3c3fe2a144a5f52beda584047a7cfc93
148
py
Python
pathmap/test/test_getmap.py
nimral/pathmap
2d4de068fe1e21cef2ebb221bbeb2b2107bd8a32
[ "MIT" ]
null
null
null
pathmap/test/test_getmap.py
nimral/pathmap
2d4de068fe1e21cef2ebb221bbeb2b2107bd8a32
[ "MIT" ]
null
null
null
pathmap/test/test_getmap.py
nimral/pathmap
2d4de068fe1e21cef2ebb221bbeb2b2107bd8a32
[ "MIT" ]
null
null
null
import unittest from .. getmap import MapDownloader class TestGetmap(unittest.TestCase): pass if __name__ == '__main__': unittest.main()
14.8
36
0.72973
16
148
6.25
0.75
0
0
0
0
0
0
0
0
0
0
0
0.175676
148
9
37
16.444444
0.819672
0
0
0
0
0
0.054054
0
0
0
0
0
0
1
0
true
0.166667
0.333333
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
0
0
0
5
1ef4ab01bdd991f0805d6af344bd9c2c933191d9
56
py
Python
js_extractor/__init__.py
bjarneschroeder/js_extractor
ab5a1cd1f12ebdbb3cc477747a7e65f7360fdd1c
[ "MIT" ]
null
null
null
js_extractor/__init__.py
bjarneschroeder/js_extractor
ab5a1cd1f12ebdbb3cc477747a7e65f7360fdd1c
[ "MIT" ]
null
null
null
js_extractor/__init__.py
bjarneschroeder/js_extractor
ab5a1cd1f12ebdbb3cc477747a7e65f7360fdd1c
[ "MIT" ]
null
null
null
from js_extractor.js_extractor import extract_js_content
56
56
0.928571
9
56
5.333333
0.666667
0.458333
0
0
0
0
0
0
0
0
0
0
0.053571
56
1
56
56
0.90566
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
480eaa333dbd1f7764c03dd983e5b6242aa66156
668
py
Python
example/test/T3_fengche.py
Michael8968/skulpt
15956a60398fac92ee1dab25bf661ffc003b2eaf
[ "MIT" ]
2
2021-12-18T06:34:26.000Z
2022-01-05T05:08:47.000Z
example/test/T3_fengche.py
Michael8968/skulpt
15956a60398fac92ee1dab25bf661ffc003b2eaf
[ "MIT" ]
null
null
null
example/test/T3_fengche.py
Michael8968/skulpt
15956a60398fac92ee1dab25bf661ffc003b2eaf
[ "MIT" ]
null
null
null
import turtle turtle.mode("logo") turtle.shape("turtle") # 让画笔变成海龟(turtle) turtle.hideturtle() # 隐藏画笔 turtle.pencolor("orange") turtle.pensize(4) # 第1个风车扇叶 turtle.forward(100) turtle.right(120) turtle.forward(100) turtle.right(120) turtle.forward(100) turtle.right(120) turtle.right(120) # 第2个风车扇叶 turtle.forward(100) turtle.right(120) turtle.forward(100) turtle.right(120) turtle.forward(100) turtle.right(120) turtle.right(120) # 第3个风车扇叶 turtle.forward(100) turtle.right(120) turtle.forward(100) turtle.right(120) turtle.forward(100) turtle.right(120) turtle.right(120) # 风车柄 turtle.right(180) turtle.forward(200) turtle.done()
13.36
49
0.727545
94
668
5.170213
0.244681
0.294239
0.345679
0.407407
0.641975
0.641975
0.641975
0.641975
0.641975
0.641975
0
0.124361
0.121257
668
49
50
13.632653
0.703578
0.071856
0
0.7
0
0
0.026101
0
0
0
0
0
0
1
0
true
0
0.033333
0
0.033333
0
0
0
0
null
1
1
1
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
487e97ec59b12a677036c6e9f07871d6a1432680
65
py
Python
homedisplay/control_printer/__init__.py
ojarva/home-info-display
873d022308732baff94d0dc2381cf9dc7dce23b7
[ "BSD-3-Clause" ]
1
2016-11-28T04:35:06.000Z
2016-11-28T04:35:06.000Z
homedisplay/control_printer/__init__.py
ojarva/home-info-display
873d022308732baff94d0dc2381cf9dc7dce23b7
[ "BSD-3-Clause" ]
160
2015-01-01T20:59:29.000Z
2016-04-25T13:36:52.000Z
homedisplay/control_printer/__init__.py
ojarva/home-info-display
873d022308732baff94d0dc2381cf9dc7dce23b7
[ "BSD-3-Clause" ]
1
2015-02-25T21:24:01.000Z
2015-02-25T21:24:01.000Z
default_app_config = 'control_printer.apps.ControlPrinterConfig'
32.5
64
0.876923
7
65
7.714286
1
0
0
0
0
0
0
0
0
0
0
0
0.046154
65
1
65
65
0.870968
0
0
0
0
0
0.630769
0.630769
0
0
0
0
0
1
0
false
0
0
0
0
1
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
1
0
5
6f9e3a639bb6a98ade3dabe68659b356850e6230
152
py
Python
App/Script/train.py
RashikaKarki/GoodNews
c28664d20f2ee93078df327c502260b16fb2e17f
[ "Apache-2.0" ]
null
null
null
App/Script/train.py
RashikaKarki/GoodNews
c28664d20f2ee93078df327c502260b16fb2e17f
[ "Apache-2.0" ]
null
null
null
App/Script/train.py
RashikaKarki/GoodNews
c28664d20f2ee93078df327c502260b16fb2e17f
[ "Apache-2.0" ]
null
null
null
import pickle def train_using_logistic_regression(data): LR = pickle.load(open("Model/logistic_regression.pkl", "rb")) return LR.predict(data)
30.4
66
0.75
21
152
5.238095
0.761905
0.327273
0
0
0
0
0
0
0
0
0
0
0.125
152
5
67
30.4
0.827068
0
0
0
0
0
0.202614
0.189542
0
0
0
0
0
1
0.25
false
0
0.25
0
0.75
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
5
6fa35a962fa6c908416cd95bd0ba52e3b99e13dc
1,174
py
Python
smot/__init__.py
btski/smot
adee256c63df3ca1ceb34125e60b22a5bb24bf55
[ "MIT" ]
4
2021-12-09T17:14:08.000Z
2022-02-16T19:09:53.000Z
smot/__init__.py
btski/smot
adee256c63df3ca1ceb34125e60b22a5bb24bf55
[ "MIT" ]
2
2022-02-15T14:09:59.000Z
2022-02-21T20:07:45.000Z
smot/__init__.py
btski/smot
adee256c63df3ca1ceb34125e60b22a5bb24bf55
[ "MIT" ]
3
2022-01-24T12:23:31.000Z
2022-02-16T18:40:55.000Z
from smot.algorithm import ( treemap, treefold, treecut, treepull, treepush, tips, clean, factorByField, factorByCapture, factorByTable, isMonophyletic, imputeMonophyleticFactors, imputePatristicFactors, getLeftmost, sampleN, sampleRandom, sampleMonophyletic, sampleParaphyletic, sampleBalanced, colorTree, colorMono, colorPara, filterMono, ) from smot.parser import read_file, read_text from smot.format import newick, nexus from smot.classes import ( makeTree, makeNodeData, makeNode, ) __all__ = [ "treemap", "treefold", "treecut", "treepull", "treepush", "tips", "clean", "factorByField", "factorByCapture", "factorByTable", "isMonophyletic", "imputeMonophyleticFactors", "imputePatristicFactors", "getLeftmost", "sampleN", "sampleRandom", "sampleMonophyletic", "sampleParaphyletic", "sampleBalanced", "colorTree", "colorMono", "colorPara", "filterMono", "read_file", "read_text", "newick", "nexus", "makeTree", "makeNodeData", "makeNode", ]
17.014493
44
0.627768
81
1,174
9
0.481481
0.043896
0.060357
0.082305
0.729767
0.729767
0.729767
0.729767
0.729767
0.729767
0
0
0.262351
1,174
68
45
17.264706
0.841801
0
0
0
0
0
0.275128
0.040034
0
0
0
0
0
1
0
false
0
0.0625
0
0.0625
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
6fba94cc8816aea5065a22666f3602e71e54b86e
330
py
Python
utils/blur.py
ariaamadeus/Image-Processing
4b9afd5f09e9cc89e599274b1ce03db3e6ec4d36
[ "MIT" ]
1
2022-03-29T11:22:10.000Z
2022-03-29T11:22:10.000Z
utils/blur.py
ariaamadeus/Image-Processing
4b9afd5f09e9cc89e599274b1ce03db3e6ec4d36
[ "MIT" ]
null
null
null
utils/blur.py
ariaamadeus/Image-Processing
4b9afd5f09e9cc89e599274b1ce03db3e6ec4d36
[ "MIT" ]
null
null
null
import os import cv2 import numpy as np def blur(img, kSize=7): return cv2.blur(img,(kSize,kSize)).astype(np.uint8) def gauss(img, kSize=7): return cv2.GaussianBlur(img, (kSize,kSize),0).astype(np.uint8) def median(img, kSize=7): return cv2.medianBlur(img, kSize).astype(np.uint8) __all__ = ['blur,gauss,median']
20.625
66
0.70303
54
330
4.222222
0.37037
0.210526
0.118421
0.197368
0.236842
0
0
0
0
0
0
0.038596
0.136364
330
15
67
22
0.761404
0
0
0
0
0
0.051515
0
0
0
0
0
0
1
0.3
false
0
0.3
0.3
0.9
0
0
0
0
null
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
6fe43ade088f6f1b9ca3845df2e4fdfa286774e6
159
py
Python
core/views/__init__.py
azdkj532/ntu-vote-auth-server
a4ed7caad6ec1cda4ad072f52a31725679b03be8
[ "Apache-2.0" ]
null
null
null
core/views/__init__.py
azdkj532/ntu-vote-auth-server
a4ed7caad6ec1cda4ad072f52a31725679b03be8
[ "Apache-2.0" ]
6
2016-12-07T06:39:47.000Z
2018-11-01T21:12:29.000Z
core/views/__init__.py
azdkj532/ntu-vote-auth-server
a4ed7caad6ec1cda4ad072f52a31725679b03be8
[ "Apache-2.0" ]
null
null
null
# Views from .authenticate import authenticate from .confirm import confirm from .index import index from .report import report from .complete import complete
22.714286
38
0.823899
21
159
6.238095
0.380952
0
0
0
0
0
0
0
0
0
0
0
0.138365
159
6
39
26.5
0.956204
0.031447
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
d25a1ef9954330f2eb834c5aed8fe1db57f175ac
2,993
py
Python
sdk/python/pulumi_azure_native/netapp/__init__.py
sebtelko/pulumi-azure-native
711ec021b5c73da05611c56c8a35adb0ce3244e4
[ "Apache-2.0" ]
null
null
null
sdk/python/pulumi_azure_native/netapp/__init__.py
sebtelko/pulumi-azure-native
711ec021b5c73da05611c56c8a35adb0ce3244e4
[ "Apache-2.0" ]
null
null
null
sdk/python/pulumi_azure_native/netapp/__init__.py
sebtelko/pulumi-azure-native
711ec021b5c73da05611c56c8a35adb0ce3244e4
[ "Apache-2.0" ]
null
null
null
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** from .. import _utilities import typing # Export this package's modules as members: from ._enums import * from .account import * from .backup import * from .backup_policy import * from .get_account import * from .get_backup import * from .get_backup_policy import * from .get_pool import * from .get_snapshot import * from .get_snapshot_policy import * from .get_volume import * from .pool import * from .snapshot import * from .snapshot_policy import * from .volume import * from ._inputs import * from . import outputs # Make subpackages available: if typing.TYPE_CHECKING: import pulumi_azure_native.netapp.v20170815 as v20170815 import pulumi_azure_native.netapp.v20190501 as v20190501 import pulumi_azure_native.netapp.v20190601 as v20190601 import pulumi_azure_native.netapp.v20190701 as v20190701 import pulumi_azure_native.netapp.v20190801 as v20190801 import pulumi_azure_native.netapp.v20191001 as v20191001 import pulumi_azure_native.netapp.v20191101 as v20191101 import pulumi_azure_native.netapp.v20200201 as v20200201 import pulumi_azure_native.netapp.v20200301 as v20200301 import pulumi_azure_native.netapp.v20200501 as v20200501 import pulumi_azure_native.netapp.v20200601 as v20200601 import pulumi_azure_native.netapp.v20200701 as v20200701 import pulumi_azure_native.netapp.v20200801 as v20200801 import pulumi_azure_native.netapp.v20200901 as v20200901 import pulumi_azure_native.netapp.v20201101 as v20201101 import pulumi_azure_native.netapp.v20201201 as v20201201 else: v20170815 = _utilities.lazy_import('pulumi_azure_native.netapp.v20170815') v20190501 = _utilities.lazy_import('pulumi_azure_native.netapp.v20190501') v20190601 = _utilities.lazy_import('pulumi_azure_native.netapp.v20190601') v20190701 = _utilities.lazy_import('pulumi_azure_native.netapp.v20190701') v20190801 = 
_utilities.lazy_import('pulumi_azure_native.netapp.v20190801') v20191001 = _utilities.lazy_import('pulumi_azure_native.netapp.v20191001') v20191101 = _utilities.lazy_import('pulumi_azure_native.netapp.v20191101') v20200201 = _utilities.lazy_import('pulumi_azure_native.netapp.v20200201') v20200301 = _utilities.lazy_import('pulumi_azure_native.netapp.v20200301') v20200501 = _utilities.lazy_import('pulumi_azure_native.netapp.v20200501') v20200601 = _utilities.lazy_import('pulumi_azure_native.netapp.v20200601') v20200701 = _utilities.lazy_import('pulumi_azure_native.netapp.v20200701') v20200801 = _utilities.lazy_import('pulumi_azure_native.netapp.v20200801') v20200901 = _utilities.lazy_import('pulumi_azure_native.netapp.v20200901') v20201101 = _utilities.lazy_import('pulumi_azure_native.netapp.v20201101') v20201201 = _utilities.lazy_import('pulumi_azure_native.netapp.v20201201')
48.274194
80
0.803876
382
2,993
6.007853
0.188482
0.16732
0.237037
0.320697
0.642266
0.620479
0.29281
0
0
0
0
0.195057
0.121283
2,993
61
81
49.065574
0.677567
0.07718
0
0
1
0
0.209074
0.209074
0
0
0
0
0
1
0
false
0
0.962264
0
0.962264
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
d28318c30927c4e5f0b2066e6c29f5085efe5604
135
py
Python
aboutus/views.py
maxalmina/kudo
a089acbd359b78d344f3c5c6a695f4034c510915
[ "MIT" ]
null
null
null
aboutus/views.py
maxalmina/kudo
a089acbd359b78d344f3c5c6a695f4034c510915
[ "MIT" ]
null
null
null
aboutus/views.py
maxalmina/kudo
a089acbd359b78d344f3c5c6a695f4034c510915
[ "MIT" ]
null
null
null
from django.shortcuts import render # Create your views here. def labView(request): return render(request, 'aboutus/aboutus.html')
27
50
0.77037
18
135
5.777778
0.833333
0
0
0
0
0
0
0
0
0
0
0
0.133333
135
5
50
27
0.888889
0.17037
0
0
0
0
0.18018
0
0
0
0
0
0
1
0.333333
false
0
0.333333
0.333333
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
1
0
0
0
5
962d08d853d92ccffda2bace4613dafea5a285a1
23,145
py
Python
sdk/python/pulumi_google_native/networkservices/v1beta1/endpoint_policy.py
AaronFriel/pulumi-google-native
75d1cda425e33d4610348972cd70bddf35f1770d
[ "Apache-2.0" ]
44
2021-04-18T23:00:48.000Z
2022-02-14T17:43:15.000Z
sdk/python/pulumi_google_native/networkservices/v1beta1/endpoint_policy.py
AaronFriel/pulumi-google-native
75d1cda425e33d4610348972cd70bddf35f1770d
[ "Apache-2.0" ]
354
2021-04-16T16:48:39.000Z
2022-03-31T17:16:39.000Z
sdk/python/pulumi_google_native/networkservices/v1beta1/endpoint_policy.py
AaronFriel/pulumi-google-native
75d1cda425e33d4610348972cd70bddf35f1770d
[ "Apache-2.0" ]
8
2021-04-24T17:46:51.000Z
2022-01-05T10:40:21.000Z
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from ... import _utilities from . import outputs from ._enums import * from ._inputs import * __all__ = ['EndpointPolicyArgs', 'EndpointPolicy'] @pulumi.input_type class EndpointPolicyArgs: def __init__(__self__, *, endpoint_matcher: pulumi.Input['EndpointMatcherArgs'], endpoint_policy_id: pulumi.Input[str], type: pulumi.Input['EndpointPolicyType'], authorization_policy: Optional[pulumi.Input[str]] = None, client_tls_policy: Optional[pulumi.Input[str]] = None, description: Optional[pulumi.Input[str]] = None, labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, location: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, project: Optional[pulumi.Input[str]] = None, server_tls_policy: Optional[pulumi.Input[str]] = None, traffic_port_selector: Optional[pulumi.Input['TrafficPortSelectorArgs']] = None): """ The set of arguments for constructing a EndpointPolicy resource. :param pulumi.Input['EndpointMatcherArgs'] endpoint_matcher: A matcher that selects endpoints to which the policies should be applied. :param pulumi.Input['EndpointPolicyType'] type: The type of endpoint policy. This is primarily used to validate the configuration. :param pulumi.Input[str] authorization_policy: Optional. This field specifies the URL of AuthorizationPolicy resource that applies authorization policies to the inbound traffic at the matched endpoints. Refer to Authorization. If this field is not specified, authorization is disabled(no authz checks) for this endpoint. :param pulumi.Input[str] client_tls_policy: Optional. A URL referring to a ClientTlsPolicy resource. 
ClientTlsPolicy can be set to specify the authentication for traffic from the proxy to the actual endpoints. More specifically, it is applied to the outgoing traffic from the proxy to the endpoint. This is typically used for sidecar model where the proxy identifies itself as endpoint to the control plane, with the connection between sidecar and endpoint requiring authentication. If this field is not set, authentication is disabled(open). Applicable only when EndpointPolicyType is SIDECAR_PROXY. :param pulumi.Input[str] description: Optional. A free-text description of the resource. Max length 1024 characters. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Optional. Set of label tags associated with the EndpointPolicy resource. :param pulumi.Input[str] name: Name of the EndpointPolicy resource. It matches pattern `projects/{project}/locations/global/endpointPolicies/{endpoint_policy}`. :param pulumi.Input[str] server_tls_policy: Optional. A URL referring to ServerTlsPolicy resource. ServerTlsPolicy is used to determine the authentication policy to be applied to terminate the inbound traffic at the identified backends. If this field is not set, authentication is disabled(open) for this endpoint. :param pulumi.Input['TrafficPortSelectorArgs'] traffic_port_selector: Optional. Port selector for the (matched) endpoints. If no port selector is provided, the matched config is applied to all ports. 
""" pulumi.set(__self__, "endpoint_matcher", endpoint_matcher) pulumi.set(__self__, "endpoint_policy_id", endpoint_policy_id) pulumi.set(__self__, "type", type) if authorization_policy is not None: pulumi.set(__self__, "authorization_policy", authorization_policy) if client_tls_policy is not None: pulumi.set(__self__, "client_tls_policy", client_tls_policy) if description is not None: pulumi.set(__self__, "description", description) if labels is not None: pulumi.set(__self__, "labels", labels) if location is not None: pulumi.set(__self__, "location", location) if name is not None: pulumi.set(__self__, "name", name) if project is not None: pulumi.set(__self__, "project", project) if server_tls_policy is not None: pulumi.set(__self__, "server_tls_policy", server_tls_policy) if traffic_port_selector is not None: pulumi.set(__self__, "traffic_port_selector", traffic_port_selector) @property @pulumi.getter(name="endpointMatcher") def endpoint_matcher(self) -> pulumi.Input['EndpointMatcherArgs']: """ A matcher that selects endpoints to which the policies should be applied. """ return pulumi.get(self, "endpoint_matcher") @endpoint_matcher.setter def endpoint_matcher(self, value: pulumi.Input['EndpointMatcherArgs']): pulumi.set(self, "endpoint_matcher", value) @property @pulumi.getter(name="endpointPolicyId") def endpoint_policy_id(self) -> pulumi.Input[str]: return pulumi.get(self, "endpoint_policy_id") @endpoint_policy_id.setter def endpoint_policy_id(self, value: pulumi.Input[str]): pulumi.set(self, "endpoint_policy_id", value) @property @pulumi.getter def type(self) -> pulumi.Input['EndpointPolicyType']: """ The type of endpoint policy. This is primarily used to validate the configuration. """ return pulumi.get(self, "type") @type.setter def type(self, value: pulumi.Input['EndpointPolicyType']): pulumi.set(self, "type", value) @property @pulumi.getter(name="authorizationPolicy") def authorization_policy(self) -> Optional[pulumi.Input[str]]: """ Optional. 
This field specifies the URL of AuthorizationPolicy resource that applies authorization policies to the inbound traffic at the matched endpoints. Refer to Authorization. If this field is not specified, authorization is disabled(no authz checks) for this endpoint. """ return pulumi.get(self, "authorization_policy") @authorization_policy.setter def authorization_policy(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "authorization_policy", value) @property @pulumi.getter(name="clientTlsPolicy") def client_tls_policy(self) -> Optional[pulumi.Input[str]]: """ Optional. A URL referring to a ClientTlsPolicy resource. ClientTlsPolicy can be set to specify the authentication for traffic from the proxy to the actual endpoints. More specifically, it is applied to the outgoing traffic from the proxy to the endpoint. This is typically used for sidecar model where the proxy identifies itself as endpoint to the control plane, with the connection between sidecar and endpoint requiring authentication. If this field is not set, authentication is disabled(open). Applicable only when EndpointPolicyType is SIDECAR_PROXY. """ return pulumi.get(self, "client_tls_policy") @client_tls_policy.setter def client_tls_policy(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "client_tls_policy", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ Optional. A free-text description of the resource. Max length 1024 characters. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ Optional. Set of label tags associated with the EndpointPolicy resource. 
""" return pulumi.get(self, "labels") @labels.setter def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "labels", value) @property @pulumi.getter def location(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "location") @location.setter def location(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "location", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ Name of the EndpointPolicy resource. It matches pattern `projects/{project}/locations/global/endpointPolicies/{endpoint_policy}`. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter def project(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "project") @project.setter def project(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "project", value) @property @pulumi.getter(name="serverTlsPolicy") def server_tls_policy(self) -> Optional[pulumi.Input[str]]: """ Optional. A URL referring to ServerTlsPolicy resource. ServerTlsPolicy is used to determine the authentication policy to be applied to terminate the inbound traffic at the identified backends. If this field is not set, authentication is disabled(open) for this endpoint. """ return pulumi.get(self, "server_tls_policy") @server_tls_policy.setter def server_tls_policy(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "server_tls_policy", value) @property @pulumi.getter(name="trafficPortSelector") def traffic_port_selector(self) -> Optional[pulumi.Input['TrafficPortSelectorArgs']]: """ Optional. Port selector for the (matched) endpoints. If no port selector is provided, the matched config is applied to all ports. 
""" return pulumi.get(self, "traffic_port_selector") @traffic_port_selector.setter def traffic_port_selector(self, value: Optional[pulumi.Input['TrafficPortSelectorArgs']]): pulumi.set(self, "traffic_port_selector", value) class EndpointPolicy(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, authorization_policy: Optional[pulumi.Input[str]] = None, client_tls_policy: Optional[pulumi.Input[str]] = None, description: Optional[pulumi.Input[str]] = None, endpoint_matcher: Optional[pulumi.Input[pulumi.InputType['EndpointMatcherArgs']]] = None, endpoint_policy_id: Optional[pulumi.Input[str]] = None, labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, location: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, project: Optional[pulumi.Input[str]] = None, server_tls_policy: Optional[pulumi.Input[str]] = None, traffic_port_selector: Optional[pulumi.Input[pulumi.InputType['TrafficPortSelectorArgs']]] = None, type: Optional[pulumi.Input['EndpointPolicyType']] = None, __props__=None): """ Creates a new EndpointPolicy in a given project and location. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] authorization_policy: Optional. This field specifies the URL of AuthorizationPolicy resource that applies authorization policies to the inbound traffic at the matched endpoints. Refer to Authorization. If this field is not specified, authorization is disabled(no authz checks) for this endpoint. :param pulumi.Input[str] client_tls_policy: Optional. A URL referring to a ClientTlsPolicy resource. ClientTlsPolicy can be set to specify the authentication for traffic from the proxy to the actual endpoints. More specifically, it is applied to the outgoing traffic from the proxy to the endpoint. 
This is typically used for sidecar model where the proxy identifies itself as endpoint to the control plane, with the connection between sidecar and endpoint requiring authentication. If this field is not set, authentication is disabled(open). Applicable only when EndpointPolicyType is SIDECAR_PROXY. :param pulumi.Input[str] description: Optional. A free-text description of the resource. Max length 1024 characters. :param pulumi.Input[pulumi.InputType['EndpointMatcherArgs']] endpoint_matcher: A matcher that selects endpoints to which the policies should be applied. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Optional. Set of label tags associated with the EndpointPolicy resource. :param pulumi.Input[str] name: Name of the EndpointPolicy resource. It matches pattern `projects/{project}/locations/global/endpointPolicies/{endpoint_policy}`. :param pulumi.Input[str] server_tls_policy: Optional. A URL referring to ServerTlsPolicy resource. ServerTlsPolicy is used to determine the authentication policy to be applied to terminate the inbound traffic at the identified backends. If this field is not set, authentication is disabled(open) for this endpoint. :param pulumi.Input[pulumi.InputType['TrafficPortSelectorArgs']] traffic_port_selector: Optional. Port selector for the (matched) endpoints. If no port selector is provided, the matched config is applied to all ports. :param pulumi.Input['EndpointPolicyType'] type: The type of endpoint policy. This is primarily used to validate the configuration. """ ... @overload def __init__(__self__, resource_name: str, args: EndpointPolicyArgs, opts: Optional[pulumi.ResourceOptions] = None): """ Creates a new EndpointPolicy in a given project and location. :param str resource_name: The name of the resource. :param EndpointPolicyArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... 
def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(EndpointPolicyArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, authorization_policy: Optional[pulumi.Input[str]] = None, client_tls_policy: Optional[pulumi.Input[str]] = None, description: Optional[pulumi.Input[str]] = None, endpoint_matcher: Optional[pulumi.Input[pulumi.InputType['EndpointMatcherArgs']]] = None, endpoint_policy_id: Optional[pulumi.Input[str]] = None, labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, location: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, project: Optional[pulumi.Input[str]] = None, server_tls_policy: Optional[pulumi.Input[str]] = None, traffic_port_selector: Optional[pulumi.Input[pulumi.InputType['TrafficPortSelectorArgs']]] = None, type: Optional[pulumi.Input['EndpointPolicyType']] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = EndpointPolicyArgs.__new__(EndpointPolicyArgs) __props__.__dict__["authorization_policy"] = authorization_policy __props__.__dict__["client_tls_policy"] = client_tls_policy __props__.__dict__["description"] = description if endpoint_matcher is None and not opts.urn: raise TypeError("Missing required property 'endpoint_matcher'") __props__.__dict__["endpoint_matcher"] = endpoint_matcher if 
endpoint_policy_id is None and not opts.urn: raise TypeError("Missing required property 'endpoint_policy_id'") __props__.__dict__["endpoint_policy_id"] = endpoint_policy_id __props__.__dict__["labels"] = labels __props__.__dict__["location"] = location __props__.__dict__["name"] = name __props__.__dict__["project"] = project __props__.__dict__["server_tls_policy"] = server_tls_policy __props__.__dict__["traffic_port_selector"] = traffic_port_selector if type is None and not opts.urn: raise TypeError("Missing required property 'type'") __props__.__dict__["type"] = type __props__.__dict__["create_time"] = None __props__.__dict__["update_time"] = None super(EndpointPolicy, __self__).__init__( 'google-native:networkservices/v1beta1:EndpointPolicy', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'EndpointPolicy': """ Get an existing EndpointPolicy resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. 
""" opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = EndpointPolicyArgs.__new__(EndpointPolicyArgs) __props__.__dict__["authorization_policy"] = None __props__.__dict__["client_tls_policy"] = None __props__.__dict__["create_time"] = None __props__.__dict__["description"] = None __props__.__dict__["endpoint_matcher"] = None __props__.__dict__["labels"] = None __props__.__dict__["name"] = None __props__.__dict__["server_tls_policy"] = None __props__.__dict__["traffic_port_selector"] = None __props__.__dict__["type"] = None __props__.__dict__["update_time"] = None return EndpointPolicy(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter(name="authorizationPolicy") def authorization_policy(self) -> pulumi.Output[str]: """ Optional. This field specifies the URL of AuthorizationPolicy resource that applies authorization policies to the inbound traffic at the matched endpoints. Refer to Authorization. If this field is not specified, authorization is disabled(no authz checks) for this endpoint. """ return pulumi.get(self, "authorization_policy") @property @pulumi.getter(name="clientTlsPolicy") def client_tls_policy(self) -> pulumi.Output[str]: """ Optional. A URL referring to a ClientTlsPolicy resource. ClientTlsPolicy can be set to specify the authentication for traffic from the proxy to the actual endpoints. More specifically, it is applied to the outgoing traffic from the proxy to the endpoint. This is typically used for sidecar model where the proxy identifies itself as endpoint to the control plane, with the connection between sidecar and endpoint requiring authentication. If this field is not set, authentication is disabled(open). Applicable only when EndpointPolicyType is SIDECAR_PROXY. """ return pulumi.get(self, "client_tls_policy") @property @pulumi.getter(name="createTime") def create_time(self) -> pulumi.Output[str]: """ The timestamp when the resource was created. 
""" return pulumi.get(self, "create_time") @property @pulumi.getter def description(self) -> pulumi.Output[str]: """ Optional. A free-text description of the resource. Max length 1024 characters. """ return pulumi.get(self, "description") @property @pulumi.getter(name="endpointMatcher") def endpoint_matcher(self) -> pulumi.Output['outputs.EndpointMatcherResponse']: """ A matcher that selects endpoints to which the policies should be applied. """ return pulumi.get(self, "endpoint_matcher") @property @pulumi.getter def labels(self) -> pulumi.Output[Mapping[str, str]]: """ Optional. Set of label tags associated with the EndpointPolicy resource. """ return pulumi.get(self, "labels") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ Name of the EndpointPolicy resource. It matches pattern `projects/{project}/locations/global/endpointPolicies/{endpoint_policy}`. """ return pulumi.get(self, "name") @property @pulumi.getter(name="serverTlsPolicy") def server_tls_policy(self) -> pulumi.Output[str]: """ Optional. A URL referring to ServerTlsPolicy resource. ServerTlsPolicy is used to determine the authentication policy to be applied to terminate the inbound traffic at the identified backends. If this field is not set, authentication is disabled(open) for this endpoint. """ return pulumi.get(self, "server_tls_policy") @property @pulumi.getter(name="trafficPortSelector") def traffic_port_selector(self) -> pulumi.Output['outputs.TrafficPortSelectorResponse']: """ Optional. Port selector for the (matched) endpoints. If no port selector is provided, the matched config is applied to all ports. """ return pulumi.get(self, "traffic_port_selector") @property @pulumi.getter def type(self) -> pulumi.Output[str]: """ The type of endpoint policy. This is primarily used to validate the configuration. """ return pulumi.get(self, "type") @property @pulumi.getter(name="updateTime") def update_time(self) -> pulumi.Output[str]: """ The timestamp when the resource was updated. 
""" return pulumi.get(self, "update_time")
54.458824
608
0.687967
2,708
23,145
5.668759
0.083456
0.063058
0.053808
0.053026
0.813758
0.764185
0.716305
0.681193
0.652726
0.626409
0
0.001056
0.222467
23,145
424
609
54.587264
0.851967
0.376496
0
0.410256
1
0
0.134637
0.026283
0
0
0
0
0
1
0.150183
false
0.003663
0.029304
0.010989
0.274725
0
0
0
0
null
0
0
0
1
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
9646f801ada53f72aa167b40ac837b6fe27e3923
1,254
py
Python
tests/test_contains.py
aefalcon/iterable_collections
8e3b4ea84083a100413f23af30ea27dfd4b838ff
[ "MIT" ]
4
2018-06-05T14:07:56.000Z
2021-04-17T12:09:23.000Z
tests/test_contains.py
aefalcon/iterable_collections
8e3b4ea84083a100413f23af30ea27dfd4b838ff
[ "MIT" ]
1
2018-07-10T19:53:54.000Z
2018-07-10T19:58:38.000Z
tests/test_contains.py
aefalcon/iterable_collections
8e3b4ea84083a100413f23af30ea27dfd4b838ff
[ "MIT" ]
2
2020-01-29T10:51:11.000Z
2021-11-11T21:37:24.000Z
import unittest

from iterable_collections import collect


class TestContains(unittest.TestCase):
    """Exercise ``collect(...).contains`` across every supported container type.

    ``contains`` follows Python ``in`` semantics for the wrapped iterable:
    dicts test keys, dict-items test (key, value) pairs, enumerations test
    (index, value) pairs.
    """

    def test_list(self):
        c = collect([1, 2, 3, 4, 5])
        self.assertTrue(c.contains(1))
        self.assertFalse(c.contains('a'))

    def test_set(self):
        c = collect({1, 2, 3, 4, 5})
        self.assertTrue(c.contains(1))
        self.assertFalse(c.contains('a'))

    def test_tuple(self):
        # Bug fix: this previously built a *set* literal, duplicating
        # test_set and leaving tuples untested. Use an actual tuple.
        c = collect((1, 2, 3, 4, 5))
        self.assertTrue(c.contains(1))
        self.assertFalse(c.contains('a'))

    def test_iter(self):
        c = collect(iter([1, 2, 3, 4, 5]))
        self.assertTrue(c.contains(1))
        self.assertFalse(c.contains('a'))

    def test_dict(self):
        # Membership on a dict checks keys, not values.
        c = collect({'a': 1, 'b': 2})
        self.assertFalse(c.contains(1))
        self.assertTrue(c.contains('a'))

    def test_dict_items(self):
        # Membership on dict items checks (key, value) pairs.
        c = collect({'a': 1, 'b': 2}.items())
        self.assertFalse(c.contains(1))
        self.assertFalse(c.contains('a'))
        self.assertTrue(c.contains(('a', 1)))

    def test_enumerate(self):
        # After enumerate(), membership checks (index, value) pairs.
        c = collect([1, 2, 3, 4, 5]).enumerate().list_()
        self.assertFalse(c.contains(1))
        self.assertFalse(c.contains('a'))
        self.assertTrue(c.list_().contains((0, 1)))
28.5
56
0.573365
174
1,254
4.068966
0.178161
0.190678
0.20339
0.305085
0.751412
0.724576
0.648305
0.603107
0.579096
0.579096
0
0.04127
0.246411
1,254
43
57
29.162791
0.707937
0
0
0.454545
0
0
0.009569
0
0
0
0
0
0.484848
1
0.212121
false
0
0.060606
0
0.30303
0
0
0
0
null
0
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
1
0
0
0
0
0
0
0
5
96544275974d2afada27f9fa124ef26ad2fd4d67
2,368
py
Python
python/tests/tools/lexeme.py
lggruspe/ibex
43cdd950c8db533e9e5b350375e1288744dc6e7e
[ "MIT" ]
null
null
null
python/tests/tools/lexeme.py
lggruspe/ibex
43cdd950c8db533e9e5b350375e1288744dc6e7e
[ "MIT" ]
1
2020-03-31T11:30:17.000Z
2020-03-31T11:30:17.000Z
python/tests/tools/lexeme.py
lggruspe/ibex
43cdd950c8db533e9e5b350375e1288744dc6e7e
[ "MIT" ]
null
null
null
import random

import examples

DIGITS = "0123456789"


@examples.instances
def empty():
    """The empty lexeme."""
    return ''


@examples.instances
def number():
    """Random numeric literal: integer, optional fraction, optional exponent."""
    def integer():
        # 1-9 digits; a multi-digit integer never starts with '0'.
        n = random.randint(1, 9)
        if n == 1:
            return random.choice(DIGITS)
        rv = random.choice(DIGITS[1:])
        for i in range(1, n):
            rv += random.choice(DIGITS)
        return rv

    rv = integer()
    if random.randint(0, 1):
        rv += "."
        for _ in range(random.randint(1, 9)):
            rv += random.choice(DIGITS)
    if random.randint(0, 1):
        rv += random.choice("eE")
        if random.randint(0, 1):
            rv += random.choice("+-")
        rv += integer()
    return rv


@examples.instances
def character():
    """Random character literal: an escape, or a printable char except ' and \\."""
    escape = "\\{}".format(random.choice("nt\\"))
    # Bug fix: the original passed the range *endpoints*
    # ([32, 38] + [40, 91] + [93, 126]) to random.choice, so only six
    # specific characters could ever be generated. Sample the full
    # printable ranges, excluding the quote (39) and backslash (92).
    char = chr(random.choice(
        list(range(32, 39)) + list(range(40, 92)) + list(range(93, 127))))
    return "'{}'".format(random.choice([escape, char]))


@examples.instances
def string():
    """Random string literal of 0-32 printable characters and escapes."""
    # Bug fix: as in character(), sample the full printable ranges
    # (excluding '"' == 34 and '\\' == 92) rather than only the six range
    # endpoints; the escape likewise now draws from every printable
    # character instead of just chr(32) and chr(126).
    char = chr(random.choice(
        list(range(32, 34)) + list(range(35, 92)) + list(range(93, 127))))
    escape = "\\{}".format(chr(random.randint(32, 126)))
    string = ""
    for i in range(random.randint(0, 32)):
        string += random.choice([char, escape])
    return f'"{string}"'


@examples.instances
def whitespace():
    """Single whitespace character."""
    return random.choice([' ', "\t", "\n"])


@examples.instances
def identifier():
    """Random identifier: a letter or '_' followed by 0-15 alphanumerics."""
    A = "_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
    B = "0123456789"
    rv = random.choice(A)
    for _ in range(random.randint(0, 15)):
        rv += random.choice(A + B)
    return rv


# Fixed single-character punctuation lexemes.

@examples.instances
def dot():
    return '.'


@examples.instances
def lparen():
    return '('


@examples.instances
def rparen():
    return ')'


@examples.instances
def comma():
    return ','


@examples.instances
def star():
    return '*'


@examples.instances
def equal():
    return '='


@examples.instances
def lbrace():
    return '{'


@examples.instances
def rbrace():
    return '}'


@examples.instances
def colon():
    return ':'


@examples.instances
def lbracket():
    return '['


@examples.instances
def rbracket():
    return ']'


@examples.instances
def plus():
    return '+'


@examples.instances
def minus():
    return '-'


@examples.instances
def slash():
    return '/'


@examples.instances
def lessthan():
    return '<'


@examples.instances
def greaterthan():
    return '>'


if __name__ == "__main__":
    examples.main()
18.5
63
0.594172
269
2,368
5.189591
0.241636
0.267908
0.315186
0.297994
0.176934
0.058023
0.044413
0.044413
0
0
0
0.038695
0.236064
2,368
127
64
18.645669
0.733002
0
0
0.29703
0
0
0.056166
0.022382
0
0
0
0
0
1
0.227723
false
0
0.019802
0.178218
0.485149
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
5
9680285325a0da182536c2990bfa406a81198915
31
py
Python
geomstats/backend/common.py
tolgabirdal/geomstats
27667a9fbd8d3b8fa7a5da0e34d880ce0ad39d51
[ "MIT" ]
1
2018-05-23T20:18:23.000Z
2018-05-23T20:18:23.000Z
geomstats/backend/common.py
leslie-chu/geomstats
fbed39b47b16eab4a48179106e8d0c1a5891243d
[ "MIT" ]
null
null
null
geomstats/backend/common.py
leslie-chu/geomstats
fbed39b47b16eab4a48179106e8d0c1a5891243d
[ "MIT" ]
null
null
null
import numpy as np

# Re-export numpy's pi so callers of this backend module share one constant.
pi = np.pi
7.75
18
0.677419
7
31
3
0.714286
0.380952
0
0
0
0
0
0
0
0
0
0
0.258065
31
3
19
10.333333
0.913043
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
1
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
9694aa2e3e9609d930061103ca0726871778ff34
205
py
Python
textattack/constraints/overlap/__init__.py
cclauss/TextAttack
98b8d6102aa47bf3c41afedace0215d48f8ed046
[ "MIT" ]
2
2021-02-22T12:15:27.000Z
2021-05-02T15:22:05.000Z
textattack/constraints/overlap/__init__.py
53X/TextAttack
e6a7969abc1e28a2a8a7e2ace709b78eb9dc94be
[ "MIT" ]
null
null
null
textattack/constraints/overlap/__init__.py
53X/TextAttack
e6a7969abc1e28a2a8a7e2ace709b78eb9dc94be
[ "MIT" ]
1
2021-11-12T05:26:21.000Z
2021-11-12T05:26:21.000Z
"""Re-export the overlap-based constraint classes defined in this package."""
from .bleu_score import BLEU
from .chrf_score import chrF
from .levenshtein_edit_distance import LevenshteinEditDistance
from .meteor_score import METEOR
from .max_words_perturbed import MaxWordsPerturbed
34.166667
62
0.878049
27
205
6.407407
0.518519
0.190751
0
0
0
0
0
0
0
0
0
0
0.097561
205
5
63
41
0.935135
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
96967fd9aeea6860c6f56440bb6aa38ff0d68c67
6,878
py
Python
operations/fleet/migrations/0008_fuelusage_historicalfuelusage.py
kaizer88/emps
2669b32c46befcf1a19390fb25013817e6b00980
[ "MIT" ]
null
null
null
operations/fleet/migrations/0008_fuelusage_historicalfuelusage.py
kaizer88/emps
2669b32c46befcf1a19390fb25013817e6b00980
[ "MIT" ]
null
null
null
operations/fleet/migrations/0008_fuelusage_historicalfuelusage.py
kaizer88/emps
2669b32c46befcf1a19390fb25013817e6b00980
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations
import django.db.models.deletion
from django.conf import settings


class Migration(migrations.Migration):
    """Create FuelUsage plus its history table, HistoricalFuelUsage.

    Auto-generated by Django's makemigrations; the HistoricalFuelUsage model
    mirrors FuelUsage's fields and appears to follow django-simple-history's
    table layout (history_id/history_date/history_type/history_user) — do not
    hand-edit the field tuples.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('employees', '0006_employee_department'),
        ('fleet', '0007_auto_20170706_2045'),
    ]

    operations = [
        # The live table: one row per fuel transaction.
        migrations.CreateModel(
            name='FuelUsage',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('fleet_node_number', models.CharField(default=None, max_length=250, null=True, blank=True)),
                ('fms_account_number', models.CharField(default=None, max_length=250, null=True, blank=True)),
                ('cost_center_number', models.CharField(default=None, max_length=250, null=True, blank=True)),
                ('cost_centre_name', models.CharField(default=None, max_length=250, null=True, blank=True)),
                ('client_reference_1', models.CharField(default=None, max_length=250, null=True, blank=True)),
                ('client_reference_2', models.CharField(default=None, max_length=250, null=True, blank=True)),
                ('transaction_date', models.DateField(null=True, blank=True)),
                ('transaction_number', models.CharField(default=None, max_length=250, null=True, blank=True)),
                ('merchant_name', models.CharField(default=None, max_length=250, null=True, blank=True)),
                ('transaction_code', models.CharField(default=None, max_length=25, null=True, blank=True)),
                ('transaction_description', models.CharField(default=None, max_length=250, null=True, blank=True)),
                ('odometer_reading', models.FloatField(default=0, max_length=20, null=True, blank=True)),
                ('distance_travelled', models.FloatField(default=0, max_length=20, null=True, blank=True)),
                ('quantity', models.FloatField(default=0, max_length=20, null=True, blank=True)),
                ('amount', models.FloatField(default=0, max_length=20, null=True, blank=True)),
                ('private_usage', models.CharField(default=None, max_length=25, null=True, blank=True)),
                ('inhouse_indicator', models.CharField(default=None, max_length=25, null=True, blank=True)),
                ('current_usage', models.CharField(default=None, max_length=250, null=True, blank=True)),
                ('created_by', models.ForeignKey(related_name='user_fuelusage', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
                ('driver', models.ForeignKey(related_name='driver_fuelusage', to='employees.Employee')),
                ('fuel_card', models.ForeignKey(related_name='fuel_card_fuelusage', blank=True, to='fleet.FuelCard', null=True)),
                ('modified_by', models.ForeignKey(related_name='user_modified_fuelusage', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
                ('vehicle', models.ForeignKey(related_name='vehicle_fuelusage', to='fleet.Vehicle')),
            ],
        ),
        # The history table: duplicates every FuelUsage field, plus history
        # bookkeeping columns. FKs use DO_NOTHING/db_constraint=False so
        # history rows survive deletion of the objects they reference.
        migrations.CreateModel(
            name='HistoricalFuelUsage',
            fields=[
                # 'id' mirrors the live PK but is a plain indexed integer
                # here; 'history_id' below is this table's real primary key.
                ('id', models.IntegerField(verbose_name='ID', db_index=True, auto_created=True, blank=True)),
                ('fleet_node_number', models.CharField(default=None, max_length=250, null=True, blank=True)),
                ('fms_account_number', models.CharField(default=None, max_length=250, null=True, blank=True)),
                ('cost_center_number', models.CharField(default=None, max_length=250, null=True, blank=True)),
                ('cost_centre_name', models.CharField(default=None, max_length=250, null=True, blank=True)),
                ('client_reference_1', models.CharField(default=None, max_length=250, null=True, blank=True)),
                ('client_reference_2', models.CharField(default=None, max_length=250, null=True, blank=True)),
                ('transaction_date', models.DateField(null=True, blank=True)),
                ('transaction_number', models.CharField(default=None, max_length=250, null=True, blank=True)),
                ('merchant_name', models.CharField(default=None, max_length=250, null=True, blank=True)),
                ('transaction_code', models.CharField(default=None, max_length=25, null=True, blank=True)),
                ('transaction_description', models.CharField(default=None, max_length=250, null=True, blank=True)),
                ('odometer_reading', models.FloatField(default=0, max_length=20, null=True, blank=True)),
                ('distance_travelled', models.FloatField(default=0, max_length=20, null=True, blank=True)),
                ('quantity', models.FloatField(default=0, max_length=20, null=True, blank=True)),
                ('amount', models.FloatField(default=0, max_length=20, null=True, blank=True)),
                ('private_usage', models.CharField(default=None, max_length=25, null=True, blank=True)),
                ('inhouse_indicator', models.CharField(default=None, max_length=25, null=True, blank=True)),
                ('current_usage', models.CharField(default=None, max_length=250, null=True, blank=True)),
                ('history_id', models.AutoField(serialize=False, primary_key=True)),
                ('history_date', models.DateTimeField()),
                ('history_type', models.CharField(max_length=1, choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')])),
                ('created_by', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, db_constraint=False, blank=True, to=settings.AUTH_USER_MODEL, null=True)),
                ('driver', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, db_constraint=False, blank=True, to='employees.Employee', null=True)),
                ('fuel_card', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, db_constraint=False, blank=True, to='fleet.FuelCard', null=True)),
                ('history_user', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, null=True)),
                ('modified_by', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, db_constraint=False, blank=True, to=settings.AUTH_USER_MODEL, null=True)),
                ('vehicle', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, db_constraint=False, blank=True, to='fleet.Vehicle', null=True)),
            ],
            options={
                'ordering': ('-history_date', '-history_id'),
                'get_latest_by': 'history_date',
                'verbose_name': 'historical fuel usage',
            },
        ),
    ]
79.976744
190
0.651643
804
6,878
5.375622
0.144279
0.083295
0.111291
0.141601
0.79292
0.79292
0.771171
0.756594
0.74248
0.74248
0
0.022146
0.19904
6,878
85
191
80.917647
0.762389
0.003053
0
0.556962
0
0
0.1593
0.016922
0
0
0
0
0
1
0
false
0
0.050633
0
0.088608
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
96b17696441ab9379b00e457fbf5bf611a99cac4
123
py
Python
backend/plays/admin.py
shinkenuu/Desafio-Full-Stack
6c96d7a09e3bdb01183411c778242f86d01aaf12
[ "MIT" ]
null
null
null
backend/plays/admin.py
shinkenuu/Desafio-Full-Stack
6c96d7a09e3bdb01183411c778242f86d01aaf12
[ "MIT" ]
null
null
null
backend/plays/admin.py
shinkenuu/Desafio-Full-Stack
6c96d7a09e3bdb01183411c778242f86d01aaf12
[ "MIT" ]
null
null
null
from django.contrib import admin

from .models import Play, Reservation

# Bug fix: ``admin.register(Model)`` called as a plain function only *returns*
# a decorator — the models were never actually registered and did not appear
# in the admin. Register them on the default site instead.
admin.site.register(Play)
admin.site.register(Reservation)
15.375
37
0.813008
16
123
6.25
0.5625
0.26
0
0
0
0
0
0
0
0
0
0
0.113821
123
7
38
17.571429
0.917431
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5