Schema (113 columns):

| Column | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_frac_lines_assert | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |

Records follow, one block per row: the repository metadata as key: value lines (the max_stars / max_issues / max_forks groups folded onto one line each), the content column verbatim, then the remaining columns in schema order.
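As a rough sketch of how these columns might be consumed downstream, the example below loads one shard of such a dump with pandas and keeps only files that pass a few of the quality signals listed above; the file name `shard.parquet`, the parquet storage format, and the filter thresholds are illustrative assumptions, not properties documented by this dump.

```python
# Minimal sketch (assumptions: a local parquet shard with exactly this schema,
# and arbitrary example thresholds). Filters rows by a few quality signals.
import pandas as pd

df = pd.read_parquet("shard.parquet")  # hypothetical shard of the dump

mask = (
    (df["qsc_codepython_cate_ast_quality_signal"] == 1)             # file parses as Python
    & (df["qsc_code_cate_autogen_quality_signal"] == 0)             # not flagged as auto-generated
    & (df["qsc_code_frac_chars_dupe_5grams_quality_signal"] < 0.5)  # limit duplicated 5-grams
)
filtered = df.loc[mask, ["max_stars_repo_name", "max_stars_repo_path", "size", "content"]]
print(filtered.head())
```

Of the rows shown below, for example, the auto-generated `parsetab.py` row (cate_autogen = 1, frac_chars_dupe_5grams above 0.5) would be dropped by such a filter.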
hexsha: d4e88474ab017434713d6764dcf24407a16b343a | size: 89 | ext: py | lang: Python
max_stars_repo: clu-ling/clu-phontools (path: tests/__init__.py, head: 304510150c6f9a4b0e1372bc9275630b7f976aeb, licenses: ["Apache-2.0"], stars: null, events: null to null)
max_issues_repo: clu-ling/clu-phontools (path: tests/__init__.py, head: 304510150c6f9a4b0e1372bc9275630b7f976aeb, licenses: ["Apache-2.0"], issues: 3, events: 2021-06-15T23:32:30.000Z to 2021-09-01T18:49:20.000Z)
max_forks_repo: clu-ling/clu-phontools (path: tests/__init__.py, head: 304510150c6f9a4b0e1372bc9275630b7f976aeb, licenses: ["Apache-2.0"], forks: 1, events: 2021-06-18T05:48:29.000Z to 2021-06-18T05:48:29.000Z)
content:
```python
# -*- coding: utf-8 -*-
# TODO: add phrases here for eas of use in multiple test suites
```
avg_line_length: 22.25 | max_line_length: 63 | alphanum_fraction: 0.662921
remaining columns (qsc_code_num_words_quality_signal onward, in schema order):
| 15
| 89
| 3.933333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014286
| 0.213483
| 89
| 3
| 64
| 29.666667
| 0.828571
| 0.932584
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0.333333
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
effective: 0 | hits: 5

hexsha: be197aedf2a5e4757e87902f3ea6ae392963f8ff | size: 196 | ext: py | lang: Python
max_stars_repo: spentaur/dotloop-python (path: dotloop/detail.py, head: 5374ab5f5e16f9b826438a9c4f051a4be53d433b, licenses: ["MIT"], stars: null, events: null to null)
max_issues_repo: spentaur/dotloop-python (path: dotloop/detail.py, head: 5374ab5f5e16f9b826438a9c4f051a4be53d433b, licenses: ["MIT"], issues: null, events: null to null)
max_forks_repo: spentaur/dotloop-python (path: dotloop/detail.py, head: 5374ab5f5e16f9b826438a9c4f051a4be53d433b, licenses: ["MIT"], forks: 1, events: 2021-07-28T14:28:17.000Z to 2021-07-28T14:28:17.000Z)
content:
```python
from .bases import DotloopObject
class Detail(DotloopObject):
    def get(self):
        return self.fetch('get')
    def patch(self, **kwargs):
        return self.fetch('patch', json=kwargs)
```
avg_line_length: 19.6 | max_line_length: 47 | alphanum_fraction: 0.653061
remaining columns (qsc_code_num_words_quality_signal onward, in schema order):
| 24
| 196
| 5.333333
| 0.583333
| 0.15625
| 0.234375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.219388
| 196
| 9
| 48
| 21.777778
| 0.836601
| 0
| 0
| 0
| 0
| 0
| 0.040816
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.166667
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
effective: 0 | hits: 5

hexsha: 076df1c420f981336caaeec495ef473bdcab621d | size: 13,968 | ext: py | lang: Python
max_stars_repo: MrSuicideParrot/pyGoCompiler (path: parsetab.py, head: 5133a2ff5221f3b79f6a82439b1f06be30c89b52, licenses: ["MIT"], stars: null, events: null to null)
max_issues_repo: MrSuicideParrot/pyGoCompiler (path: parsetab.py, head: 5133a2ff5221f3b79f6a82439b1f06be30c89b52, licenses: ["MIT"], issues: null, events: null to null)
max_forks_repo: MrSuicideParrot/pyGoCompiler (path: parsetab.py, head: 5133a2ff5221f3b79f6a82439b1f06be30c89b52, licenses: ["MIT"], forks: null, events: null to null)
content:
```python
# parsetab.py
# This file is automatically generated. Do not edit.
_tabversion = '3.10'
_lr_method = 'LALR'
_lr_signature = 'nonassocLESSMOREEQUALSTOMOREEQUALLESSEQUALNOTEQUALleftPLUSMINUSleftTIMESDIVIDEleftANDORrightUMINUSINT FLOAT PLUS MINUS TIMES DIVIDE EQUALS LPAREN RPAREN LCURLBRACKET RCURLBRACKET ID COMMENT STRING ASSIGN SEMICOLON COMMA POINT NOT EQUALSTO MORE LESS MOREEQUAL LESSEQUAL NOTEQUAL AND OR INCREMENT DECREMENT BREAK CASE CHAN CONST CONTINUE DEFAULT DEFER ELSE FALLTHROUGH FOR FUNC GO GOTO IF IMPORT INTERFACE MAP PACKAGE RANGE RETURN SELECT STRUCT SWITCH TYPE VAR MAIN FMT PRINT SCAN TRUE FALSEstatement : PACKAGE MAIN IMPORT STRING FUNC MAIN LPAREN RPAREN LCURLBRACKET list RCURLBRACKET\n | PACKAGE MAIN IMPORT STRING FUNC MAIN LPAREN RPAREN LCURLBRACKET RCURLBRACKETlist : inst\n | inst listassignment : ID ASSIGN expressionAR\n | ID ASSIGN expressionBo\n | ID EQUALS expressionAR\n | ID EQUALS expressionBo\n | ID INCREMENT\n | ID DECREMENTinst : FOR expressionBo LCURLBRACKET list RCURLBRACKET\n | FOR assignment SEMICOLON expressionBo SEMICOLON assignment LCURLBRACKET list RCURLBRACKETinst : assignment SEMICOLONinst : IF expressionBo LCURLBRACKET list RCURLBRACKET ELSE LCURLBRACKET list RCURLBRACKET\n | IF expressionBo LCURLBRACKET list RCURLBRACKETlistID : expressionAR\n | expressionBo\n | expressionBo COMMA listID\n | expressionAR COMMA listIDIDlist : ID\n | ID COMMA IDlistinst : FMT POINT PRINT LPAREN listID RPAREN SEMICOLON\n | FMT POINT SCAN LPAREN IDlist RPAREN SEMICOLON\n | FMT POINT PRINT LPAREN RPAREN SEMICOLON\n | FMT POINT SCAN LPAREN RPAREN SEMICOLONexpressionAR : expressionAR PLUS expressionAR\n | expressionAR MINUS expressionAR\n | expressionAR TIMES expressionAR\n | expressionAR DIVIDE expressionAR\n | IDexpressionAR : INTexpressionAR : MINUS expressionAR %prec UMINUSexpressionAR : FLOATexpressionAR : LPAREN expressionAR RPARENexpressionBo : expressionAR MORE expressionAR\n | expressionAR LESS expressionAR\n | expressionAR MOREEQUAL expressionAR\n | expressionAR LESSEQUAL expressionAR\n | expressionBo NOTEQUAL expressionBo\n | expressionAR NOTEQUAL expressionAR\n | expressionBo EQUALSTO expressionBo\n | expressionAR EQUALSTO expressionAR\n | expressionBo AND expressionBo\n | expressionBo OR expressionBoexpressionBo : NOT expressionBo %prec UMINUSexpressionBo : TRUE\n | FALSEexpressionBo : LPAREN expressionBo RPAREN'
_lr_action_items = {'PACKAGE':([0,],[2,]),'$end':([1,12,19,],[0,-2,-1,]),'MAIN':([2,6,],[3,7,]),'IMPORT':([3,],[4,]),'STRING':([4,],[5,]),'FUNC':([5,],[6,]),'LPAREN':([7,14,16,24,27,29,36,37,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,60,62,63,66,89,106,107,],[8,27,27,27,27,60,66,66,27,27,27,27,27,60,60,60,60,60,60,60,60,60,60,60,89,90,66,66,66,66,]),'RPAREN':([8,25,26,30,31,34,56,57,58,59,70,71,72,73,75,76,77,78,79,80,81,82,83,84,85,86,87,89,90,91,95,97,98,99,101,114,115,117,],[9,-46,-47,-31,-33,-30,-45,85,86,-32,-39,-41,-43,-44,-35,-36,-37,-38,-40,-42,-26,-27,-28,-29,-48,-34,86,96,100,86,104,-16,-17,108,-20,-19,-18,-21,]),'LCURLBRACKET':([9,21,25,26,30,31,33,34,38,39,56,59,64,65,67,68,70,71,72,73,75,76,77,78,79,80,81,82,83,84,85,86,102,103,],[10,40,-46,-47,-31,-33,61,-30,-9,-10,-45,-32,-5,-6,-7,-8,-39,-41,-43,-44,-35,-36,-37,-38,-40,-42,-26,-27,-28,-29,-48,-34,111,112,]),'RCURLBRACKET':([10,11,13,20,32,69,88,92,94,105,109,113,116,118,119,120,121,],[12,19,-3,-4,-13,92,94,-11,-15,-24,-25,-22,-23,120,121,-12,-14,]),'FOR':([10,13,32,40,61,92,94,105,109,111,112,113,116,120,121,],[14,14,-13,14,14,-11,-15,-24,-25,14,14,-22,-23,-12,-14,]),'IF':([10,13,32,40,61,92,94,105,109,111,112,113,116,120,121,],[16,16,-13,16,16,-11,-15,-24,-25,16,16,-22,-23,-12,-14,]),'FMT':([10,13,32,40,61,92,94,105,109,111,112,113,116,120,121,],[17,17,-13,17,17,-11,-15,-24,-25,17,17,-22,-23,-12,-14,]),'ID':([10,13,14,16,24,27,29,32,36,37,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,60,61,66,89,90,92,93,94,105,106,107,109,110,111,112,113,116,120,121,],[18,18,28,34,34,34,34,-13,34,34,18,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,18,34,34,101,-11,18,-15,-24,34,34,-25,101,18,18,-22,-23,-12,-14,]),'NOT':([14,16,24,27,36,37,41,42,43,44,45,66,89,106,107,],[24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,]),'TRUE':([14,16,24,27,36,37,41,42,43,44,45,66,89,106,107,],[25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,]),'FALSE':([14,16,24,27,36,37,41,42,43,44,45,66,89,106,107,],[26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,]),'INT':([14,16,24,27,29,36,37,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,60,66,89,106,107,],[30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,]),'MINUS':([14,16,23,24,27,28,29,30,31,34,36,37,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,58,59,60,64,66,67,75,76,77,78,79,80,81,82,83,84,86,87,89,91,97,106,107,],[29,29,53,29,29,-30,29,-31,-33,-30,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,53,-32,29,53,29,53,53,53,53,53,53,53,-26,-27,-28,-29,-34,53,29,53,53,29,29,]),'FLOAT':([14,16,24,27,29,36,37,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,60,66,89,106,107,],[31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,]),'SEMICOLON':([15,22,25,26,30,31,34,38,39,56,59,64,65,67,68,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,96,100,104,108,],[32,45,-46,-47,-31,-33,-30,-9,-10,-45,-32,-5,-6,-7,-8,-39,-41,-43,-44,93,-35,-36,-37,-38,-40,-42,-26,-27,-28,-29,-48,-34,105,109,113,116,]),'POINT':([17,],[35,]),'ASSIGN':([18,28,],[36,36,]),'EQUALS':([18,28,],[37,37,]),'INCREMENT':([18,28,],[38,38,]),'DECREMENT':([18,28,],[39,39,]),'NOTEQUAL':([21,23,25,26,28,30,31,33,34,56,57,58,59,64,65,67,68,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,91,97,98,],[41,50,-46,-47,-30,-31,-33,41,-30,-45,41,50,-32,50,41,50,41,None,None,-43,-44,41,-35,-36,-37,-38,-40,-42,-26,-27,-28,-29,-48,-34,50,50,41,]),'EQUALSTO':([21,23,25,26,28,30,31,33,34,56,57,58,59,64,65,67,68,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,91,97,98,],[42,51,-46,-47,-30,-31,-33,42,-30,-45,42,51,-3
2,51,42,51,42,None,None,-43,-44,42,-35,-36,-37,-38,-40,-42,-26,-27,-28,-29,-48,-34,51,51,42,]),'AND':([21,25,26,30,31,33,34,56,57,59,65,68,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,98,],[43,-46,-47,-31,-33,43,-30,-45,43,-32,43,43,43,43,-43,-44,43,-35,-36,-37,-38,-40,-42,-26,-27,-28,-29,-48,-34,43,]),'OR':([21,25,26,30,31,33,34,56,57,59,65,68,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,98,],[44,-46,-47,-31,-33,44,-30,-45,44,-32,44,44,44,44,-43,-44,44,-35,-36,-37,-38,-40,-42,-26,-27,-28,-29,-48,-34,44,]),'MORE':([23,28,30,31,34,58,59,64,67,81,82,83,84,86,91,97,],[46,-30,-31,-33,-30,46,-32,46,46,-26,-27,-28,-29,-34,46,46,]),'LESS':([23,28,30,31,34,58,59,64,67,81,82,83,84,86,91,97,],[47,-30,-31,-33,-30,47,-32,47,47,-26,-27,-28,-29,-34,47,47,]),'MOREEQUAL':([23,28,30,31,34,58,59,64,67,81,82,83,84,86,91,97,],[48,-30,-31,-33,-30,48,-32,48,48,-26,-27,-28,-29,-34,48,48,]),'LESSEQUAL':([23,28,30,31,34,58,59,64,67,81,82,83,84,86,91,97,],[49,-30,-31,-33,-30,49,-32,49,49,-26,-27,-28,-29,-34,49,49,]),'PLUS':([23,28,30,31,34,58,59,64,67,75,76,77,78,79,80,81,82,83,84,86,87,91,97,],[52,-30,-31,-33,-30,52,-32,52,52,52,52,52,52,52,52,-26,-27,-28,-29,-34,52,52,52,]),'TIMES':([23,28,30,31,34,58,59,64,67,75,76,77,78,79,80,81,82,83,84,86,87,91,97,],[54,-30,-31,-33,-30,54,-32,54,54,54,54,54,54,54,54,54,54,-28,-29,-34,54,54,54,]),'DIVIDE':([23,28,30,31,34,58,59,64,67,75,76,77,78,79,80,81,82,83,84,86,87,91,97,],[55,-30,-31,-33,-30,55,-32,55,55,55,55,55,55,55,55,55,55,-28,-29,-34,55,55,55,]),'COMMA':([25,26,30,31,34,56,59,70,71,72,73,75,76,77,78,79,80,81,82,83,84,85,86,97,98,101,],[-46,-47,-31,-33,-30,-45,-32,-39,-41,-43,-44,-35,-36,-37,-38,-40,-42,-26,-27,-28,-29,-48,-34,106,107,110,]),'PRINT':([35,],[62,]),'SCAN':([35,],[63,]),'ELSE':([94,],[103,]),}
_lr_action = {}
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_action: _lr_action[_x] = {}
_lr_action[_x][_k] = _y
del _lr_action_items
_lr_goto_items = {'statement':([0,],[1,]),'list':([10,13,40,61,111,112,],[11,20,69,88,118,119,]),'inst':([10,13,40,61,111,112,],[13,13,13,13,13,13,]),'assignment':([10,13,14,40,61,93,111,112,],[15,15,22,15,15,102,15,15,]),'expressionBo':([14,16,24,27,36,37,41,42,43,44,45,66,89,106,107,],[21,33,56,57,65,68,70,71,72,73,74,57,98,98,98,]),'expressionAR':([14,16,24,27,29,36,37,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,60,66,89,106,107,],[23,23,23,58,59,64,67,23,23,23,23,23,75,76,77,78,79,80,81,82,83,84,87,91,97,97,97,]),'listID':([89,106,107,],[95,114,115,]),'IDlist':([90,110,],[99,117,]),}
_lr_goto = {}
for _k, _v in _lr_goto_items.items():
for _x, _y in zip(_v[0], _v[1]):
if not _x in _lr_goto: _lr_goto[_x] = {}
_lr_goto[_x][_k] = _y
del _lr_goto_items
_lr_productions = [
("S' -> statement","S'",1,None,None,None),
('statement -> PACKAGE MAIN IMPORT STRING FUNC MAIN LPAREN RPAREN LCURLBRACKET list RCURLBRACKET','statement',11,'p_statement_expr','plintax.py',17),
('statement -> PACKAGE MAIN IMPORT STRING FUNC MAIN LPAREN RPAREN LCURLBRACKET RCURLBRACKET','statement',10,'p_statement_expr','plintax.py',18),
('list -> inst','list',1,'p_list','plintax.py',26),
('list -> inst list','list',2,'p_list','plintax.py',27),
('assignment -> ID ASSIGN expressionAR','assignment',3,'p_assignment','plintax.py',34),
('assignment -> ID ASSIGN expressionBo','assignment',3,'p_assignment','plintax.py',35),
('assignment -> ID EQUALS expressionAR','assignment',3,'p_assignment','plintax.py',36),
('assignment -> ID EQUALS expressionBo','assignment',3,'p_assignment','plintax.py',37),
('assignment -> ID INCREMENT','assignment',2,'p_assignment','plintax.py',38),
('assignment -> ID DECREMENT','assignment',2,'p_assignment','plintax.py',39),
('inst -> FOR expressionBo LCURLBRACKET list RCURLBRACKET','inst',5,'p_inst_For','plintax.py',53),
('inst -> FOR assignment SEMICOLON expressionBo SEMICOLON assignment LCURLBRACKET list RCURLBRACKET','inst',9,'p_inst_For','plintax.py',54),
('inst -> assignment SEMICOLON','inst',2,'p_inst_assignment','plintax.py',62),
('inst -> IF expressionBo LCURLBRACKET list RCURLBRACKET ELSE LCURLBRACKET list RCURLBRACKET','inst',9,'p_inst_If','plintax.py',67),
('inst -> IF expressionBo LCURLBRACKET list RCURLBRACKET','inst',5,'p_inst_If','plintax.py',68),
('listID -> expressionAR','listID',1,'p_listID','plintax.py',75),
('listID -> expressionBo','listID',1,'p_listID','plintax.py',76),
('listID -> expressionBo COMMA listID','listID',3,'p_listID','plintax.py',77),
('listID -> expressionAR COMMA listID','listID',3,'p_listID','plintax.py',78),
('IDlist -> ID','IDlist',1,'p_IDlist','plintax.py',85),
('IDlist -> ID COMMA IDlist','IDlist',3,'p_IDlist','plintax.py',86),
('inst -> FMT POINT PRINT LPAREN listID RPAREN SEMICOLON','inst',7,'p_inst_func','plintax.py',102),
('inst -> FMT POINT SCAN LPAREN IDlist RPAREN SEMICOLON','inst',7,'p_inst_func','plintax.py',103),
('inst -> FMT POINT PRINT LPAREN RPAREN SEMICOLON','inst',6,'p_inst_func','plintax.py',104),
('inst -> FMT POINT SCAN LPAREN RPAREN SEMICOLON','inst',6,'p_inst_func','plintax.py',105),
('expressionAR -> expressionAR PLUS expressionAR','expressionAR',3,'p_expressionAR_binop','plintax.py',115),
('expressionAR -> expressionAR MINUS expressionAR','expressionAR',3,'p_expressionAR_binop','plintax.py',116),
('expressionAR -> expressionAR TIMES expressionAR','expressionAR',3,'p_expressionAR_binop','plintax.py',117),
('expressionAR -> expressionAR DIVIDE expressionAR','expressionAR',3,'p_expressionAR_binop','plintax.py',118),
('expressionAR -> ID','expressionAR',1,'p_expressionAR_binop','plintax.py',119),
('expressionAR -> INT','expressionAR',1,'p_expressionAR_int','plintax.py',134),
('expressionAR -> MINUS expressionAR','expressionAR',2,'p_expressionAR_inverse','plintax.py',139),
('expressionAR -> FLOAT','expressionAR',1,'p_expressionAR_float','plintax.py',144),
('expressionAR -> LPAREN expressionAR RPAREN','expressionAR',3,'p_expressionAR_group','plintax.py',149),
('expressionBo -> expressionAR MORE expressionAR','expressionBo',3,'p_expressionBo_binop','plintax.py',156),
('expressionBo -> expressionAR LESS expressionAR','expressionBo',3,'p_expressionBo_binop','plintax.py',157),
('expressionBo -> expressionAR MOREEQUAL expressionAR','expressionBo',3,'p_expressionBo_binop','plintax.py',158),
('expressionBo -> expressionAR LESSEQUAL expressionAR','expressionBo',3,'p_expressionBo_binop','plintax.py',159),
('expressionBo -> expressionBo NOTEQUAL expressionBo','expressionBo',3,'p_expressionBo_binop','plintax.py',160),
('expressionBo -> expressionAR NOTEQUAL expressionAR','expressionBo',3,'p_expressionBo_binop','plintax.py',161),
('expressionBo -> expressionBo EQUALSTO expressionBo','expressionBo',3,'p_expressionBo_binop','plintax.py',162),
('expressionBo -> expressionAR EQUALSTO expressionAR','expressionBo',3,'p_expressionBo_binop','plintax.py',163),
('expressionBo -> expressionBo AND expressionBo','expressionBo',3,'p_expressionBo_binop','plintax.py',164),
('expressionBo -> expressionBo OR expressionBo','expressionBo',3,'p_expressionBo_binop','plintax.py',165),
('expressionBo -> NOT expressionBo','expressionBo',2,'p_expressionBo_inverse','plintax.py',186),
('expressionBo -> TRUE','expressionBo',1,'p_expressionBo_int','plintax.py',192),
('expressionBo -> FALSE','expressionBo',1,'p_expressionBo_int','plintax.py',193),
('expressionBo -> LPAREN expressionBo RPAREN','expressionBo',3,'p_expressionBo_group','plintax.py',201),
]
```
avg_line_length: 179.076923 | max_line_length: 5,326 | alphanum_fraction: 0.659937
remaining columns (qsc_code_num_words_quality_signal onward, in schema order):
| 2,550
| 13,968
| 3.558039
| 0.102745
| 0.047614
| 0.016533
| 0.021162
| 0.514383
| 0.47184
| 0.443404
| 0.389948
| 0.275984
| 0.240824
| 0
| 0.262992
| 0.089419
| 13,968
| 77
| 5,327
| 181.402597
| 0.45035
| 0.004439
| 0
| 0.029412
| 1
| 0.014706
| 0.48137
| 0.013667
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.058824
| 0
| 0.058824
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
effective: 0 | hits: 5

hexsha: 077a4f9fec09dfc396b0a5f4c0029a8eb995111d | size: 20 | ext: py | lang: Python
max_stars_repo: smrojas/checkov (path: checkov/version.py, head: d83f5024bf771fa8d795f9a303e603ed107895e1, licenses: ["Apache-2.0"], stars: null, events: null to null)
max_issues_repo: smrojas/checkov (path: checkov/version.py, head: d83f5024bf771fa8d795f9a303e603ed107895e1, licenses: ["Apache-2.0"], issues: null, events: null to null)
max_forks_repo: smrojas/checkov (path: checkov/version.py, head: d83f5024bf771fa8d795f9a303e603ed107895e1, licenses: ["Apache-2.0"], forks: null, events: null to null)
content:
```python
version = '2.0.962'
```
avg_line_length: 10 | max_line_length: 19 | alphanum_fraction: 0.6
remaining columns (qsc_code_num_words_quality_signal onward, in schema order):
| 4
| 20
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.294118
| 0.15
| 20
| 1
| 20
| 20
| 0.411765
| 0
| 0
| 0
| 0
| 0
| 0.35
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
effective: 0 | hits: 5

hexsha: 07b1e5140f06db85db1ad17da65d7d9126e6b75c | size: 164 | ext: py | lang: Python
max_stars_repo: fletchgraham/colosseum (path: tests/web_platform/CSS2/positioning/test_absolute_replaced_width.py, head: 77be4896ee52b8f5956a3d77b5f2ccd2c8608e8f, licenses: ["BSD-3-Clause"], stars: null, events: null to null)
max_issues_repo: fletchgraham/colosseum (path: tests/web_platform/CSS2/positioning/test_absolute_replaced_width.py, head: 77be4896ee52b8f5956a3d77b5f2ccd2c8608e8f, licenses: ["BSD-3-Clause"], issues: null, events: null to null)
max_forks_repo: fletchgraham/colosseum (path: tests/web_platform/CSS2/positioning/test_absolute_replaced_width.py, head: 77be4896ee52b8f5956a3d77b5f2ccd2c8608e8f, licenses: ["BSD-3-Clause"], forks: 1, events: 2020-01-16T01:56:41.000Z to 2020-01-16T01:56:41.000Z)
content:
```python
from tests.utils import W3CTestCase
class TestAbsoluteReplacedWidth(W3CTestCase):
    vars().update(W3CTestCase.find_tests(__file__, 'absolute-replaced-width-'))
```
avg_line_length: 27.333333 | max_line_length: 79 | alphanum_fraction: 0.804878
remaining columns (qsc_code_num_words_quality_signal onward, in schema order):
| 17
| 164
| 7.470588
| 0.823529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02
| 0.085366
| 164
| 5
| 80
| 32.8
| 0.826667
| 0
| 0
| 0
| 0
| 0
| 0.147239
| 0.147239
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
effective: 0 | hits: 5

hexsha: 07b617bf989ca1e103a481abb7a84443c8a9d841 | size: 9,839 | ext: py | lang: Python
max_stars_repo: gate2k1/horovod (path: horovod/mxnet/mpi_ops.py, head: 38e91bee84efbb5b563a4928027a75dc3974633b, licenses: ["Apache-2.0"], stars: 12, events: 2020-06-04T20:23:49.000Z to 2022-03-18T18:22:59.000Z)
max_issues_repo: gate2k1/horovod (path: horovod/mxnet/mpi_ops.py, head: 38e91bee84efbb5b563a4928027a75dc3974633b, licenses: ["Apache-2.0"], issues: 4, events: 2020-12-04T21:00:38.000Z to 2022-01-22T12:49:30.000Z)
max_forks_repo: gate2k1/horovod (path: horovod/mxnet/mpi_ops.py, head: 38e91bee84efbb5b563a4928027a75dc3974633b, licenses: ["Apache-2.0"], forks: 8, events: 2020-07-25T15:25:47.000Z to 2022-03-17T02:27:15.000Z)
content:
```python
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Load all the necessary MXNet C types.
import ctypes
import os
import mxnet as mx
from mxnet.base import c_str, check_call, string_types
from horovod.common.util import get_ext_suffix
from horovod.common.basics import HorovodBasics as _HorovodBasics
_basics = _HorovodBasics(__file__, 'mpi_lib')
# import basic methods
init = _basics.init
shutdown = _basics.shutdown
size = _basics.size
local_size = _basics.local_size
rank = _basics.rank
local_rank = _basics.local_rank
mpi_threads_supported = _basics.mpi_threads_supported
mpi_enabled = _basics.mpi_enabled
mpi_built = _basics.mpi_built
gloo_enabled = _basics.gloo_enabled
gloo_built = _basics.gloo_built
nccl_built = _basics.nccl_built
ddl_built = _basics.ddl_built
mlsl_built = _basics.mlsl_built
dll_path = os.path.join(os.path.dirname(__file__),
'mpi_lib' + get_ext_suffix())
MPI_MXNET_LIB_CTYPES = ctypes.CDLL(dll_path, ctypes.RTLD_GLOBAL)
def allreduce(tensor, average=True, name=None, priority=0):
"""
A function that performs averaging or summation of the input tensor over
all the Horovod processes. The input tensor is not modified.
The reduction operation is keyed by the name. If name is not provided, an
incremented auto-generated name is used. The tensor type and shape must be
the same on all Horovod processes for a given name. The reduction will not
start until all processes are ready to send and receive the tensor.
This acts as a thin wrapper around an autograd function. If your input
tensor requires gradients, then callings this function will allow gradients
to be computed and backpropagated.
Arguments:
tensor: A tensor to average and sum.
average: A flag indicating whether to compute average or summation,
defaults to average.
name: A name of the reduction operation.
priority: The priority of this operation. Higher priority operations
are likely to be executed before other operations.
Returns:
A tensor of the same shape and type as `tensor`, averaged or summed
across all processes.
"""
output = mx.nd.zeros(shape=tensor.shape, ctx=tensor.context,
dtype=tensor.dtype)
c_in = tensor.handle
c_out = output.handle
if isinstance(name, string_types):
check_call(MPI_MXNET_LIB_CTYPES.horovod_mxnet_allreduce_async(
c_in, c_out, c_str(name), ctypes.c_bool(average),
ctypes.c_int(priority)))
else:
check_call(MPI_MXNET_LIB_CTYPES.horovod_mxnet_allreduce_async(
c_in, c_out, name, ctypes.c_bool(average),
ctypes.c_int(priority)))
return output
def allreduce_(tensor, average=True, name=None, priority=0):
"""
A function that performs in-place averaging or summation of the input
tensor over all the Horovod processes.
The reduction operation is keyed by the name. If name is not provided, an
incremented auto-generated name is used. The tensor type and shape must be
the same on all Horovod processes for a given name. The reduction will not
start until all processes are ready to send and receive the tensor.
Arguments:
tensor: A tensor to average and sum.
average: A flag indicating whether to compute average or summation,
defaults to average.
name: A name of the reduction operation.
priority: The priority of this operation. Higher priority operations
are likely to be executed before other operations.
Returns:
A tensor of the same shape and type as `tensor`, averaged or summed
across all processes.
"""
c_in = tensor.handle
c_out = tensor.handle
if isinstance(name, string_types):
check_call(MPI_MXNET_LIB_CTYPES.horovod_mxnet_allreduce_async(
c_in, c_out, c_str(name), ctypes.c_bool(average),
ctypes.c_int(priority)))
else:
check_call(MPI_MXNET_LIB_CTYPES.horovod_mxnet_allreduce_async(
c_in, c_out, name, ctypes.c_bool(average),
ctypes.c_int(priority)))
return tensor
def allgather(tensor, name=None, priority=0):
"""
A function that concatenates the input tensor with the same input tensor on
all other Horovod processes. The input tensor is not modified.
The concatenation is done on the first dimension, so the input tensors on
the different processes must have the same rank and shape, except for the
first dimension, which is allowed to be different.
This acts as a thin wrapper around an autograd function. If your input
tensor requires gradients, then callings this function will allow gradients
to be computed and backpropagated.
Arguments:
tensor: A tensor to allgather.
name: A name of the allgather operation.
priority: The priority of this operation. Higher priority operations
are likely to be executed before other operations.
Returns:
A tensor of the same type as `tensor`, concatenated on dimension zero
across all processes. The shape is identical to the input shape, except
for the first dimension, which may be greater and is the sum of all
first dimensions of the tensors in different Horovod processes.
"""
assert(isinstance(tensor, mx.nd.NDArray))
output = mx.nd.zeros(shape=tensor.shape, ctx=tensor.context,
dtype=tensor.dtype)
c_in = tensor.handle
c_out = output.handle
if isinstance(name, string_types):
check_call(MPI_MXNET_LIB_CTYPES.horovod_mxnet_allgather_async(
c_in, c_out, c_str(name), ctypes.c_int(priority)))
else:
check_call(MPI_MXNET_LIB_CTYPES.horovod_mxnet_allgather_async(
c_in, c_out, name, ctypes.c_int(priority)))
return output
def broadcast(tensor, root_rank, name=None, priority=0):
"""
A function that broadcasts the input tensor on root rank to the same input
tensor on all other Horovod processes. The input tensor is not modified.
The broadcast operation is keyed by the name. If name is not provided, an
incremented auto-generated name is used. The tensor type and shape must be
the same on all Horovod processes for a given name. The broadcast will not
start until all processes are ready to send and receive the tensor.
This acts as a thin wrapper around an autograd function. If your input
tensor requires gradients, then callings this function will allow gradients
to be computed and backpropagated.
Arguments:
tensor: A tensor to broadcast.
root_rank: The rank to broadcast the value from.
name: A name of the broadcast operation.
priority: The priority of this operation. Higher priority operations
are likely to be executed before other operations.
Returns:
A tensor of the same shape and type as `tensor`, with the value
broadcasted from root rank.
"""
output = mx.nd.zeros(shape=tensor.shape, ctx=tensor.context,
dtype=tensor.dtype)
c_in = tensor.handle
c_out = output.handle
if isinstance(name, string_types):
check_call(MPI_MXNET_LIB_CTYPES.horovod_mxnet_broadcast_async(
c_in, c_out, c_str(name), ctypes.c_int(root_rank),
ctypes.c_int(priority)))
else:
check_call(MPI_MXNET_LIB_CTYPES.horovod_mxnet_broadcast_async(
c_in, c_out, name, ctypes.c_int(root_rank),
ctypes.c_int(priority)))
return output
def broadcast_(tensor, root_rank, name=None, priority=0):
"""
A function that broadcasts the input tensor on root rank to the same input
tensor on all other Horovod processes. The operation is performed in-place.
The broadcast operation is keyed by the name. If name is not provided, an
incremented auto-generated name is used. The tensor type and shape must be
the same on all Horovod processes for a given name. The broadcast will not
start until all processes are ready to send and receive the tensor.
Arguments:
tensor: A tensor to broadcast.
root_rank: The rank to broadcast the value from.
name: A name of the broadcast operation.
priority: The priority of this operation. Higher priority operations
are likely to be executed before other operations.
Returns:
A tensor of the same shape and type as `tensor`, with the value
broadcasted from root rank.
"""
c_in = tensor.handle
c_out = tensor.handle
if isinstance(name, string_types):
check_call(MPI_MXNET_LIB_CTYPES.horovod_mxnet_broadcast_async(
c_in, c_out, c_str(name), ctypes.c_int(root_rank),
ctypes.c_int(priority)))
else:
check_call(MPI_MXNET_LIB_CTYPES.horovod_mxnet_broadcast_async(
c_in, c_out, name, ctypes.c_int(root_rank),
ctypes.c_int(priority)))
return tensor
```
avg_line_length: 40.995833 | max_line_length: 80 | alphanum_fraction: 0.704035
remaining columns (qsc_code_num_words_quality_signal onward, in schema order):
| 1,414
| 9,839
| 4.747525
| 0.161952
| 0.01877
| 0.020855
| 0.027856
| 0.753017
| 0.750931
| 0.749143
| 0.733055
| 0.733055
| 0.728884
| 0
| 0.001721
| 0.232341
| 9,839
| 239
| 81
| 41.167364
| 0.887065
| 0.564387
| 0
| 0.619565
| 0
| 0
| 0.003617
| 0
| 0
| 0
| 0
| 0
| 0.01087
| 1
| 0.054348
| false
| 0
| 0.097826
| 0
| 0.206522
| 0.01087
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
effective: 0 | hits: 5

hexsha: 07bbe2b1aa9fb904fa03aafaef516ff019879db1 | size: 2,111 | ext: py | lang: Python
max_stars_repo: woroko/TUNIBot (path: TUNIBrain/University_API_requests.py, head: e53dd66b11c732e2524a5022026156990fc9702a, licenses: ["MIT"], stars: null, events: null to null)
max_issues_repo: woroko/TUNIBot (path: TUNIBrain/University_API_requests.py, head: e53dd66b11c732e2524a5022026156990fc9702a, licenses: ["MIT"], issues: null, events: null to null)
max_forks_repo: woroko/TUNIBot (path: TUNIBrain/University_API_requests.py, head: e53dd66b11c732e2524a5022026156990fc9702a, licenses: ["MIT"], forks: null, events: null to null)
content:
```python
import requests, time
#Loads University API-data and saves it into json and xml files.
def save_API_data():
#Is there a delay between API calls or not
delay = False
#Implementation of study modules
#URL and API-key for the request
url = 'https://opendata.uta.fi:8443/apiman-gateway/UTA/opintojaksot/1.0'
headers = {'X-API-Key':'***REMOVED***', 'content-type': 'application/json'}
#Makes API-request and saves data into json-file.
r = requests.post(url, data="{}", headers=headers)
file = open("jsons/uta_course_implementations.json","w", encoding="utf-8")
file.write(r.content.decode('utf-8'))
file.close()
if(delay):
time.sleep(30)
#Open university studies
#URL and API-key for the request
url = 'https://opendata.uta.fi:8443/apiman-gateway/UTA/tarjontaavoin/1.0'
headers = {'X-API-Key':'***REMOVED***', 'content-type': 'application/json'}
#Makes API-request and saves data into xml-file.
r = requests.post(url, data="{}", headers=headers)
file = open("xmls/available_studies_at_open_university.xml","w")
file.write(r.content.decode('utf-8'))
file.close()
if(delay):
time.sleep(30)
#Exchange information
#URL and API-key for the request
url = 'https://opendata.uta.fi:8443/apiman-gateway/UTA/kvkohteet/1.0'
headers = {'X-API-Key':'***REMOVED***', 'content-type': 'application/json'}
#Makes API-request and saves data into json-file.
r = requests.post(url, data="{}", headers=headers)
file = open("jsons/exchange_destinations_and_programs.json","w")
file.write(r.content.decode('utf-8'))
file.close()
if(delay):
time.sleep(30)
#Cross-institutional studies
#URL and API-key for the request
url = 'https://opendata.uta.fi:8443/apiman-gateway/UTA/tarjontat3/1.0'
headers = {'X-API-Key':'***REMOVED***', 'content-type': 'application/json'}
#Makes API-request and saves data into xml-file.
r = requests.post(url, data="{}", headers=headers)
file = open("xmls/cross-institutional_studies.xml","w")
file.write(r.content.decode('utf-8'))
file.close()
save_API_data()
```
avg_line_length: 35.183333 | max_line_length: 78 | alphanum_fraction: 0.676457
remaining columns (qsc_code_num_words_quality_signal onward, in schema order):
| 310
| 2,111
| 4.56129
| 0.26129
| 0.033946
| 0.028289
| 0.033946
| 0.722065
| 0.722065
| 0.722065
| 0.722065
| 0.722065
| 0.722065
| 0
| 0.020134
| 0.153008
| 2,111
| 59
| 79
| 35.779661
| 0.770694
| 0.245855
| 0
| 0.647059
| 0
| 0.117647
| 0.413181
| 0.103295
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029412
| false
| 0
| 0.029412
| 0
| 0.058824
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
effective: 0 | hits: 5

hexsha: 07d8501b967166ba513130165b51a34fc3df5995 | size: 167 | ext: py | lang: Python
max_stars_repo: scikit-hep/scikit-hep (path: skhep/utils/__init__.py, head: b22d876b276ff88730b359309b64f123549c1f07, licenses: ["BSD-3-Clause"], stars: 150, events: 2016-11-14T14:09:29.000Z to 2022-03-18T16:37:03.000Z)
max_issues_repo: scikit-hep/scikit-hep (path: skhep/utils/__init__.py, head: b22d876b276ff88730b359309b64f123549c1f07, licenses: ["BSD-3-Clause"], issues: 123, events: 2017-01-30T10:03:04.000Z to 2022-03-31T06:26:09.000Z)
max_forks_repo: scikit-hep/scikit-hep (path: skhep/utils/__init__.py, head: b22d876b276ff88730b359309b64f123549c1f07, licenses: ["BSD-3-Clause"], forks: 41, events: 2017-01-11T11:42:56.000Z to 2021-12-06T22:38:32.000Z)
content:
```python
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license, see LICENSE.
"""
Module for miscellaneous and general utilities.
"""
from .exceptions import *
```
avg_line_length: 20.875 | max_line_length: 59 | alphanum_fraction: 0.700599
remaining columns (qsc_code_num_words_quality_signal onward, in schema order):
| 22
| 167
| 5.318182
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014388
| 0.167665
| 167
| 7
| 60
| 23.857143
| 0.827338
| 0.766467
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
effective: 0 | hits: 5

hexsha: 07e0219f4b208c5a23a96b4f1b282e05a91e80a0 | size: 655 | ext: py | lang: Python
max_stars_repo: tedmiston/prefect (path: src/prefect/environments/execution/__init__.py, head: a2cb40c28c942b1d170db42a55bab99598a4dcd6, licenses: ["ECL-2.0", "Apache-2.0"], stars: 1, events: 2020-05-10T14:32:32.000Z to 2020-05-10T14:32:32.000Z)
max_issues_repo: tedmiston/prefect (path: src/prefect/environments/execution/__init__.py, head: a2cb40c28c942b1d170db42a55bab99598a4dcd6, licenses: ["ECL-2.0", "Apache-2.0"], issues: null, events: null to null)
max_forks_repo: tedmiston/prefect (path: src/prefect/environments/execution/__init__.py, head: a2cb40c28c942b1d170db42a55bab99598a4dcd6, licenses: ["ECL-2.0", "Apache-2.0"], forks: null, events: null to null)
content:
```python
"""
Execution environments encapsulate the logic for where your Flow should execute in Prefect Cloud.
Currently, we recommend all users deploy their Flow using the `RemoteEnvironment` configured with the
appropriate choice of executor.
"""
from prefect.environments.execution.base import Environment
from prefect.environments.execution.dask import DaskKubernetesEnvironment
from prefect.environments.execution.fargate import FargateTaskEnvironment
from prefect.environments.execution.k8s import KubernetesJobEnvironment
from prefect.environments.execution.local import LocalEnvironment
from prefect.environments.execution.remote import RemoteEnvironment
```
avg_line_length: 50.384615 | max_line_length: 101 | alphanum_fraction: 0.865649
remaining columns (qsc_code_num_words_quality_signal onward, in schema order):
| 74
| 655
| 7.662162
| 0.567568
| 0.116402
| 0.243386
| 0.338624
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001678
| 0.090076
| 655
| 12
| 102
| 54.583333
| 0.949664
| 0.354198
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
effective: 0 | hits: 5

hexsha: 07f4c5293621d596f516737c6f52b2659e861b18 | size: 90 | ext: py | lang: Python
max_stars_repo: pmcheng/reportdiff_flask (path: app/reports/__init__.py, head: 5091cdf4d7c1a627ff262578440d67dcd1e47965, licenses: ["Apache-2.0"], stars: null, events: null to null)
max_issues_repo: pmcheng/reportdiff_flask (path: app/reports/__init__.py, head: 5091cdf4d7c1a627ff262578440d67dcd1e47965, licenses: ["Apache-2.0"], issues: null, events: null to null)
max_forks_repo: pmcheng/reportdiff_flask (path: app/reports/__init__.py, head: 5091cdf4d7c1a627ff262578440d67dcd1e47965, licenses: ["Apache-2.0"], forks: null, events: null to null)
content:
```python
from flask import Blueprint
reports=Blueprint('reports',__name__)
from . import routes
```
avg_line_length: 22.5 | max_line_length: 38 | alphanum_fraction: 0.788889
remaining columns (qsc_code_num_words_quality_signal onward, in schema order):
| 11
| 90
| 6.090909
| 0.636364
| 0.477612
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 90
| 4
| 39
| 22.5
| 0.858974
| 0
| 0
| 0
| 0
| 0
| 0.079545
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
effective: 0 | hits: 5

hexsha: ed0aaf2fdce40b2a4f476ea2a2aaabe6a10843af | size: 295 | ext: py | lang: Python
max_stars_repo: cracraft/jwql (path: jwql/website/apps/jwql/monitor_pages/__init__.py, head: 030c1663bc433465e01ad803e1578a2bc53035f4, licenses: ["BSD-3-Clause"], stars: 42, events: 2018-10-03T13:38:18.000Z to 2022-03-11T12:19:32.000Z)
max_issues_repo: cracraft/jwql (path: jwql/website/apps/jwql/monitor_pages/__init__.py, head: 030c1663bc433465e01ad803e1578a2bc53035f4, licenses: ["BSD-3-Clause"], issues: 723, events: 2018-08-29T18:29:49.000Z to 2022-03-31T21:09:20.000Z)
max_forks_repo: cracraft/jwql (path: jwql/website/apps/jwql/monitor_pages/__init__.py, head: 030c1663bc433465e01ad803e1578a2bc53035f4, licenses: ["BSD-3-Clause"], forks: 30, events: 2018-08-29T18:17:32.000Z to 2022-03-10T19:43:39.000Z)
content:
```python
from .monitor_bad_pixel_bokeh import BadPixelMonitor
from .monitor_bias_bokeh import BiasMonitor
from .monitor_dark_bokeh import DarkMonitor
from .monitor_filesystem_bokeh import MonitorFilesystem
from .monitor_mast_bokeh import MastMonitor
from .monitor_readnoise_bokeh import ReadnoiseMonitor
```
avg_line_length: 42.142857 | max_line_length: 55 | alphanum_fraction: 0.898305
remaining columns (qsc_code_num_words_quality_signal onward, in schema order):
| 37
| 295
| 6.810811
| 0.459459
| 0.261905
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081356
| 295
| 6
| 56
| 49.166667
| 0.929889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
effective: 0 | hits: 5

hexsha: ed37e3d75025f33febb04ea7541c8c8fb224cf35 | size: 11,400 | ext: py | lang: Python
max_stars_repo: edani/ClickHouse (path: tests/integration/test_materialize_mysql_database/test.py, head: 17a8a4e9664fabed5b370b37e148139ba698acf5, licenses: ["Apache-2.0"], stars: 2, events: 2019-09-05T17:17:27.000Z to 2020-09-06T20:27:32.000Z)
max_issues_repo: edani/ClickHouse (path: tests/integration/test_materialize_mysql_database/test.py, head: 17a8a4e9664fabed5b370b37e148139ba698acf5, licenses: ["Apache-2.0"], issues: null, events: null to null)
max_forks_repo: edani/ClickHouse (path: tests/integration/test_materialize_mysql_database/test.py, head: 17a8a4e9664fabed5b370b37e148139ba698acf5, licenses: ["Apache-2.0"], forks: 1, events: 2022-03-29T06:54:31.000Z to 2022-03-29T06:54:31.000Z)
content:
```python
import os
import os.path as p
import subprocess
import time
import pwd
import re
import pymysql.cursors
import pytest
from helpers.cluster import ClickHouseCluster, get_docker_compose_path
import docker
from . import materialize_with_ddl
DOCKER_COMPOSE_PATH = get_docker_compose_path()
cluster = ClickHouseCluster(__file__)
clickhouse_node = cluster.add_instance('node1', user_configs=["configs/users.xml"], with_mysql=False, stay_alive=True)
@pytest.fixture(scope="module")
def started_cluster():
try:
cluster.start()
yield cluster
finally:
cluster.shutdown()
class MySQLNodeInstance:
def __init__(self, user='root', password='clickhouse', ip_address='127.0.0.1', port=3308, docker_compose=None, project_name=cluster.project_name):
self.user = user
self.port = port
self.ip_address = ip_address
self.password = password
self.mysql_connection = None # lazy init
self.docker_compose = docker_compose
self.project_name = project_name
def alloc_connection(self):
if self.mysql_connection is None:
self.mysql_connection = pymysql.connect(user=self.user, password=self.password, host=self.ip_address,
port=self.port, autocommit=True)
else:
if self.mysql_connection.ping():
self.mysql_connection = pymysql.connect(user=self.user, password=self.password, host=self.ip_address,
port=self.port, autocommit=True)
return self.mysql_connection
def query(self, execution_query):
with self.alloc_connection().cursor() as cursor:
cursor.execute(execution_query)
def create_min_priv_user(self, user, password):
self.query("CREATE USER '" + user + "'@'%' IDENTIFIED BY '" + password + "'")
self.grant_min_priv_for_user(user)
def grant_min_priv_for_user(self, user, db='priv_err_db'):
self.query("GRANT REPLICATION SLAVE, REPLICATION CLIENT, RELOAD ON *.* TO '" + user + "'@'%'")
self.query("GRANT SELECT ON " + db + ".* TO '" + user + "'@'%'")
def result(self, execution_query):
with self.alloc_connection().cursor() as cursor:
result = cursor.execute(execution_query)
if result is not None:
print(cursor.fetchall())
def query_and_get_data(self, executio_query):
with self.alloc_connection().cursor() as cursor:
cursor.execute(executio_query)
return cursor.fetchall()
def close(self):
if self.mysql_connection is not None:
self.mysql_connection.close()
def wait_mysql_to_start(self, timeout=60):
start = time.time()
while time.time() - start < timeout:
try:
self.alloc_connection()
print("Mysql Started")
return
except Exception as ex:
print("Can't connect to MySQL " + str(ex))
time.sleep(0.5)
subprocess.check_call(['docker-compose', 'ps', '--services', 'all'])
raise Exception("Cannot wait MySQL container")
@pytest.fixture(scope="module")
def started_mysql_5_7():
docker_compose = os.path.join(DOCKER_COMPOSE_PATH, 'docker_compose_mysql_5_7_for_materialize_mysql.yml')
mysql_node = MySQLNodeInstance('root', 'clickhouse', '127.0.0.1', 3308, docker_compose)
try:
subprocess.check_call(
['docker-compose', '-p', cluster.project_name, '-f', docker_compose, 'up', '--no-recreate', '-d'])
mysql_node.wait_mysql_to_start(120)
yield mysql_node
finally:
mysql_node.close()
subprocess.check_call(['docker-compose', '-p', cluster.project_name, '-f', docker_compose, 'down', '--volumes',
'--remove-orphans'])
@pytest.fixture(scope="module")
def started_mysql_8_0():
docker_compose = os.path.join(DOCKER_COMPOSE_PATH, 'docker_compose_mysql_8_0_for_materialize_mysql.yml')
mysql_node = MySQLNodeInstance('root', 'clickhouse', '127.0.0.1', 33308, docker_compose)
try:
subprocess.check_call(
['docker-compose', '-p', cluster.project_name, '-f', docker_compose, 'up', '--no-recreate', '-d'])
mysql_node.wait_mysql_to_start(120)
yield mysql_node
finally:
mysql_node.close()
subprocess.check_call(['docker-compose', '-p', cluster.project_name, '-f', docker_compose, 'down', '--volumes',
'--remove-orphans'])
def test_materialize_database_dml_with_mysql_5_7(started_cluster, started_mysql_5_7):
materialize_with_ddl.dml_with_materialize_mysql_database(clickhouse_node, started_mysql_5_7, "mysql1")
materialize_with_ddl.materialize_mysql_database_with_datetime_and_decimal(clickhouse_node, started_mysql_5_7, "mysql1")
def test_materialize_database_dml_with_mysql_8_0(started_cluster, started_mysql_8_0):
materialize_with_ddl.dml_with_materialize_mysql_database(clickhouse_node, started_mysql_8_0, "mysql8_0")
materialize_with_ddl.materialize_mysql_database_with_datetime_and_decimal(clickhouse_node, started_mysql_8_0, "mysql8_0")
def test_materialize_database_ddl_with_mysql_5_7(started_cluster, started_mysql_5_7):
try:
materialize_with_ddl.drop_table_with_materialize_mysql_database(clickhouse_node, started_mysql_5_7, "mysql1")
materialize_with_ddl.create_table_with_materialize_mysql_database(clickhouse_node, started_mysql_5_7, "mysql1")
materialize_with_ddl.rename_table_with_materialize_mysql_database(clickhouse_node, started_mysql_5_7, "mysql1")
materialize_with_ddl.alter_add_column_with_materialize_mysql_database(clickhouse_node, started_mysql_5_7,
"mysql1")
materialize_with_ddl.alter_drop_column_with_materialize_mysql_database(clickhouse_node, started_mysql_5_7,
"mysql1")
# mysql 5.7 cannot support alter rename column
# materialize_with_ddl.alter_rename_column_with_materialize_mysql_database(clickhouse_node, started_mysql_5_7, "mysql1")
materialize_with_ddl.alter_rename_table_with_materialize_mysql_database(clickhouse_node, started_mysql_5_7,
"mysql1")
materialize_with_ddl.alter_modify_column_with_materialize_mysql_database(clickhouse_node, started_mysql_5_7,
"mysql1")
except:
print((clickhouse_node.query(
"select '\n', thread_id, query_id, arrayStringConcat(arrayMap(x -> concat(demangle(addressToSymbol(x)), '\n ', addressToLine(x)), trace), '\n') AS sym from system.stack_trace format TSVRaw")))
raise
def test_materialize_database_ddl_with_mysql_8_0(started_cluster, started_mysql_8_0):
materialize_with_ddl.drop_table_with_materialize_mysql_database(clickhouse_node, started_mysql_8_0, "mysql8_0")
materialize_with_ddl.create_table_with_materialize_mysql_database(clickhouse_node, started_mysql_8_0, "mysql8_0")
materialize_with_ddl.rename_table_with_materialize_mysql_database(clickhouse_node, started_mysql_8_0, "mysql8_0")
materialize_with_ddl.alter_add_column_with_materialize_mysql_database(clickhouse_node, started_mysql_8_0,
"mysql8_0")
materialize_with_ddl.alter_drop_column_with_materialize_mysql_database(clickhouse_node, started_mysql_8_0,
"mysql8_0")
materialize_with_ddl.alter_rename_table_with_materialize_mysql_database(clickhouse_node, started_mysql_8_0,
"mysql8_0")
materialize_with_ddl.alter_rename_column_with_materialize_mysql_database(clickhouse_node, started_mysql_8_0,
"mysql8_0")
materialize_with_ddl.alter_modify_column_with_materialize_mysql_database(clickhouse_node, started_mysql_8_0,
"mysql8_0")
def test_materialize_database_ddl_with_empty_transaction_5_7(started_cluster, started_mysql_5_7):
materialize_with_ddl.query_event_with_empty_transaction(clickhouse_node, started_mysql_5_7, "mysql1")
def test_materialize_database_ddl_with_empty_transaction_8_0(started_cluster, started_mysql_8_0):
materialize_with_ddl.query_event_with_empty_transaction(clickhouse_node, started_mysql_8_0, "mysql8_0")
def test_select_without_columns_5_7(started_cluster, started_mysql_5_7):
materialize_with_ddl.select_without_columns(clickhouse_node, started_mysql_5_7, "mysql1")
def test_select_without_columns_8_0(started_cluster, started_mysql_8_0):
materialize_with_ddl.select_without_columns(clickhouse_node, started_mysql_8_0, "mysql8_0")
def test_insert_with_modify_binlog_checksum_5_7(started_cluster, started_mysql_5_7):
materialize_with_ddl.insert_with_modify_binlog_checksum(clickhouse_node, started_mysql_5_7, "mysql1")
def test_insert_with_modify_binlog_checksum_8_0(started_cluster, started_mysql_8_0):
materialize_with_ddl.insert_with_modify_binlog_checksum(clickhouse_node, started_mysql_8_0, "mysql8_0")
def test_materialize_database_err_sync_user_privs_5_7(started_cluster, started_mysql_5_7):
materialize_with_ddl.err_sync_user_privs_with_materialize_mysql_database(clickhouse_node, started_mysql_5_7, "mysql1")
def test_materialize_database_err_sync_user_privs_8_0(started_cluster, started_mysql_8_0):
materialize_with_ddl.err_sync_user_privs_with_materialize_mysql_database(clickhouse_node, started_mysql_8_0, "mysql8_0")
def test_network_partition_5_7(started_cluster, started_mysql_5_7):
materialize_with_ddl.network_partition_test(clickhouse_node, started_mysql_5_7, "mysql1")
def test_network_partition_8_0(started_cluster, started_mysql_8_0):
materialize_with_ddl.network_partition_test(clickhouse_node, started_mysql_8_0, "mysql8_0")
def test_mysql_kill_sync_thread_restore_5_7(started_cluster, started_mysql_5_7):
materialize_with_ddl.mysql_kill_sync_thread_restore_test(clickhouse_node, started_mysql_5_7, "mysql1")
def test_mysql_kill_sync_thread_restore_8_0(started_cluster, started_mysql_8_0):
materialize_with_ddl.mysql_kill_sync_thread_restore_test(clickhouse_node, started_mysql_8_0, "mysql8_0")
def test_mysql_killed_while_insert_5_7(started_cluster, started_mysql_5_7):
materialize_with_ddl.mysql_killed_while_insert(clickhouse_node, started_mysql_5_7, "mysql1")
def test_mysql_killed_while_insert_8_0(started_cluster, started_mysql_8_0):
materialize_with_ddl.mysql_killed_while_insert(clickhouse_node, started_mysql_8_0, "mysql8_0")
def test_clickhouse_killed_while_insert_5_7(started_cluster, started_mysql_5_7):
materialize_with_ddl.clickhouse_killed_while_insert(clickhouse_node, started_mysql_5_7, "mysql1")
def test_clickhouse_killed_while_insert_8_0(started_cluster, started_mysql_8_0):
materialize_with_ddl.clickhouse_killed_while_insert(clickhouse_node, started_mysql_8_0, "mysql8_0")
```
avg_line_length: 50.220264 | max_line_length: 207 | alphanum_fraction: 0.719298
remaining columns (qsc_code_num_words_quality_signal onward, in schema order):
| 1,463
| 11,400
| 5.094327
| 0.124402
| 0.093385
| 0.08936
| 0.125587
| 0.78143
| 0.763451
| 0.74896
| 0.716624
| 0.707098
| 0.703073
| 0
| 0.028268
| 0.199386
| 11,400
| 226
| 208
| 50.442478
| 0.78832
| 0.015175
| 0
| 0.245509
| 0
| 0.005988
| 0.092934
| 0.014613
| 0
| 0
| 0
| 0
| 0
| 1
| 0.191617
| false
| 0.035928
| 0.065868
| 0
| 0.281437
| 0.023952
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
effective: 0 | hits: 5

hexsha: ed50d70748e98ace5f21f18ef65fcce0b964334f | size: 123,249 | ext: py | lang: Python
max_stars_repo: rioarya/Land-Conversion_Woody-Biomass-Utilization-Scenarios (path: 2. Existing Conversion Efficiency (EC), Appendix A/DL_FP_EC.py, head: 0042fd4333212e65735f3643ecb59971d1bd9466, licenses: ["MIT"], stars: null, events: null to null)
max_issues_repo: rioarya/Land-Conversion_Woody-Biomass-Utilization-Scenarios (path: 2. Existing Conversion Efficiency (EC), Appendix A/DL_FP_EC.py, head: 0042fd4333212e65735f3643ecb59971d1bd9466, licenses: ["MIT"], issues: null, events: null to null)
max_forks_repo: rioarya/Land-Conversion_Woody-Biomass-Utilization-Scenarios (path: 2. Existing Conversion Efficiency (EC), Appendix A/DL_FP_EC.py, head: 0042fd4333212e65735f3643ecb59971d1bd9466, licenses: ["MIT"], forks: null, events: null to null)
content:
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 13 15:21:55 2019
@author: raryapratama
"""
#%%
#Step (1): Import Python libraries, set land conversion scenarios general parameters
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import quad
import seaborn as sns
import pandas as pd
#DL_FP_S1 Scenario
##Set parameters
#Parameters for primary forest
initAGB = 233 #source: van Beijma et al. (2018)
initAGB_min = 233-72
initAGB_max = 233 + 72
#parameters for timber plantation. Source: Khasanah et al. (2015)
tf = 201
a = 0.082
b = 2.53
#%%
#Step (2_1): C loss from the harvesting/clear cut
df1_Ac7 = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S1_Ac_7y')
df1_Ac18 = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S1_Ac_18y')
df1_Tgr40 = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S1_Tgr_40y')
df1_Tgr60 = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S1_Tgr_60y')
dfE_Hbr40 = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_E_Hbr_40y')
t = range(0,tf,1)
c_firewood_energy_S1_Ac7 = df1_Ac7['Firewood_other_energy_use'].values
c_firewood_energy_S1_Ac18 = df1_Ac18['Firewood_other_energy_use'].values
c_firewood_energy_S1_Tgr40 = df1_Tgr40['Firewood_other_energy_use'].values
c_firewood_energy_S1_Tgr60 = df1_Tgr60['Firewood_other_energy_use'].values
c_firewood_energy_E_Hbr40 = dfE_Hbr40['Firewood_other_energy_use'].values
#%%
#Step (2_2): C loss from the harvesting/clear cut as wood pellets
dfE = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_E_Hbr_40y')
c_pellets_Hbr_40y = dfE['Wood_pellets'].values
#%%
#Step (3): Aboveground biomass (AGB) decomposition
#Ac_7y
df = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S1_Ac_7y')
tf = 201
t = np.arange(tf)
def decomp_S1_Ac_7y(t,remainAGB_S1_Ac_7y):
return (1-(1-np.exp(-a*t))**b)*remainAGB_S1_Ac_7y
#set zero matrix
output_decomp_S1_Ac_7y = np.zeros((len(t),len(df['C_remainAGB'].values)))
for i,remain_part_S1_Ac_7y in enumerate(df['C_remainAGB'].values):
#print(i,remain_part)
output_decomp_S1_Ac_7y[i:,i] = decomp_S1_Ac_7y(t[:len(t)-i],remain_part_S1_Ac_7y)
print(output_decomp_S1_Ac_7y[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_S1_Ac_7y = np.zeros((len(t)-1,len(df['C_remainAGB'].values-1)))
i = 0
while i < tf:
subs_matrix_S1_Ac_7y[:,i] = np.diff(output_decomp_S1_Ac_7y[:,i])
i = i + 1
print(subs_matrix_S1_Ac_7y[:,:4])
print(len(subs_matrix_S1_Ac_7y))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_S1_Ac_7y = subs_matrix_S1_Ac_7y.clip(max=0)
print(subs_matrix_S1_Ac_7y[:,:4])
#make the results as absolute values
subs_matrix_S1_Ac_7y = abs(subs_matrix_S1_Ac_7y)
print(subs_matrix_S1_Ac_7y[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_S1_Ac_7y = np.zeros((len(t)-200,len(df['C_remainAGB'].values)))
print(zero_matrix_S1_Ac_7y)
subs_matrix_S1_Ac_7y = np.vstack((zero_matrix_S1_Ac_7y, subs_matrix_S1_Ac_7y))
print(subs_matrix_S1_Ac_7y[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_S1_Ac_7y = (tf,1)
decomp_tot_S1_Ac_7y = np.zeros(matrix_tot_S1_Ac_7y)
i = 0
while i < tf:
    decomp_tot_S1_Ac_7y[:,0] = decomp_tot_S1_Ac_7y[:,0] + subs_matrix_S1_Ac_7y[:,i]
    i = i + 1
print(decomp_tot_S1_Ac_7y[:,0])
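#Note (sketch): the cohort-wise decomposition above is repeated verbatim for every land-use case in this step.
#A minimal, hypothetical helper capturing the same operations (per-cohort decay curve, year-on-year difference,
#clipping of positive differences, zero first row, sum over cohorts) is sketched below for reference only;
#the name 'yearly_decomp_flux' is an assumption and the helper is not called by the original calculation.
def yearly_decomp_flux(remain_AGB, a, b, tf):
    t_local = np.arange(tf)
    out = np.zeros((tf, len(remain_AGB)))
    for i, part in enumerate(remain_AGB):
        #remaining carbon of the cohort released in year i
        out[i:, i] = (1 - (1 - np.exp(-a * t_local[:tf - i]))**b) * part
    #year-on-year change; keep only the losses (negative differences) as positive emission values
    subs = np.abs(np.diff(out, axis=0).clip(max=0))
    #prepend a zero row so that year 0 carries no decomposition emission
    subs = np.vstack((np.zeros((1, len(remain_AGB))), subs))
    #sum over cohorts to obtain the total yearly decomposition flux
    return subs.sum(axis=1)
#e.g. (hypothetical check): yearly_decomp_flux(df['C_remainAGB'].values, a, b, tf) should reproduce decomp_tot_S1_Ac_7y[:,0]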
#S1_Ac_18y
df = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S1_Ac_18y')
tf = 201
t = np.arange(tf)
def decomp_S1_Ac_18y(t,remainAGB_S1_Ac_18y):
    return (1-(1-np.exp(-a*t))**b)*remainAGB_S1_Ac_18y
#set zero matrix
output_decomp_S1_Ac_18y = np.zeros((len(t),len(df['C_remainAGB'].values)))
for i,remain_part_S1_Ac_18y in enumerate(df['C_remainAGB'].values):
    #print(i,remain_part)
    output_decomp_S1_Ac_18y[i:,i] = decomp_S1_Ac_18y(t[:len(t)-i],remain_part_S1_Ac_18y)
print(output_decomp_S1_Ac_18y[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_S1_Ac_18y = np.zeros((len(t)-1,len(df['C_remainAGB'].values-1)))
i = 0
while i < tf:
    subs_matrix_S1_Ac_18y[:,i] = np.diff(output_decomp_S1_Ac_18y[:,i])
    i = i + 1
print(subs_matrix_S1_Ac_18y[:,:4])
print(len(subs_matrix_S1_Ac_18y))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_S1_Ac_18y = subs_matrix_S1_Ac_18y.clip(max=0)
print(subs_matrix_S1_Ac_18y[:,:4])
#make the results as absolute values
subs_matrix_S1_Ac_18y = abs(subs_matrix_S1_Ac_18y)
print(subs_matrix_S1_Ac_18y[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_S1_Ac_18y = np.zeros((len(t)-200,len(df['C_remainAGB'].values)))
print(zero_matrix_S1_Ac_18y)
subs_matrix_S1_Ac_18y = np.vstack((zero_matrix_S1_Ac_18y, subs_matrix_S1_Ac_18y))
print(subs_matrix_S1_Ac_18y[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_S1_Ac_18y = (tf,1)
decomp_tot_S1_Ac_18y = np.zeros(matrix_tot_S1_Ac_18y)
i = 0
while i < tf:
    decomp_tot_S1_Ac_18y[:,0] = decomp_tot_S1_Ac_18y[:,0] + subs_matrix_S1_Ac_18y[:,i]
    i = i + 1
print(decomp_tot_S1_Ac_18y[:,0])
#S1_Tgr_40y
df = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S1_Tgr_40y')
tf = 201
t = np.arange(tf)
def decomp_S1_Tgr_40y(t,remainAGB_S1_Tgr_40y):
    return (1-(1-np.exp(-a*t))**b)*remainAGB_S1_Tgr_40y
#set zero matrix
output_decomp_S1_Tgr_40y = np.zeros((len(t),len(df['C_remainAGB'].values)))
for i,remain_part_S1_Tgr_40y in enumerate(df['C_remainAGB'].values):
    #print(i,remain_part)
    output_decomp_S1_Tgr_40y[i:,i] = decomp_S1_Tgr_40y(t[:len(t)-i],remain_part_S1_Tgr_40y)
print(output_decomp_S1_Tgr_40y[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_S1_Tgr_40y = np.zeros((len(t)-1,len(df['C_remainAGB'].values-1)))
i = 0
while i < tf:
    subs_matrix_S1_Tgr_40y[:,i] = np.diff(output_decomp_S1_Tgr_40y[:,i])
    i = i + 1
print(subs_matrix_S1_Tgr_40y[:,:4])
print(len(subs_matrix_S1_Tgr_40y))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_S1_Tgr_40y = subs_matrix_S1_Tgr_40y.clip(max=0)
print(subs_matrix_S1_Tgr_40y[:,:4])
#make the results as absolute values
subs_matrix_S1_Tgr_40y = abs(subs_matrix_S1_Tgr_40y)
print(subs_matrix_S1_Tgr_40y[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_S1_Tgr_40y = np.zeros((len(t)-200,len(df['C_remainAGB'].values)))
print(zero_matrix_S1_Tgr_40y)
subs_matrix_S1_Tgr_40y = np.vstack((zero_matrix_S1_Tgr_40y, subs_matrix_S1_Tgr_40y))
print(subs_matrix_S1_Tgr_40y[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_S1_Tgr_40y = (tf,1)
decomp_tot_S1_Tgr_40y = np.zeros(matrix_tot_S1_Tgr_40y)
i = 0
while i < tf:
    decomp_tot_S1_Tgr_40y[:,0] = decomp_tot_S1_Tgr_40y[:,0] + subs_matrix_S1_Tgr_40y[:,i]
    i = i + 1
print(decomp_tot_S1_Tgr_40y[:,0])
#S1_Tgr_60y
df = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S1_Tgr_60y')
tf = 201
t = np.arange(tf)
def decomp_S1_Tgr_60y(t,remainAGB_S1_Tgr_60y):
    return (1-(1-np.exp(-a*t))**b)*remainAGB_S1_Tgr_60y
#set zero matrix
output_decomp_S1_Tgr_60y = np.zeros((len(t),len(df['C_remainAGB'].values)))
for i,remain_part_S1_Tgr_60y in enumerate(df['C_remainAGB'].values):
    #print(i,remain_part)
    output_decomp_S1_Tgr_60y[i:,i] = decomp_S1_Tgr_60y(t[:len(t)-i],remain_part_S1_Tgr_60y)
print(output_decomp_S1_Tgr_60y[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_S1_Tgr_60y = np.zeros((len(t)-1,len(df['C_remainAGB'].values-1)))
i = 0
while i < tf:
    subs_matrix_S1_Tgr_60y[:,i] = np.diff(output_decomp_S1_Tgr_60y[:,i])
    i = i + 1
print(subs_matrix_S1_Tgr_60y[:,:4])
print(len(subs_matrix_S1_Tgr_60y))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_S1_Tgr_60y = subs_matrix_S1_Tgr_60y.clip(max=0)
print(subs_matrix_S1_Tgr_60y[:,:4])
#make the results as absolute values
subs_matrix_S1_Tgr_60y = abs(subs_matrix_S1_Tgr_60y)
print(subs_matrix_S1_Tgr_60y[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_S1_Tgr_60y = np.zeros((len(t)-200,len(df['C_remainAGB'].values)))
print(zero_matrix_S1_Tgr_60y)
subs_matrix_S1_Tgr_60y = np.vstack((zero_matrix_S1_Tgr_60y, subs_matrix_S1_Tgr_60y))
print(subs_matrix_S1_Tgr_60y[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_S1_Tgr_60y = (tf,1)
decomp_tot_S1_Tgr_60y = np.zeros(matrix_tot_S1_Tgr_60y)
i = 0
while i < tf:
    decomp_tot_S1_Tgr_60y[:,0] = decomp_tot_S1_Tgr_60y[:,0] + subs_matrix_S1_Tgr_60y[:,i]
    i = i + 1
print(decomp_tot_S1_Tgr_60y[:,0])
#E
df = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_E_Hbr_40y')
tf = 201
t = np.arange(tf)
def decomp_E_Hbr_40y(t,remainAGB_E_Hbr_40y):
    return (1-(1-np.exp(-a*t))**b)*remainAGB_E_Hbr_40y
#set zero matrix
output_decomp_E_Hbr_40y = np.zeros((len(t),len(df['C_remainAGB'].values)))
for i,remain_part_E_Hbr_40y in enumerate(df['C_remainAGB'].values):
    #print(i,remain_part)
    output_decomp_E_Hbr_40y[i:,i] = decomp_E_Hbr_40y(t[:len(t)-i],remain_part_E_Hbr_40y)
print(output_decomp_E_Hbr_40y[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_E_Hbr_40y = np.zeros((len(t)-1,len(df['C_remainAGB'].values-1)))
i = 0
while i < tf:
    subs_matrix_E_Hbr_40y[:,i] = np.diff(output_decomp_E_Hbr_40y[:,i])
    i = i + 1
print(subs_matrix_E_Hbr_40y[:,:4])
print(len(subs_matrix_E_Hbr_40y))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_E_Hbr_40y = subs_matrix_E_Hbr_40y.clip(max=0)
print(subs_matrix_E_Hbr_40y[:,:4])
#make the results as absolute values
subs_matrix_E_Hbr_40y = abs(subs_matrix_E_Hbr_40y)
print(subs_matrix_E_Hbr_40y[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_E_Hbr_40y = np.zeros((len(t)-200,len(df['C_remainAGB'].values)))
print(zero_matrix_E_Hbr_40y)
subs_matrix_E_Hbr_40y = np.vstack((zero_matrix_E_Hbr_40y, subs_matrix_E_Hbr_40y))
print(subs_matrix_E_Hbr_40y[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_E_Hbr_40y = (tf,1)
decomp_tot_E_Hbr_40y = np.zeros(matrix_tot_E_Hbr_40y)
i = 0
while i < tf:
    decomp_tot_E_Hbr_40y[:,0] = decomp_tot_E_Hbr_40y[:,0] + subs_matrix_E_Hbr_40y[:,i]
    i = i + 1
print(decomp_tot_E_Hbr_40y[:,0])
#plotting
t = np.arange(0,tf)
plt.plot(t,decomp_tot_S1_Ac_7y,label='Ac_7y')
plt.plot(t,decomp_tot_S1_Ac_18y,label='Ac_18y')
plt.plot(t,decomp_tot_S1_Tgr_40y,label='Tgr_40y')
plt.plot(t,decomp_tot_S1_Tgr_60y,label='Tgr_60y')
plt.plot(t,decomp_tot_E_Hbr_40y,label='E_Hbr_40y')
plt.xlim(0,200)
plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
plt.show()
#%%
#Step (4): Dynamic stock model of in-use wood materials
from dynamic_stock_model import DynamicStockModel
df1_Ac7 = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S1_Ac_7y')
df1_Ac18 = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S1_Ac_18y')
df1_Tgr40 = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S1_Tgr_40y')
df1_Tgr60 = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S1_Tgr_60y')
dfE_Hbr40 = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_E_Hbr_40y')
#product lifetime
#paper
P = 4
#furniture
F = 20
#building materials
B = 35
TestDSM1_Ac7 = DynamicStockModel(t = df1_Ac7['Year'].values, i = df1_Ac7['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([P]), 'StdDev': np.array([0.3*P])})
TestDSM1_Ac18 = DynamicStockModel(t = df1_Ac18['Year'].values, i = df1_Ac18['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([F]), 'StdDev': np.array([0.3*F])})
TestDSM1_Tgr40 = DynamicStockModel(t = df1_Tgr40['Year'].values, i = df1_Tgr40['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
TestDSM1_Tgr60 = DynamicStockModel(t = df1_Tgr60['Year'].values, i = df1_Tgr60['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
TestDSME_Hbr40 = DynamicStockModel(t = dfE_Hbr40['Year'].values, i = dfE_Hbr40['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
CheckStr1_Ac7, ExitFlag1_Ac7 = TestDSM1_Ac7.dimension_check()
CheckStr1_Ac18, ExitFlag1_Ac18 = TestDSM1_Ac18.dimension_check()
CheckStr1_Tgr40, ExitFlag1_Tgr40 = TestDSM1_Tgr40.dimension_check()
CheckStr1_Tgr60, ExitFlag1_Tgr60 = TestDSM1_Tgr60.dimension_check()
CheckStrE_Hbr40, ExitFlagE_Hbr40 = TestDSME_Hbr40.dimension_check()
Stock_by_cohort1_Ac7, ExitFlag1_Ac7 = TestDSM1_Ac7.compute_s_c_inflow_driven()
Stock_by_cohort1_Ac18, ExitFlag1_Ac18 = TestDSM1_Ac18.compute_s_c_inflow_driven()
Stock_by_cohort1_Tgr40, ExitFlag1_Tgr40 = TestDSM1_Tgr40.compute_s_c_inflow_driven()
Stock_by_cohort1_Tgr60, ExitFlag1_Tgr60 = TestDSM1_Tgr60.compute_s_c_inflow_driven()
Stock_by_cohortE_Hbr40, ExitFlagE_Hbr40 = TestDSME_Hbr40.compute_s_c_inflow_driven()
S1_Ac7, ExitFlag1_Ac7 = TestDSM1_Ac7.compute_stock_total()
S1_Ac18, ExitFlag1_Ac18 = TestDSM1_Ac18.compute_stock_total()
S1_Tgr40, ExitFlag1_Tgr40 = TestDSM1_Tgr40.compute_stock_total()
S1_Tgr60, ExitFlag1_Tgr60 = TestDSM1_Tgr60.compute_stock_total()
SE_Hbr40, ExitFlagE_Hbr40 = TestDSME_Hbr40.compute_stock_total()
O_C1_Ac7, ExitFlag1_Ac7 = TestDSM1_Ac7.compute_o_c_from_s_c()
O_C1_Ac18, ExitFlag1_Ac18 = TestDSM1_Ac18.compute_o_c_from_s_c()
O_C1_Tgr40, ExitFlag1_Tgr40 = TestDSM1_Tgr40.compute_o_c_from_s_c()
O_C1_Tgr60, ExitFlag1_Tgr60 = TestDSM1_Tgr60.compute_o_c_from_s_c()
O_CE_Hbr40, ExitFlagE_Hbr40 = TestDSME_Hbr40.compute_o_c_from_s_c()
O1_Ac7, ExitFlag1_Ac7 = TestDSM1_Ac7.compute_outflow_total()
O1_Ac18, ExitFlag1_Ac18 = TestDSM1_Ac18.compute_outflow_total()
O1_Tgr40, ExitFlag1_Tgr40 = TestDSM1_Tgr40.compute_outflow_total()
O1_Tgr60, ExitFlag1_Tgr60 = TestDSM1_Tgr60.compute_outflow_total()
OE_Hbr40, ExitFlagE_Hbr40 = TestDSME_Hbr40.compute_outflow_total()
DS1_Ac7, ExitFlag1_Ac7 = TestDSM1_Ac7.compute_stock_change()
DS1_Ac18, ExitFlag1_Ac18 = TestDSM1_Ac18.compute_stock_change()
DS1_Tgr40, ExitFlag1_Tgr40 = TestDSM1_Tgr40.compute_stock_change()
DS1_Tgr60, ExitFlag1_Tgr60 = TestDSM1_Tgr60.compute_stock_change()
DSE_Hbr40, ExitFlagE_Hbr40 = TestDSME_Hbr40.compute_stock_change()
Bal1_Ac7, ExitFlag1_Ac7 = TestDSM1_Ac7.check_stock_balance()
Bal1_Ac18, ExitFlag1_Ac18 = TestDSM1_Ac18.check_stock_balance()
Bal1_Tgr40, ExitFlag1_Tgr40 = TestDSM1_Tgr40.check_stock_balance()
Bal1_Tgr60, ExitFlag1_Tgr60 = TestDSM1_Tgr60.check_stock_balance()
BalE_Hbr40, ExitFlagE_Hbr40 = TestDSME_Hbr40.check_stock_balance()
#print output flow
print(TestDSM1_Ac7.o)
print(TestDSM1_Ac18.o)
print(TestDSM1_Tgr40.o)
print(TestDSM1_Tgr60.o)
print(TestDSME_Hbr40.o)
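#Note (sketch): for readability, the in-use stock outflows computed above could be collected in one mapping
#keyed by scenario; the dictionary name 'dsm_outflow' is hypothetical and is not used by the original script.
dsm_outflow = {
    'S1_Ac_7y':   TestDSM1_Ac7.o,
    'S1_Ac_18y':  TestDSM1_Ac18.o,
    'S1_Tgr_40y': TestDSM1_Tgr40.o,
    'S1_Tgr_60y': TestDSM1_Tgr60.o,
    'E_Hbr_40y':  TestDSME_Hbr40.o,
}
for name, outflow in dsm_outflow.items():
    #total outflow over the whole time frame, as a quick sanity check
    print(name, np.asarray(outflow).sum())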
#%%
#Step (5): Biomass growth
## one-year gap between rotation cycle
# A. crassicarpa (Source: Anitha et al., 2015; Adiriono, 2009). Code: Ac
tf_Ac_7y = 8
tf_Ac_18y = 19
A1 = range(1,tf_Ac_7y,1)
A2 = range(1,tf_Ac_18y,1)
#calculate the biomass and carbon content of A. crassicarpa over time (7y)
def Y_Ac_7y(A1):
    return 44/12*1000*np.exp(4.503-(2.559/A1))
output_Y_Ac_7y = np.array([Y_Ac_7y(A1i) for A1i in A1])
print(output_Y_Ac_7y)
#insert 0 value to the first element of the output result
output_Y_Ac_7y = np.insert(output_Y_Ac_7y,0,0)
print(output_Y_Ac_7y)
#calculate the biomass and carbon content of A. crassicarpa over time (18y)
def Y_Ac_18y(A2):
    return 44/12*1000*np.exp(4.503-(2.559/A2))
output_Y_Ac_18y = np.array([Y_Ac_18y(A2i) for A2i in A2])
print(output_Y_Ac_18y)
#insert 0 value to the first element of the output result
output_Y_Ac_18y = np.insert(output_Y_Ac_18y,0,0)
print(output_Y_Ac_18y)
##26 times 8-year cycle (7-year rotation + 1-year gap after the FP harvest) of new AGB of A. crassicarpa (7y); no additional gap between cycles
counter_7y = range(0,26,1)
y_Ac_7y = []
for i in counter_7y:
    y_Ac_7y.append(output_Y_Ac_7y)
flat_list_Ac_7y = []
for sublist in y_Ac_7y:
    for item in sublist:
        flat_list_Ac_7y.append(item)
#the length of the list is now 208, so we remove the last 7 elements of the list to make the len=tf
flat_list_Ac_7y = flat_list_Ac_7y[:len(flat_list_Ac_7y)-7]
print(len(flat_list_Ac_7y))
##11 times 19-year cycle (+1 year gap after the FP harvest) of new AGB of A. crassicarpa (18y), zero year gap between the cycle
counter_18y = range(0,11,1)
y_Ac_18y = []
for i in counter_18y:
    y_Ac_18y.append(output_Y_Ac_18y)
flat_list_Ac_18y = []
for sublist in y_Ac_18y:
    for item in sublist:
        flat_list_Ac_18y.append(item)
#the length of the list is now 209, so we remove the last 8 elements of the list to make the len=tf
flat_list_Ac_18y = flat_list_Ac_18y[:len(flat_list_Ac_18y)-8]
#####Check the flat list length for Hbr
## T. grandis (Source: Anitha et al., 2015; Adiriono, 2009). Code: Tgr
tf_Tgr_40y = 41
tf_Tgr_60y = 61
T1 = range(0,tf_Tgr_40y,1)
T2 = range(0,tf_Tgr_60y,1)
#calculate the biomass and carbon content of T. grandis over time (40y)
def Y_Tgr_40y(T1):
    return 44/12*1000*2.114*(T1**0.941)
output_Y_Tgr_40y = np.array([Y_Tgr_40y(T1i) for T1i in T1])
print(output_Y_Tgr_40y)
#calculate the biomass and carbon content of T. grandis over time (60y)
def Y_Tgr_60y(T2):
    return 44/12*1000*2.114*(T2**0.941)
output_Y_Tgr_60y = np.array([Y_Tgr_60y(T2i) for T2i in T2])
print(output_Y_Tgr_60y)
##5 times 41-year cycle of new AGB of T. grandis (40y), zero year gap between the cycle
counter_40y = range(0,5,1)
y_Tgr_40y = []
for i in counter_40y:
    y_Tgr_40y.append(output_Y_Tgr_40y)
flat_list_Tgr_40y = []
for sublist in y_Tgr_40y:
    for item in sublist:
        flat_list_Tgr_40y.append(item)
#the length of the list is now 205, so we remove the last 4 elements of the list to make the len=tf
flat_list_Tgr_40y = flat_list_Tgr_40y[:len(flat_list_Tgr_40y)-4]
##4 times 60-year cycle of new AGB of T. grandis (60y), zero year gap between the cycle
counter_60y = range(0,4,1)
y_Tgr_60y = []
for i in counter_60y:
    y_Tgr_60y.append(output_Y_Tgr_60y)
flat_list_Tgr_60y = []
for sublist in y_Tgr_60y:
    for item in sublist:
        flat_list_Tgr_60y.append(item)
#the length of the list is now 244, so we remove the last 43 elements of the list to make the len=tf
flat_list_Tgr_60y = flat_list_Tgr_60y[:len(flat_list_Tgr_60y)-43]
## H. brasiliensis (Source: Guillaume et al., 2018). Code: Hbr
tf_Hbr_40y = 41
H1 = range(0,tf_Hbr_40y,1)
#calculate the biomass and carbon content of H. brasiliensis over time (40y)
def Y_Hbr_40y(H1):
    return 44/12*1000*1.55*H1
output_Y_Hbr_40y = np.array([Y_Hbr_40y(H1i) for H1i in H1])
print(output_Y_Hbr_40y)
##5 times 40-year cycle of new AGB of H. brasiliensis (40y), zero year gap between the cycle
counter_40y = range(0,5,1)
y_Hbr_40y = []
for i in counter_40y:
    y_Hbr_40y.append(output_Y_Hbr_40y)
flat_list_Hbr_40y = []
for sublist in y_Hbr_40y:
    for item in sublist:
        flat_list_Hbr_40y.append(item)
#the length of the list is now 205, so we remove the last 4 elements of the list to make the len=tf
flat_list_Hbr_40y = flat_list_Hbr_40y[:len(flat_list_Hbr_40y)-4]
#plotting
t = range (0,tf,1)
plt.xlim([0, 200])
plt.plot(t, flat_list_Ac_7y, color='lightcoral')
plt.plot(t, flat_list_Ac_18y, color='deeppink')
plt.plot(t, flat_list_Hbr_40y, color='darkviolet')
plt.plot(t, flat_list_Tgr_40y)
plt.plot(t, flat_list_Tgr_60y, color='seagreen')
plt.xlabel('Time (year)')
plt.ylabel('AGB (tC/ha)')
plt.show()
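#Note (sketch): the repeat-and-trim pattern above (tile one rotation's cumulative AGB curve until it covers
#the 201-year time frame, then cut the surplus years) can be written compactly with numpy. The helper name
#'tile_growth_curve' is hypothetical; it is shown for reference only and is not used by the original script.
def tile_growth_curve(one_cycle, tf):
    n_cycles = int(np.ceil(tf / len(one_cycle)))    #number of rotations needed to cover tf years
    return np.tile(one_cycle, n_cycles)[:tf]        #concatenate the rotations and trim to exactly tf elements
#e.g. (hypothetical check): tile_growth_curve(output_Y_Ac_7y, 201) should match flat_list_Ac_7y at this point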
##Yearly sequestration
## A. crassicarpa (7y)
#find the yearly sequestration by calculating the differences between elements in list 'flat_list_Ac_7y(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
flat_list_Ac_7y = [p - q for q, p in zip(flat_list_Ac_7y, flat_list_Ac_7y[1:])]
#since there is no sequestration between the replanting year (e.g., year 7 to 8), we have to replace negative numbers in 'flat_list_Ac_7y' with 0 values
flat_list_Ac_7y = [0 if i < 0 else i for i in flat_list_Ac_7y]
#insert 0 value to the list as the first element, because there is no sequestration in year 0
var = 0
flat_list_Ac_7y.insert(0,var)
#make 'flat_list_Ac_7y' elements negative numbers to denote sequestration
flat_list_Ac_7y = [ -x for x in flat_list_Ac_7y]
print(flat_list_Ac_7y)
##A. crassicarpa (18y)
#find the yearly sequestration by calculating the differences between elements in list 'flat_list_Ac_18y(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
flat_list_Ac_18y = [t - u for u, t in zip(flat_list_Ac_18y, flat_list_Ac_18y[1:])]
#since there is no sequestration between the replanting years (e.g., year 18 to 19), we have to replace negative numbers in 'flat_list_Ac_18y' with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
flat_list_Ac_18y = [0 if i < 0 else i for i in flat_list_Ac_18y]
#insert 0 value to the list as the first element, because there is no sequestration in year 0
var = 0
flat_list_Ac_18y.insert(0,var)
#make 'flat_list_Ac_18y' elements negative numbers to denote sequestration
flat_list_Ac_18y = [ -x for x in flat_list_Ac_18y]
print(flat_list_Ac_18y)
##T. grandis (40y)
#find the yearly sequestration by calculating the differences between elements in list 'flat_list_Tgr_40y(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
flat_list_Tgr_40y = [b - c for c, b in zip(flat_list_Tgr_40y, flat_list_Tgr_40y[1:])]
#since there is no sequestration between the replanting year (e.g., year 40 to 41), we have to replace negative numbers in 'flat_list_Tgr_40y' with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
flat_list_Tgr_40y = [0 if i < 0 else i for i in flat_list_Tgr_40y]
#insert 0 value to the list as the first element, because there is no sequestration in year 0
var = 0
flat_list_Tgr_40y.insert(0,var)
#make 'flat_list_Tgr_40y' elements negative numbers to denote sequestration
flat_list_Tgr_40y = [-x for x in flat_list_Tgr_40y]
print(flat_list_Tgr_40y)
##T. grandis (60y)
#find the yearly sequestration by calculating the differences between elements in list 'flat_list_Tgr_60y(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
flat_list_Tgr_60y = [k - l for l, k in zip(flat_list_Tgr_60y, flat_list_Tgr_60y[1:])]
#since there is no sequestration between the replanting years (e.g., year 60 to 61), we have to replace negative numbers in 'flat_list_Tgr_60y' with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
flat_list_Tgr_60y = [0 if i < 0 else i for i in flat_list_Tgr_60y]
#insert 0 value to the list as the first element, because there is no sequestration in year 0
var = 0
flat_list_Tgr_60y.insert(0,var)
#make 'flat_list_Tgr_60y' elements negative numbers to denote sequestration
flat_list_Tgr_60y = [ -x for x in flat_list_Tgr_60y]
print(flat_list_Tgr_60y)
##H. brasiliensis (40y)
#find the yearly sequestration by calculating the differences between elements in list 'flat_list_Hbr_40y(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
flat_list_Hbr_40y = [c - d for d, c in zip(flat_list_Hbr_40y, flat_list_Hbr_40y[1:])]
#since there is no sequestration between the replanting years (e.g., year 40 to 41), we have to replace negative numbers in 'flat_list_Hbr_40y' with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
flat_list_Hbr_40y = [0 if i < 0 else i for i in flat_list_Hbr_40y]
#insert 0 value to the list as the first element, because there is no sequestration in year 0
var = 0
flat_list_Hbr_40y.insert(0,var)
#make 'flat_list_Hbr_40y' elements negative numbers to denote sequestration
flat_list_Hbr_40y = [ -x for x in flat_list_Hbr_40y]
print(flat_list_Hbr_40y)
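#Note (sketch): the yearly-sequestration conversion above (first difference of the cumulative AGB curve,
#negative differences from replanting years set to zero, a leading zero for year 0, and a sign flip so that
#sequestration is negative) is repeated for every species. A compact, hypothetical equivalent is sketched
#below for reference only; the name 'yearly_sequestration' is an assumption.
def yearly_sequestration(cumulative_agb):
    diffs = np.diff(np.asarray(cumulative_agb))     #year-on-year growth
    diffs = np.clip(diffs, 0, None)                 #no uptake across a replanting year
    return -np.insert(diffs, 0, 0.0)                #no uptake in year 0; negative values denote sequestration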
#%%
#Step (6): post-harvest processing of wood
#post-harvest wood processing
df1_Ac_7y = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S1_Ac_7y')
df1_Ac_18y = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S1_Ac_18y')
df1_Tgr_40y = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S1_Tgr_40y')
df1_Tgr_60y = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S1_Tgr_60y')
dfE_Hbr_40y = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_E_Hbr_40y')
t = range(0,tf,1)
PH_Emissions_HWP1_Ac_7y = df1_Ac_7y['PH_Emissions_HWP'].values
PH_Emissions_HWP1_Ac_18y = df1_Ac_18y['PH_Emissions_HWP'].values
PH_Emissions_HWP1_Tgr_40y = df1_Tgr_40y['PH_Emissions_HWP'].values
PH_Emissions_HWP1_Tgr_60y = df1_Tgr_60y['PH_Emissions_HWP'].values
PH_Emissions_HWPE_Hbr_40y = dfE_Hbr_40y['PH_Emissions_HWP'].values
#%%
#Step (7_1): landfill gas decomposition (CH4)
#CH4 decomposition
hl = 20 #half-life (years)
k = (np.log(2))/hl
#S1_Ac_7y
df = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S1_Ac_7y')
tf = 201
t = np.arange(tf)
def decomp_CH4_S1_Ac_7y(t,remainAGB_CH4_S1_Ac_7y):
    return (1-(1-np.exp(-k*t)))*remainAGB_CH4_S1_Ac_7y
#set zero matrix
output_decomp_CH4_S1_Ac_7y = np.zeros((len(t),len(df['Landfill_decomp_CH4'].values)))
for i,remain_part_CH4_S1_Ac_7y in enumerate(df['Landfill_decomp_CH4'].values):
    #print(i,remain_part)
    output_decomp_CH4_S1_Ac_7y[i:,i] = decomp_CH4_S1_Ac_7y(t[:len(t)-i],remain_part_CH4_S1_Ac_7y)
print(output_decomp_CH4_S1_Ac_7y[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CH4_S1_Ac_7y = np.zeros((len(t)-1,len(df['Landfill_decomp_CH4'].values-1)))
i = 0
while i < tf:
    subs_matrix_CH4_S1_Ac_7y[:,i] = np.diff(output_decomp_CH4_S1_Ac_7y[:,i])
    i = i + 1
print(subs_matrix_CH4_S1_Ac_7y[:,:4])
print(len(subs_matrix_CH4_S1_Ac_7y))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CH4_S1_Ac_7y = subs_matrix_CH4_S1_Ac_7y.clip(max=0)
print(subs_matrix_CH4_S1_Ac_7y[:,:4])
#make the results as absolute values
subs_matrix_CH4_S1_Ac_7y = abs(subs_matrix_CH4_S1_Ac_7y)
print(subs_matrix_CH4_S1_Ac_7y[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CH4_S1_Ac_7y = np.zeros((len(t)-200,len(df['Landfill_decomp_CH4'].values)))
print(zero_matrix_CH4_S1_Ac_7y)
subs_matrix_CH4_S1_Ac_7y = np.vstack((zero_matrix_CH4_S1_Ac_7y, subs_matrix_CH4_S1_Ac_7y))
print(subs_matrix_CH4_S1_Ac_7y[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CH4_S1_Ac_7y = (tf,1)
decomp_tot_CH4_S1_Ac_7y = np.zeros(matrix_tot_CH4_S1_Ac_7y)
i = 0
while i < tf:
    decomp_tot_CH4_S1_Ac_7y[:,0] = decomp_tot_CH4_S1_Ac_7y[:,0] + subs_matrix_CH4_S1_Ac_7y[:,i]
    i = i + 1
print(decomp_tot_CH4_S1_Ac_7y[:,0])
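#Note (sketch): algebraically, (1-(1-np.exp(-k*t))) reduces to np.exp(-k*t), i.e. simple first-order decay of
#the landfilled carbon with half-life hl; the expression is kept in its original form above. A hypothetical
#check (not part of the original calculation):
t_check = np.arange(tf)
print(np.allclose(1 - (1 - np.exp(-k*t_check)), np.exp(-k*t_check)))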
#S1_Ac_18y
df = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S1_Ac_18y')
tf = 201
t = np.arange(tf)
def decomp_CH4_S1_Ac_18y(t,remainAGB_CH4_S1_Ac_18y):
    return (1-(1-np.exp(-k*t)))*remainAGB_CH4_S1_Ac_18y
#set zero matrix
output_decomp_CH4_S1_Ac_18y = np.zeros((len(t),len(df['Landfill_decomp_CH4'].values)))
for i,remain_part_CH4_S1_Ac_18y in enumerate(df['Landfill_decomp_CH4'].values):
    #print(i,remain_part)
    output_decomp_CH4_S1_Ac_18y[i:,i] = decomp_CH4_S1_Ac_18y(t[:len(t)-i],remain_part_CH4_S1_Ac_18y)
print(output_decomp_CH4_S1_Ac_18y[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CH4_S1_Ac_18y = np.zeros((len(t)-1,len(df['Landfill_decomp_CH4'].values-1)))
i = 0
while i < tf:
    subs_matrix_CH4_S1_Ac_18y[:,i] = np.diff(output_decomp_CH4_S1_Ac_18y[:,i])
    i = i + 1
print(subs_matrix_CH4_S1_Ac_18y[:,:4])
print(len(subs_matrix_CH4_S1_Ac_18y))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CH4_S1_Ac_18y = subs_matrix_CH4_S1_Ac_18y.clip(max=0)
print(subs_matrix_CH4_S1_Ac_18y[:,:4])
#make the results as absolute values
subs_matrix_CH4_S1_Ac_18y = abs(subs_matrix_CH4_S1_Ac_18y)
print(subs_matrix_CH4_S1_Ac_18y[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CH4_S1_Ac_18y = np.zeros((len(t)-200,len(df['Landfill_decomp_CH4'].values)))
print(zero_matrix_CH4_S1_Ac_18y)
subs_matrix_CH4_S1_Ac_18y = np.vstack((zero_matrix_CH4_S1_Ac_18y, subs_matrix_CH4_S1_Ac_18y))
print(subs_matrix_CH4_S1_Ac_18y[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CH4_S1_Ac_18y = (tf,1)
decomp_tot_CH4_S1_Ac_18y = np.zeros(matrix_tot_CH4_S1_Ac_18y)
i = 0
while i < tf:
    decomp_tot_CH4_S1_Ac_18y[:,0] = decomp_tot_CH4_S1_Ac_18y[:,0] + subs_matrix_CH4_S1_Ac_18y[:,i]
    i = i + 1
print(decomp_tot_CH4_S1_Ac_18y[:,0])
#S1_Tgr_40y
df = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S1_Tgr_40y')
tf = 201
t = np.arange(tf)
def decomp_CH4_S1_Tgr_40y(t,remainAGB_CH4_S1_Tgr_40y):
    return (1-(1-np.exp(-k*t)))*remainAGB_CH4_S1_Tgr_40y
#set zero matrix
output_decomp_CH4_S1_Tgr_40y = np.zeros((len(t),len(df['Landfill_decomp_CH4'].values)))
for i,remain_part_CH4_S1_Tgr_40y in enumerate(df['Landfill_decomp_CH4'].values):
    #print(i,remain_part)
    output_decomp_CH4_S1_Tgr_40y[i:,i] = decomp_CH4_S1_Tgr_40y(t[:len(t)-i],remain_part_CH4_S1_Tgr_40y)
print(output_decomp_CH4_S1_Tgr_40y[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CH4_S1_Tgr_40y = np.zeros((len(t)-1,len(df['Landfill_decomp_CH4'].values-1)))
i = 0
while i < tf:
    subs_matrix_CH4_S1_Tgr_40y[:,i] = np.diff(output_decomp_CH4_S1_Tgr_40y[:,i])
    i = i + 1
print(subs_matrix_CH4_S1_Tgr_40y[:,:4])
print(len(subs_matrix_CH4_S1_Tgr_40y))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CH4_S1_Tgr_40y = subs_matrix_CH4_S1_Tgr_40y.clip(max=0)
print(subs_matrix_CH4_S1_Tgr_40y[:,:4])
#make the results as absolute values
subs_matrix_CH4_S1_Tgr_40y = abs(subs_matrix_CH4_S1_Tgr_40y)
print(subs_matrix_CH4_S1_Tgr_40y[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CH4_S1_Tgr_40y = np.zeros((len(t)-200,len(df['Landfill_decomp_CH4'].values)))
print(zero_matrix_CH4_S1_Tgr_40y)
subs_matrix_CH4_S1_Tgr_40y = np.vstack((zero_matrix_CH4_S1_Tgr_40y, subs_matrix_CH4_S1_Tgr_40y))
print(subs_matrix_CH4_S1_Tgr_40y[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CH4_S1_Tgr_40y = (tf,1)
decomp_tot_CH4_S1_Tgr_40y = np.zeros(matrix_tot_CH4_S1_Tgr_40y)
i = 0
while i < tf:
    decomp_tot_CH4_S1_Tgr_40y[:,0] = decomp_tot_CH4_S1_Tgr_40y[:,0] + subs_matrix_CH4_S1_Tgr_40y[:,i]
    i = i + 1
print(decomp_tot_CH4_S1_Tgr_40y[:,0])
#S1_Tgr_60y
df = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S1_Tgr_60y')
tf = 201
t = np.arange(tf)
def decomp_CH4_S1_Tgr_60y(t,remainAGB_CH4_S1_Tgr_60y):
    return (1-(1-np.exp(-k*t)))*remainAGB_CH4_S1_Tgr_60y
#set zero matrix
output_decomp_CH4_S1_Tgr_60y = np.zeros((len(t),len(df['Landfill_decomp_CH4'].values)))
for i,remain_part_CH4_S1_Tgr_60y in enumerate(df['Landfill_decomp_CH4'].values):
    #print(i,remain_part)
    output_decomp_CH4_S1_Tgr_60y[i:,i] = decomp_CH4_S1_Tgr_60y(t[:len(t)-i],remain_part_CH4_S1_Tgr_60y)
print(output_decomp_CH4_S1_Tgr_60y[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CH4_S1_Tgr_60y = np.zeros((len(t)-1,len(df['Landfill_decomp_CH4'].values-1)))
i = 0
while i < tf:
    subs_matrix_CH4_S1_Tgr_60y[:,i] = np.diff(output_decomp_CH4_S1_Tgr_60y[:,i])
    i = i + 1
print(subs_matrix_CH4_S1_Tgr_60y[:,:4])
print(len(subs_matrix_CH4_S1_Tgr_60y))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CH4_S1_Tgr_60y = subs_matrix_CH4_S1_Tgr_60y.clip(max=0)
print(subs_matrix_CH4_S1_Tgr_60y[:,:4])
#make the results as absolute values
subs_matrix_CH4_S1_Tgr_60y = abs(subs_matrix_CH4_S1_Tgr_60y)
print(subs_matrix_CH4_S1_Tgr_60y[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CH4_S1_Tgr_60y = np.zeros((len(t)-200,len(df['Landfill_decomp_CH4'].values)))
print(zero_matrix_CH4_S1_Tgr_60y)
subs_matrix_CH4_S1_Tgr_60y = np.vstack((zero_matrix_CH4_S1_Tgr_60y, subs_matrix_CH4_S1_Tgr_60y))
print(subs_matrix_CH4_S1_Tgr_60y[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CH4_S1_Tgr_60y = (tf,1)
decomp_tot_CH4_S1_Tgr_60y = np.zeros(matrix_tot_CH4_S1_Tgr_60y)
i = 0
while i < tf:
    decomp_tot_CH4_S1_Tgr_60y[:,0] = decomp_tot_CH4_S1_Tgr_60y[:,0] + subs_matrix_CH4_S1_Tgr_60y[:,i]
    i = i + 1
print(decomp_tot_CH4_S1_Tgr_60y[:,0])
#E
df = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_E_Hbr_40y')
tf = 201
t = np.arange(tf)
def decomp_CH4_E_Hbr_40y(t,remainAGB_CH4_E_Hbr_40y):
    return (1-(1-np.exp(-k*t)))*remainAGB_CH4_E_Hbr_40y
#set zero matrix
output_decomp_CH4_E_Hbr_40y = np.zeros((len(t),len(df['Landfill_decomp_CH4'].values)))
for i,remain_part_CH4_E_Hbr_40y in enumerate(df['Landfill_decomp_CH4'].values):
    #print(i,remain_part)
    output_decomp_CH4_E_Hbr_40y[i:,i] = decomp_CH4_E_Hbr_40y(t[:len(t)-i],remain_part_CH4_E_Hbr_40y)
print(output_decomp_CH4_E_Hbr_40y[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CH4_E_Hbr_40y = np.zeros((len(t)-1,len(df['Landfill_decomp_CH4'].values-1)))
i = 0
while i < tf:
    subs_matrix_CH4_E_Hbr_40y[:,i] = np.diff(output_decomp_CH4_E_Hbr_40y[:,i])
    i = i + 1
print(subs_matrix_CH4_E_Hbr_40y[:,:4])
print(len(subs_matrix_CH4_E_Hbr_40y))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CH4_E_Hbr_40y = subs_matrix_CH4_E_Hbr_40y.clip(max=0)
print(subs_matrix_CH4_E_Hbr_40y[:,:4])
#make the results as absolute values
subs_matrix_CH4_E_Hbr_40y = abs(subs_matrix_CH4_E_Hbr_40y)
print(subs_matrix_CH4_E_Hbr_40y[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CH4_E_Hbr_40y = np.zeros((len(t)-200,len(df['Landfill_decomp_CH4'].values)))
print(zero_matrix_CH4_E_Hbr_40y)
subs_matrix_CH4_E_Hbr_40y = np.vstack((zero_matrix_CH4_E_Hbr_40y, subs_matrix_CH4_E_Hbr_40y))
print(subs_matrix_CH4_E_Hbr_40y[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CH4_E_Hbr_40y = (tf,1)
decomp_tot_CH4_E_Hbr_40y = np.zeros(matrix_tot_CH4_E_Hbr_40y)
i = 0
while i < tf:
    decomp_tot_CH4_E_Hbr_40y[:,0] = decomp_tot_CH4_E_Hbr_40y[:,0] + subs_matrix_CH4_E_Hbr_40y[:,i]
    i = i + 1
print(decomp_tot_CH4_E_Hbr_40y[:,0])
#plotting
t = np.arange(0,tf)
plt.plot(t,decomp_tot_CH4_S1_Ac_7y,label='Ac_7y')
plt.plot(t,decomp_tot_CH4_S1_Ac_18y,label='Ac_18y')
plt.plot(t,decomp_tot_CH4_S1_Tgr_40y,label='Tgr_40y')
plt.plot(t,decomp_tot_CH4_S1_Tgr_60y,label='Tgr_60y')
plt.plot(t,decomp_tot_CH4_E_Hbr_40y,label='E_Hbr_40y')
plt.xlim(0,200)
plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
plt.show()
#%%
#Step (7_2): landfill gas decomposition (CO2)
#CO2 decomposition
hl = 20 #half-life (years)
k = (np.log(2))/hl
#S1_Ac_7y
df = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S1_Ac_7y')
tf = 201
t = np.arange(tf)
def decomp_S1_Ac_7y(t,remainAGB_S1_Ac_7y):
    return (1-(1-np.exp(-k*t)))*remainAGB_S1_Ac_7y
#set zero matrix
output_decomp_S1_Ac_7y = np.zeros((len(t),len(df['Landfill_decomp_CO2'].values)))
for i,remain_part_S1_Ac_7y in enumerate(df['Landfill_decomp_CO2'].values):
    #print(i,remain_part)
    output_decomp_S1_Ac_7y[i:,i] = decomp_S1_Ac_7y(t[:len(t)-i],remain_part_S1_Ac_7y)
print(output_decomp_S1_Ac_7y[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_S1_Ac_7y = np.zeros((len(t)-1,len(df['Landfill_decomp_CO2'].values-1)))
i = 0
while i < tf:
    subs_matrix_S1_Ac_7y[:,i] = np.diff(output_decomp_S1_Ac_7y[:,i])
    i = i + 1
print(subs_matrix_S1_Ac_7y[:,:4])
print(len(subs_matrix_S1_Ac_7y))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_S1_Ac_7y = subs_matrix_S1_Ac_7y.clip(max=0)
print(subs_matrix_S1_Ac_7y[:,:4])
#make the results as absolute values
subs_matrix_S1_Ac_7y = abs(subs_matrix_S1_Ac_7y)
print(subs_matrix_S1_Ac_7y[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_S1_Ac_7y = np.zeros((len(t)-200,len(df['Landfill_decomp_CO2'].values)))
print(zero_matrix_S1_Ac_7y)
subs_matrix_S1_Ac_7y = np.vstack((zero_matrix_S1_Ac_7y, subs_matrix_S1_Ac_7y))
print(subs_matrix_S1_Ac_7y[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_S1_Ac_7y = (tf,1)
decomp_tot_CO2_S1_Ac_7y = np.zeros(matrix_tot_S1_Ac_7y)
i = 0
while i < tf:
    decomp_tot_CO2_S1_Ac_7y[:,0] = decomp_tot_CO2_S1_Ac_7y[:,0] + subs_matrix_S1_Ac_7y[:,i]
    i = i + 1
print(decomp_tot_CO2_S1_Ac_7y[:,0])
#S1_Ac_18y
df = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S1_Ac_18y')
tf = 201
t = np.arange(tf)
def decomp_S1_Ac_18y(t,remainAGB_S1_Ac_18y):
    return (1-(1-np.exp(-k*t)))*remainAGB_S1_Ac_18y
#set zero matrix
output_decomp_S1_Ac_18y = np.zeros((len(t),len(df['Landfill_decomp_CO2'].values)))
for i,remain_part_S1_Ac_18y in enumerate(df['Landfill_decomp_CO2'].values):
    #print(i,remain_part)
    output_decomp_S1_Ac_18y[i:,i] = decomp_S1_Ac_18y(t[:len(t)-i],remain_part_S1_Ac_18y)
print(output_decomp_S1_Ac_18y[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_S1_Ac_18y = np.zeros((len(t)-1,len(df['Landfill_decomp_CO2'].values-1)))
i = 0
while i < tf:
    subs_matrix_S1_Ac_18y[:,i] = np.diff(output_decomp_S1_Ac_18y[:,i])
    i = i + 1
print(subs_matrix_S1_Ac_18y[:,:4])
print(len(subs_matrix_S1_Ac_18y))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_S1_Ac_18y = subs_matrix_S1_Ac_18y.clip(max=0)
print(subs_matrix_S1_Ac_18y[:,:4])
#make the results as absolute values
subs_matrix_S1_Ac_18y = abs(subs_matrix_S1_Ac_18y)
print(subs_matrix_S1_Ac_18y[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_S1_Ac_18y = np.zeros((len(t)-200,len(df['Landfill_decomp_CO2'].values)))
print(zero_matrix_S1_Ac_18y)
subs_matrix_S1_Ac_18y = np.vstack((zero_matrix_S1_Ac_18y, subs_matrix_S1_Ac_18y))
print(subs_matrix_S1_Ac_18y[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_S1_Ac_18y = (tf,1)
decomp_tot_CO2_S1_Ac_18y = np.zeros(matrix_tot_S1_Ac_18y)
i = 0
while i < tf:
    decomp_tot_CO2_S1_Ac_18y[:,0] = decomp_tot_CO2_S1_Ac_18y[:,0] + subs_matrix_S1_Ac_18y[:,i]
    i = i + 1
print(decomp_tot_CO2_S1_Ac_18y[:,0])
#S1_Tgr_40y
df = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S1_Tgr_40y')
tf = 201
t = np.arange(tf)
def decomp_S1_Tgr_40y(t,remainAGB_S1_Tgr_40y):
    return (1-(1-np.exp(-k*t)))*remainAGB_S1_Tgr_40y
#set zero matrix
output_decomp_S1_Tgr_40y = np.zeros((len(t),len(df['Landfill_decomp_CO2'].values)))
for i,remain_part_S1_Tgr_40y in enumerate(df['Landfill_decomp_CO2'].values):
    #print(i,remain_part)
    output_decomp_S1_Tgr_40y[i:,i] = decomp_S1_Tgr_40y(t[:len(t)-i],remain_part_S1_Tgr_40y)
print(output_decomp_S1_Tgr_40y[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_S1_Tgr_40y = np.zeros((len(t)-1,len(df['Landfill_decomp_CO2'].values-1)))
i = 0
while i < tf:
    subs_matrix_S1_Tgr_40y[:,i] = np.diff(output_decomp_S1_Tgr_40y[:,i])
    i = i + 1
print(subs_matrix_S1_Tgr_40y[:,:4])
print(len(subs_matrix_S1_Tgr_40y))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_S1_Tgr_40y = subs_matrix_S1_Tgr_40y.clip(max=0)
print(subs_matrix_S1_Tgr_40y[:,:4])
#make the results as absolute values
subs_matrix_S1_Tgr_40y = abs(subs_matrix_S1_Tgr_40y)
print(subs_matrix_S1_Tgr_40y[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_S1_Tgr_40y = np.zeros((len(t)-200,len(df['Landfill_decomp_CO2'].values)))
print(zero_matrix_S1_Tgr_40y)
subs_matrix_S1_Tgr_40y = np.vstack((zero_matrix_S1_Tgr_40y, subs_matrix_S1_Tgr_40y))
print(subs_matrix_S1_Tgr_40y[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_S1_Tgr_40y = (tf,1)
decomp_tot_CO2_S1_Tgr_40y = np.zeros(matrix_tot_S1_Tgr_40y)
i = 0
while i < tf:
    decomp_tot_CO2_S1_Tgr_40y[:,0] = decomp_tot_CO2_S1_Tgr_40y[:,0] + subs_matrix_S1_Tgr_40y[:,i]
    i = i + 1
print(decomp_tot_CO2_S1_Tgr_40y[:,0])
#S1_Tgr_60y
df = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S1_Tgr_60y')
tf = 201
t = np.arange(tf)
def decomp_S1_Tgr_60y(t,remainAGB_S1_Tgr_60y):
    return (1-(1-np.exp(-k*t)))*remainAGB_S1_Tgr_60y
#set zero matrix
output_decomp_S1_Tgr_60y = np.zeros((len(t),len(df['Landfill_decomp_CO2'].values)))
for i,remain_part_S1_Tgr_60y in enumerate(df['Landfill_decomp_CO2'].values):
    #print(i,remain_part)
    output_decomp_S1_Tgr_60y[i:,i] = decomp_S1_Tgr_60y(t[:len(t)-i],remain_part_S1_Tgr_60y)
print(output_decomp_S1_Tgr_60y[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_S1_Tgr_60y = np.zeros((len(t)-1,len(df['Landfill_decomp_CO2'].values-1)))
i = 0
while i < tf:
    subs_matrix_S1_Tgr_60y[:,i] = np.diff(output_decomp_S1_Tgr_60y[:,i])
    i = i + 1
print(subs_matrix_S1_Tgr_60y[:,:4])
print(len(subs_matrix_S1_Tgr_60y))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_S1_Tgr_60y = subs_matrix_S1_Tgr_60y.clip(max=0)
print(subs_matrix_S1_Tgr_60y[:,:4])
#make the results as absolute values
subs_matrix_S1_Tgr_60y = abs(subs_matrix_S1_Tgr_60y)
print(subs_matrix_S1_Tgr_60y[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_S1_Tgr_60y = np.zeros((len(t)-200,len(df['Landfill_decomp_CO2'].values)))
print(zero_matrix_S1_Tgr_60y)
subs_matrix_S1_Tgr_60y = np.vstack((zero_matrix_S1_Tgr_60y, subs_matrix_S1_Tgr_60y))
print(subs_matrix_S1_Tgr_60y[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_S1_Tgr_60y = (tf,1)
decomp_tot_CO2_S1_Tgr_60y = np.zeros(matrix_tot_S1_Tgr_60y)
i = 0
while i < tf:
    decomp_tot_CO2_S1_Tgr_60y[:,0] = decomp_tot_CO2_S1_Tgr_60y[:,0] + subs_matrix_S1_Tgr_60y[:,i]
    i = i + 1
print(decomp_tot_CO2_S1_Tgr_60y[:,0])
#E
df = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_E_Hbr_40y')
tf = 201
t = np.arange(tf)
def decomp_E_Hbr_40y(t,remainAGB_E_Hbr_40y):
    return (1-(1-np.exp(-k*t)))*remainAGB_E_Hbr_40y
#set zero matrix
output_decomp_E_Hbr_40y = np.zeros((len(t),len(df['Landfill_decomp_CO2'].values)))
for i,remain_part_E_Hbr_40y in enumerate(df['Landfill_decomp_CO2'].values):
    #print(i,remain_part)
    output_decomp_E_Hbr_40y[i:,i] = decomp_E_Hbr_40y(t[:len(t)-i],remain_part_E_Hbr_40y)
print(output_decomp_E_Hbr_40y[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_E_Hbr_40y = np.zeros((len(t)-1,len(df['Landfill_decomp_CO2'].values-1)))
i = 0
while i < tf:
    subs_matrix_E_Hbr_40y[:,i] = np.diff(output_decomp_E_Hbr_40y[:,i])
    i = i + 1
print(subs_matrix_E_Hbr_40y[:,:4])
print(len(subs_matrix_E_Hbr_40y))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_E_Hbr_40y = subs_matrix_E_Hbr_40y.clip(max=0)
print(subs_matrix_E_Hbr_40y[:,:4])
#make the results as absolute values
subs_matrix_E_Hbr_40y = abs(subs_matrix_E_Hbr_40y)
print(subs_matrix_E_Hbr_40y[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_E_Hbr_40y = np.zeros((len(t)-200,len(df['Landfill_decomp_CO2'].values)))
print(zero_matrix_E_Hbr_40y)
subs_matrix_E_Hbr_40y = np.vstack((zero_matrix_E_Hbr_40y, subs_matrix_E_Hbr_40y))
print(subs_matrix_E_Hbr_40y[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_E_Hbr_40y = (tf,1)
decomp_tot_CO2_E_Hbr_40y = np.zeros(matrix_tot_E_Hbr_40y)
i = 0
while i < tf:
    decomp_tot_CO2_E_Hbr_40y[:,0] = decomp_tot_CO2_E_Hbr_40y[:,0] + subs_matrix_E_Hbr_40y[:,i]
    i = i + 1
print(decomp_tot_CO2_E_Hbr_40y[:,0])
#plotting
t = np.arange(0,tf)
plt.plot(t,decomp_tot_CO2_S1_Ac_7y,label='Ac_7y')
plt.plot(t,decomp_tot_CO2_S1_Ac_18y,label='Ac_18y')
plt.plot(t,decomp_tot_CO2_S1_Tgr_40y,label='Tgr_40y')
plt.plot(t,decomp_tot_CO2_S1_Tgr_60y,label='Tgr_60y')
plt.plot(t,decomp_tot_CO2_E_Hbr_40y,label='E_Hbr_40y')
plt.xlim(0,200)
plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
plt.show()
#%%
#Step (8): Sum the emissions and sequestration (net carbon balance), CO2 and CH4 are separated
#https://stackoverflow.com/questions/52703442/python-sum-values-from-multiple-lists-more-than-two
#C_loss (firewood/energy use) + C_remainAGB (decomposition) + C_remainHWP (in-use stock outflow) + PH_Emissions_HWP + landfill CO2
Emissions_S1_Ac_7y = [c_firewood_energy_S1_Ac7, decomp_tot_S1_Ac_7y[:,0], TestDSM1_Ac7.o, PH_Emissions_HWP1_Ac_7y, decomp_tot_CO2_S1_Ac_7y[:,0]]
Emissions_S1_Ac_18y = [c_firewood_energy_S1_Ac18, decomp_tot_S1_Ac_18y[:,0], TestDSM1_Ac18.o, PH_Emissions_HWP1_Ac_18y, decomp_tot_CO2_S1_Ac_18y[:,0]]
Emissions_S1_Tgr_40y = [c_firewood_energy_S1_Tgr40, decomp_tot_S1_Tgr_40y[:,0], TestDSM1_Tgr40.o, PH_Emissions_HWP1_Tgr_40y, decomp_tot_CO2_S1_Tgr_40y[:,0]]
Emissions_S1_Tgr_60y = [c_firewood_energy_S1_Tgr60, decomp_tot_S1_Tgr_60y[:,0], TestDSM1_Tgr60.o, PH_Emissions_HWP1_Tgr_60y, decomp_tot_CO2_S1_Tgr_60y[:,0]]
Emissions_E_Hbr_40y = [c_firewood_energy_E_Hbr40, c_pellets_Hbr_40y, decomp_tot_E_Hbr_40y[:,0], TestDSME_Hbr40.o, PH_Emissions_HWPE_Hbr_40y, decomp_tot_CO2_E_Hbr_40y[:,0]]
Emissions_DL_FP_S1_Ac_7y = [sum(x) for x in zip(*Emissions_S1_Ac_7y)]
Emissions_DL_FP_S1_Ac_18y = [sum(x) for x in zip(*Emissions_S1_Ac_18y)]
Emissions_DL_FP_S1_Tgr_40y = [sum(x) for x in zip(*Emissions_S1_Tgr_40y)]
Emissions_DL_FP_S1_Tgr_60y = [sum(x) for x in zip(*Emissions_S1_Tgr_60y)]
Emissions_DL_FP_E_Hbr_40y = [sum(x) for x in zip(*Emissions_E_Hbr_40y)]
#CH4_S1_Ac_7y
Emissions_CH4_DL_FP_S1_Ac_7y = decomp_tot_CH4_S1_Ac_7y[:,0]
#CH4_S1_Ac_18y
Emissions_CH4_DL_FP_S1_Ac_18y = decomp_tot_CH4_S1_Ac_18y[:,0]
#CH4_S1_Tgr_40y
Emissions_CH4_DL_FP_S1_Tgr_40y = decomp_tot_CH4_S1_Tgr_40y[:,0]
#CH4_S1_Tgr_60y
Emissions_CH4_DL_FP_S1_Tgr_60y = decomp_tot_CH4_S1_Tgr_60y[:,0]
#CH4_E_Hbr_40y
Emissions_CH4_DL_FP_E_Hbr_40y = decomp_tot_CH4_E_Hbr_40y[:,0]
#%%
#Step (9): Generate the excel file (emissions_seq_DL_FP_S1.xlsx) from Step (8) calculation
#print year column
year = []
for x in range(0, tf):
    year.append(x)
print (year)
#print CH4 emission column
import itertools
lst = [0]
Emissions_CH4 = list(itertools.chain.from_iterable(itertools.repeat(x, tf) for x in lst))
print(Emissions_CH4)
#print emission ref
lst1 = [0]
Emission_ref = list(itertools.chain.from_iterable(itertools.repeat(x, tf) for x in lst1))
print(Emission_ref)
#replace the first element with 1 to denote the emission reference as year 0 (for dynGWP calculation)
Emission_ref[0] = 1
print(Emission_ref)
Col1 = year
Col2_S1_Ac_7y = Emissions_DL_FP_S1_Ac_7y
Col2_S1_Ac_18y = Emissions_DL_FP_S1_Ac_18y
Col2_S1_Tgr_40y = Emissions_DL_FP_S1_Tgr_40y
Col2_S1_Tgr_60y = Emissions_DL_FP_S1_Tgr_60y
Col2_E_Hbr_40y = Emissions_DL_FP_E_Hbr_40y
Col3_S1_Ac_7y = Emissions_CH4_DL_FP_S1_Ac_7y
Col3_S1_Ac_18y = Emissions_CH4_DL_FP_S1_Ac_18y
Col3_S1_Tgr_40y = Emissions_CH4_DL_FP_S1_Tgr_40y
Col3_S1_Tgr_60y = Emissions_CH4_DL_FP_S1_Tgr_60y
Col3_E_Hbr_40y = Emissions_CH4_DL_FP_E_Hbr_40y
Col4 = Emission_ref
Col5 = flat_list_Ac_7y
Col6 = flat_list_Ac_18y
Col7 = flat_list_Tgr_40y
Col8 = flat_list_Tgr_60y
Col9 = flat_list_Hbr_40y
#A. crassicarpa
df1_Ac_7y = pd.DataFrame.from_dict({'Year':Col1,'kg_CO2':Col2_S1_Ac_7y,'kg_CH4':Col3_S1_Ac_7y,'kg_CO2_seq':Col5,'emission_ref':Col4})
df1_Ac_18y = pd.DataFrame.from_dict({'Year':Col1,'kg_CO2':Col2_S1_Ac_18y,'kg_CH4':Col3_S1_Ac_18y,'kg_CO2_seq':Col6,'emission_ref':Col4})
#T. grandis
df1_Tgr_40y = pd.DataFrame.from_dict({'Year':Col1,'kg_CO2':Col2_S1_Tgr_40y,'kg_CH4':Col3_S1_Tgr_40y,'kg_CO2_seq':Col7,'emission_ref':Col4})
df1_Tgr_60y = pd.DataFrame.from_dict({'Year':Col1,'kg_CO2':Col2_S1_Tgr_60y,'kg_CH4':Col3_S1_Tgr_60y,'kg_CO2_seq':Col8,'emission_ref':Col4})
#H. brasiliensis
dfE_Hbr_40y = pd.DataFrame.from_dict({'Year':Col1,'kg_CO2':Col2_E_Hbr_40y,'kg_CH4':Col3_E_Hbr_40y,'kg_CO2_seq':Col9,'emission_ref':Col4})
writer = pd.ExcelWriter('emissions_seq_DL_FP_S1.xlsx', engine = 'xlsxwriter')
df1_Ac_7y.to_excel(writer, sheet_name = 'DL_FP_S1_Ac_7y', header=True, index=False )
df1_Ac_18y.to_excel(writer, sheet_name = 'DL_FP_S1_Ac_18y', header=True, index=False)
df1_Tgr_40y.to_excel(writer, sheet_name = 'DL_FP_S1_Tgr_40y', header=True, index=False)
df1_Tgr_60y.to_excel(writer, sheet_name = 'DL_FP_S1_Tgr_60y', header=True, index=False)
dfE_Hbr_40y.to_excel(writer, sheet_name = 'DL_FP_E_Hbr_40y', header=True, index=False)
writer.save()
writer.close()
#%%
## DYNAMIC LCA
# Step (10): Set General Parameters for Dynamic LCA calculation
aCH4 = 0.129957e-12 # methane - instantaneous radiative forcing per unit mass [W/m2 /kgCH4]
TauCH4 = 12 # methane - lifetime (years)
aCO2 = 0.0018088e-12 # CO2 - instantaneous radiative forcing per unit mass [W/m2 /kgCO2]
TauCO2 = [172.9, 18.51, 1.186] # CO2 parameters according to Bern carbon cycle-climate model
aBern = [0.259, 0.338, 0.186] # CO2 parameters according to Bern carbon cycle-climate model
a0Bern = 0.217 # CO2 parameters according to Bern carbon cycle-climate model
tf = 202 #until 202 because we want to get the DCF(t-i) until DCF(201) to determine the impact from the emission from the year 200 (There is no DCF(0))
#%%
#Step (11): Bern 2.5 CC Model, determine atmospheric load (C(t)) for GHG (CO2 and CH4)
t = range(0,tf,1)
## CO2 calculation formula
# time-dependent atmospheric load for CO2, Bern model
def C_CO2(t):
    return a0Bern + aBern[0]*np.exp(-t/TauCO2[0]) + aBern[1]*np.exp(-t/TauCO2[1]) + aBern[2]*np.exp(-t/TauCO2[2])
output_CO2 = np.array([C_CO2(ti) for ti in t])
print(output_CO2)
## CH4 calculation formula
# time-dependent atmospheric load for non-CO2 GHGs (methane)
def C_CH4(t):
    return np.exp(-t/TauCH4)
output_CH4 = np.array([C_CH4(ti) for ti in t])
plt.xlim([0, 200])
plt.ylim([0,1.1])
plt.plot(t, output_CO2)
plt.plot(t, output_CH4)
plt.xlabel('Time (year)')
plt.ylabel('Fraction remaining in the atmosphere')
plt.show()
output_CH4.size
#%%
#determine the C(t) for CO2
s = []
t = np.arange(0,tf,1)
for i in t:
    s.append(quad(C_CO2,i-1,i))
res_list_CO2 = [x[0] for x in s]
len(res_list_CO2)
#%%
#determine the C(t) for CH4
s = []
for i in t:
    s.append(quad(C_CH4,i-1,i))
res_list_CH4 = [p[0] for p in s]
#plot
plt.xlim([0, 200])
plt.ylim([0,1.5])
plt.plot(t, res_list_CO2)
plt.plot(t, res_list_CH4)
plt.show()
#%%
#Step (12): Determine dynamic characterization factors (DCF) for CO2 and CH4
DCF_inst_CO2 = aCO2 * np.array(res_list_CO2)
print(DCF_inst_CO2)
DCF_inst_CH4 = aCH4 * np.array(res_list_CH4)
plt.xlim([0, 200])
plt.ylim([0,4e-15])
plt.plot(t, DCF_inst_CO2)
plt.plot(t, DCF_inst_CH4)
plt.xlabel('Time (year)')
plt.ylabel('DCF_inst (10$^{-15}$ W/m$^2$.kg CO$_2$)')
plt.show()
len(DCF_inst_CO2)
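#Note (sketch): as a rough, hypothetical cross-check of the DCFs, the cumulative radiative forcing of a 1 kg
#pulse over the first 100 years can be compared between CH4 and CO2; the ratio is broadly comparable to a
#GWP100-style metric (direct radiative effect only, no indirect CH4 effects). The variable names below are
#hypothetical and the check is not part of the original calculation.
AGWP_CO2_100 = np.sum(DCF_inst_CO2[1:101])    #cumulative forcing per kg CO2 over years 1-100
AGWP_CH4_100 = np.sum(DCF_inst_CH4[1:101])    #cumulative forcing per kg CH4 over years 1-100
print('CH4/CO2 cumulative forcing ratio over 100 years:', AGWP_CH4_100/AGWP_CO2_100)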
#%%
#Step (13): import emission data from emissions_seq_DL_FP_S1.xlsx (generated in Step (9))
##wood-based
#read S1_Ac_7y
df = pd.read_excel('emissions_seq_DL_FP_S1.xlsx', 'DL_FP_S1_Ac_7y') # can also index sheet by name or fetch all sheets
emission_CO2_S1_Ac_7y = df['kg_CO2'].tolist()
emission_CH4_S1_Ac_7y = df['kg_CH4'].tolist()
emission_CO2_seq_S1_Ac_7y = df['kg_CO2_seq'].tolist()
emission_CO2_ref = df['emission_ref'].tolist()
#read S1_Ac_18y
df = pd.read_excel('emissions_seq_DL_FP_S1.xlsx', 'DL_FP_S1_Ac_18y')
emission_CO2_S1_Ac_18y = df['kg_CO2'].tolist()
emission_CH4_S1_Ac_18y = df['kg_CH4'].tolist()
emission_CO2_seq_S1_Ac_18y = df['kg_CO2_seq'].tolist()
#read S1_Tgr_40y
df = pd.read_excel('emissions_seq_DL_FP_S1.xlsx', 'DL_FP_S1_Tgr_40y') # can also index sheet by name or fetch all sheets
emission_CO2_S1_Tgr_40y = df['kg_CO2'].tolist()
emission_CH4_S1_Tgr_40y = df['kg_CH4'].tolist()
emission_CO2_seq_S1_Tgr_40y = df['kg_CO2_seq'].tolist()
#read S1_Tgr_60y
df = pd.read_excel('emissions_seq_DL_FP_S1.xlsx', 'DL_FP_S1_Tgr_60y')
emission_CO2_S1_Tgr_60y = df['kg_CO2'].tolist()
emission_CH4_S1_Tgr_60y = df['kg_CH4'].tolist()
emission_CO2_seq_S1_Tgr_60y = df['kg_CO2_seq'].tolist()
#read E_Hbr_40y
df = pd.read_excel('emissions_seq_DL_FP_S1.xlsx', 'DL_FP_E_Hbr_40y') # can also index sheet by name or fetch all sheets
emission_CO2_E_Hbr_40y = df['kg_CO2'].tolist()
emission_CH4_E_Hbr_40y = df['kg_CH4'].tolist()
emission_CO2_seq_E_Hbr_40y = df['kg_CO2_seq'].tolist()
#%%
#Step (14): import emission data from the counter-use of non-renewable materials/energy scenarios (NR)
#read S1_Ac_7y
df = pd.read_excel('NonRW_DL_FP.xlsx', 'DL_FP_S1_Ac_7y')
emissions_NonRW_S1_Ac_7y = df['NonRW_emissions'].tolist()
emissions_NonRW_S1_Ac_7y_seq = df['kg_CO2_seq'].tolist()
emission_CO2_ref = df['emission_ref'].tolist()
#read S1_Ac_18y
df = pd.read_excel('NonRW_DL_FP.xlsx', 'DL_FP_S1_Ac_18y')
emissions_NonRW_S1_Ac_18y = df['NonRW_emissions'].tolist()
emissions_NonRW_S1_Ac_18y_seq = df['kg_CO2_seq'].tolist()
#read S1_Tgr_40y
df = pd.read_excel('NonRW_DL_FP.xlsx', 'DL_FP_S1_Tgr_40y') # can also index sheet by name or fetch all sheets
emissions_NonRW_S1_Tgr_40y = df['NonRW_emissions'].tolist()
emissions_NonRW_S1_Tgr_40y_seq = df['kg_CO2_seq'].tolist()
#read S1_Tgr_60y
df = pd.read_excel('NonRW_DL_FP.xlsx', 'DL_FP_S1_Tgr_60y')
emissions_NonRW_S1_Tgr_60y = df['NonRW_emissions'].tolist()
emissions_NonRW_S1_Tgr_60y_seq = df['kg_CO2_seq'].tolist()
#read E_Hbr_40y
df = pd.read_excel('NonRW_DL_FP.xlsx', 'DL_FP_E_Hbr_40y') # can also index sheet by name or fetch all sheets
emissions_NonRW_E_Hbr_40y = df['NonRW_emissions'].tolist()
emissions_NonRW_E_Hbr_40y_seq = df['kg_CO2_seq'].tolist()
#%%
#Step (15): Determine the time elapsed dynamic characterization factors, DCF(t-ti), for CO2 and CH4
#DCF(t-i) CO2
matrix = (tf-1,tf-1)
DCF_CO2_ti = np.zeros(matrix)
for t in range(0,tf-1):
    i = -1
    while i < t:
        DCF_CO2_ti[i+1,t] = DCF_inst_CO2[t-i]
        i = i + 1
print(DCF_CO2_ti)
#sns.heatmap(DCF_CO2_ti)
DCF_CO2_ti.shape
#DCF(t-i) CH4
matrix = (tf-1,tf-1)
DCF_CH4_ti = np.zeros(matrix)
for t in range(0,tf-1):
    i = -1
    while i < t:
        DCF_CH4_ti[i+1,t] = DCF_inst_CH4[t-i]
        i = i + 1
print(DCF_CH4_ti)
#sns.heatmap(DCF_CH4_ti)
DCF_CH4_ti.shape
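#Note (sketch): each column t of DCF_CO2_ti (and DCF_CH4_ti) holds the instantaneous DCFs shifted by the
#emission year, i.e. rows 0..t contain DCF_inst[t+1], DCF_inst[t], ..., DCF_inst[1]. A compact, hypothetical
#construction equivalent to the loops above (for reference only):
DCF_CO2_ti_check = np.zeros((tf-1, tf-1))
for col in range(tf-1):
    DCF_CO2_ti_check[0:col+1, col] = DCF_inst_CO2[col+1:0:-1]
print(np.allclose(DCF_CO2_ti_check, DCF_CO2_ti))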
#%%
# Step (16): Calculate instantaneous global warming impact (GWI)
##Wood-based
#S1_Ac_7y
t = np.arange(0,tf-1,1)
matrix_GWI_S1_Ac_7y = (tf-1,3)
GWI_inst_S1_Ac_7y = np.zeros(matrix_GWI_S1_Ac_7y)
for t in range(0,tf-1):
GWI_inst_S1_Ac_7y[t,0] = np.sum(np.multiply(emission_CO2_S1_Ac_7y,DCF_CO2_ti[:,t]))
GWI_inst_S1_Ac_7y[t,1] = np.sum(np.multiply(emission_CH4_S1_Ac_7y,DCF_CH4_ti[:,t]))
GWI_inst_S1_Ac_7y[t,2] = np.sum(np.multiply(emission_CO2_seq_S1_Ac_7y,DCF_CO2_ti[:,t]))
matrix_GWI_tot_S1_Ac_7y = (tf-1,1)
GWI_inst_tot_S1_Ac_7y = np.zeros(matrix_GWI_tot_S1_Ac_7y)
GWI_inst_tot_S1_Ac_7y[:,0] = np.array(GWI_inst_S1_Ac_7y[:,0] + GWI_inst_S1_Ac_7y[:,1] + GWI_inst_S1_Ac_7y[:,2])
print(GWI_inst_tot_S1_Ac_7y[:,0])
t = np.arange(0,tf-1,1)
#S1_Ac_18y
t = np.arange(0,tf-1,1)
matrix_GWI_S1_Ac_18y = (tf-1,3)
GWI_inst_S1_Ac_18y = np.zeros(matrix_GWI_S1_Ac_18y)
for t in range(0,tf-1):
GWI_inst_S1_Ac_18y[t,0] = np.sum(np.multiply(emission_CO2_S1_Ac_18y,DCF_CO2_ti[:,t]))
GWI_inst_S1_Ac_18y[t,1] = np.sum(np.multiply(emission_CH4_S1_Ac_18y,DCF_CH4_ti[:,t]))
GWI_inst_S1_Ac_18y[t,2] = np.sum(np.multiply(emission_CO2_seq_S1_Ac_18y,DCF_CO2_ti[:,t]))
matrix_GWI_tot_S1_Ac_18y = (tf-1,1)
GWI_inst_tot_S1_Ac_18y = np.zeros(matrix_GWI_tot_S1_Ac_18y)
GWI_inst_tot_S1_Ac_18y[:,0] = np.array(GWI_inst_S1_Ac_18y[:,0] + GWI_inst_S1_Ac_18y[:,1] + GWI_inst_S1_Ac_18y[:,2])
print(GWI_inst_tot_S1_Ac_18y[:,0])
#S1_Tgr_40y
t = np.arange(0,tf-1,1)
matrix_GWI_S1_Tgr_40y = (tf-1,3)
GWI_inst_S1_Tgr_40y = np.zeros(matrix_GWI_S1_Tgr_40y)
for t in range(0,tf-1):
GWI_inst_S1_Tgr_40y[t,0] = np.sum(np.multiply(emission_CO2_S1_Tgr_40y,DCF_CO2_ti[:,t]))
GWI_inst_S1_Tgr_40y[t,1] = np.sum(np.multiply(emission_CH4_S1_Tgr_40y,DCF_CH4_ti[:,t]))
GWI_inst_S1_Tgr_40y[t,2] = np.sum(np.multiply(emission_CO2_seq_S1_Tgr_40y,DCF_CO2_ti[:,t]))
matrix_GWI_tot_S1_Tgr_40y = (tf-1,1)
GWI_inst_tot_S1_Tgr_40y = np.zeros(matrix_GWI_tot_S1_Tgr_40y)
GWI_inst_tot_S1_Tgr_40y[:,0] = np.array(GWI_inst_S1_Tgr_40y[:,0] + GWI_inst_S1_Tgr_40y[:,1] + GWI_inst_S1_Tgr_40y[:,2])
print(GWI_inst_tot_S1_Tgr_40y[:,0])
#S1_Tgr_60y
t = np.arange(0,tf-1,1)
matrix_GWI_S1_Tgr_60y = (tf-1,3)
GWI_inst_S1_Tgr_60y = np.zeros(matrix_GWI_S1_Tgr_60y)
for t in range(0,tf-1):
GWI_inst_S1_Tgr_60y[t,0] = np.sum(np.multiply(emission_CO2_S1_Tgr_60y,DCF_CO2_ti[:,t]))
GWI_inst_S1_Tgr_60y[t,1] = np.sum(np.multiply(emission_CH4_S1_Tgr_60y,DCF_CH4_ti[:,t]))
GWI_inst_S1_Tgr_60y[t,2] = np.sum(np.multiply(emission_CO2_seq_S1_Tgr_60y,DCF_CO2_ti[:,t]))
matrix_GWI_tot_S1_Tgr_60y = (tf-1,1)
GWI_inst_tot_S1_Tgr_60y = np.zeros(matrix_GWI_tot_S1_Tgr_60y)
GWI_inst_tot_S1_Tgr_60y[:,0] = np.array(GWI_inst_S1_Tgr_60y[:,0] + GWI_inst_S1_Tgr_60y[:,1] + GWI_inst_S1_Tgr_60y[:,2])
print(GWI_inst_tot_S1_Tgr_60y[:,0])
#E_Hbr_40y
t = np.arange(0,tf-1,1)
matrix_GWI_E_Hbr_40y = (tf-1,3)
GWI_inst_E_Hbr_40y = np.zeros(matrix_GWI_E_Hbr_40y)
for t in range(0,tf-1):
GWI_inst_E_Hbr_40y[t,0] = np.sum(np.multiply(emission_CO2_E_Hbr_40y,DCF_CO2_ti[:,t]))
GWI_inst_E_Hbr_40y[t,1] = np.sum(np.multiply(emission_CH4_E_Hbr_40y,DCF_CH4_ti[:,t]))
GWI_inst_E_Hbr_40y[t,2] = np.sum(np.multiply(emission_CO2_seq_E_Hbr_40y,DCF_CO2_ti[:,t]))
matrix_GWI_tot_E_Hbr_40y = (tf-1,1)
GWI_inst_tot_E_Hbr_40y = np.zeros(matrix_GWI_tot_E_Hbr_40y)
GWI_inst_tot_E_Hbr_40y[:,0] = np.array(GWI_inst_E_Hbr_40y[:,0] + GWI_inst_E_Hbr_40y[:,1] + GWI_inst_E_Hbr_40y[:,2])
print(GWI_inst_tot_E_Hbr_40y[:,0])
##NonRW
#S1_Ac_7y
t = np.arange(0,tf-1,1)
matrix_GWI_NonRW_S1_Ac_7y = (tf-1,2)
GWI_inst_NonRW_S1_Ac_7y = np.zeros(matrix_GWI_NonRW_S1_Ac_7y)
for t in range(0,tf-1):
GWI_inst_NonRW_S1_Ac_7y[t,0] = np.sum(np.multiply(emissions_NonRW_S1_Ac_7y,DCF_CO2_ti[:,t]))
GWI_inst_NonRW_S1_Ac_7y[t,1] = np.sum(np.multiply(emissions_NonRW_S1_Ac_7y_seq,DCF_CO2_ti[:,t]))
matrix_GWI_tot_NonRW_S1_Ac_7y = (tf-1,1)
GWI_inst_tot_NonRW_S1_Ac_7y = np.zeros(matrix_GWI_tot_NonRW_S1_Ac_7y)
GWI_inst_tot_NonRW_S1_Ac_7y[:,0] = np.array(GWI_inst_NonRW_S1_Ac_7y[:,0] + GWI_inst_NonRW_S1_Ac_7y[:,1])
print(GWI_inst_tot_NonRW_S1_Ac_7y[:,0])
#S1_Ac_18y
t = np.arange(0,tf-1,1)
matrix_GWI_NonRW_S1_Ac_18y = (tf-1,2)
GWI_inst_NonRW_S1_Ac_18y = np.zeros(matrix_GWI_NonRW_S1_Ac_18y)
for t in range(0,tf-1):
GWI_inst_NonRW_S1_Ac_18y[t,0] = np.sum(np.multiply(emissions_NonRW_S1_Ac_18y,DCF_CO2_ti[:,t]))
GWI_inst_NonRW_S1_Ac_18y[t,1] = np.sum(np.multiply(emissions_NonRW_S1_Ac_18y_seq,DCF_CO2_ti[:,t]))
matrix_GWI_tot_NonRW_S1_Ac_18y = (tf-1,1)
GWI_inst_tot_NonRW_S1_Ac_18y = np.zeros(matrix_GWI_tot_NonRW_S1_Ac_18y)
GWI_inst_tot_NonRW_S1_Ac_18y[:,0] = np.array(GWI_inst_NonRW_S1_Ac_18y[:,0] + GWI_inst_NonRW_S1_Ac_18y[:,1])
print(GWI_inst_tot_NonRW_S1_Ac_18y[:,0])
#S1_Tgr_40y
t = np.arange(0,tf-1,1)
matrix_GWI_NonRW_S1_Tgr_40y = (tf-1,2)
GWI_inst_NonRW_S1_Tgr_40y = np.zeros(matrix_GWI_NonRW_S1_Tgr_40y)
for t in range(0,tf-1):
GWI_inst_NonRW_S1_Tgr_40y[t,0] = np.sum(np.multiply(emissions_NonRW_S1_Tgr_40y,DCF_CO2_ti[:,t]))
GWI_inst_NonRW_S1_Tgr_40y[t,1] = np.sum(np.multiply(emissions_NonRW_S1_Tgr_40y_seq,DCF_CO2_ti[:,t]))
matrix_GWI_tot_NonRW_S1_Tgr_40y = (tf-1,1)
GWI_inst_tot_NonRW_S1_Tgr_40y = np.zeros(matrix_GWI_tot_NonRW_S1_Tgr_40y)
GWI_inst_tot_NonRW_S1_Tgr_40y[:,0] = np.array(GWI_inst_NonRW_S1_Tgr_40y[:,0] + GWI_inst_NonRW_S1_Tgr_40y[:,1])
print(GWI_inst_tot_NonRW_S1_Tgr_40y[:,0])
#S1_Tgr_60y
t = np.arange(0,tf-1,1)
matrix_GWI_NonRW_S1_Tgr_60y = (tf-1,2)
GWI_inst_NonRW_S1_Tgr_60y = np.zeros(matrix_GWI_NonRW_S1_Tgr_60y)
for t in range(0,tf-1):
GWI_inst_NonRW_S1_Tgr_60y[t,0] = np.sum(np.multiply(emissions_NonRW_S1_Tgr_60y,DCF_CO2_ti[:,t]))
GWI_inst_NonRW_S1_Tgr_60y[t,1] = np.sum(np.multiply(emissions_NonRW_S1_Tgr_60y_seq,DCF_CO2_ti[:,t]))
matrix_GWI_tot_NonRW_S1_Tgr_60y = (tf-1,1)
GWI_inst_tot_NonRW_S1_Tgr_60y = np.zeros(matrix_GWI_tot_NonRW_S1_Tgr_60y)
GWI_inst_tot_NonRW_S1_Tgr_60y[:,0] = np.array(GWI_inst_NonRW_S1_Tgr_60y[:,0] + GWI_inst_NonRW_S1_Tgr_60y[:,1])
print(GWI_inst_tot_NonRW_S1_Tgr_60y[:,0])
#E_Hbr_40y
t = np.arange(0,tf-1,1)
matrix_GWI_NonRW_E_Hbr_40y = (tf-1,2)
GWI_inst_NonRW_E_Hbr_40y = np.zeros(matrix_GWI_NonRW_E_Hbr_40y)
for t in range(0,tf-1):
GWI_inst_NonRW_E_Hbr_40y[t,0] = np.sum(np.multiply(emissions_NonRW_E_Hbr_40y,DCF_CO2_ti[:,t]))
GWI_inst_NonRW_E_Hbr_40y[t,1] = np.sum(np.multiply(emissions_NonRW_E_Hbr_40y_seq,DCF_CO2_ti[:,t]))
matrix_GWI_tot_NonRW_E_Hbr_40y = (tf-1,1)
GWI_inst_tot_NonRW_E_Hbr_40y = np.zeros(matrix_GWI_tot_NonRW_E_Hbr_40y)
GWI_inst_tot_NonRW_E_Hbr_40y[:,0] = np.array(GWI_inst_NonRW_E_Hbr_40y[:,0] + GWI_inst_NonRW_E_Hbr_40y[:,1])
print(GWI_inst_tot_NonRW_E_Hbr_40y[:,0])
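#(Added sketch) Every block in Step (16) follows the same pattern:
#   GWI_inst_tot[t] = sum over flows of sum_i( emission_flow[i] * DCF_flow[i, t] ),
#i.e. a matrix-vector product of each emission series with its time-shifted DCF matrix.
#A generic helper that should give the same totals (a sketch; the explicit blocks
#above remain the results actually used below):
def GWI_inst_total(emission_series, DCF_matrices):
    """Sum each emission series against its time-shifted DCF matrix and add the flows up."""
    total = np.zeros(DCF_matrices[0].shape[1])
    for em, DCF in zip(emission_series, DCF_matrices):
        total += np.asarray(em) @ DCF              # result[t] = sum_i em[i] * DCF[i, t]
    return total
#Example (commented out):
# check = GWI_inst_total([emission_CO2_S1_Ac_7y, emission_CH4_S1_Ac_7y, emission_CO2_seq_S1_Ac_7y],
#                        [DCF_CO2_ti, DCF_CH4_ti, DCF_CO2_ti])
# assert np.allclose(check, GWI_inst_tot_S1_Ac_7y[:,0])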
t = np.arange(0,tf-1,1)
#create zero list to highlight the horizontal line for 0
def zerolistmaker(n):
listofzeros = [0] * (n)
return listofzeros
#convert to flat list
GWI_inst_tot_NonRW_S1_Ac_7y = np.array([item for sublist in GWI_inst_tot_NonRW_S1_Ac_7y for item in sublist])
GWI_inst_tot_NonRW_S1_Ac_18y = np.array([item for sublist in GWI_inst_tot_NonRW_S1_Ac_18y for item in sublist])
GWI_inst_tot_NonRW_S1_Tgr_60y = np.array([item for sublist in GWI_inst_tot_NonRW_S1_Tgr_60y for item in sublist])
GWI_inst_tot_NonRW_E_Hbr_40y = np.array([item for sublist in GWI_inst_tot_NonRW_E_Hbr_40y for item in sublist])
GWI_inst_tot_S1_Ac_7y = np.array([item for sublist in GWI_inst_tot_S1_Ac_7y for item in sublist])
GWI_inst_tot_S1_Ac_18y = np.array([item for sublist in GWI_inst_tot_S1_Ac_18y for item in sublist])
GWI_inst_tot_S1_Tgr_60y = np.array([item for sublist in GWI_inst_tot_S1_Tgr_60y for item in sublist])
GWI_inst_tot_E_Hbr_40y = np.array([item for sublist in GWI_inst_tot_E_Hbr_40y for item in sublist])
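#(Added note) Each GWI_inst_tot_* above is an (n, 1) column vector; the list
#comprehensions simply flatten it. np.ravel() would do the same, e.g. (sketch):
# GWI_inst_tot_S1_Ac_7y = np.ravel(GWI_inst_tot_S1_Ac_7y)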
plt.plot(t, GWI_inst_tot_NonRW_S1_Ac_7y, color='olive', label='NR_M_EC_Ac_7y', ls='--', alpha=0.55)
plt.plot(t, GWI_inst_tot_NonRW_S1_Ac_18y, color='forestgreen', label='NR_M_EC_Ac_18y', ls='--', alpha=0.55)
#plt.plot(t, GWI_inst_tot_NonRW_S1_Tgr_40y, color='lightcoral', label='NR_M_EC_Tgr_40y', ls='--', alpha=0.55)
plt.plot(t, GWI_inst_tot_NonRW_S1_Tgr_60y, color='deeppink', label='NR_M_EC_Tgr_60y', ls='--', alpha=0.55)
plt.plot(t, GWI_inst_tot_NonRW_E_Hbr_40y, color='royalblue', label='NR_E_EC_Hbr_40y', ls='--', alpha=0.55)
plt.plot(t, GWI_inst_tot_S1_Ac_7y, color='olive', label='M_EC_Ac_7y')
plt.plot(t, GWI_inst_tot_S1_Ac_18y, color='forestgreen', label='M_EC_Ac_18y')
#plt.plot(t, GWI_inst_tot_S1_Tgr_40y, color='lightcoral', label='M_EC_Tgr_40y')
plt.plot(t, GWI_inst_tot_S1_Tgr_60y, color='deeppink', label='M_EC_Tgr_60y')
plt.plot(t, GWI_inst_tot_E_Hbr_40y, color='royalblue', label='E_EC_Hbr_40y')
plt.plot(t, zerolistmaker(tf-1), color='black', label='Zero line', ls='--', alpha=0.75)
#plt.fill_between(t, GWI_inst_tot_NonRW_E_Hbr_40y, GWI_inst_tot_NonRW_S1_Tgr_60y, color='lightcoral', alpha=0.3)
#plt.fill_between(t, GWI_inst_tot_NonRW_S1_Ac_7y, GWI_inst_tot_NonRW_S1_Tgr_60y, color='lightcoral', alpha=0.3)
plt.grid(True)
plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
plt.xlim(0,200)
plt.ylim(-1e-9,1.4e-9)
plt.title('Instantaneous GWI, DL_FP_EC')
plt.xlabel('Time (year)')
#plt.ylabel('GWI_inst (10$^{-12}$ W/m$^2$)')
plt.ylabel('GWI_inst (W/m$^2$)')#
plt.savefig(r'C:\Work\Data\ID Future Scenarios\Hectare-based\Fig\GWI_inst_NonRW_DL_FP_S1', dpi=300)  #raw string so the backslashes in the path are kept literally
plt.show()
#%%
#Step (17): Calculate cumulative global warming impact (GWI)
##Wood-based
GWI_cum_S1_Ac_7y = np.cumsum(GWI_inst_tot_S1_Ac_7y)
GWI_cum_S1_Ac_18y = np.cumsum(GWI_inst_tot_S1_Ac_18y)
GWI_cum_S1_Tgr_40y = np.cumsum(GWI_inst_tot_S1_Tgr_40y)
GWI_cum_S1_Tgr_60y = np.cumsum(GWI_inst_tot_S1_Tgr_60y)
GWI_cum_E_Hbr_40y = np.cumsum(GWI_inst_tot_E_Hbr_40y)
##NonRW
GWI_cum_NonRW_S1_Ac_7y = np.cumsum(GWI_inst_tot_NonRW_S1_Ac_7y)
GWI_cum_NonRW_S1_Ac_18y = np.cumsum(GWI_inst_tot_NonRW_S1_Ac_18y)
GWI_cum_NonRW_S1_Tgr_40y = np.cumsum(GWI_inst_tot_NonRW_S1_Tgr_40y)
GWI_cum_NonRW_S1_Tgr_60y = np.cumsum(GWI_inst_tot_NonRW_S1_Tgr_60y)
GWI_cum_NonRW_E_Hbr_40y = np.cumsum(GWI_inst_tot_NonRW_E_Hbr_40y)
#print(GWI_cum_NonRW_S1_Ac_18y)
plt.xlabel('Time (year)')
#plt.ylabel('GWI_cum (10$^{-10}$ W/m$^2$)')
plt.ylabel('GWI_cum (W/m$^2$)')
plt.xlim(0,200)
plt.ylim(-1e-7,1.5e-7)
plt.title('Cumulative GWI, DL_FP_EC')
plt.plot(t, GWI_cum_NonRW_S1_Ac_7y, color='olive', label='NR_M_EC_Ac_7y', ls='--', alpha=0.55)
plt.plot(t, GWI_cum_NonRW_S1_Ac_18y, color='forestgreen', label='NR_M_EC_Ac_18y', ls='--', alpha=0.55)
#plt.plot(t, GWI_cum_NonRW_S1_Tgr_40y, color='lightcoral', label='NR_M_EC_Tgr_40y', ls='--', alpha=0.55)
plt.plot(t, GWI_cum_NonRW_S1_Tgr_60y, color='deeppink', label='NR_M_EC_Tgr_60y', ls='--', alpha=0.55)
plt.plot(t, GWI_cum_NonRW_E_Hbr_40y, color='royalblue', label='NR_E_EC_Hbr_40y', ls='--', alpha=0.55)
plt.plot(t, GWI_cum_S1_Ac_7y, color='olive', label='M_EC_Ac_7y')
plt.plot(t, GWI_cum_S1_Ac_18y, color='forestgreen', label='M_EC_Ac_18y')
#plt.plot(t, GWI_cum_S1_Tgr_40y, color='lightcoral', label='M_EC_Tgr_40y')
plt.plot(t, GWI_cum_S1_Tgr_60y, color='deeppink', label='M_EC_Tgr_60y')
plt.plot(t, GWI_cum_E_Hbr_40y, color='royalblue', label='E_EC_Hbr_40y')
plt.plot(t, zerolistmaker(tf-1), color='black', label='Zero line', ls='--', alpha=0.75)
plt.grid(True)
#plt.fill_between(t, GWI_cum_NonRW_S1_Tgr_60y, GWI_cum_NonRW_S1_Ac_7y, color='lightcoral', alpha=0.3)
plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
plt.savefig(r'C:\Work\Data\ID Future Scenarios\Hectare-based\Fig\GWI_cum_NonRW_DL_FP_EC', dpi=300)  #raw string so the backslashes in the path are kept literally
plt.show()
#%%
#Step (18): Determine the instantaneous and cumulative GWI for the emission reference (1 kg CO2 emitted at time zero), needed before the dynamic GWP calculation
t = np.arange(0,tf-1,1)
matrix_GWI_ref = (tf-1,1)
GWI_inst_ref = np.zeros(matrix_GWI_ref)
for t in range(0,tf-1):
GWI_inst_ref[t,0] = np.sum(np.multiply(emission_CO2_ref,DCF_CO2_ti[:,t]))
#print(GWI_inst_ref[:,0])
len(GWI_inst_ref)
#determine the GWI cumulative for the emission reference
t = np.arange(0,tf-1,1)
GWI_cum_ref = np.cumsum(GWI_inst_ref[:,0])
#print(GWI_cum_ref)
plt.xlabel('Time (year)')
plt.ylabel('GWI_cum_ref (10$^{-13}$ W/m$^2$.kgCO$_2$)')
plt.plot(t, GWI_cum_ref)
len(GWI_cum_ref)
#%%
#Step (19): Calculate dynamic global warming potential (GWPdyn)
##Wood-based
GWP_dyn_cum_S1_Ac_7y = [x/(y*1000) for x,y in zip(GWI_cum_S1_Ac_7y, GWI_cum_ref)]
GWP_dyn_cum_S1_Ac_18y = [x/(y*1000) for x,y in zip(GWI_cum_S1_Ac_18y, GWI_cum_ref)]
GWP_dyn_cum_S1_Tgr_40y = [x/(y*1000) for x,y in zip(GWI_cum_S1_Tgr_40y, GWI_cum_ref)]
GWP_dyn_cum_S1_Tgr_60y = [x/(y*1000) for x,y in zip(GWI_cum_S1_Tgr_60y, GWI_cum_ref)]
GWP_dyn_cum_E_Hbr_40y = [x/(y*1000) for x,y in zip(GWI_cum_E_Hbr_40y, GWI_cum_ref)]
##NonRW
GWP_dyn_cum_NonRW_S1_Ac_7y = [x/(y*1000) for x,y in zip(GWI_cum_NonRW_S1_Ac_7y, GWI_cum_ref)]
GWP_dyn_cum_NonRW_S1_Ac_18y = [x/(y*1000) for x,y in zip(GWI_cum_NonRW_S1_Ac_18y, GWI_cum_ref)]
GWP_dyn_cum_NonRW_S1_Tgr_40y = [x/(y*1000) for x,y in zip(GWI_cum_NonRW_S1_Tgr_40y, GWI_cum_ref)]
GWP_dyn_cum_NonRW_S1_Tgr_60y = [x/(y*1000) for x,y in zip(GWI_cum_NonRW_S1_Tgr_60y, GWI_cum_ref)]
GWP_dyn_cum_NonRW_E_Hbr_40y = [x/(y*1000) for x,y in zip(GWI_cum_NonRW_E_Hbr_40y, GWI_cum_ref)]
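#(Added note) GWPdyn(t) = GWI_cum(t) / GWI_cum_ref(t); the factor 1000 appears to
#rescale the per-kg reference so that the result comes out in t-CO2-eq (treat that
#interpretation as an assumption). A vectorized equivalent of the list
#comprehensions above (sketch):
# GWP_dyn_cum_S1_Ac_7y = (np.asarray(GWI_cum_S1_Ac_7y) / (np.asarray(GWI_cum_ref) * 1000)).tolist()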
#print(GWP_dyn_cum_NonRW_S1_Ac_18y)
fig=plt.figure()
fig.show()
ax=fig.add_subplot(111)
ax.plot(t, GWP_dyn_cum_NonRW_S1_Ac_7y, color='olive', label='NR_M_EC_Ac_7y', ls='--', alpha=0.55)
ax.plot(t, GWP_dyn_cum_NonRW_S1_Ac_18y, color='forestgreen', label='NR_M_EC_Ac_18y', ls='--', alpha=0.55)
#ax.plot(t, GWP_dyn_cum_NonRW_S1_Tgr_40y, color='lightcoral', label='NR_M_EC_Tgr_40y', ls='--', alpha=0.55)
ax.plot(t, GWP_dyn_cum_NonRW_S1_Tgr_60y, color='deeppink', label='NR_M_EC_Tgr_60y', ls='--', alpha=0.55)
ax.plot(t, GWP_dyn_cum_NonRW_E_Hbr_40y, color='royalblue', label='NR_E_EC_Hbr_40y', ls='--', alpha=0.55)
ax.plot(t, GWP_dyn_cum_S1_Ac_7y, color='olive', label='M_EC_Ac_7y')
ax.plot(t, GWP_dyn_cum_S1_Ac_18y, color='forestgreen', label='M_EC_Ac_18y')
#ax.plot(t, GWP_dyn_cum_S1_Tgr_40y, color='lightcoral', label='M_EC_Tgr_40y')
ax.plot(t, GWP_dyn_cum_S1_Tgr_60y, color='deeppink', label='M_EC_Tgr_60y')
ax.plot(t, GWP_dyn_cum_E_Hbr_40y, color='royalblue', label='E_EC_Hbr_40y')
ax.plot(t, zerolistmaker(tf-1), color='black', label='Zero line', ls='--', alpha=0.75)
#plt.fill_between(t, GWP_dyn_cum_NonRW_S1_Ac_7y, GWP_dyn_cum_NonRW_S1_Tgr_60y, color='lightcoral', alpha=0.3)
plt.grid(True)
ax.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
ax.set_xlim(0,200)
ax.set_ylim(-750,1000)
#ax.set_ylim(-600,1500)
ax.set_xlabel('Time (year)')
ax.set_ylabel('GWP$_{dyn}$ (t-CO$_2$-eq)')
ax.set_title('Dynamic GWP, DL_FP_EC')
plt.savefig(r'C:\Work\Data\ID Future Scenarios\Hectare-based\Fig\GWP_dyn_cum_NonRW_DL_FP_S1', dpi=300)  #raw string so the backslashes in the path are kept literally
plt.draw()
#%%
#Step (20): Export the data behind the result graphs to Excel
year = []
for x in range (0, 201):
year.append(x)
### Create Column
Col1 = year
##GWI_Inst
#GWI_inst from wood-based scenarios
Col_GI_1 = GWI_inst_tot_S1_Ac_7y
Col_GI_2 = GWI_inst_tot_S1_Ac_18y
Col_GI_3 = GWI_inst_tot_S1_Tgr_60y
Col_GI_4 = GWI_inst_tot_E_Hbr_40y
#print(Col_GI_1)
#print(np.shape(Col_GI_1))
#GWI_inst from counter use scenarios
Col_GI_5 = GWI_inst_tot_NonRW_S1_Ac_7y
Col_GI_6 = GWI_inst_tot_NonRW_S1_Ac_18y
Col_GI_7 = GWI_inst_tot_NonRW_S1_Tgr_60y
Col_GI_8 = GWI_inst_tot_NonRW_E_Hbr_40y
#print(Col_GI_7)
#print(np.shape(Col_GI_7))
#create column results
##GWI_cumulative
#GWI_cumulative from wood-based scenarios
Col_GC_1 = GWI_cum_S1_Ac_7y
Col_GC_2 = GWI_cum_S1_Ac_18y
Col_GC_3 = GWI_cum_S1_Tgr_60y
Col_GC_4 = GWI_cum_E_Hbr_40y
#GWI_cumulative from counter use scenarios
Col_GC_5 = GWI_cum_NonRW_S1_Ac_7y
Col_GC_6 = GWI_cum_NonRW_S1_Ac_18y
Col_GC_7 = GWI_cum_NonRW_S1_Tgr_60y
Col_GC_8 = GWI_cum_NonRW_E_Hbr_40y
#create column results
##GWPdyn
#GWPdyn from wood-based scenarios
Col_GWP_1 = GWP_dyn_cum_S1_Ac_7y
Col_GWP_2 = GWP_dyn_cum_S1_Ac_18y
Col_GWP_3 = GWP_dyn_cum_S1_Tgr_60y
Col_GWP_4 = GWP_dyn_cum_E_Hbr_40y
#GWPdyn from counter use scenarios
Col_GWP_5 = GWP_dyn_cum_NonRW_S1_Ac_7y
Col_GWP_6 = GWP_dyn_cum_NonRW_S1_Ac_18y
Col_GWP_7 = GWP_dyn_cum_NonRW_S1_Tgr_60y
Col_GWP_8 = GWP_dyn_cum_NonRW_E_Hbr_40y
#Create column results
dfM_EC_GI = pd.DataFrame.from_dict({'Year':Col1,'M_EC_Ac_7y (W/m2)':Col_GI_1, 'M_EC_Ac_18y (W/m2)':Col_GI_2,
'M_EC_Tgr_60y (W/m2)':Col_GI_3, 'E_EC_Hbr_40y (W/m2)':Col_GI_4,
'NR_M_EC_Ac_7y (W/m2)':Col_GI_5, 'NR_M_EC_Ac_18y (W/m2)':Col_GI_6,
'NR_M_EC_Tgr_60y (W/m2)':Col_GI_7, 'NR_E_EC_Hbr_40y (W/m2)':Col_GI_8})
dfM_EC_GC = pd.DataFrame.from_dict({'Year':Col1,'M_EC_Ac_7y (W/m2)':Col_GC_1, 'M_EC_Ac_18y (W/m2)':Col_GC_2,
'M_EC_Tgr_60y (W/m2)':Col_GC_3, 'E_EC_Hbr_40y (W/m2)':Col_GC_4,
'NR_M_EC_Ac_7y (W/m2)':Col_GC_5, 'NR_M_EC_Ac_18y (W/m2)':Col_GC_6,
'NR_M_EC_Tgr_60y (W/m2)':Col_GC_7, 'NR_E_EC_Hbr_40y (W/m2)':Col_GC_8})
dfM_EC_GWPdyn = pd.DataFrame.from_dict({'Year':Col1,'M_EC_Ac_7y (t-CO2-eq)':Col_GWP_1, 'M_EC_Ac_18y (t-CO2-eq)':Col_GWP_2,
'M_EC_Tgr_60y (t-CO2-eq)':Col_GWP_3, 'E_EC_Hbr_40y (t-CO2-eq)':Col_GWP_4,
'NR_M_EC_Ac_7y (t-CO2-eq)':Col_GWP_5, 'NR_M_EC_Ac_18y (t-CO2-eq)':Col_GWP_6,
'NR_M_EC_Tgr_60y (t-CO2-eq)':Col_GWP_7, 'NR_E_EC_Hbr_40y (t-CO2-eq)':Col_GWP_8})
#Export to excel
writer = pd.ExcelWriter('GraphResults_DL_FP_EC.xlsx', engine = 'xlsxwriter')
dfM_EC_GI.to_excel(writer, sheet_name = 'GWI_Inst_DL_FP_EC', header=True, index=False )
dfM_EC_GC.to_excel(writer, sheet_name = 'Cumulative GWI_DL_FP_EC', header=True, index=False )
dfM_EC_GWPdyn.to_excel(writer, sheet_name = 'GWPdyn_DL_FP_EC', header=True, index=False )
writer.save()
writer.close()
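#(Added note) In newer pandas releases ExcelWriter.save() is deprecated (and removed in
#pandas 2.0); if this script is run there, an equivalent pattern is the context manager,
#which saves and closes automatically (sketch, same file and sheet names as above):
# with pd.ExcelWriter('GraphResults_DL_FP_EC.xlsx', engine='xlsxwriter') as writer:
#     dfM_EC_GI.to_excel(writer, sheet_name='GWI_Inst_DL_FP_EC', index=False)
#     dfM_EC_GC.to_excel(writer, sheet_name='Cumulative GWI_DL_FP_EC', index=False)
#     dfM_EC_GWPdyn.to_excel(writer, sheet_name='GWPdyn_DL_FP_EC', index=False)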
#%%
#Step (21): Generate the Excel file for the individual carbon emission and sequestration flows
#print year column
year = []
for x in range (0, 201):
year.append(x)
print (year)
division = 1000*44/12
division_CH4 = 1000*16/12
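#(Added note) These divisors convert kilograms of gas to tonnes of carbon via the
#molar-mass ratios: 44/12 kg CO2 per kg C and 16/12 kg CH4 per kg C, with the extra
#1000 taking kg to tonnes. Quick worked example (commented out):
#   3666.7 kg CO2 / (1000*44/12) ~= 1.0 t C
# assert abs(3666.7 / division - 1.0) < 1e-3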
#M_Ac_7y
c_firewood_energy_S1_Ac7 = [x/division for x in c_firewood_energy_S1_Ac7]
decomp_tot_S1_Ac_7y[:,0] = [x/division for x in decomp_tot_S1_Ac_7y[:,0]]
TestDSM1_Ac7.o = [x/division for x in TestDSM1_Ac7.o]
PH_Emissions_HWP1_Ac_7y = [x/division for x in PH_Emissions_HWP1_Ac_7y]
#OC_storage_S1_Ac7 = [x/division for x in OC_storage_S1_Ac7]
flat_list_Ac_7y = [x/division for x in flat_list_Ac_7y]
decomp_tot_CO2_S1_Ac_7y[:,0] = [x/division for x in decomp_tot_CO2_S1_Ac_7y[:,0]]
decomp_tot_CH4_S1_Ac_7y[:,0] = [x/division_CH4 for x in decomp_tot_CH4_S1_Ac_7y[:,0]]
#M_Ac_18y
c_firewood_energy_S1_Ac18 = [x/division for x in c_firewood_energy_S1_Ac18]
decomp_tot_S1_Ac_18y[:,0] = [x/division for x in decomp_tot_S1_Ac_18y[:,0]]
TestDSM1_Ac18.o = [x/division for x in TestDSM1_Ac18.o]
PH_Emissions_HWP1_Ac_18y = [x/division for x in PH_Emissions_HWP1_Ac_18y]
#OC_storage_S1_Ac18 = [x/division for x in OC_storage_S1_Ac18]
flat_list_Ac_18y = [x/division for x in flat_list_Ac_18y]
decomp_tot_CO2_S1_Ac_18y[:,0] = [x/division for x in decomp_tot_CO2_S1_Ac_18y[:,0]]
decomp_tot_CH4_S1_Ac_18y[:,0] = [x/division_CH4 for x in decomp_tot_CH4_S1_Ac_18y[:,0]]
#M_Tgr_60y
c_firewood_energy_S1_Tgr60 = [x/division for x in c_firewood_energy_S1_Tgr60]
decomp_tot_S1_Tgr_60y[:,0] = [x/division for x in decomp_tot_S1_Tgr_60y[:,0]]
TestDSM1_Tgr60.o = [x/division for x in TestDSM1_Tgr60.o]
PH_Emissions_HWP1_Tgr_60y = [x/division for x in PH_Emissions_HWP1_Tgr_60y]
#OC_storage_S1_Tgr60 = [x/division for x in OC_storage_S1_Tgr60]
flat_list_Tgr_60y = [x/division for x in flat_list_Tgr_60y]
decomp_tot_CO2_S1_Tgr_60y[:,0] = [x/division for x in decomp_tot_CO2_S1_Tgr_60y[:,0]]
decomp_tot_CH4_S1_Tgr_60y[:,0] = [x/division_CH4 for x in decomp_tot_CH4_S1_Tgr_60y[:,0]]
#E_Hbr_40y
c_firewood_energy_E_Hbr40 = [x/division for x in c_firewood_energy_E_Hbr40]
c_pellets_Hbr_40y = [x/division for x in c_pellets_Hbr_40y]
decomp_tot_E_Hbr_40y[:,0] = [x/division for x in decomp_tot_E_Hbr_40y[:,0]]
TestDSME_Hbr40.o = [x/division for x in TestDSME_Hbr40.o]
PH_Emissions_HWPE_Hbr_40y = [x/division for x in PH_Emissions_HWPE_Hbr_40y]
#OC_storage_E_Hbr40 = [x/division for x in OC_storage_E_Hbr40]
flat_list_Hbr_40y = [x/division for x in flat_list_Hbr_40y]
decomp_tot_CO2_E_Hbr_40y[:,0] = [x/division for x in decomp_tot_CO2_E_Hbr_40y[:,0]]
decomp_tot_CH4_E_Hbr_40y[:,0] = [x/division_CH4 for x in decomp_tot_CH4_E_Hbr_40y[:,0]]
#landfill aggregate flows
Landfill_decomp_DL_FP_S1_Ac_7y = decomp_tot_CH4_S1_Ac_7y, decomp_tot_CO2_S1_Ac_7y
Landfill_decomp_DL_FP_S1_Ac_18y = decomp_tot_CH4_S1_Ac_18y, decomp_tot_CO2_S1_Ac_18y
Landfill_decomp_DL_FP_S1_Tgr_60y = decomp_tot_CH4_S1_Tgr_60y, decomp_tot_CO2_S1_Tgr_60y
Landfill_decomp_DL_FP_E_Hbr_40y = decomp_tot_CH4_E_Hbr_40y, decomp_tot_CO2_E_Hbr_40y
Landfill_decomp_DL_FP_S1_Ac_7y = [sum(x) for x in zip(*Landfill_decomp_DL_FP_S1_Ac_7y)]
Landfill_decomp_DL_FP_S1_Ac_18y = [sum(x) for x in zip(*Landfill_decomp_DL_FP_S1_Ac_18y)]
Landfill_decomp_DL_FP_S1_Tgr_60y = [sum(x) for x in zip(*Landfill_decomp_DL_FP_S1_Tgr_60y)]
Landfill_decomp_DL_FP_E_Hbr_40y = [sum(x) for x in zip(*Landfill_decomp_DL_FP_E_Hbr_40y)]
Landfill_decomp_DL_FP_S1_Ac_7y = [item for sublist in Landfill_decomp_DL_FP_S1_Ac_7y for item in sublist]
Landfill_decomp_DL_FP_S1_Ac_18y = [item for sublist in Landfill_decomp_DL_FP_S1_Ac_18y for item in sublist]
Landfill_decomp_DL_FP_S1_Tgr_60y = [item for sublist in Landfill_decomp_DL_FP_S1_Tgr_60y for item in sublist]
Landfill_decomp_DL_FP_E_Hbr_40y = [item for sublist in Landfill_decomp_DL_FP_E_Hbr_40y for item in sublist]
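#(Added note) The tuple + zip + flatten sequence above is just an element-wise sum of the
#CH4 and CO2 landfill flows (both already converted to t-C). An equivalent one-liner (sketch):
# Landfill_decomp_DL_FP_S1_Ac_7y = (np.ravel(decomp_tot_CH4_S1_Ac_7y) + np.ravel(decomp_tot_CO2_S1_Ac_7y)).tolist()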
#M_Ac_7y
Column1 = year
Column2 = c_firewood_energy_S1_Ac7
Column3 = decomp_tot_S1_Ac_7y[:,0]
Column4 = TestDSM1_Ac7.o
Column5 = PH_Emissions_HWP1_Ac_7y
#Column6_1 = OC_storage_S1_Ac7
Column6 = Landfill_decomp_DL_FP_S1_Ac_7y
Column7 = flat_list_Ac_7y
#M_Ac_18y
Column8 = c_firewood_energy_S1_Ac18
Column9 = decomp_tot_S1_Ac_18y[:,0]
Column10 = TestDSM1_Ac18.o
Column11 = PH_Emissions_HWP1_Ac_18y
#Column12_1 = OC_storage_S1_Ac18
Column12 = Landfill_decomp_DL_FP_S1_Ac_18y
Column13 = flat_list_Ac_18y
#M_Tgr_60y
Column14 = c_firewood_energy_S1_Tgr60
Column15 = decomp_tot_S1_Tgr_60y[:,0]
Column16 = TestDSM1_Tgr60.o
Column17 = PH_Emissions_HWP1_Tgr_60y
#Column18_1 = OC_storage_S1_Tgr60
Column18 = Landfill_decomp_DL_FP_S1_Tgr_60y
Column19 = flat_list_Tgr_60y
#E_Hbr_40y
Column20 = c_firewood_energy_E_Hbr40
Column20_1 = c_pellets_Hbr_40y
Column21 = decomp_tot_E_Hbr_40y[:,0]
Column22 = TestDSME_Hbr40.o
Column23 = PH_Emissions_HWPE_Hbr_40y
#Column24_1 = OC_storage_E_Hbr40
Column24 = Landfill_decomp_DL_FP_E_Hbr_40y
Column25 = flat_list_Hbr_40y
#create columns
dfM_Ac_7y = pd.DataFrame.from_dict({'Year':Column1,'F0-1: Biomass C sequestration (t-C)':Column7,
# '9: Landfill storage (t-C)':Column6_1,
'F1-0: Residue decomposition (t-C)':Column3,
'F6-0-1: Emissions from firewood/other energy use (t-C)':Column2,
'F8-0: Operational stage/processing emissions (t-C)':Column5,
'F6-0-2: Energy use emissions from in-use stocks outflow (t-C)':Column4,
'F7-0: Landfill gas decomposition (t-C)':Column6})
dfM_Ac_18y = pd.DataFrame.from_dict({'Year':Column1,'F0-1: Biomass C sequestration (t-C)':Column13,
# '9: Landfill storage (t-C)':Column12_1,
'F1-0: Residue decomposition (t-C)':Column9,
'F6-0-1: Emissions from firewood/other energy use (t-C)':Column8,
'F8-0: Operational stage/processing emissions (t-C)':Column11,
'F6-0-2: Energy use emissions from in-use stocks outflow (t-C)':Column10,
'F7-0: Landfill gas decomposition (t-C)':Column12})
dfE_Tgr_60y = pd.DataFrame.from_dict({'Year':Column1,'F0-1: Biomass C sequestration (t-C)':Column19,
# '9: Landfill storage (t-C)':Column18_1,
'F1-0: Residue decomposition (t-C)':Column15,
'F6-0-1: Emissions from firewood/other energy use (t-C)':Column14,
'F8-0: Operational stage/processing emissions (t-C)':Column17,
'F6-0-2: Energy use emissions from in-use stocks outflow (t-C)':Column16,
'F7-0: Landfill gas decomposition (t-C)':Column18})
dfE_Hbr_40y = pd.DataFrame.from_dict({'Year':Column1,'F0-1: Biomass C sequestration (t-C)':Column25,
# '9: Landfill storage (t-C)':Column24_1,
'F1-0: Residue decomposition (t-C)':Column21,
'F6-0-1: Emissions from firewood/other energy use (t-C)':Column20,
'F8-0: Operational stage/processing emissions (t-C)':Column23,
'F6-0-2: Energy use emissions from in-use stocks outflow (t-C)':Column22,
'F7-0: Landfill gas decomposition (t-C)':Column24,
'F4-0: Emissions from wood pellets use (t-C)':Column20_1})
writer = pd.ExcelWriter('C_flows_DL_FP_EC.xlsx', engine = 'xlsxwriter')
dfM_Ac_7y.to_excel(writer, sheet_name = 'DL_FP_M_Ac_7y (EC)', header=True, index=False)
dfM_Ac_18y.to_excel(writer, sheet_name = 'DL_FP_M_Ac_18y (EC)', header=True, index=False)
dfE_Tgr_60y.to_excel(writer, sheet_name = 'DL_FP_M_Tgr_60y (EC)', header=True, index=False)
dfE_Hbr_40y.to_excel(writer, sheet_name = 'DL_FP_E_Hbr_40y (EC)', header=True, index=False)
writer.save()
writer.close()
#%%
#Step (22): Plot of the individual carbon emission and sequestration flows for normal and symlog-scale graphs
#DL_FP_M_EC_Ac_7y (Existing conversion efficiency)
fig=plt.figure()
fig.show()
ax1_s=fig.add_subplot(111)
#plot
ax1_s.plot(t, flat_list_Ac_7y, color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax1_s.plot(t, OC_storage_S1_Ac7, color='darkturquoise', label='9: Landfill storage')
ax1_s.plot(t, decomp_tot_S1_Ac_7y[:,0], color='lightcoral', label='F1-0: Residue decomposition')
ax1_s.plot(t, c_firewood_energy_S1_Ac7, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax1_s.plot(t, PH_Emissions_HWP1_Ac_7y, color='orange', label='F8-0: Operational stage/processing emissions')
ax1_s.plot(t, TestDSM1_Ac7.o, color='royalblue', label='F6-0-2: Energy use emissions from in-use stocks outflow')
ax1_s.plot(t, Landfill_decomp_DL_FP_S1_Ac_7y, color='yellow', label='F7-0: Landfill gas decomposition')
ax1_s.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
ax1_s.set_xlim(-1,200)
ax1_s.set_yscale('symlog')
ax1_s.set_xlabel('Time (year)')
ax1_s.set_ylabel('C flows (t-C) (symlog)')
ax1_s.set_title('Carbon flow, DL_FP_M_EC_Ac_7y (EC) (symlog-scale)')
plt.show()
#%%
#plot for the individual carbon flows
#DL_FP_M_EC_Ac_7y (Existing conversion efficiency)
fig=plt.figure()
fig.show()
ax1=fig.add_subplot(111)
ax1.plot(t, flat_list_Ac_7y, color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax1.plot(t, OC_storage_S1_Ac7, color='darkturquoise', label='9: Landfill storage')
ax1.plot(t, decomp_tot_S1_Ac_7y[:,0], color='lightcoral', label='F1-0: Residue decomposition')
ax1.plot(t, c_firewood_energy_S1_Ac7, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax1.plot(t, PH_Emissions_HWP1_Ac_7y, color='orange', label='F8-0: Operational stage/processing emissions')
ax1.plot(t, TestDSM1_Ac7.o, color='royalblue', label='F6-0-2: Energy use emissions from in-use stocks outflow')
ax1.plot(t, Landfill_decomp_DL_FP_S1_Ac_7y, color='yellow', label='F7-0: Landfill gas decomposition')
ax1.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
ax1.set_xlim(0,200)
ax1.set_xlabel('Time (year)')
ax1.set_ylabel('C flows (t-C)')
ax1.set_title('Carbon flow, DL_FP_M_Ac_7y (EC)')
#plt.savefig('C:\Work\Data\ID Future Scenarios\Hectare-based\Fig\GWP_dyn_1_RIL_M')
plt.draw()
#%%
#plot for the individual carbon flows - test for symlog-scale graphs
#DL_FP_M_EC_Ac_18y (Existing conversion efficiency)
fig=plt.figure()
fig.show()
ax2_s=fig.add_subplot(111)
#plot
ax2_s.plot(t, flat_list_Ac_18y, color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax2_s.plot(t, OC_storage_S1_Ac18, color='darkturquoise', label='9: Landfill storage')
ax2_s.plot(t, decomp_tot_S1_Ac_18y[:,0], color='lightcoral', label='F1-0: Residue decomposition')
ax2_s.plot(t, c_firewood_energy_S1_Ac18, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax2_s.plot(t, PH_Emissions_HWP1_Ac_18y, color='orange', label='F8-0: Operational stage/processing emissions')
ax2_s.plot(t, TestDSM1_Ac18.o, color='royalblue', label='F6-0-2: Energy use emissions from in-use stocks outflow')
ax2_s.plot(t, Landfill_decomp_DL_FP_S1_Ac_18y, color='yellow', label='F7-0: Landfill gas decomposition')
ax2_s.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
ax2_s.set_xlim(-1,200)
ax2_s.set_yscale('symlog')
ax2_s.set_xlabel('Time (year)')
ax2_s.set_ylabel('C flows (t-C) (symlog)')
ax2_s.set_title('Carbon flow, DL_FP_M_EC_Ac_18y (EC) (symlog-scale)')
plt.show()
#%%
#plot for the individual carbon flows
#DL_FP_M_EC_Ac_18y (Existing conversion efficiency)
fig=plt.figure()
fig.show()
ax2=fig.add_subplot(111)
#plot
ax2.plot(t, flat_list_Ac_18y, color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax2.plot(t, OC_storage_S1_Ac18, color='darkturquoise', label='9: Landfill storage')
ax2.plot(t, decomp_tot_S1_Ac_18y[:,0], color='lightcoral', label='F1-0: Residue decomposition')
ax2.plot(t, c_firewood_energy_S1_Ac18, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax2.plot(t, PH_Emissions_HWP1_Ac_18y, color='orange', label='F8-0: Operational stage/processing emissions')
ax2.plot(t, TestDSM1_Ac18.o, color='royalblue', label='F6-0-2: Energy use emissions from in-use stocks outflow')
ax2.plot(t, Landfill_decomp_DL_FP_S1_Ac_18y, color='yellow', label='F7-0: Landfill gas decomposition')
ax2.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
ax2.set_xlim(0,200)
ax2.set_xlabel('Time (year)')
ax2.set_ylabel('C flows (t-C)')
ax2.set_title('Carbon flow, DL_FP_M_Ac_18y (EC)')
#plt.savefig('C:\Work\Data\ID Future Scenarios\Hectare-based\Fig\GWP_dyn_1_RIL_M')
plt.draw()
#%%
#plot for the individual carbon flows - test for symlog-scale graphs
#DL_FP_M_EC_Tgr_60y (Existing conversion efficiency)
fig=plt.figure()
fig.show()
ax3_s=fig.add_subplot(111)
#plot
ax3_s.plot(t, flat_list_Tgr_60y, color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax3_s.plot(t, OC_storage_S1_Tgr60, color='darkturquoise', label='9: Landfill storage')
ax3_s.plot(t, decomp_tot_S1_Tgr_60y[:,0], color='lightcoral', label='F1-0: Residue decomposition')
ax3_s.plot(t, c_firewood_energy_S1_Tgr60, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax3_s.plot(t, PH_Emissions_HWP1_Tgr_60y, color='orange', label='F8-0: Operational stage/processing emissions')
ax3_s.plot(t, TestDSM1_Tgr60.o, color='royalblue', label='F6-0-2: Energy use emissions from in-use stocks outflow')
ax3_s.plot(t, Landfill_decomp_DL_FP_S1_Tgr_60y, color='yellow', label='F7-0: Landfill gas decomposition')
ax3_s.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
ax3_s.set_xlim(-1,200)
ax3_s.set_yscale('symlog')
ax3_s.set_xlabel('Time (year)')
ax3_s.set_ylabel('C flows (t-C) (symlog)')
ax3_s.set_title('Carbon flow, DL_FP_M_EC_Tgr_60y (EC) (symlog-scale)')
plt.show()
#%%
#plot for the individual carbon flows
#DL_FP_M_EC_Tgr_60y (Existing conversion efficiency)
fig=plt.figure()
fig.show()
ax3=fig.add_subplot(111)
#plot
ax3.plot(t, flat_list_Tgr_60y, color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax3.plot(t, OC_storage_S1_Tgr60, color='darkturquoise', label='9: Landfill storage')
ax3.plot(t, decomp_tot_S1_Tgr_60y[:,0], color='lightcoral', label='F1-0: Residue decomposition')
ax3.plot(t, c_firewood_energy_S1_Tgr60, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax3.plot(t, PH_Emissions_HWP1_Tgr_60y, color='orange', label='F8-0: Operational stage/processing emissions')
ax3.plot(t, TestDSM1_Tgr60.o, color='royalblue', label='F6-0-2: Energy use emissions from in-use stocks outflow')
ax3.plot(t, Landfill_decomp_DL_FP_S1_Tgr_60y, color='yellow', label='F7-0: Landfill gas decomposition')
ax3.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
ax3.set_xlim(0,200)
ax3.set_xlabel('Time (year)')
ax3.set_ylabel('C flows (t-C)')
ax3.set_title('Carbon flow, DL_FP_M_Tgr_60y (EC)')
#plt.savefig('C:\Work\Data\ID Future Scenarios\Hectare-based\Fig\GWP_dyn_1_RIL_M')
plt.draw()
#%%
#plot for the individual carbon flows - test for symlog-scale graphs
#DL_FP_E_EC_Hbr_40y (Existing conversion efficiency)
fig=plt.figure()
fig.show()
ax4_s=fig.add_subplot(111)
#plot
ax4_s.plot(t, flat_list_Hbr_40y, color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax4_s.plot(t, OC_storage_E_Hbr40, color='darkturquoise', label='9: Landfill storage')
ax4_s.plot(t, decomp_tot_E_Hbr_40y[:,0], color='lightcoral', label='F1-0: Residue decomposition')
ax4_s.plot(t, c_firewood_energy_E_Hbr40, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax4_s.plot(t, PH_Emissions_HWPE_Hbr_40y, color='orange', label='F8-0: Operational stage/processing emissions')
ax4_s.plot(t, Landfill_decomp_DL_FP_E_Hbr_40y, color='yellow', label='F7-0: Landfill gas decomposition')
ax4_s.plot(t, c_pellets_Hbr_40y, color='slategrey', label='F4-0: Emissions from wood pellets use')
#ax4_s.plot(t, TestDSME_Hbr40.o, label='in-use stock output')
ax4_s.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
ax4_s.set_xlim(-1,200)
ax4_s.set_yscale('symlog')
ax4_s.set_xlabel('Time (year)')
ax4_s.set_ylabel('C flows (t-C) (symlog)')
ax4_s.set_title('Carbon flow, DL_FP_E_EC_Hbr_40y (EC) (symlog-scale)')
plt.show()
#%%
#plot for the individual carbon flows
#DL_FP_E_Hbr_40y (Existing conversion efficiency)
fig=plt.figure()
fig.show()
ax4=fig.add_subplot(111)
#plot
ax4.plot(t, flat_list_Hbr_40y, color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax4.plot(t, OC_storage_E_Hbr40, color='darkturquoise', label='9: Landfill storage')
ax4.plot(t, decomp_tot_E_Hbr_40y[:,0], color='lightcoral', label='F1-0: Residue decomposition')
ax4.plot(t, c_firewood_energy_E_Hbr40, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax4.plot(t, PH_Emissions_HWPE_Hbr_40y, color='orange', label='F8-0: Operational stage/processing emissions')
ax4.plot(t, Landfill_decomp_DL_FP_E_Hbr_40y, color='yellow', label='F7-0: Landfill gas decomposition')
ax4.plot(t, c_pellets_Hbr_40y, color='slategrey', label='F4-0: Emissions from wood pellets use')
#ax_g.plot(t, TestDSME_Hbr40.o, label='in-use stock output')
ax4.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
ax4.set_xlim(0,200)
ax4.set_xlabel('Time (year)')
ax4.set_ylabel('C flows (t-C)')
ax4.set_title('Carbon flow, DL_FP_E_Hbr_40y (EC)')
#plt.savefig('C:\Work\Data\ID Future Scenarios\Hectare-based\Fig\GWP_dyn_1_RIL_M')
plt.draw()
#%%
#Step (23): Generate the Excel file for the net carbon balance
Agg_Cflow_S1_Ac_7y = [c_firewood_energy_S1_Ac7, decomp_tot_S1_Ac_7y[:,0], TestDSM1_Ac7.o, PH_Emissions_HWP1_Ac_7y, Landfill_decomp_DL_FP_S1_Ac_7y, flat_list_Ac_7y]
Agg_Cflow_S1_Ac_18y = [c_firewood_energy_S1_Ac18, decomp_tot_S1_Ac_18y[:,0], TestDSM1_Ac18.o, PH_Emissions_HWP1_Ac_18y, Landfill_decomp_DL_FP_S1_Ac_18y, flat_list_Ac_18y]
Agg_Cflow_S1_Tgr_60y = [c_firewood_energy_S1_Tgr60, decomp_tot_S1_Tgr_60y[:,0], TestDSM1_Tgr60.o, PH_Emissions_HWP1_Tgr_60y, Landfill_decomp_DL_FP_S1_Tgr_60y, flat_list_Tgr_60y]
Agg_Cflow_E_Hbr_40y = [c_firewood_energy_E_Hbr40, c_pellets_Hbr_40y, decomp_tot_E_Hbr_40y[:,0], TestDSME_Hbr40.o, PH_Emissions_HWPE_Hbr_40y, Landfill_decomp_DL_FP_E_Hbr_40y, flat_list_Hbr_40y]
Agg_Cflow_DL_FP_S1_Ac_7y = [sum(x) for x in zip(*Agg_Cflow_S1_Ac_7y)]
Agg_Cflow_DL_FP_S1_Ac_18y = [sum(x) for x in zip(*Agg_Cflow_S1_Ac_18y)]
Agg_Cflow_DL_FP_S1_Tgr_60y = [sum(x) for x in zip(*Agg_Cflow_S1_Tgr_60y)]
Agg_Cflow_DL_FP_E_Hbr_40y = [sum(x) for x in zip(*Agg_Cflow_E_Hbr_40y)]
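#(Added note) Each Agg_Cflow_DL_FP_* above is the element-wise (year-by-year) sum of the
#listed flows, i.e. the net carbon balance per year. A numpy equivalent (sketch):
# Agg_Cflow_DL_FP_S1_Ac_7y = np.sum(np.column_stack(Agg_Cflow_S1_Ac_7y), axis=1).tolist()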
#create column year
year = []
for x in range (0, 201):
year.append(x)
print (year)
#Create column results
dfM_DL_FP_EC = pd.DataFrame.from_dict({'Year':year,'M_EC_Ac_7y (t-C)':Agg_Cflow_DL_FP_S1_Ac_7y, 'M_EC_Ac_18y (t-C)':Agg_Cflow_DL_FP_S1_Ac_18y,
'M_EC_Tgr_60y (t-C)':Agg_Cflow_DL_FP_S1_Tgr_60y, 'E_EC_Hbr_40y (t-C)':Agg_Cflow_DL_FP_E_Hbr_40y})
#Export to excel
writer = pd.ExcelWriter('AggCFlow_DL_FP_EC.xlsx', engine = 'xlsxwriter')
dfM_DL_FP_EC.to_excel(writer, sheet_name = 'DL_FP_EC', header=True, index=False)
writer.save()
writer.close()
#%%
#Step (24): Plot the net carbon balance
fig=plt.figure()
fig.show()
ax5=fig.add_subplot(111)
# plot
ax5.plot(t, Agg_Cflow_DL_FP_S1_Ac_7y, color='orange', label='M_EC_Ac_7y')
ax5.plot(t, Agg_Cflow_DL_FP_S1_Ac_18y, color='darkturquoise', label='M_EC_Ac_18y')
ax5.plot(t, Agg_Cflow_DL_FP_S1_Tgr_60y, color='lightcoral', label='M_EC_Tgr_60y')
ax5.plot(t, Agg_Cflow_DL_FP_E_Hbr_40y, color='mediumseagreen', label='E_EC_Hbr_40y')
ax5.plot(t, zerolistmaker(tf-1), color='black', label='Zero line', ls='--', alpha=0.75)
ax5.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
ax5.set_xlim(-1,200)
ax5.set_ylim(-25,220)
#ax5.set_yscale('symlog')
ax5.set_xlabel('Time (year)')
ax5.set_ylabel('C flows (t-C)')
ax5.set_title('Net carbon balance, DL_FP_EC')
plt.show()
#%%
#Step (25): Generate the Excel file for documentation of individual carbon flows in the system definition (Fig. 1)
#print year column
year = []
for x in range (0, 201):
year.append(x)
print (year)
#re-read the DL_FP scenario sheets for the system-definition flows below
#(variables are named df1_*/dfE_Hbr40 so that they match the names used in the flow calculations that follow)
df1_Ac7 = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S1_Ac_7y')
df1_Ac18 = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S1_Ac_18y')
df1_Tgr40 = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S1_Tgr_40y')
df1_Tgr60 = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S1_Tgr_60y')
dfE_Hbr40 = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_E_Hbr_40y')
Column1 = year
division = 1000*44/12
division_CH4 = 1000*16/12
## S1_Ac_7y
## define the input flow for the landfill (F5-7)
OC_storage_S1_Ac7 = df1_Ac7['Other_C_storage'].values
OC_storage_S1_Ac7 = [x/division for x in OC_storage_S1_Ac7]
OC_storage_S1_Ac7 = [abs(number) for number in OC_storage_S1_Ac7]
C_LF_S1_Ac7 = [x*1/0.82 for x in OC_storage_S1_Ac7]
## define the input flow from the logging/harvesting to wood materials/pellets processing (F2-3)
HWP_S1_Ac7 = [x/division for x in df1_Ac7['Input_PF'].values]
HWP_S1_Ac7_energy = [x*1/3 for x in c_firewood_energy_S1_Ac7]
HWP_S1_Ac7_landfill = [x*1/0.82 for x in OC_storage_S1_Ac7]
HWP_S1_Ac7_sum = [HWP_S1_Ac7, HWP_S1_Ac7_energy, HWP_S1_Ac7_landfill]
HWP_S1_Ac7_sum = [sum(x) for x in zip(*HWP_S1_Ac7_sum )]
#in-use stocks (S-4)
TestDSM1_Ac7.s = [x/division for x in TestDSM1_Ac7.s]
#TestDSM1_Ac7.i = [x/division for x in TestDSM1_Ac7.i]
#calculate the F1-2
#In general, F1-2 = F2-3 + F2-6.
#To split F1-2 into F1a-2 and F1c-2, the flow from the initial land conversion (PF) has to be separated from that of the subsequent land use (FP)
#create F1a-2
#tf = 201
#zero_PF_S2_Ac_7y = (tf,1)
#PF_S2_Ac_7y = np.zeros(zero_PF_S2_Ac_7y)
#PF_S2_Ac_7y = [x1+x2 for (x1,x2) in zip(HWP_S2_Ac7_sum, [x*2/3 for x in c_firewood_energy_S2_Ac7])][0:8]
#create F1c-2
#zero_FP_S2_Ac_7y = (tf,1)
#FP_S2_Ac_7y = np.zeros(zero_FP_S2_Ac_7y)
#FP_S2_Ac_7y = [x1+x2 for (x1,x2) in zip(HWP_S2_Ac7_sum, [x*2/3 for x in c_firewood_energy_S2_Ac7])][8:tf]
# calculate C stocks in landfill (S-7)
tf = 201
zero_matrix_stocks_S1_Ac_7y = (tf,1)
stocks_S1_Ac_7y = np.zeros(zero_matrix_stocks_S1_Ac_7y)
i = 0
stocks_S1_Ac_7y[0] = C_LF_S1_Ac7[0] - Landfill_decomp_DL_FP_S1_Ac_7y[0]
while i < tf-1:
stocks_S1_Ac_7y[i+1] = np.array(C_LF_S1_Ac7[i+1] - Landfill_decomp_DL_FP_S1_Ac_7y[i+1] + stocks_S1_Ac_7y[i])
i = i + 1
## calculate aggregate flow of logged wood (F1-2)
HWP_logged_S1_Ac_7y = [x1+x2 for (x1,x2) in zip(HWP_S1_Ac7_sum, [x*2/3 for x in c_firewood_energy_S1_Ac7])]
## calculate the stocks in the forest (AGB + undecomposed residue) (S-1a+S-1c)
tf = 201
zero_matrix_ForCstocks_S1_Ac_7y = (tf,1)
ForCstocks_S1_Ac_7y = np.zeros(zero_matrix_ForCstocks_S1_Ac_7y)
i = 0
ForCstocks_S1_Ac_7y[0] = initAGB - flat_list_Ac_7y[0] - decomp_tot_S1_Ac_7y[0] - HWP_logged_S1_Ac_7y[0]
while i < tf-1:
ForCstocks_S1_Ac_7y[i+1] = np.array(ForCstocks_S1_Ac_7y[i] - flat_list_Ac_7y[i+1] - decomp_tot_S1_Ac_7y[i+1] - HWP_logged_S1_Ac_7y[i+1])
i = i + 1
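#(Added note) Both while-loops above are running balances, so they can also be written as
#cumulative sums (sketch; should reproduce the loop results, commented out so nothing is re-run):
# stocks_S1_Ac_7y_check = np.cumsum(np.asarray(C_LF_S1_Ac7) - np.asarray(Landfill_decomp_DL_FP_S1_Ac_7y))
# ForCstocks_S1_Ac_7y_check = initAGB - np.cumsum(np.asarray(flat_list_Ac_7y)
#                                                 + np.ravel(decomp_tot_S1_Ac_7y)
#                                                 + np.asarray(HWP_logged_S1_Ac_7y))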
##NonRW materials/energy amount (F9-0-1)
df1_amount_Ac7 = pd.read_excel('C:\\Work\\Programming\\Practice\\NonRW_DL_FP.xlsx', 'DL_FP_S1_Ac_7y')
NonRW_amount_S1_Ac_7y = df1_amount_Ac7['NonRW_amount'].values
NonRW_amount_S1_Ac_7y = [x/1000 for x in NonRW_amount_S1_Ac_7y]
##NonRW emissions (F9-0-2)
emissions_NonRW_S1_Ac_7y = [x/division for x in emissions_NonRW_S1_Ac_7y]
#create columns
dfM_Ac_7y = pd.DataFrame.from_dict({'Year':Column1,
'F0-1 (t-C)': flat_list_Ac_7y,
'F1-0 (t-C)': decomp_tot_S1_Ac_7y[:,0],
#'F1a-2 (t-C)': PF_S1_Ac_7y,
#'F1c-2 (t-C)': FP_S1_Ac_7y,
'F1-2 (t-C)': HWP_logged_S1_Ac_7y,
'St-1 (t-C)':ForCstocks_S1_Ac_7y[:,0],
'F2-3 (t-C)': HWP_S1_Ac7_sum,
'F2-6 (t-C)': [x*2/3 for x in c_firewood_energy_S1_Ac7],
'SM/E (t-C)': [x1-x2-x3 for (x1,x2,x3) in zip(HWP_S1_Ac7_sum, [x*1/0.82 for x in OC_storage_S1_Ac7], [x*1/3 for x in c_firewood_energy_S1_Ac7])],
'F3-5 (t-C)':[x*1/0.82 for x in OC_storage_S1_Ac7],
'F3-6 (t-C)': [x*1/3 for x in c_firewood_energy_S1_Ac7],
# 'F4-0 (t-C)':,
'St-4 (t-C)': TestDSM1_Ac7.s,
#'S-4-i (t-C)': TestDSM1_Ac7.i,
'F4-5 (t-C)': TestDSM1_Ac7.o,
'F5-6 (t-C)': TestDSM1_Ac7.o,
'F5-7 (t-C)': C_LF_S1_Ac7,
'F6-0-1 (t-C)': c_firewood_energy_S1_Ac7,
'F6-0-2 (t-C)': TestDSM1_Ac7.o,
'St-7 (t-C)': stocks_S1_Ac_7y[:,0],
'F7-0 (t-C)': Landfill_decomp_DL_FP_S1_Ac_7y,
'F8-0 (t-C)': PH_Emissions_HWP1_Ac_7y,
'S9-0 (t)': NonRW_amount_S1_Ac_7y,
'F9-0 (t-C)': emissions_NonRW_S1_Ac_7y,
})
##S1_Ac_18y
## define the input flow for the landfill (F5-7)
OC_storage_S1_Ac18 = df1_Ac18['Other_C_storage'].values
OC_storage_S1_Ac18 = [x/division for x in OC_storage_S1_Ac18]
OC_storage_S1_Ac18 = [abs(number) for number in OC_storage_S1_Ac18]
C_LF_S1_Ac18 = [x*1/0.82 for x in OC_storage_S1_Ac18]
## define the input flow from the logging/harvesting to wood materials/pellets processing (F2-3)
HWP_S1_Ac18 = [x/division for x in df1_Ac18['Input_PF'].values]
HWP_S1_Ac18_energy = [x*1/3 for x in c_firewood_energy_S1_Ac18]
HWP_S1_Ac18_landfill = [x*1/0.82 for x in OC_storage_S1_Ac18]
HWP_S1_Ac18_sum = [HWP_S1_Ac18, HWP_S1_Ac18_energy, HWP_S1_Ac18_landfill]
HWP_S1_Ac18_sum = [sum(x) for x in zip(*HWP_S1_Ac18_sum )]
## in-use stocks (S-4)
TestDSM1_Ac18.s = [x/division for x in TestDSM1_Ac18.s]
#TestDSM1_Ac18.i = [x/division for x in TestDSM1_Ac18.i]
#calculate C stocks in landfill (S-7)
tf = 201
zero_matrix_stocks_S1_Ac_18y = (tf,1)
stocks_S1_Ac_18y = np.zeros(zero_matrix_stocks_S1_Ac_18y)
i = 0
stocks_S1_Ac_18y[0] = C_LF_S1_Ac18[0] - Landfill_decomp_DL_FP_S1_Ac_18y[0]
while i < tf-1:
stocks_S1_Ac_18y[i+1] = np.array(C_LF_S1_Ac18[i+1] - Landfill_decomp_DL_FP_S1_Ac_18y[i+1] + stocks_S1_Ac_18y[i])
i = i + 1
## calculate aggregate flow of logged wood (F1-2)
HWP_logged_S1_Ac_18y = [x1+x2 for (x1,x2) in zip(HWP_S1_Ac18_sum, [x*2/3 for x in c_firewood_energy_S1_Ac18])]
## calculate the stocks in the forest (AGB + undecomposed residue) (S-1a+S-1c)
tf = 201
zero_matrix_ForCstocks_S1_Ac_18y = (tf,1)
ForCstocks_S1_Ac_18y = np.zeros(zero_matrix_ForCstocks_S1_Ac_18y)
i = 0
ForCstocks_S1_Ac_18y[0] = initAGB - flat_list_Ac_18y[0] - decomp_tot_S1_Ac_18y[0] - HWP_logged_S1_Ac_18y[0]
while i < tf-1:
ForCstocks_S1_Ac_18y[i+1] = np.array(ForCstocks_S1_Ac_18y[i] - flat_list_Ac_18y[i+1] - decomp_tot_S1_Ac_18y[i+1] - HWP_logged_S1_Ac_18y[i+1])
i = i + 1
##NonRW materials/energy amount (F9-0-1)
df1_amount_Ac18 = pd.read_excel('C:\\Work\\Programming\\Practice\\NonRW_DL_FP.xlsx', 'DL_FP_S1_Ac_18y')
NonRW_amount_S1_Ac_18y = df1_amount_Ac18['NonRW_amount'].values
NonRW_amount_S1_Ac_18y = [x/1000 for x in NonRW_amount_S1_Ac_18y]
##NonRW emissions (F9-0-2)
emissions_NonRW_S1_Ac_18y = [x/division for x in emissions_NonRW_S1_Ac_18y]
#create columns
dfM_Ac_18y = pd.DataFrame.from_dict({'Year':Column1,
'F0-1 (t-C)': flat_list_Ac_18y,
'F1-0 (t-C)': decomp_tot_S1_Ac_18y[:,0],
#'F1a-2 (t-C)': PF_S1_Ac_18y,
#'F1c-2 (t-C)': FP_S1_Ac_18y,
'F1-2 (t-C)': HWP_logged_S1_Ac_18y,
'St-1 (t-C)':ForCstocks_S1_Ac_18y[:,0],
'F2-3 (t-C)': HWP_S1_Ac18_sum,
'F2-6 (t-C)': [x*2/3 for x in c_firewood_energy_S1_Ac18],
'SM/E (t-C)': [x1-x2-x3 for (x1,x2,x3) in zip(HWP_S1_Ac18_sum, [x*1/0.82 for x in OC_storage_S1_Ac18], [x*1/3 for x in c_firewood_energy_S1_Ac18])],
'F3-5 (t-C)':[x*1/0.82 for x in OC_storage_S1_Ac18],
'F3-6 (t-C)': [x*1/3 for x in c_firewood_energy_S1_Ac18],
# 'F4-0 (t-C)':,
'St-4 (t-C)': TestDSM1_Ac18.s,
#'S-4-i (t-C)': TestDSM1_Ac7.i,
'F4-5 (t-C)': TestDSM1_Ac18.o,
'F5-6 (t-C)': TestDSM1_Ac18.o,
'F5-7 (t-C)': C_LF_S1_Ac18,
'F6-0-1 (t-C)': c_firewood_energy_S1_Ac18,
'F6-0-2 (t-C)': TestDSM1_Ac18.o,
'St-7 (t-C)': stocks_S1_Ac_18y[:,0],
'F7-0 (t-C)': Landfill_decomp_DL_FP_S1_Ac_18y,
'F8-0 (t-C)': PH_Emissions_HWP1_Ac_18y,
'S9-0 (t)': NonRW_amount_S1_Ac_18y,
'F9-0 (t-C)': emissions_NonRW_S1_Ac_18y,
})
##S1_Tgr_60y
## define the input flow for the landfill (F5-7)
OC_storage_S1_Tgr60 = df1_Tgr60['Other_C_storage'].values
OC_storage_S1_Tgr60 = [x/division for x in OC_storage_S1_Tgr60]
OC_storage_S1_Tgr60 = [abs(number) for number in OC_storage_S1_Tgr60]
C_LF_S1_Tgr60 = [x*1/0.82 for x in OC_storage_S1_Tgr60]
## define the input flow from the logging/harvesting to wood materials/pellets processing (F2-3)
HWP_S1_Tgr60 = [x/division for x in df1_Tgr60['Input_PF'].values]
HWP_S1_Tgr60_energy = [x*1/3 for x in c_firewood_energy_S1_Tgr60]
HWP_S1_Tgr60_landfill = [x*1/0.82 for x in OC_storage_S1_Tgr60]
HWP_S1_Tgr60_sum = [HWP_S1_Tgr60, HWP_S1_Tgr60_energy, HWP_S1_Tgr60_landfill]
HWP_S1_Tgr60_sum = [sum(x) for x in zip(*HWP_S1_Tgr60_sum )]
## in-use stocks (S-4)
TestDSM1_Tgr60.s = [x/division for x in TestDSM1_Tgr60.s]
#TestDSM1_Tgr60.i = [x/division for x in TestDSM1_Tgr60.i]
## calculate C stocks in landfill (S-7)
tf = 201
zero_matrix_stocks_S1_Tgr_60y = (tf,1)
stocks_S1_Tgr_60y = np.zeros(zero_matrix_stocks_S1_Tgr_60y)
i = 0
stocks_S1_Tgr_60y[0] = C_LF_S1_Tgr60[0] - Landfill_decomp_DL_FP_S1_Tgr_60y[0]
while i < tf-1:
stocks_S1_Tgr_60y[i+1] = np.array(C_LF_S1_Tgr60[i+1] - Landfill_decomp_DL_FP_S1_Tgr_60y[i+1] + stocks_S1_Tgr_60y[i])
i = i + 1
#print(stocks_S2_Ac_7y[:])
#print(type(stocks_S2_Ac_7y))
#print(type(C_LF_S2_Ac7))
#print(type(Landfill_decomp_PF_FP_S2_Ac_7y))
## calculate aggregate flow of logged wood (F1-2)
HWP_logged_S1_Tgr_60y = [x1+x2 for (x1,x2) in zip(HWP_S1_Tgr60_sum, [x*2/3 for x in c_firewood_energy_S1_Tgr60])]
## calculate the stocks in the forest (AGB + undecomposed residue) (S-1a+S-1c)
tf = 201
zero_matrix_ForCstocks_S1_Tgr_60y = (tf,1)
ForCstocks_S1_Tgr_60y = np.zeros(zero_matrix_ForCstocks_S1_Tgr_60y)
i = 0
ForCstocks_S1_Tgr_60y[0] = initAGB - flat_list_Tgr_60y[0] - decomp_tot_S1_Tgr_60y[0] - HWP_logged_S1_Tgr_60y[0]
while i < tf-1:
ForCstocks_S1_Tgr_60y[i+1] = np.array(ForCstocks_S1_Tgr_60y[i] - flat_list_Tgr_60y[i+1] - decomp_tot_S1_Tgr_60y[i+1] - HWP_logged_S1_Tgr_60y[i+1])
i = i + 1
##NonRW materials/energy amount (F9-0-1)
df1_amount_Tgr60 = pd.read_excel('C:\\Work\\Programming\\Practice\\NonRW_DL_FP.xlsx', 'DL_FP_S1_Tgr_60y')
NonRW_amount_S1_Tgr_60y = df1_amount_Tgr60['NonRW_amount'].values
NonRW_amount_S1_Tgr_60y = [x/1000 for x in NonRW_amount_S1_Tgr_60y]
##NonRW emissions (F9-0-2)
emissions_NonRW_S1_Tgr_60y = [x/division for x in emissions_NonRW_S1_Tgr_60y]
#create columns
dfM_Tgr_60y = pd.DataFrame.from_dict({'Year':Column1,
'F0-1 (t-C)': flat_list_Tgr_60y,
'F1-0 (t-C)': decomp_tot_S1_Tgr_60y[:,0],
#'F1a-2 (t-C)': PF_S1_Tgr_60y,
#'F1c-2 (t-C)': FP_S1_Tgr_60y,
'F1-2 (t-C)': HWP_logged_S1_Tgr_60y,
'St-1 (t-C)':ForCstocks_S1_Tgr_60y[:,0],
'F2-3 (t-C)': HWP_S1_Tgr60_sum,
'F2-6 (t-C)': [x*2/3 for x in c_firewood_energy_S1_Tgr60],
'SM/E (t-C)': [x1-x2-x3 for (x1,x2,x3) in zip(HWP_S1_Tgr60_sum, [x*1/0.82 for x in OC_storage_S1_Tgr60], [x*1/3 for x in c_firewood_energy_S1_Tgr60])],
'F3-5 (t-C)':[x*1/0.82 for x in OC_storage_S1_Tgr60],
'F3-6 (t-C)': [x*1/3 for x in c_firewood_energy_S1_Tgr60],
# 'F4-0 (t-C)':,
'St-4 (t-C)': TestDSM1_Tgr60.s,
#'S-4-i (t-C)': TestDSM1_Tgr60.i,
'F4-5 (t-C)': TestDSM1_Tgr60.o,
'F5-6 (t-C)': TestDSM1_Tgr60.o,
'F5-7 (t-C)': C_LF_S1_Tgr60,
'F6-0-1 (t-C)': c_firewood_energy_S1_Tgr60,
'F6-0-2 (t-C)': TestDSM1_Tgr60.o,
'St-7 (t-C)': stocks_S1_Tgr_60y[:,0],
'F7-0 (t-C)': Landfill_decomp_DL_FP_S1_Tgr_60y,
'F8-0 (t-C)': PH_Emissions_HWP1_Tgr_60y,
'S9-0 (t)': NonRW_amount_S1_Tgr_60y,
'F9-0 (t-C)': emissions_NonRW_S1_Tgr_60y,
})
##S1_E_Hbr_40y
## define the input flow for the landfill (F5-7)
OC_storage_E_Hbr40 = dfE_Hbr40['Other_C_storage'].values
OC_storage_E_Hbr40 = [x/division for x in OC_storage_E_Hbr40]
OC_storage_E_Hbr40 = [abs(number) for number in OC_storage_E_Hbr40]
C_LF_E_Hbr40 = [x*1/0.82 for x in OC_storage_E_Hbr40]
## define the input flow from the logging/harvesting to wood materials/pellets processing (F2-3)
HWP_E_Hbr40 = [x/division for x in dfE_Hbr40['Wood_pellets'].values]
HWP_E_Hbr40_energy = [x*1/3 for x in c_firewood_energy_E_Hbr40]
HWP_E_Hbr40_landfill = [x*1/0.82 for x in OC_storage_E_Hbr40]
HWP_E_Hbr40_sum = [HWP_E_Hbr40, HWP_E_Hbr40_energy, HWP_E_Hbr40_landfill]
HWP_E_Hbr40_sum = [sum(x) for x in zip(*HWP_E_Hbr40_sum )]
## in-use stocks (S-4)
TestDSME_Hbr40.s = [x/division for x in TestDSME_Hbr40.s]
## calculate C stocks in landfill (S-7)
tf = 201
zero_matrix_stocks_E_Hbr_40y = (tf,1)
stocks_E_Hbr_40y = np.zeros(zero_matrix_stocks_E_Hbr_40y)
i = 0
stocks_E_Hbr_40y[0] = C_LF_E_Hbr40[0] - Landfill_decomp_DL_FP_E_Hbr_40y[0]
while i < tf-1:
stocks_E_Hbr_40y[i+1] = np.array(C_LF_E_Hbr40[i+1] - Landfill_decomp_DL_FP_E_Hbr_40y[i+1] + stocks_E_Hbr_40y[i])
i = i + 1
## calculate aggregate flow of logged wood (F1-2)
HWP_logged_E_Hbr_40y = [x1+x2 for (x1,x2) in zip(HWP_E_Hbr40_sum, [x*2/3 for x in c_firewood_energy_E_Hbr40])]
#calculate the stocks in the forest (AGB + undecomposed residue) (S-1a+S-1c)
tf = 201
zero_matrix_ForCstocks_E_Hbr_40y = (tf,1)
ForCstocks_E_Hbr_40y = np.zeros(zero_matrix_ForCstocks_E_Hbr_40y)
i = 0
ForCstocks_E_Hbr_40y[0] = initAGB - flat_list_Hbr_40y[0] - decomp_tot_E_Hbr_40y[0] - HWP_logged_E_Hbr_40y[0]
while i < tf-1:
ForCstocks_E_Hbr_40y[i+1] = np.array(ForCstocks_E_Hbr_40y[i] - flat_list_Hbr_40y[i+1] - decomp_tot_E_Hbr_40y[i+1] - HWP_logged_E_Hbr_40y[i+1])
i = i + 1
##NonRW materials/energy amount (F9-0-1)
dfE_amount_Hbr40 = pd.read_excel('C:\\Work\\Programming\\Practice\\NonRW_DL_FP.xlsx', 'DL_FP_E_Hbr_40y')
NonRW_amount_E_Hbr_40y = dfE_amount_Hbr40['NonRW_amount'].values
NonRW_amount_E_Hbr_40y = [x/1000 for x in NonRW_amount_E_Hbr_40y]
##NonRW emissions (F9-0-2)
emissions_NonRW_E_Hbr_40y = [x/division for x in emissions_NonRW_E_Hbr_40y]
#create columns
dfE_Hbr_40y = pd.DataFrame.from_dict({'Year':Column1,
'F0-1 (t-C)': flat_list_Hbr_40y,
'F1-0 (t-C)': decomp_tot_E_Hbr_40y[:,0],
#'F1a-2 (t-C)': PF_S2_Tgr_60y,
#'F1c-2 (t-C)': FP_S2_Tgr_60y,
'F1-2 (t-C)': HWP_logged_E_Hbr_40y,
'St-1 (t-C)':ForCstocks_E_Hbr_40y[:,0],
'F2-3 (t-C)': HWP_E_Hbr40_sum,
'F2-6 (t-C)': [x*2/3 for x in c_firewood_energy_E_Hbr40],
'SM/E (t-C)': [x1-x2-x3 for (x1,x2,x3) in zip(HWP_E_Hbr40_sum, [x*1/0.82 for x in OC_storage_E_Hbr40], [x*1/3 for x in c_firewood_energy_E_Hbr40])],
'F3-5 (t-C)':[x*1/0.82 for x in OC_storage_E_Hbr40],
'F3-6 (t-C)': [x*1/3 for x in c_firewood_energy_E_Hbr40],
'F4-0 (t-C)': c_pellets_Hbr_40y,
'St-4 (t-C)': TestDSME_Hbr40.s,
#'S-4-i (t-C)': TestDSME_Hbr40.i,
'F4-5 (t-C)': TestDSME_Hbr40.o,
'F5-6 (t-C)': TestDSME_Hbr40.o,
'F5-7 (t-C)': C_LF_E_Hbr40,
'F6-0-1 (t-C)': c_firewood_energy_E_Hbr40,
'F6-0-2 (t-C)': TestDSME_Hbr40.o,
'St-7 (t-C)': stocks_E_Hbr_40y[:,0],
'F7-0 (t-C)': Landfill_decomp_DL_FP_E_Hbr_40y,
'F8-0 (t-C)': PH_Emissions_HWPE_Hbr_40y,
'S9-0 (t)': NonRW_amount_E_Hbr_40y,
'F9-0 (t-C)': emissions_NonRW_E_Hbr_40y,
})
writer = pd.ExcelWriter('C_flows_SysDef_DL_FP_EC.xlsx', engine = 'xlsxwriter')
dfM_Ac_7y.to_excel(writer, sheet_name = 'DL_FP_M_EC_Ac_7y', header=True, index=False)
dfM_Ac_18y.to_excel(writer, sheet_name = 'DL_FP_M_EC_Ac_18y', header=True, index=False)
dfM_Tgr_60y.to_excel(writer, sheet_name = 'DL_FP_M_EC_Tgr_60y', header=True, index=False)
dfE_Hbr_40y.to_excel(writer, sheet_name = 'DL_FP_E_EC_Hbr_40y', header=True, index=False)
writer.save()
writer.close()
#%%
| 34.197836
| 268
| 0.709945
| 22,152
| 123,249
| 3.569113
| 0.029704
| 0.025903
| 0.022931
| 0.006071
| 0.892529
| 0.847122
| 0.799185
| 0.732074
| 0.667898
| 0.619632
| 0
| 0.083926
| 0.167133
| 123,249
| 3,604
| 269
| 34.197836
| 0.686293
| 0.230874
| 0
| 0.318634
| 0
| 0
| 0.133735
| 0.02521
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014286
| false
| 0
| 0.004348
| 0.013665
| 0.032919
| 0.098758
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ed5dc2a53343033ed8ace3353be469af11dc06bd
| 7,081
|
py
|
Python
|
projects/vdk-plugins/vdk-trino/tests/test_vdk_trino_lineage_utils.py
|
alod83/versatile-data-kit
|
9ca672d3929eb3dc6fe5c677e8c8a75e2a0d2be8
|
[
"Apache-2.0"
] | 100
|
2021-10-04T09:32:04.000Z
|
2022-03-30T11:23:53.000Z
|
projects/vdk-plugins/vdk-trino/tests/test_vdk_trino_lineage_utils.py
|
alod83/versatile-data-kit
|
9ca672d3929eb3dc6fe5c677e8c8a75e2a0d2be8
|
[
"Apache-2.0"
] | 208
|
2021-10-04T16:56:40.000Z
|
2022-03-31T10:41:44.000Z
|
projects/vdk-plugins/vdk-trino/tests/test_vdk_trino_lineage_utils.py
|
alod83/versatile-data-kit
|
9ca672d3929eb3dc6fe5c677e8c8a75e2a0d2be8
|
[
"Apache-2.0"
] | 14
|
2021-10-11T14:15:13.000Z
|
2022-03-11T13:39:17.000Z
|
# Copyright 2021 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0
import json
from vdk.plugin.trino.lineage_utils import _get_input_tables_from_explain
from vdk.plugin.trino.lineage_utils import _get_lineage_table_from_plan
from vdk.plugin.trino.lineage_utils import _lineage_table_from_name
from vdk.plugin.trino.lineage_utils import get_rename_table_lineage_from_query
from vdk.plugin.trino.lineage_utils import is_heartbeat_query
def test_is_heartbeat_query():
assert is_heartbeat_query("select 1")
assert is_heartbeat_query("select 'aaa'")
assert not is_heartbeat_query("select * from a_table")
def test_lineage_table_from_name():
lineage_table = _lineage_table_from_name(
table_name="test_table", schema="default_schema", catalog="default_catalog"
)
assert lineage_table.catalog == "default_catalog"
assert lineage_table.schema == "default_schema"
assert lineage_table.table == "test_table"
def test_lineage_table_from_name_and_schema():
lineage_table = _lineage_table_from_name(
table_name="test_schema.test_table",
schema="default_schema",
catalog="default_catalog",
)
assert lineage_table.catalog == "default_catalog"
assert lineage_table.schema == "test_schema"
assert lineage_table.table == "test_table"
def test_lineage_table_from_name_and_schema_and_catalog():
lineage_table = _lineage_table_from_name(
table_name="test_catalog.test_schema.test_table",
schema="default_schema",
catalog="default_catalog",
)
assert lineage_table.catalog == "test_catalog"
assert lineage_table.schema == "test_schema"
assert lineage_table.table == "test_table"
def test_get_lineage_table_from_plan():
table_dict = json.loads(
"""
{
"catalog": "test_catalog",
"schemaTable": {
"schema": "test_schema",
"table": "test_table"
}
}
"""
)
lineage_table = _get_lineage_table_from_plan(table_dict)
assert lineage_table.catalog == "test_catalog"
assert lineage_table.schema == "test_schema"
assert lineage_table.table == "test_table"
def test_get_input_tables_from_explain():
explain_io_json = """
{
"inputTableColumnInfos" : [ {
"table" : {
"catalog" : "hive",
"schemaTable" : {
"schema" : "history",
"table" : "palexiev2"
}
},
"columnConstraints" : [ ],
"estimate" : {
"outputRowCount" : 0.0,
"outputSizeInBytes" : 0.0,
"cpuCost" : 0.0,
"maxMemory" : 0.0,
"networkCost" : 0.0
}
}, {
"table" : {
"catalog" : "hive",
"schemaTable" : {
"schema" : "history",
"table" : "palexiev"
}
},
"columnConstraints" : [ ],
"estimate" : {
"outputRowCount" : 0.0,
"outputSizeInBytes" : 0.0,
"cpuCost" : 0.0,
"maxMemory" : 0.0,
"networkCost" : 0.0
}
} ],
"estimate" : {
"outputRowCount" : 0.0,
"outputSizeInBytes" : 0.0,
"cpuCost" : 0.0,
"maxMemory" : 0.0,
"networkCost" : 0.0
}
}
"""
explain_dict = json.loads(explain_io_json)
lineage_tables = _get_input_tables_from_explain(
explain_dict["inputTableColumnInfos"]
)
table1 = lineage_tables[0]
assert table1.catalog == "hive"
assert table1.schema == "history"
assert table1.table == "palexiev2"
table2 = lineage_tables[1]
assert table2.catalog == "hive"
assert table2.schema == "history"
assert table2.table == "palexiev"
def test_get_rename_table_lineage_from_query():
query = "alter table tbl_from rename to tbl_to"
lineage_data = get_rename_table_lineage_from_query(
query, "test_schema", "test_catalog"
)
assert lineage_data is not None
assert lineage_data.query == query
assert lineage_data.query_type == "rename_table"
assert lineage_data.query_status == "OK"
assert lineage_data.input_tables is not None
assert len(lineage_data.input_tables) == 1
assert lineage_data.input_tables[0].table == "tbl_from"
assert lineage_data.input_tables[0].schema == "test_schema"
assert lineage_data.input_tables[0].catalog == "test_catalog"
assert lineage_data.output_table is not None
assert lineage_data.output_table.table == "tbl_to"
assert lineage_data.output_table.schema == "test_schema"
assert lineage_data.output_table.catalog == "test_catalog"
def test_get_rename_table_lineage_from_query_with_schema():
query = "alter table test_schema.tbl_from rename to test_schema.tbl_to"
lineage_data = get_rename_table_lineage_from_query(
query, "wrong_schema", "test_catalog"
)
assert lineage_data is not None
assert lineage_data.query == query
assert lineage_data.query_type == "rename_table"
assert lineage_data.query_status == "OK"
assert lineage_data.input_tables is not None
assert len(lineage_data.input_tables) == 1
assert lineage_data.input_tables[0].table == "tbl_from"
assert lineage_data.input_tables[0].schema == "test_schema"
assert lineage_data.input_tables[0].catalog == "test_catalog"
assert lineage_data.output_table is not None
assert lineage_data.output_table.table == "tbl_to"
assert lineage_data.output_table.schema == "test_schema"
assert lineage_data.output_table.catalog == "test_catalog"
def test_get_rename_table_lineage_from_query_full_names():
query = "alter table test_catalog.test_schema.tbl_from rename to test_catalog.test_schema.tbl_to"
lineage_data = get_rename_table_lineage_from_query(
query, "wrong_schema", "wrong_catalog"
)
assert lineage_data is not None
assert lineage_data.query == query
assert lineage_data.query_type == "rename_table"
assert lineage_data.query_status == "OK"
assert lineage_data.input_tables is not None
assert len(lineage_data.input_tables) == 1
assert lineage_data.input_tables[0].table == "tbl_from"
assert lineage_data.input_tables[0].schema == "test_schema"
assert lineage_data.input_tables[0].catalog == "test_catalog"
assert lineage_data.output_table is not None
assert lineage_data.output_table.table == "tbl_to"
assert lineage_data.output_table.schema == "test_schema"
assert lineage_data.output_table.catalog == "test_catalog"
def test_get_rename_table_lineage_from_query_false_cases():
assert (
get_rename_table_lineage_from_query(
"alter table tbl1 add column col1 int", "test_schema", "test_catalog"
)
is None
)
assert (
get_rename_table_lineage_from_query(
"alter table tbl1 rename column col1 to col2",
"test_schema",
"test_catalog",
)
is None
)
assert (
get_rename_table_lineage_from_query(
"alter view view1 rename to view2", "test_schema", "test_catalog"
)
is None
)
| 33.880383
| 101
| 0.677305
| 872
| 7,081
| 5.129587
| 0.09633
| 0.139504
| 0.136821
| 0.073776
| 0.839258
| 0.813324
| 0.787615
| 0.717416
| 0.682987
| 0.654594
| 0
| 0.012373
| 0.223838
| 7,081
| 208
| 102
| 34.043269
| 0.801492
| 0.008897
| 0
| 0.557471
| 0
| 0
| 0.304791
| 0.024028
| 0
| 0
| 0
| 0
| 0.362069
| 1
| 0.057471
| false
| 0
| 0.034483
| 0
| 0.091954
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ed7434d2a3bb706cb2fbdda02b7f5c9aa27cb512
| 338
|
py
|
Python
|
G4 Localizer/a2_Camera/cameraModel.py
|
cbrahana/FRC-Localizer-Systems
|
740c88ec6e0af490e703e8a5c544434c0f33ee0b
|
[
"MIT"
] | null | null | null |
G4 Localizer/a2_Camera/cameraModel.py
|
cbrahana/FRC-Localizer-Systems
|
740c88ec6e0af490e703e8a5c544434c0f33ee0b
|
[
"MIT"
] | null | null | null |
G4 Localizer/a2_Camera/cameraModel.py
|
cbrahana/FRC-Localizer-Systems
|
740c88ec6e0af490e703e8a5c544434c0f33ee0b
|
[
"MIT"
] | null | null | null |
class Camera:
def __init__(): #Need to have all information necessary to calibrate camera input into here as arguements
#Calibrate Camera - constant
#Locate Camera Relative to Centerpoint - constant
#Define GPIO pins for synchronization - constant
#Change camera settings - tunable
return None
| 48.285714
| 109
| 0.704142
| 39
| 338
| 6
| 0.769231
| 0.128205
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.257396
| 338
| 7
| 110
| 48.285714
| 0.932271
| 0.718935
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0
| 0.333333
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
ed7765e2ab92212613889bc15172427bc5f6b297
| 56
|
py
|
Python
|
utime/hypnogram/__init__.py
|
aluquecerp/U-Time
|
c792259825b57e49544684ce2997f3ac8db84c6e
|
[
"MIT"
] | 138
|
2019-11-20T02:31:17.000Z
|
2022-03-23T04:31:51.000Z
|
utime/hypnogram/__init__.py
|
amiyapatanaik/U-Time
|
a9ed4892da77d165a71dbfef1d069d782c909757
|
[
"MIT"
] | 46
|
2019-12-04T03:13:28.000Z
|
2022-03-31T13:10:48.000Z
|
utime/hypnogram/__init__.py
|
amiyapatanaik/U-Time
|
a9ed4892da77d165a71dbfef1d069d782c909757
|
[
"MIT"
] | 42
|
2019-11-26T16:02:26.000Z
|
2022-01-06T11:01:32.000Z
|
from .hypnograms import SparseHypnogram, DenseHypnogram
| 28
| 55
| 0.875
| 5
| 56
| 9.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.089286
| 56
| 1
| 56
| 56
| 0.960784
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
9c02ea62132d4663cf6b795eb9ce4bb6b4dfe5b1
| 212
|
py
|
Python
|
emos/__init__.py
|
CubeFlix/emos
|
7f84100908e78384c82777ec3bee0cc1b130cefb
|
[
"MIT"
] | 1
|
2021-05-26T17:41:07.000Z
|
2021-05-26T17:41:07.000Z
|
emos/__init__.py
|
CubeFlix/emos
|
7f84100908e78384c82777ec3bee0cc1b130cefb
|
[
"MIT"
] | null | null | null |
emos/__init__.py
|
CubeFlix/emos
|
7f84100908e78384c82777ec3bee0cc1b130cefb
|
[
"MIT"
] | null | null | null |
"""
- EMOS Main Source Code -
(C) Cubeflix 2021 (EMOS)
"""
# Imports
from .misc import *
from .memory import *
from .cpu import *
from .operatingsystem import *
from .computer import *
from .screen import *
| 12.470588
| 30
| 0.679245
| 27
| 212
| 5.333333
| 0.592593
| 0.347222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023669
| 0.20283
| 212
| 16
| 31
| 13.25
| 0.828402
| 0.283019
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
9c3aab888d3f65753a4185dde758c2dad29802a9
| 103
|
py
|
Python
|
terrascript/spotinst/__init__.py
|
hugovk/python-terrascript
|
08fe185904a70246822f5cfbdc9e64e9769ec494
|
[
"BSD-2-Clause"
] | 507
|
2017-07-26T02:58:38.000Z
|
2022-01-21T12:35:13.000Z
|
terrascript/spotinst/__init__.py
|
hugovk/python-terrascript
|
08fe185904a70246822f5cfbdc9e64e9769ec494
|
[
"BSD-2-Clause"
] | 135
|
2017-07-20T12:01:59.000Z
|
2021-10-04T22:25:40.000Z
|
terrascript/spotinst/__init__.py
|
hugovk/python-terrascript
|
08fe185904a70246822f5cfbdc9e64e9769ec494
|
[
"BSD-2-Clause"
] | 81
|
2018-02-20T17:55:28.000Z
|
2022-01-31T07:08:40.000Z
|
# terrascript/spotinst/__init__.py
import terrascript
class spotinst(terrascript.Provider):
pass
| 14.714286
| 37
| 0.796117
| 11
| 103
| 7.090909
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.126214
| 103
| 6
| 38
| 17.166667
| 0.866667
| 0.31068
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 5
|
9c4506a37458e4187e29a1ff1afd9c962565ea1d
| 122
|
py
|
Python
|
pafd/admin.py
|
kavinzhao/fduhole
|
508922cfa0558c58b95206dd8fbf51d10525fa1e
|
[
"Apache-2.0"
] | 9
|
2021-04-14T12:08:38.000Z
|
2021-12-16T08:14:40.000Z
|
pafd/admin.py
|
kavinzhao/fduhole
|
508922cfa0558c58b95206dd8fbf51d10525fa1e
|
[
"Apache-2.0"
] | 9
|
2021-04-18T09:48:25.000Z
|
2021-11-26T07:43:22.000Z
|
pafd/admin.py
|
kavinzhao/fduhole
|
508922cfa0558c58b95206dd8fbf51d10525fa1e
|
[
"Apache-2.0"
] | 4
|
2021-07-15T02:10:42.000Z
|
2022-01-22T02:12:11.000Z
|
from django.contrib import admin
from .models import Student
# Register your models here.
# admin.site.register(Student)
| 20.333333
| 32
| 0.795082
| 17
| 122
| 5.705882
| 0.647059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131148
| 122
| 5
| 33
| 24.4
| 0.915094
| 0.45082
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
9c7d79b032a1a7393980f36ae8b429df16d52012
| 2,376
|
py
|
Python
|
migrations/versions/0d14cc3a1cf1_add_private_key_and_csr_to_certificate_.py
|
cloud-gov/legacy-domain-certificate-renewer
|
6b008fdc8e1277cfe4449626e6c488d11fc4857c
|
[
"CC0-1.0"
] | 1
|
2021-11-16T17:25:21.000Z
|
2021-11-16T17:25:21.000Z
|
migrations/versions/0d14cc3a1cf1_add_private_key_and_csr_to_certificate_.py
|
cloud-gov/legacy-domain-certificate-renewer
|
6b008fdc8e1277cfe4449626e6c488d11fc4857c
|
[
"CC0-1.0"
] | 1
|
2021-12-22T19:04:34.000Z
|
2021-12-22T19:04:34.000Z
|
migrations/versions/0d14cc3a1cf1_add_private_key_and_csr_to_certificate_.py
|
cloud-gov/legacy-domain-certificate-renewer
|
6b008fdc8e1277cfe4449626e6c488d11fc4857c
|
[
"CC0-1.0"
] | null | null | null |
"""add private key and csr to certificate, track certificate in operation
Revision ID: 0d14cc3a1cf1
Revises: 531893054cdf
Create Date: 2021-09-10 23:03:20.631787
"""
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
# revision identifiers, used by Alembic.
revision = "0d14cc3a1cf1"
down_revision = "531893054cdf"
branch_labels = None
depends_on = None
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_cdn():
op.add_column(
"certificates",
sa.Column(
"private_key_pem",
sqlalchemy_utils.types.encrypted.encrypted_type.StringEncryptedType(),
nullable=True,
),
)
op.add_column("certificates", sa.Column("csr_pem", sa.Text(), nullable=True))
op.add_column(
"operations", sa.Column("certificate_id", sa.Integer(), nullable=True)
)
op.create_foreign_key(
op.f("fk_operations_certificate_id_certificates"),
"operations",
"certificates",
["certificate_id"],
["id"],
)
def downgrade_cdn():
op.drop_constraint(
op.f("fk_operations_certificate_id_certificates"),
"operations",
type_="foreignkey",
)
op.drop_column("operations", "certificate_id")
op.drop_column("certificates", "csr_pem")
op.drop_column("certificates", "private_key_pem")
def upgrade_domain():
op.add_column(
"certificates",
sa.Column(
"private_key_pem",
sqlalchemy_utils.types.encrypted.encrypted_type.StringEncryptedType(),
nullable=True,
),
)
op.add_column("certificates", sa.Column("csr_pem", sa.Text(), nullable=True))
op.add_column(
"operations", sa.Column("certificate_id", sa.Integer(), nullable=True)
)
op.create_foreign_key(
op.f("fk_operations_certificate_id_certificates"),
"operations",
"certificates",
["certificate_id"],
["id"],
)
def downgrade_domain():
op.drop_constraint(
op.f("fk_operations_certificate_id_certificates"),
"operations",
type_="foreignkey",
)
op.drop_column("operations", "certificate_id")
op.drop_column("certificates", "csr_pem")
op.drop_column("certificates", "private_key_pem")
| 25.826087
| 82
| 0.651094
| 262
| 2,376
| 5.629771
| 0.259542
| 0.088136
| 0.044746
| 0.062373
| 0.710508
| 0.710508
| 0.710508
| 0.710508
| 0.710508
| 0.710508
| 0
| 0.026983
| 0.220118
| 2,376
| 91
| 83
| 26.10989
| 0.769023
| 0.083754
| 0
| 0.666667
| 0
| 0
| 0.279391
| 0.075611
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0
| 0.043478
| 0
| 0.130435
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
92bcda30a2c5b17469f5f875c18b381cd8272fad
| 161
|
py
|
Python
|
courses/backend/django-for-everybody/Web Application Technologies and Django/resources/dj4e-samples/tagme/admin.py
|
Nahid-Hassan/fullstack-software-development
|
892ffb33e46795061ea63378279a6469de317b1a
|
[
"CC0-1.0"
] | 297
|
2019-01-25T08:44:08.000Z
|
2022-03-29T18:46:08.000Z
|
courses/backend/django-for-everybody/Web Application Technologies and Django/resources/dj4e-samples/tagme/admin.py
|
Nahid-Hassan/fullstack-software-development
|
892ffb33e46795061ea63378279a6469de317b1a
|
[
"CC0-1.0"
] | 22
|
2019-05-06T14:21:04.000Z
|
2022-02-21T10:05:25.000Z
|
courses/backend/django-for-everybody/Web Application Technologies and Django/resources/dj4e-samples/tagme/admin.py
|
Nahid-Hassan/fullstack-software-development
|
892ffb33e46795061ea63378279a6469de317b1a
|
[
"CC0-1.0"
] | 412
|
2019-02-12T20:44:43.000Z
|
2022-03-30T04:23:25.000Z
|
from django.contrib import admin
# Register your models here.
from tagme.models import Forum, Comment
admin.site.register(Forum)
admin.site.register(Comment)
| 17.888889
| 39
| 0.801242
| 23
| 161
| 5.608696
| 0.565217
| 0.139535
| 0.263566
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.118012
| 161
| 8
| 40
| 20.125
| 0.908451
| 0.161491
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
92c04fd9a550435a0d5f9a86e5aa73256572f8b4
| 207
|
py
|
Python
|
decomp/semantics/__init__.py
|
esteng/decomp
|
a6996b379e4a5e1a70a28b2b6f86bf39160ee10b
|
[
"MIT"
] | 48
|
2019-10-01T13:33:24.000Z
|
2022-02-14T13:58:57.000Z
|
decomp/semantics/__init__.py
|
esteng/decomp
|
a6996b379e4a5e1a70a28b2b6f86bf39160ee10b
|
[
"MIT"
] | 15
|
2019-10-01T15:01:36.000Z
|
2021-05-25T17:23:22.000Z
|
decomp/semantics/__init__.py
|
esteng/decomp
|
a6996b379e4a5e1a70a28b2b6f86bf39160ee10b
|
[
"MIT"
] | 9
|
2020-03-02T17:54:17.000Z
|
2021-06-17T19:53:53.000Z
|
"""
Module for representing PredPatt and UDS graphs
This module represents PredPatt and UDS graphs using networkx. It
incorporates the dependency parse-based graphs from the syntax module
as subgraphs.
"""
| 25.875
| 69
| 0.806763
| 29
| 207
| 5.758621
| 0.724138
| 0.131737
| 0.167665
| 0.239521
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.149758
| 207
| 7
| 70
| 29.571429
| 0.948864
| 0.956522
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
92c161d079c65843de6ca5fac529e3cb181171a2
| 223
|
py
|
Python
|
{{cookiecutter.package_name}}/{{cookiecutter.package_name}}/__init__.py
|
kevinbache/python-data-project
|
867cb9650d86c4049f382a54d5c02210c6901e59
|
[
"MIT"
] | null | null | null |
{{cookiecutter.package_name}}/{{cookiecutter.package_name}}/__init__.py
|
kevinbache/python-data-project
|
867cb9650d86c4049f382a54d5c02210c6901e59
|
[
"MIT"
] | null | null | null |
{{cookiecutter.package_name}}/{{cookiecutter.package_name}}/__init__.py
|
kevinbache/python-data-project
|
867cb9650d86c4049f382a54d5c02210c6901e59
|
[
"MIT"
] | null | null | null |
"""{{ cookiecutter.package_name }} - {{ cookiecutter.package_description }}"""
__version__ = '{{ cookiecutter.package_version }}'
__author__ = '{{ cookiecutter.author_name }} <{{ cookiecutter.author_email }}>'
__all__ = []
| 44.6
| 79
| 0.704036
| 18
| 223
| 7.777778
| 0.444444
| 0.407143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103139
| 223
| 5
| 80
| 44.6
| 0.7
| 0.32287
| 0
| 0
| 0
| 0
| 0.671233
| 0.527397
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
92cbe41d5ffac041aa53916a162299bb2ec2306e
| 1,196
|
py
|
Python
|
examples/demo_gen_annular_FPM.py
|
kian1377/falco-python
|
a9666629845fc72957cd89339f924b9cfb7ce6f5
|
[
"Apache-2.0"
] | 4
|
2019-05-22T22:24:01.000Z
|
2021-07-21T13:32:36.000Z
|
examples/demo_gen_annular_FPM.py
|
kian1377/falco-python
|
a9666629845fc72957cd89339f924b9cfb7ce6f5
|
[
"Apache-2.0"
] | 11
|
2018-06-22T01:05:07.000Z
|
2021-11-03T13:46:25.000Z
|
examples/demo_gen_annular_FPM.py
|
kian1377/falco-python
|
a9666629845fc72957cd89339f924b9cfb7ce6f5
|
[
"Apache-2.0"
] | 2
|
2018-06-21T23:58:06.000Z
|
2021-07-13T21:25:23.000Z
|
import sys
sys.path.insert(0,"../")
import falco
import numpy as np
import matplotlib.pyplot as plt
inputs = {}
inputs["FPMampFac"] = 0.
inputs["pixresFPM"] = 3
inputs["rhoInner"] = 6.5
inputs["centering"] = 'pixel'
# %% With Outer Ring
inputs["rhoOuter"] = 20.0
fpm = falco.mask.falco_gen_annular_FPM(inputs)
plt.imshow(fpm); plt.colorbar(); plt.pause(0.1)
if("centering" in inputs.keys()): # Check symmetry
if inputs["centering"]=='pixel':
plt.imshow(fpm[1::,1::]-np.fliplr(fpm[1::,1::])); plt.colorbar(); plt.pause(0.1) #--Check centering
elif inputs["centering"]=='interpixel':
plt.imshow(fpm-np.fliplr(fpm)); plt.colorbar(); plt.pause(0.1) #--Check centering
# %% Without Outer Ring
inputs["rhoOuter"] = np.Infinity
fpm = falco.mask.falco_gen_annular_FPM(inputs)
plt.imshow(fpm); plt.colorbar(); plt.pause(0.1)
if("centering" in inputs.keys()): # Check symmetry
if inputs["centering"]=='pixel':
plt.imshow(fpm[1::,1::]-np.fliplr(fpm[1::,1::])); plt.colorbar(); plt.pause(0.1) #--Check centering
elif inputs["centering"]=='interpixel':
plt.imshow(fpm-np.fliplr(fpm)); plt.colorbar(); plt.pause(0.1) #--Check centering
| 30.666667
| 107
| 0.650502
| 173
| 1,196
| 4.462428
| 0.254335
| 0.069948
| 0.093264
| 0.147668
| 0.715026
| 0.715026
| 0.715026
| 0.715026
| 0.715026
| 0.715026
| 0
| 0.02729
| 0.14214
| 1,196
| 38
| 108
| 31.473684
| 0.725146
| 0.115385
| 0
| 0.538462
| 0
| 0
| 0.13632
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.153846
| 0
| 0.153846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
92cfb8ab0843c53700a6fac8d59707f34be2d7ed
| 45
|
py
|
Python
|
reqsit/__init__.py
|
mattyWuh/reqsit
|
c4b6075c0f2e0e47263c5f9d5966c3b382148de1
|
[
"MIT"
] | 1
|
2021-11-15T18:52:56.000Z
|
2021-11-15T18:52:56.000Z
|
tests/__init__.py
|
mattyWuh/reqsit
|
c4b6075c0f2e0e47263c5f9d5966c3b382148de1
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
mattyWuh/reqsit
|
c4b6075c0f2e0e47263c5f9d5966c3b382148de1
|
[
"MIT"
] | null | null | null |
"""Copyright matt witt 2021, MIT License."""
| 22.5
| 44
| 0.688889
| 6
| 45
| 5.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102564
| 0.133333
| 45
| 1
| 45
| 45
| 0.692308
| 0.844444
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
92fa8ba51f239bdaf98242c556145a65055f137c
| 324
|
py
|
Python
|
tests/linter.py
|
Conor-Behard333/covid_alarm_clock
|
a53c58164da2bc8f0fd257e4aa5a6662abab44fa
|
[
"MIT"
] | null | null | null |
tests/linter.py
|
Conor-Behard333/covid_alarm_clock
|
a53c58164da2bc8f0fd257e4aa5a6662abab44fa
|
[
"MIT"
] | null | null | null |
tests/linter.py
|
Conor-Behard333/covid_alarm_clock
|
a53c58164da2bc8f0fd257e4aa5a6662abab44fa
|
[
"MIT"
] | null | null | null |
"""Used to test the formatting of the code using pylint"""
import pylint.lint
pylint_opts = ['../logger_setup.py', '../main.py', '../api_handling/get_config_info.py', '../api_handling/get_covid_info.py',
'../api_handling/get_news_info.py', '../api_handling/get_weather_info.py']
pylint.lint.Run(pylint_opts)
| 46.285714
| 125
| 0.703704
| 49
| 324
| 4.346939
| 0.510204
| 0.093897
| 0.244131
| 0.300469
| 0.28169
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114198
| 324
| 6
| 126
| 54
| 0.74216
| 0.160494
| 0
| 0
| 0
| 0
| 0.609023
| 0.503759
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
133393bc0891217bd2d5ce2124d5af09e5ccdcbc
| 212
|
py
|
Python
|
first_app/admin.py
|
Vedant1202/Django-Demo
|
6c7e21d210c9b32bcb5ecb8e7cce2a33ac5c21cc
|
[
"MIT"
] | null | null | null |
first_app/admin.py
|
Vedant1202/Django-Demo
|
6c7e21d210c9b32bcb5ecb8e7cce2a33ac5c21cc
|
[
"MIT"
] | null | null | null |
first_app/admin.py
|
Vedant1202/Django-Demo
|
6c7e21d210c9b32bcb5ecb8e7cce2a33ac5c21cc
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from first_app.models import Topic, Webpage, AccessRecord
# Register your models here.
admin.site.register(Topic)
admin.site.register(Webpage)
admin.site.register(AccessRecord)
| 23.555556
| 57
| 0.820755
| 29
| 212
| 5.965517
| 0.517241
| 0.156069
| 0.294798
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09434
| 212
| 8
| 58
| 26.5
| 0.901042
| 0.122642
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
133807713aa0a037db3203058038d938705abaad
| 6,644
|
py
|
Python
|
unit_test/test_snfolds.py
|
melloddy/MELLODDY-TUNERv1
|
37a70402bee53fc4aa221257213e87ac4d6d750a
|
[
"MIT"
] | 24
|
2020-08-28T14:58:15.000Z
|
2021-12-30T14:40:16.000Z
|
unit_test/test_snfolds.py
|
melloddy/MELLODDY-TUNERv1
|
37a70402bee53fc4aa221257213e87ac4d6d750a
|
[
"MIT"
] | null | null | null |
unit_test/test_snfolds.py
|
melloddy/MELLODDY-TUNERv1
|
37a70402bee53fc4aa221257213e87ac4d6d750a
|
[
"MIT"
] | 6
|
2021-04-02T12:51:15.000Z
|
2022-03-15T21:44:57.000Z
|
import os
import unittest
import sys
from melloddy_tuner.utils.config import ConfigDict, SecretDict
from melloddy_tuner.utils.scaffold_folding import ScaffoldFoldAssign
from melloddy_tuner.utils.df_transformer import DfTransformer
import filecmp
from pathlib import Path
curDir = os.path.dirname(os.path.abspath(__file__))
print(curDir)
class SNFoldCalculationTests(unittest.TestCase):
############################
#### setup and teardown ####
############################
# executed after each test
def tearDown(self):
pass
def setUp(self):
self.config = ConfigDict(
config_path=Path(
os.path.join(curDir, "reference_files", "example_parameters.json")
)
).get_parameters()
self.keys = SecretDict(
key_path=Path(os.path.join(curDir, "reference_files", "example_key.json"))
).get_secrets()
# def defineConfig(self,fp=3):
# if(fp==3):
# tuner.config.parameters.get_parameters(path=curDir+"/../tests/structure_preparation_test/example_parameters.json")
# else:
# tuner.config.parameters.get_parameters(path=curDir+"/input/ecfp2_param.json")
#
# def defineConfigNewSecret(self):
# tuner.config.parameters.get_parameters(path=curDir+"/input/new_secret_param.json")
###############
#### tests ####
###############
def test_calculate_snfold_single_hard(self):
"""test the single claculation based on hard coded parameters"""
input_smiles = (
"Cc1ccc(cc1Nc2nccc(n2)c3cccnc3)NC(=O)c4ccc(cc4)CN5CCN(CC5)C" # imatinib
)
sa = ScaffoldFoldAssign(nfolds=5, secret="melloddy")
result_actual = sa.calculate_single(input_smiles)
result_expected = (
"O=C(Nc1cccc(Nc2nccc(-c3cccnc3)n2)c1)c1ccc(CN2CCNCC2)cc1",
"c1ccc(Nc2nccc(-c3cccnc3)n2)cc1",
2,
True,
None,
)
self.assertEqual(result_actual, result_expected)
def test_calculate_snfold_single_config(self):
"""test the single claculation based on config file conent"""
input_smiles = (
"Cc1ccc(cc1Nc2nccc(n2)c3cccnc3)NC(=O)c4ccc(cc4)CN5CCN(CC5)C" # imatinib
)
sa = ScaffoldFoldAssign(
nfolds=self.config["scaffold_folding"]["nfolds"], secret=self.keys["key"]
)
result_actual = sa.calculate_single(input_smiles)
result_expected = (
"O=C(Nc1cccc(Nc2nccc(-c3cccnc3)n2)c1)c1ccc(CN2CCNCC2)cc1",
"c1ccc(Nc2nccc(-c3cccnc3)n2)cc1",
2,
True,
None,
)
self.assertEqual(result_actual, result_expected)
def test_calculate_sn_fold_multiple(self):
infile = os.path.join(curDir, "input", "test_sn_fold_input.csv")
outfile = os.path.join(curDir, "output", "tmp", "sn_fold_output.csv")
output_columns = [
"murcko_smiles",
"sn_smiles",
"fold_id",
"success",
"error_message",
]
output_types = ["object", "object", "int", "bool", "object"]
sa = ScaffoldFoldAssign(
nfolds=self.config["scaffold_folding"]["nfolds"], secret=self.keys["key"]
)
dt = DfTransformer(
sa,
input_columns={"canonical_smiles": "smiles"},
output_columns=output_columns,
output_types=output_types,
)
dt.process_file(infile, outfile)
result = filecmp.cmp(
os.path.join(curDir, "output", "test_sn_fold_output.csv"),
os.path.join(outfile),
shallow=False,
)
self.assertEqual(result, True)
def test_calculate_sn_fold_multiple_split(self):
infile = os.path.join(curDir, "input", "test_sn_fold_input.csv")
outfile = os.path.join(curDir, "output", "tmp", "sn_fold_output.OK.csv")
errfile = os.path.join(curDir, "output", "tmp", "sn_fold_output.failed.csv")
output_columns = [
"murcko_smiles",
"sn_smiles",
"fold_id",
"success",
"error_message",
]
output_types = ["object", "object", "int", "bool", "object"]
sa = ScaffoldFoldAssign(
nfolds=self.config["scaffold_folding"]["nfolds"], secret=self.keys["key"]
)
dt = DfTransformer(
sa,
input_columns={"canonical_smiles": "smiles"},
output_columns=output_columns,
output_types=output_types,
success_column="success",
)
dt.process_file(infile, outfile, error_file=errfile)
result_OK = filecmp.cmp(
os.path.join(curDir, "output", "test_sn_fold_output.OK.csv"),
os.path.join(outfile),
shallow=False,
)
result_failed = filecmp.cmp(
os.path.join(curDir, "output", "test_sn_fold_output.failed.csv"),
os.path.join(errfile),
shallow=False,
)
self.assertEqual(result_OK & result_failed, True)
def test_calculate_sn_fold_multiple_split_par(self):
infile = os.path.join(curDir, "input", "test_sn_fold_input.csv")
outfile = os.path.join(
curDir, "output", "tmp", "sn_fold_output_parallel.OK.csv"
)
errfile = os.path.join(
curDir, "output", "tmp", "sn_fold_output.parallel.failed.csv"
)
output_columns = [
"murcko_smiles",
"sn_smiles",
"fold_id",
"success",
"error_message",
]
output_types = ["object", "object", "int", "bool", "object"]
sa = ScaffoldFoldAssign(
nfolds=self.config["scaffold_folding"]["nfolds"], secret=self.keys["key"]
)
dt = DfTransformer(
sa,
input_columns={"canonical_smiles": "smiles"},
output_columns=output_columns,
output_types=output_types,
success_column="success",
nproc=2,
)
dt.process_file(infile, outfile, error_file=errfile)
result_OK = filecmp.cmp(
os.path.join(curDir, "output", "test_sn_fold_output.OK.csv"),
os.path.join(outfile),
shallow=False,
)
result_failed = filecmp.cmp(
os.path.join(curDir, "output", "test_sn_fold_output.failed.csv"),
os.path.join(errfile),
shallow=False,
)
self.assertEqual(result_OK & result_failed, True)
# if __name__ == "__main__":
# unittest.main()
| 34.604167
| 131
| 0.578868
| 703
| 6,644
| 5.241821
| 0.192034
| 0.035821
| 0.054274
| 0.065129
| 0.802985
| 0.779376
| 0.775577
| 0.735957
| 0.689281
| 0.664858
| 0
| 0.012445
| 0.286424
| 6,644
| 191
| 132
| 34.78534
| 0.764818
| 0.095575
| 0
| 0.598684
| 0
| 0.013158
| 0.201224
| 0.105371
| 0
| 0
| 0
| 0
| 0.032895
| 1
| 0.046053
| false
| 0.006579
| 0.052632
| 0
| 0.105263
| 0.006579
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
133b313d7bb8e1d801570a9adbc0be20249229a6
| 59
|
py
|
Python
|
state_management/__init__.py
|
johnurbanik/prospector
|
a014a5b8767320e2a3937a79db2d364f8e6da1c8
|
[
"MIT"
] | null | null | null |
state_management/__init__.py
|
johnurbanik/prospector
|
a014a5b8767320e2a3937a79db2d364f8e6da1c8
|
[
"MIT"
] | null | null | null |
state_management/__init__.py
|
johnurbanik/prospector
|
a014a5b8767320e2a3937a79db2d364f8e6da1c8
|
[
"MIT"
] | null | null | null |
from state_management.question_state import QuestionManager
| 59
| 59
| 0.932203
| 7
| 59
| 7.571429
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.050847
| 59
| 1
| 59
| 59
| 0.946429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
1378827a76a13a19f8d50f59bc4237102dd0f609
| 157
|
py
|
Python
|
tuto/00_hello.py
|
gb6612/py_tuto
|
37f171287c025903cc9003c266618aadcb3f0052
|
[
"MIT"
] | null | null | null |
tuto/00_hello.py
|
gb6612/py_tuto
|
37f171287c025903cc9003c266618aadcb3f0052
|
[
"MIT"
] | null | null | null |
tuto/00_hello.py
|
gb6612/py_tuto
|
37f171287c025903cc9003c266618aadcb3f0052
|
[
"MIT"
] | null | null | null |
# Print something to the terminal
print("Hey there! I'm your very first python script") # you can comment this way...
input("Press Enter to continue...")
| 39.25
| 84
| 0.713376
| 25
| 157
| 4.48
| 0.92
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.171975
| 157
| 4
| 85
| 39.25
| 0.861538
| 0.375796
| 0
| 0
| 0
| 0
| 0.752688
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
1383ca71600696d380c3a0dc6d653c7f95470b2f
| 42
|
py
|
Python
|
src/anu/data/pipelines/__init__.py
|
ankitskvmdam/anu
|
699598fb60dcc23f6cccd5abb30a03b294d21598
|
[
"MIT"
] | null | null | null |
src/anu/data/pipelines/__init__.py
|
ankitskvmdam/anu
|
699598fb60dcc23f6cccd5abb30a03b294d21598
|
[
"MIT"
] | null | null | null |
src/anu/data/pipelines/__init__.py
|
ankitskvmdam/anu
|
699598fb60dcc23f6cccd5abb30a03b294d21598
|
[
"MIT"
] | null | null | null |
"""Pipeline related modules stay here."""
| 21
| 41
| 0.714286
| 5
| 42
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119048
| 42
| 1
| 42
| 42
| 0.810811
| 0.833333
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
13855a0431b7e519d6002edd45076f4b8a4e3e82
| 274
|
py
|
Python
|
{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/core/serializers.py
|
e-dang/cookiecutter-django
|
2ba986296de1d5a086e73cde746d6fc7366f149c
|
[
"BSD-3-Clause"
] | null | null | null |
{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/core/serializers.py
|
e-dang/cookiecutter-django
|
2ba986296de1d5a086e73cde746d6fc7366f149c
|
[
"BSD-3-Clause"
] | null | null | null |
{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/core/serializers.py
|
e-dang/cookiecutter-django
|
2ba986296de1d5a086e73cde746d6fc7366f149c
|
[
"BSD-3-Clause"
] | null | null | null |
from rest_framework import serializers
class DetailResponseSerializer(serializers.Serializer):
detail = serializers.CharField(read_only=True)
class NonFieldErrorResponseSerializer(serializers.Serializer):
non_field_errors = serializers.CharField(read_only=True)
| 27.4
| 62
| 0.839416
| 27
| 274
| 8.333333
| 0.62963
| 0.186667
| 0.213333
| 0.248889
| 0.284444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.094891
| 274
| 9
| 63
| 30.444444
| 0.907258
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
1390effde3c896a583eb3f16ff3d4d18fdbdfed3
| 36
|
py
|
Python
|
ci/infra/testrunner/kubectl/__init__.py
|
manuelbuil/skuba
|
71770c969f59275d6f7fb7a788635fcce6900bee
|
[
"Apache-2.0"
] | 72
|
2019-07-18T13:01:36.000Z
|
2022-03-05T04:14:06.000Z
|
ci/infra/testrunner/kubectl/__init__.py
|
manuelbuil/skuba
|
71770c969f59275d6f7fb7a788635fcce6900bee
|
[
"Apache-2.0"
] | 602
|
2019-07-18T13:48:04.000Z
|
2021-09-27T14:10:30.000Z
|
ci/infra/testrunner/kubectl/__init__.py
|
manuelbuil/skuba
|
71770c969f59275d6f7fb7a788635fcce6900bee
|
[
"Apache-2.0"
] | 90
|
2019-07-18T09:27:52.000Z
|
2020-12-08T15:57:27.000Z
|
from kubectl.kubectl import Kubectl
| 18
| 35
| 0.861111
| 5
| 36
| 6.2
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 36
| 1
| 36
| 36
| 0.96875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
13d237c5d12d316f1d6ca20a7653418084042ff1
| 56
|
py
|
Python
|
experiments/performance_eval/DeepDTA/data_utils/__init_.py
|
giosumarin/compare_dnn_compression
|
7de9760af6b2031c588dc6c09bbfc3ca33fd14da
|
[
"Apache-2.0"
] | 1
|
2022-03-18T11:05:15.000Z
|
2022-03-18T11:05:15.000Z
|
experiments/performance_eval/DeepDTA/data_utils/__init_.py
|
giosumarin/compare_dnn_compression
|
7de9760af6b2031c588dc6c09bbfc3ca33fd14da
|
[
"Apache-2.0"
] | null | null | null |
experiments/performance_eval/DeepDTA/data_utils/__init_.py
|
giosumarin/compare_dnn_compression
|
7de9760af6b2031c588dc6c09bbfc3ca33fd14da
|
[
"Apache-2.0"
] | null | null | null |
from data_utils import datahelper_noflag, davis_dataset
| 28
| 55
| 0.892857
| 8
| 56
| 5.875
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.089286
| 56
| 1
| 56
| 56
| 0.921569
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
13ecc0f54abe3c4aa3e598fb12acf12f6af1f400
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/pip/_vendor/chardet/euctwprober.py
|
Retraces/UkraineBot
|
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/pip/_vendor/chardet/euctwprober.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/pip/_vendor/chardet/euctwprober.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/d7/7a/7a/10fe3245ac6a9cfe221edc47389e91db3c47ab5fe6f214d18f3559f797
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.395833
| 0
| 96
| 1
| 96
| 96
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b92fe2d7eaa82c839735f3a432ffead8cb48f5c4
| 435
|
py
|
Python
|
gpseqc/__init__.py
|
ggirelli/gpseq-centrality-estimate
|
9e911c360e2abddc688ea3fb4390bc0f8e2bfed3
|
[
"MIT"
] | 1
|
2020-08-21T07:19:49.000Z
|
2020-08-21T07:19:49.000Z
|
gpseqc/__init__.py
|
ggirelli/gpseq-centrality-estimate
|
9e911c360e2abddc688ea3fb4390bc0f8e2bfed3
|
[
"MIT"
] | 1
|
2019-01-29T08:21:21.000Z
|
2019-01-29T08:21:21.000Z
|
gpseqc/__init__.py
|
ggirelli/gpseq-centrality-estimate
|
9e911c360e2abddc688ea3fb4390bc0f8e2bfed3
|
[
"MIT"
] | 2
|
2020-07-16T11:09:22.000Z
|
2020-08-21T07:19:54.000Z
|
# -*- coding: utf-8 -*-
'''
@author: Gabriele Girelli
@email: gigi.ga90@gmail.com
@description: GPSeq Centrality Estimation package.
'''
# DEPENDENCIES =================================================================
from gpseqc import bed, centrality, cutsite_domain
# END ==========================================================================
################################################################################
| 29
| 80
| 0.351724
| 25
| 435
| 6.08
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007481
| 0.078161
| 435
| 15
| 81
| 29
| 0.371571
| 0.655172
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b93ccaf55a14de355d35bc247f3e9dc878f674d4
| 95
|
py
|
Python
|
netbox_secretstore/utils/tables.py
|
Onemind-Services-LLC/netbox-secretstore
|
cfe8b813ed5997b0b8566d00cca90991fc87b55b
|
[
"Apache-2.0"
] | null | null | null |
netbox_secretstore/utils/tables.py
|
Onemind-Services-LLC/netbox-secretstore
|
cfe8b813ed5997b0b8566d00cca90991fc87b55b
|
[
"Apache-2.0"
] | null | null | null |
netbox_secretstore/utils/tables.py
|
Onemind-Services-LLC/netbox-secretstore
|
cfe8b813ed5997b0b8566d00cca90991fc87b55b
|
[
"Apache-2.0"
] | null | null | null |
from netbox.tables import columns
class PluginButtonsColumn(columns.ActionsColumn):
pass
| 15.833333
| 49
| 0.810526
| 10
| 95
| 7.7
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136842
| 95
| 5
| 50
| 19
| 0.939024
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 5
|
b93f44ed4fed0f68ed64c8ef026f32b834f41a4c
| 223
|
py
|
Python
|
EJERCICIO2/animal.py
|
Germiprogramer/PRIMERPARCIAL
|
01a2c95a4b259829ad04e5439beae53e6734b034
|
[
"Apache-2.0"
] | null | null | null |
EJERCICIO2/animal.py
|
Germiprogramer/PRIMERPARCIAL
|
01a2c95a4b259829ad04e5439beae53e6734b034
|
[
"Apache-2.0"
] | null | null | null |
EJERCICIO2/animal.py
|
Germiprogramer/PRIMERPARCIAL
|
01a2c95a4b259829ad04e5439beae53e6734b034
|
[
"Apache-2.0"
] | null | null | null |
class Animal:
def __init__(self, nombre, tamaño):
self.nombre = nombre
self.tamaño = tamaño
def get_nombre(self):
return self.nombre
def set_nombre(self, a):
self.nombre = a
| 22.3
| 39
| 0.591928
| 28
| 223
| 4.5
| 0.392857
| 0.31746
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.318386
| 223
| 10
| 40
| 22.3
| 0.828947
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.375
| false
| 0
| 0
| 0.125
| 0.625
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
b95aac929561cfbf233253bbd8e7225b97cea630
| 206
|
py
|
Python
|
deploy/resources/deployrpm.py
|
zhxiaohe/starwars_api
|
f1b729e819eb19e5eb59630bed56b13127eb1ef2
|
[
"MIT"
] | null | null | null |
deploy/resources/deployrpm.py
|
zhxiaohe/starwars_api
|
f1b729e819eb19e5eb59630bed56b13127eb1ef2
|
[
"MIT"
] | null | null | null |
deploy/resources/deployrpm.py
|
zhxiaohe/starwars_api
|
f1b729e819eb19e5eb59630bed56b13127eb1ef2
|
[
"MIT"
] | null | null | null |
from flask_restful import Resource  # flask.ext.* import paths no longer exist in modern Flask
from common.util import login_required
class DeployManager(Resource):
method_decorators = [login_required]
def get(self):
return 'Auth Token success'
| 25.75
| 40
| 0.757282
| 26
| 206
| 5.884615
| 0.807692
| 0.169935
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.174757
| 206
| 8
| 41
| 25.75
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.333333
| 0.166667
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 5
|
b989c9fe75cef548c3f61dbcc278a1093416f782
| 566
|
py
|
Python
|
tasks/dirs.py
|
andypbarrett/nsidc-seaice
|
167a16309f7eaadd5c613b54a7df26eb1f48c2f3
|
[
"MIT"
] | 2
|
2020-08-27T08:40:22.000Z
|
2021-04-14T15:42:09.000Z
|
tasks/dirs.py
|
andypbarrett/nsidc-seaice
|
167a16309f7eaadd5c613b54a7df26eb1f48c2f3
|
[
"MIT"
] | null | null | null |
tasks/dirs.py
|
andypbarrett/nsidc-seaice
|
167a16309f7eaadd5c613b54a7df26eb1f48c2f3
|
[
"MIT"
] | null | null | null |
import os
import seaice.nasateam as nt
def this_dir():
return os.path.dirname(os.path.abspath(__file__))
def parent_dir():
return os.path.dirname(this_dir())
def default_config_dir():
return os.path.join(parent_dir(), 'seaice.tools', 'configs')
def output_image_dir():
return os.path.join(parent_dir(), 'test_output_images')
def output_dir():
return os.path.join(parent_dir(), 'test_output')
def data_dir():
return os.path.join(parent_dir(), 'data', 'xlsify')
def datastore_directory():
return nt.DATA_STORE_BASE_DIRECTORY
| 17.6875
| 64
| 0.712014
| 84
| 566
| 4.511905
| 0.357143
| 0.110818
| 0.174142
| 0.237467
| 0.46438
| 0.348285
| 0.348285
| 0.200528
| 0.200528
| 0
| 0
| 0
| 0.146643
| 566
| 31
| 65
| 18.258065
| 0.784679
| 0
| 0
| 0
| 0
| 0
| 0.102474
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4375
| true
| 0
| 0.125
| 0.4375
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
b98c77ac54f0888487feee1d4446a0c0fcee1a1e
| 81
|
py
|
Python
|
segmentfault/apps/circle/tasks.py
|
Yookyiss/segmentfault
|
8fb7890c8b650ac34541a8fb14c3cd9bef98d120
|
[
"MIT"
] | null | null | null |
segmentfault/apps/circle/tasks.py
|
Yookyiss/segmentfault
|
8fb7890c8b650ac34541a8fb14c3cd9bef98d120
|
[
"MIT"
] | 12
|
2020-02-12T01:14:42.000Z
|
2022-03-11T23:54:43.000Z
|
segmentfault/apps/circle/tasks.py
|
Yookyiss/segmentfault
|
8fb7890c8b650ac34541a8fb14c3cd9bef98d120
|
[
"MIT"
] | null | null | null |
# -*- coding:utf-8 -*-
# @Time : 2019/7/29 11:39 PM
# @Author : __wutonghe__
| 20.25
| 31
| 0.555556
| 12
| 81
| 3.416667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 0.222222
| 81
| 3
| 32
| 27
| 0.460317
| 0.91358
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b99ab66bd66c9cd85ce315540478197f26f753a7
| 248
|
py
|
Python
|
backend/main/views.py
|
adityamittl/ThankHut
|
caf76c12f90b08c1b7d449930bf51a975cd8959f
|
[
"MIT"
] | null | null | null |
backend/main/views.py
|
adityamittl/ThankHut
|
caf76c12f90b08c1b7d449930bf51a975cd8959f
|
[
"MIT"
] | null | null | null |
backend/main/views.py
|
adityamittl/ThankHut
|
caf76c12f90b08c1b7d449930bf51a975cd8959f
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
# Create your views here.
def index(request):
return render(request, 'index.html')
def note(request):
return render(request, 'note.html')
def send(request):
return render(request, 'send.html')
| 15.5
| 40
| 0.705645
| 33
| 248
| 5.30303
| 0.484848
| 0.222857
| 0.325714
| 0.445714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173387
| 248
| 15
| 41
| 16.533333
| 0.853659
| 0.092742
| 0
| 0
| 0
| 0
| 0.125561
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.428571
| false
| 0
| 0.142857
| 0.428571
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
b9cde4d25a4ccb5a12098cf998eae765df2e7aed
| 3,485
|
py
|
Python
|
server/camphoric/migrations/0015_auto_20201128_0134.py
|
willfulbard/camphoric
|
32e30e88b97a905dbde00229a34f01cb05316e08
|
[
"MIT"
] | 2
|
2020-09-25T01:20:14.000Z
|
2021-08-18T18:49:47.000Z
|
server/camphoric/migrations/0015_auto_20201128_0134.py
|
camphoric/camphoric
|
31ad94d0da61cd649fe55b74adfb83196eef0011
|
[
"MIT"
] | 57
|
2020-05-30T03:22:56.000Z
|
2022-03-07T01:52:11.000Z
|
server/camphoric/migrations/0015_auto_20201128_0134.py
|
camphoric/camphoric
|
31ad94d0da61cd649fe55b74adfb83196eef0011
|
[
"MIT"
] | 1
|
2020-01-24T04:30:07.000Z
|
2020-01-24T04:30:07.000Z
|
# Generated by Django 3.1.2 on 2020-11-28 01:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('camphoric', '0014_auto_20201025_0058'),
]
operations = [
migrations.AlterField(
model_name='camper',
name='attributes',
field=models.JSONField(null=True),
),
migrations.AlterField(
model_name='deposit',
name='attributes',
field=models.JSONField(null=True),
),
migrations.AlterField(
model_name='event',
name='camper_pricing_logic',
field=models.JSONField(help_text='JsonLogic Camper-level pricing components', null=True),
),
migrations.AlterField(
model_name='event',
name='camper_schema',
field=models.JSONField(help_text='JSON schema for Camper.attributes', null=True),
),
migrations.AlterField(
model_name='event',
name='confirmation_email_template',
field=models.JSONField(default=list, help_text='JsonLogic template'),
),
migrations.AlterField(
model_name='event',
name='confirmation_page_template',
field=models.JSONField(default=list, help_text='JsonLogic template'),
),
migrations.AlterField(
model_name='event',
name='deposit_schema',
field=models.JSONField(help_text='JSON schema for Deposit.attributes', null=True),
),
migrations.AlterField(
model_name='event',
name='payment_schema',
field=models.JSONField(help_text='JSON schema for Payment.attributes', null=True),
),
migrations.AlterField(
model_name='event',
name='pricing',
field=models.JSONField(help_text='key-value object with pricing variables', null=True),
),
migrations.AlterField(
model_name='event',
name='registration_pricing_logic',
field=models.JSONField(help_text='JsonLogic Registration-level pricing components', null=True),
),
migrations.AlterField(
model_name='event',
name='registration_schema',
field=models.JSONField(help_text='JSON schema for Registration.attributes', null=True),
),
migrations.AlterField(
model_name='event',
name='registration_ui_schema',
field=models.JSONField(help_text='react-jsonschema-form uiSchema for registration form', null=True),
),
migrations.AlterField(
model_name='payment',
name='attributes',
field=models.JSONField(null=True),
),
migrations.AlterField(
model_name='registration',
name='attributes',
field=models.JSONField(null=True),
),
migrations.AlterField(
model_name='registration',
name='client_reported_pricing',
field=models.JSONField(null=True),
),
migrations.AlterField(
model_name='registration',
name='server_pricing_results',
field=models.JSONField(null=True),
),
migrations.AlterField(
model_name='registrationtype',
name='invitation_email_template',
field=models.JSONField(null=True),
),
]
| 35.20202
| 112
| 0.586227
| 320
| 3,485
| 6.228125
| 0.221875
| 0.170597
| 0.213246
| 0.247366
| 0.784747
| 0.747617
| 0.711992
| 0.69995
| 0.650778
| 0.363773
| 0
| 0.012789
| 0.304448
| 3,485
| 98
| 113
| 35.561224
| 0.809406
| 0.012912
| 0
| 0.652174
| 1
| 0
| 0.234729
| 0.069226
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.01087
| 0
| 0.043478
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b9e19abcd38a18b4cb4b386c5ea53016f616a174
| 480
|
py
|
Python
|
src/python/pants/backend/experimental/python/lint/autoflake/register.py
|
bastianwegge/pants
|
43f0b90d41622bee0ed22249dbaffb3ff4ad2eb2
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/backend/experimental/python/lint/autoflake/register.py
|
bastianwegge/pants
|
43f0b90d41622bee0ed22249dbaffb3ff4ad2eb2
|
[
"Apache-2.0"
] | 22
|
2022-01-27T09:59:50.000Z
|
2022-03-30T07:06:49.000Z
|
src/python/pants/backend/experimental/python/lint/autoflake/register.py
|
ryanking/pants
|
e45b00d2eb467b599966bca262405a5d74d27bdd
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
"""Autoformatter for removing unused Python imports.
See https://github.com/myint/autoflake for details.
"""
from pants.backend.python.lint.autoflake import rules as autoflake_rules
from pants.backend.python.lint.autoflake import skip_field, subsystem
def rules():
return (*autoflake_rules.rules(), *skip_field.rules(), *subsystem.rules())
| 32
| 78
| 0.775
| 64
| 480
| 5.75
| 0.59375
| 0.048913
| 0.086957
| 0.119565
| 0.222826
| 0.222826
| 0.222826
| 0
| 0
| 0
| 0
| 0.014118
| 0.114583
| 480
| 14
| 79
| 34.285714
| 0.851765
| 0.479167
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.5
| 0.25
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 5
|
6a130d9c4062cb3d5da7db7c6acb1a9244178580
| 140
|
py
|
Python
|
while_loop/lab/sum_numbers.py
|
PetkoAndreev/Python-basics
|
a376362548380ae50c7c707551cb821547f44402
|
[
"MIT"
] | null | null | null |
while_loop/lab/sum_numbers.py
|
PetkoAndreev/Python-basics
|
a376362548380ae50c7c707551cb821547f44402
|
[
"MIT"
] | null | null | null |
while_loop/lab/sum_numbers.py
|
PetkoAndreev/Python-basics
|
a376362548380ae50c7c707551cb821547f44402
|
[
"MIT"
] | null | null | null |
target_num = int(input())
sum_nums = 0
while sum_nums < target_num:
input_num = int(input())
sum_nums += input_num
print(sum_nums)
| 17.5
| 28
| 0.692857
| 23
| 140
| 3.869565
| 0.391304
| 0.314607
| 0.247191
| 0.314607
| 0.404494
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008772
| 0.185714
| 140
| 8
| 29
| 17.5
| 0.77193
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.166667
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6a29d67e627d3f690a26eb927960da3aa98f0cd7
| 8,083
|
py
|
Python
|
tests/test_message_handler/test_strategies/test_utils.py
|
Zapix/mtpylon
|
b268a4e2d1bc641cace1962ea68de73c1156e44c
|
[
"MIT"
] | 9
|
2021-11-10T08:53:51.000Z
|
2021-12-15T12:03:44.000Z
|
tests/test_message_handler/test_strategies/test_utils.py
|
Zapix/mtpylon
|
b268a4e2d1bc641cace1962ea68de73c1156e44c
|
[
"MIT"
] | 123
|
2020-10-22T07:08:20.000Z
|
2021-09-29T15:26:22.000Z
|
tests/test_message_handler/test_strategies/test_utils.py
|
Zapix/mtpylon
|
b268a4e2d1bc641cace1962ea68de73c1156e44c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import pytest
from mtpylon import long, int128
from mtpylon.messages import UnencryptedMessage, EncryptedMessage
from mtpylon.serialization import CallableFunc
from mtpylon.message_handler.strategies.utils import (
is_unencrypted_message,
is_rpc_call_message,
is_container_message,
is_msgs_ack,
)
from mtpylon.service_schema.functions import req_pq, ping
from mtpylon.service_schema.constructors import (
MsgsAck,
MessageContainer,
Message
)
from tests.simpleschema import set_task
@pytest.mark.parametrize(
'message',
[
pytest.param(
UnencryptedMessage(
message_id=long(0x51e57ac42770964a),
message_data=CallableFunc(
func=req_pq,
params={'nonce': int128(234234)}
),
),
id='unencrypted message'
),
]
)
def test_is_unencrypted_message_true(message):
assert is_unencrypted_message(message)
@pytest.mark.parametrize(
'message',
[
pytest.param(
UnencryptedMessage(
message_id=long(0x51e57ac42770964a),
message_data='wrong data',
),
id='unencrypted message wrong rpc call'
),
pytest.param(
UnencryptedMessage(
message_id=long(0x51e57ac42770964a),
message_data=CallableFunc(
func=set_task,
params={'content': 'hello world!'}
),
),
id='unencrypted message wrong rpc call'
),
pytest.param(
EncryptedMessage(
message_id=long(0x51e57ac42770964a),
session_id=long(1),
salt=long(2),
seq_no=0,
message_data='Wrong message data'
),
id='encrypted message'
)
]
)
def test_is_unencrypted_message_false(message):
assert not is_unencrypted_message(message)
@pytest.mark.parametrize(
'message',
[
pytest.param(
EncryptedMessage(
message_id=long(0x51e57ac42770964a),
session_id=long(1),
salt=long(2),
seq_no=0,
message_data=CallableFunc(
func=set_task,
params={'content': 'hello world!'}
)
),
id='encrypted message'
),
pytest.param(
Message(
msg_id=long(0x60a4d9830000001c),
seqno=9,
bytes=16,
body=CallableFunc(
func=set_task,
params={'content': 'hello world'}
)
),
id='message constructor'
),
]
)
def test_is_rpc_call_true(message):
assert is_rpc_call_message(message)
@pytest.mark.parametrize(
'message',
[
pytest.param(
UnencryptedMessage(
message_id=long(0x51e57ac42770964a),
message_data=CallableFunc(
func=req_pq,
params={'nonce': int128(234234)}
),
),
id='unencrypted message'
),
pytest.param(
EncryptedMessage(
message_id=long(0x51e57ac42770964a),
session_id=long(1),
salt=long(2),
seq_no=0,
message_data='Wrong message data'
),
id='encrypted message wrong data'
),
pytest.param(
EncryptedMessage(
message_id=long(0x51e57ac42770964a),
session_id=long(1),
salt=long(2),
seq_no=0,
message_data='some un expected data'
),
id='encrypted message ping call'
),
pytest.param(
EncryptedMessage(
message_id=long(0x51e57ac42770964a),
session_id=long(1),
salt=long(2),
seq_no=0,
message_data=CallableFunc(
func=ping,
params={'ping_id': long(111)},
)
),
id='encrypted message ping call'
)
]
)
def test_is_rpc_call_message_false(message):
assert not is_rpc_call_message(message)
@pytest.mark.parametrize(
'message',
[
pytest.param(
pytest.param(
UnencryptedMessage(
message_id=long(0x51e57ac42770964a),
message_data=CallableFunc(
func=req_pq,
params={'nonce': int128(234234)}
),
),
id='unencrypted message'
),
),
pytest.param(
EncryptedMessage(
message_id=long(0x51e57ac42770964a),
session_id=long(1),
salt=long(2),
seq_no=0,
message_data=CallableFunc(
func=ping,
params={'ping_id': long(111)},
)
),
id='encrypted message ping call'
)
]
)
def test_is_not_container_message(message):
assert not is_container_message(message)
@pytest.mark.parametrize(
'message',
[
pytest.param(
EncryptedMessage(
message_id=long(0x51e57ac42770964a),
session_id=long(1),
salt=long(2),
seq_no=0,
message_data=MessageContainer(
messages=[
Message(
msg_id=long(0x5e0b700a00000000),
seqno=7,
bytes=20,
body=MsgsAck(
msg_ids=[
long(1621416313)
]
),
),
Message(
msg_id=long(0x60a4d9830000001c),
seqno=9,
bytes=16,
body=CallableFunc(
func=set_task,
params={'content': 'hello world'}
)
),
]
)
),
),
]
)
def test_is_container_message(message):
assert is_container_message(message)
@pytest.mark.parametrize(
'message',
[
pytest.param(
UnencryptedMessage(
message_id=long(0x51e57ac42770964a),
message_data=CallableFunc(
func=req_pq,
params={'nonce': int128(234234)}
),
),
id='unencrypted message'
),
pytest.param(
EncryptedMessage(
message_id=long(0x51e57ac42770964a),
session_id=long(1),
salt=long(2),
seq_no=0,
message_data=CallableFunc(
func=ping,
params={'ping_id': long(111)},
)
),
id='encrypted message ping call'
)
]
)
def test_is_not_msgs_ack(message):
assert not is_msgs_ack(message)
@pytest.mark.parametrize(
'message',
[
pytest.param(
EncryptedMessage(
message_id=long(0x51e57ac42770964a),
session_id=long(1),
salt=long(2),
seq_no=0,
message_data=MsgsAck(
msg_ids=[
long(0x51e57ac42770964a),
long(0x60a4d9830000001c),
]
)
),
id='encrypted msgs ack'
)
]
)
def test_is_msgs_ack(message):
assert is_msgs_ack(message)
| 27.493197
| 65
| 0.462328
| 633
| 8,083
| 5.703002
| 0.131122
| 0.049862
| 0.054017
| 0.128809
| 0.763989
| 0.751524
| 0.722992
| 0.722992
| 0.706925
| 0.706371
| 0
| 0.081258
| 0.453421
| 8,083
| 293
| 66
| 27.587031
| 0.735853
| 0.002598
| 0
| 0.676471
| 0
| 0
| 0.073077
| 0
| 0
| 0
| 0.044665
| 0
| 0.029412
| 1
| 0.029412
| false
| 0
| 0.029412
| 0
| 0.058824
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6a2b01f2dd148676aeb7010da99d2ca3c7268928
| 95
|
py
|
Python
|
ip_rep/database_connector/views.py
|
logicbomb-1/ARTIF
|
8f5cc38ab2e986ee39cbc0328aac0d825c1915b4
|
[
"MIT"
] | 205
|
2021-06-21T13:49:14.000Z
|
2022-02-17T05:10:50.000Z
|
ip_rep/database_connector/views.py
|
0xDivyanshu/ARTIF
|
946ea8ea7b60e2d81b35ae820a07241ecf25e9b0
|
[
"MIT"
] | 3
|
2021-07-11T08:08:01.000Z
|
2021-09-22T20:01:03.000Z
|
ip_rep/database_connector/views.py
|
0xDivyanshu/ARTIF
|
946ea8ea7b60e2d81b35ae820a07241ecf25e9b0
|
[
"MIT"
] | 35
|
2021-06-21T10:35:07.000Z
|
2021-10-11T07:39:12.000Z
|
from django.shortcuts import render
from pymongo import MongoClient
# Create your views here.
| 19
| 35
| 0.821053
| 13
| 95
| 6
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.147368
| 95
| 4
| 36
| 23.75
| 0.962963
| 0.242105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6a37d0bd32839f17d66049b5827401a0cbb335da
| 31
|
py
|
Python
|
py/desispec/_version.py
|
echaussidon/desispec
|
8a8bd59653861509dd630ffc8e1cd6c67f6cdd51
|
[
"BSD-3-Clause"
] | null | null | null |
py/desispec/_version.py
|
echaussidon/desispec
|
8a8bd59653861509dd630ffc8e1cd6c67f6cdd51
|
[
"BSD-3-Clause"
] | null | null | null |
py/desispec/_version.py
|
echaussidon/desispec
|
8a8bd59653861509dd630ffc8e1cd6c67f6cdd51
|
[
"BSD-3-Clause"
] | null | null | null |
__version__ = '0.49.1.dev6472'
| 15.5
| 30
| 0.709677
| 5
| 31
| 3.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.285714
| 0.096774
| 31
| 1
| 31
| 31
| 0.357143
| 0
| 0
| 0
| 0
| 0
| 0.451613
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6a3d9d092ffdf84d2327df789bc2cdc2c5ecf639
| 439
|
py
|
Python
|
tests/test_mocks_generator.py
|
yairhoresh/bias-detector
|
0bc28f646c98aee33c93a6c0999a0b05c4882a61
|
[
"MIT"
] | 50
|
2021-02-02T19:27:01.000Z
|
2021-12-22T22:10:02.000Z
|
tests/test_mocks_generator.py
|
yairhoresh/bias-detector
|
0bc28f646c98aee33c93a6c0999a0b05c4882a61
|
[
"MIT"
] | 1
|
2022-02-24T08:03:39.000Z
|
2022-02-24T08:03:39.000Z
|
tests/test_mocks_generator.py
|
LaudateCorpus1/bias-detector
|
c9229b8928f3bdee582039622dc142b623b37467
|
[
"MIT"
] | 10
|
2021-02-04T23:33:26.000Z
|
2022-02-28T17:13:30.000Z
|
from .mocks_generator import *
def test_generate_mocks():
first_names_mock, last_names_mock, zip_codes_mock, emails_mock, y_scores_mock, y_pred_mock, y_true_mock = generate_mocks(10)
assert len(first_names_mock) == 10
assert len(last_names_mock) == 10
assert len(zip_codes_mock) == 10
assert len(emails_mock) == 10
assert len(y_scores_mock) == 10
assert len(y_pred_mock) == 10
assert len(y_true_mock) == 10
| 39.909091
| 128
| 0.733485
| 72
| 439
| 4.055556
| 0.291667
| 0.191781
| 0.263699
| 0.308219
| 0.30137
| 0
| 0
| 0
| 0
| 0
| 0
| 0.043956
| 0.170843
| 439
| 11
| 129
| 39.909091
| 0.758242
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.7
| 1
| 0.1
| true
| 0
| 0.1
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
e003c3686dbddde727f2a55cd6d3e1b10d25e58e
| 201
|
py
|
Python
|
src/dfd/datasets/modifications/__init__.py
|
cicheck/dfd
|
b02752f958cfea2f85222e2b4b3ba7e265a6152d
|
[
"MIT"
] | null | null | null |
src/dfd/datasets/modifications/__init__.py
|
cicheck/dfd
|
b02752f958cfea2f85222e2b4b3ba7e265a6152d
|
[
"MIT"
] | 2
|
2021-12-31T17:44:20.000Z
|
2021-12-31T19:51:11.000Z
|
src/dfd/datasets/modifications/__init__.py
|
cicheck/dfd
|
b02752f958cfea2f85222e2b4b3ba7e265a6152d
|
[
"MIT"
] | null | null | null |
"""Definitions of frame level modifications.
Each modification takes as input single frame and outputs modified frame.
"""
from dfd.datasets.modifications.definitions.clahe import CLAHEModification
| 25.125
| 74
| 0.820896
| 24
| 201
| 6.875
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119403
| 201
| 7
| 75
| 28.714286
| 0.932203
| 0.577114
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
e008ef0dfb77ddde56a895453636f91569106be5
| 170
|
py
|
Python
|
comtypes/test/setup.py
|
phuslu/pyMSAA
|
611bc4c31e0d6ba36f0f0bebdc6e6be14b994eb0
|
[
"MIT"
] | 23
|
2015-05-28T15:31:35.000Z
|
2022-02-16T07:51:34.000Z
|
comtypes/test/setup.py
|
kar98kar/pyMSAA
|
611bc4c31e0d6ba36f0f0bebdc6e6be14b994eb0
|
[
"MIT"
] | 3
|
2020-05-19T03:00:52.000Z
|
2020-11-03T09:22:51.000Z
|
comtypes/test/setup.py
|
kar98kar/pyMSAA
|
611bc4c31e0d6ba36f0f0bebdc6e6be14b994eb0
|
[
"MIT"
] | 13
|
2016-08-26T23:00:40.000Z
|
2022-03-03T09:58:36.000Z
|
# all the unittests can be converted to exe-files.
from distutils.core import setup
import glob
import py2exe
setup(name='test_*', console=glob.glob("test_*.py"))
| 24.285714
| 53
| 0.735294
| 26
| 170
| 4.730769
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006944
| 0.152941
| 170
| 6
| 54
| 28.333333
| 0.847222
| 0.282353
| 0
| 0
| 0
| 0
| 0.131579
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
e03bf7f0b753932fd48e0d252639cad3d22fea6d
| 86
|
py
|
Python
|
pyFiDEL/__init__.py
|
sungcheolkim78/pyFiDEL
|
670067b12a2efd276e23382251ec612af678731f
|
[
"Apache-2.0"
] | null | null | null |
pyFiDEL/__init__.py
|
sungcheolkim78/pyFiDEL
|
670067b12a2efd276e23382251ec612af678731f
|
[
"Apache-2.0"
] | null | null | null |
pyFiDEL/__init__.py
|
sungcheolkim78/pyFiDEL
|
670067b12a2efd276e23382251ec612af678731f
|
[
"Apache-2.0"
] | null | null | null |
from .simulator import SimClassifier
from .pcr import PCR
from .ensemble import FiDEL
| 21.5
| 36
| 0.825581
| 12
| 86
| 5.916667
| 0.583333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.139535
| 86
| 3
| 37
| 28.666667
| 0.959459
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
e073e43fb191ceedd3b610406c7ce78e3814aeae
| 27
|
py
|
Python
|
msgflow/__init__.py
|
noriyukipy/smilechat
|
a9c0ef93c35b2a1f3e9d1700391ae865544adfbc
|
[
"MIT"
] | 2
|
2020-09-19T07:57:28.000Z
|
2020-09-20T10:41:42.000Z
|
msgflow/__init__.py
|
noriyukipy/smilechat
|
a9c0ef93c35b2a1f3e9d1700391ae865544adfbc
|
[
"MIT"
] | null | null | null |
msgflow/__init__.py
|
noriyukipy/smilechat
|
a9c0ef93c35b2a1f3e9d1700391ae865544adfbc
|
[
"MIT"
] | 2
|
2020-09-20T10:41:51.000Z
|
2020-11-09T06:15:32.000Z
|
from .bot import Messenger
| 13.5
| 26
| 0.814815
| 4
| 27
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 27
| 1
| 27
| 27
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
0ec07f317ec396e2b695fa4589614010547b3cb8
| 151
|
py
|
Python
|
pdfplay/views.py
|
dbads/pdf_processing
|
5ab60cf0ad8e93c8619ed3cc77ddd420f0a3abf5
|
[
"MIT"
] | null | null | null |
pdfplay/views.py
|
dbads/pdf_processing
|
5ab60cf0ad8e93c8619ed3cc77ddd420f0a3abf5
|
[
"MIT"
] | 9
|
2020-02-12T03:23:57.000Z
|
2022-01-13T01:58:36.000Z
|
pdfplay/views.py
|
dbads/pdf_processing
|
5ab60cf0ad8e93c8619ed3cc77ddd420f0a3abf5
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render, get_object_or_404, redirect
def pdf_play(request):
return render(request, 'pdf_play.html', {
})
| 21.571429
| 64
| 0.695364
| 20
| 151
| 5
| 0.8
| 0.14
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025
| 0.205298
| 151
| 7
| 65
| 21.571429
| 0.808333
| 0
| 0
| 0
| 0
| 0
| 0.085526
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
0ef313666d9a8df945780e013fe54f9e8669cfeb
| 133
|
py
|
Python
|
attic/gui/accounting/__init__.py
|
awacha/cct
|
be1adbed2533df15c778051f3f4f9da0749c873a
|
[
"BSD-3-Clause"
] | 1
|
2015-11-04T16:37:39.000Z
|
2015-11-04T16:37:39.000Z
|
attic/gui/accounting/__init__.py
|
awacha/cct
|
be1adbed2533df15c778051f3f4f9da0749c873a
|
[
"BSD-3-Clause"
] | null | null | null |
attic/gui/accounting/__init__.py
|
awacha/cct
|
be1adbed2533df15c778051f3f4f9da0749c873a
|
[
"BSD-3-Clause"
] | 1
|
2020-03-05T02:50:43.000Z
|
2020-03-05T02:50:43.000Z
|
from .authenticatordialog import AuthenticatorDialog
from .projectmanager import ProjectManager
from .usermanager import UserManager
| 33.25
| 52
| 0.887218
| 12
| 133
| 9.833333
| 0.416667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090226
| 133
| 3
| 53
| 44.333333
| 0.975207
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
1609886486077d333ed1462e0148def80f39d1ed
| 207
|
py
|
Python
|
read.py
|
pedrocaseiro/twitch-bot
|
12661901a0bea463508334250c9886abc3ca776b
|
[
"MIT"
] | 1
|
2017-01-08T17:43:18.000Z
|
2017-01-08T17:43:18.000Z
|
read.py
|
pedrocaseiro/twitch-bot
|
12661901a0bea463508334250c9886abc3ca776b
|
[
"MIT"
] | null | null | null |
read.py
|
pedrocaseiro/twitch-bot
|
12661901a0bea463508334250c9886abc3ca776b
|
[
"MIT"
] | null | null | null |
import string
def getUser(line):
separate = line.split(":", 2)
user = separate[1].split("!", 1)[0]
return user
def getMessage(line):
separate = line.split(":", 2)
message = separate[2]
return message
| 18.818182
| 36
| 0.666667
| 29
| 207
| 4.758621
| 0.482759
| 0.173913
| 0.231884
| 0.304348
| 0.318841
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034286
| 0.154589
| 207
| 11
| 37
| 18.818182
| 0.754286
| 0
| 0
| 0.222222
| 0
| 0
| 0.014423
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.222222
| false
| 0
| 0.111111
| 0
| 0.555556
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
161cfa9f4872e789ebc8897cfa9cfb2e456af9ed
| 67
|
py
|
Python
|
stubs/asn1crypto/algos.py
|
joernheissler/chipcard
|
1ec1da0a1575f8d9735ae14fb54df6a20e654fbb
|
[
"MIT"
] | 1
|
2020-04-23T09:13:33.000Z
|
2020-04-23T09:13:33.000Z
|
stubs/asn1crypto/algos.py
|
joernheissler/chipcard
|
1ec1da0a1575f8d9735ae14fb54df6a20e654fbb
|
[
"MIT"
] | null | null | null |
stubs/asn1crypto/algos.py
|
joernheissler/chipcard
|
1ec1da0a1575f8d9735ae14fb54df6a20e654fbb
|
[
"MIT"
] | null | null | null |
from .core import Sequence
class DHParameters(Sequence):
...
| 11.166667
| 29
| 0.701493
| 7
| 67
| 6.714286
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.19403
| 67
| 5
| 30
| 13.4
| 0.87037
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
161de2dc5dd7767243b2e68991638c51774852bf
| 57
|
py
|
Python
|
rio_viz/templates/__init__.py
|
dzanaga/rio-viz
|
88ec2dec0d69c9d13e5485af7315cc7b56a25963
|
[
"MIT"
] | null | null | null |
rio_viz/templates/__init__.py
|
dzanaga/rio-viz
|
88ec2dec0d69c9d13e5485af7315cc7b56a25963
|
[
"MIT"
] | null | null | null |
rio_viz/templates/__init__.py
|
dzanaga/rio-viz
|
88ec2dec0d69c9d13e5485af7315cc7b56a25963
|
[
"MIT"
] | null | null | null |
"""rio_viz.templates"""
from .template import * # noqa
| 14.25
| 31
| 0.666667
| 7
| 57
| 5.285714
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157895
| 57
| 3
| 32
| 19
| 0.770833
| 0.403509
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
16574e03be20d970ce586fdad8a048a93410bda9
| 120
|
py
|
Python
|
amd64-linux/lib/python/mod_pmppc_components_gcommands.py
|
qiyancos/Simics-3.0.31
|
9bd52d5abad023ee87a37306382a338abf7885f1
|
[
"BSD-4-Clause",
"FSFAP"
] | 1
|
2020-06-15T10:41:18.000Z
|
2020-06-15T10:41:18.000Z
|
amd64-linux/lib/python/mod_pmppc_components_gcommands.py
|
qiyancos/Simics-3.0.31
|
9bd52d5abad023ee87a37306382a338abf7885f1
|
[
"BSD-4-Clause",
"FSFAP"
] | null | null | null |
amd64-linux/lib/python/mod_pmppc_components_gcommands.py
|
qiyancos/Simics-3.0.31
|
9bd52d5abad023ee87a37306382a338abf7885f1
|
[
"BSD-4-Clause",
"FSFAP"
] | 3
|
2020-08-10T10:25:02.000Z
|
2021-09-12T01:12:09.000Z
|
## Copyright 2005-2007 Virtutech AB
from components import register_components
register_components('pmppc-components')
| 30
| 42
| 0.841667
| 14
| 120
| 7.071429
| 0.714286
| 0.363636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073395
| 0.091667
| 120
| 3
| 43
| 40
| 0.834862
| 0.266667
| 0
| 0
| 0
| 0
| 0.190476
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
1659437885a7b7d145d957a21b895af0ef1a74c1
| 140
|
py
|
Python
|
src/tutorial/todo/admin.py
|
okwrtdsh/django_tutorial
|
3125b9a9b1d606626507d01142c4437d03909e0a
|
[
"MIT"
] | null | null | null |
src/tutorial/todo/admin.py
|
okwrtdsh/django_tutorial
|
3125b9a9b1d606626507d01142c4437d03909e0a
|
[
"MIT"
] | null | null | null |
src/tutorial/todo/admin.py
|
okwrtdsh/django_tutorial
|
3125b9a9b1d606626507d01142c4437d03909e0a
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from tutorial.todo.models import ToDoUser, ToDo
admin.site.register(ToDoUser)
admin.site.register(ToDo)
| 17.5
| 47
| 0.814286
| 20
| 140
| 5.7
| 0.55
| 0.157895
| 0.298246
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 140
| 7
| 48
| 20
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
1661fb8cb169b10d9c0631cbd2b573355ff40f8d
| 47
|
py
|
Python
|
cli_toolkit/tests/__init__.py
|
hile/cli-toolkit
|
3dbd6b97e69cc0b9a3b9facb33e0a4e6b7d1bc33
|
[
"PSF-2.0"
] | null | null | null |
cli_toolkit/tests/__init__.py
|
hile/cli-toolkit
|
3dbd6b97e69cc0b9a3b9facb33e0a4e6b7d1bc33
|
[
"PSF-2.0"
] | null | null | null |
cli_toolkit/tests/__init__.py
|
hile/cli-toolkit
|
3dbd6b97e69cc0b9a3b9facb33e0a4e6b7d1bc33
|
[
"PSF-2.0"
] | null | null | null |
"""
Unit testing utilities for cli-toolkit
"""
| 11.75
| 38
| 0.702128
| 6
| 47
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148936
| 47
| 3
| 39
| 15.666667
| 0.825
| 0.808511
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
1675ea3cb14efbd54dd264b56885fd47e00fd832
| 106
|
py
|
Python
|
pystac/models/__init__.py
|
geobeyond/py-stac
|
f15f3511737c35fca27da21b38a98960e21f4293
|
[
"Apache-2.0"
] | null | null | null |
pystac/models/__init__.py
|
geobeyond/py-stac
|
f15f3511737c35fca27da21b38a98960e21f4293
|
[
"Apache-2.0"
] | null | null | null |
pystac/models/__init__.py
|
geobeyond/py-stac
|
f15f3511737c35fca27da21b38a98960e21f4293
|
[
"Apache-2.0"
] | null | null | null |
from .asset import Asset
from .item import Item
from .link import Link
from .properties import Properties
| 21.2
| 34
| 0.811321
| 16
| 106
| 5.375
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.150943
| 106
| 4
| 35
| 26.5
| 0.955556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
16a98307cd93bbd4adf4ccec212b1f7a2f14ae74
| 40
|
py
|
Python
|
flagger/exception.py
|
bbenabbes/flagger
|
d2ff30821b4fdef008f6e6ae6b4fb752e40c2d7e
|
[
"MIT"
] | 1
|
2019-05-15T11:07:27.000Z
|
2019-05-15T11:07:27.000Z
|
flagger/exception.py
|
bbenabbes/flagger
|
d2ff30821b4fdef008f6e6ae6b4fb752e40c2d7e
|
[
"MIT"
] | null | null | null |
flagger/exception.py
|
bbenabbes/flagger
|
d2ff30821b4fdef008f6e6ae6b4fb752e40c2d7e
|
[
"MIT"
] | 1
|
2020-01-03T10:14:56.000Z
|
2020-01-03T10:14:56.000Z
|
class FlaggerError(Exception):
pass
| 13.333333
| 30
| 0.75
| 4
| 40
| 7.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.175
| 40
| 2
| 31
| 20
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
bc4cbbda9a587e154bdcd6b9d5f851ad3d51d31d
| 841
|
py
|
Python
|
dplex.py
|
Dream7-Kim/graduation_code
|
be1808f90589c08d7283a8e12f52e22a5749c27d
|
[
"MIT"
] | null | null | null |
dplex.py
|
Dream7-Kim/graduation_code
|
be1808f90589c08d7283a8e12f52e22a5749c27d
|
[
"MIT"
] | null | null | null |
dplex.py
|
Dream7-Kim/graduation_code
|
be1808f90589c08d7283a8e12f52e22a5749c27d
|
[
"MIT"
] | null | null | null |
import jax.numpy as np
import logging
def deinsum(subscript, aa, bb):
real = np.einsum(subscript, aa[0], bb[0]) - np.einsum(subscript, aa[1], bb[1])
imag = np.einsum(subscript, aa[0], bb[1]) + np.einsum(subscript, aa[1], bb[0])
return np.stack([real, imag], axis=0)
def deinsum_ord(subscript, aa, bb):
real = np.einsum(subscript, aa, bb[0])
imag = np.einsum(subscript, aa, bb[1])
return np.stack([real, imag], axis=0)
def dabs(aa):
return aa[0]**2 + aa[1]**2  # arrays are stacked vertically, so aa[0] is the first row (the real part)
def dconj(aa):
# conjugate: negate the imaginary row (arrays here are stacked [real, imag])
return np.stack([aa[0], -aa[1]], axis=0)
def dtomine(aa):
return np.stack([np.real(aa), np.imag(aa)], axis=0)
def dconstruct(aa, bb):
return np.stack([aa, bb], axis=0)  # stack the two arrays vertically
def ddivide(a, bb):
real = a * bb[0] / dabs(bb)
imag = -a * bb[1] / dabs(bb)
return np.stack([real, imag], axis=0)
| 29
| 82
| 0.612366
| 149
| 841
| 3.449664
| 0.214765
| 0.171206
| 0.198444
| 0.22179
| 0.498054
| 0.437743
| 0.303502
| 0.252918
| 0
| 0
| 0
| 0.035139
| 0.187872
| 841
| 29
| 83
| 29
| 0.717423
| 0.029727
| 0
| 0.136364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.318182
| false
| 0
| 0.090909
| 0.181818
| 0.727273
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
bc68ca7afbfdf0fb3c5a9889f8e42dcfb454d170
| 182
|
py
|
Python
|
pyleus/__init__.py
|
dapuck/pyleus
|
f5c4a06cf8351d0c6bc28b07edbe99025455409c
|
[
"Apache-2.0"
] | 166
|
2015-01-14T16:06:37.000Z
|
2021-11-15T12:17:11.000Z
|
pyleus/__init__.py
|
WenbinTan/pyleus
|
8ab87e2d18b8b6a7e0471ceefdbb3ff23a576cce
|
[
"Apache-2.0"
] | 105
|
2015-01-16T19:59:06.000Z
|
2016-05-13T19:40:45.000Z
|
pyleus/__init__.py
|
WenbinTan/pyleus
|
8ab87e2d18b8b6a7e0471ceefdbb3ff23a576cce
|
[
"Apache-2.0"
] | 62
|
2015-01-19T07:42:24.000Z
|
2021-06-05T21:02:09.000Z
|
from __future__ import absolute_import
import pkg_resources
__version__ = '0.3.0'
BASE_JAR = "pyleus-base.jar"
BASE_JAR_PATH = pkg_resources.resource_filename('pyleus', BASE_JAR)
| 20.222222
| 67
| 0.802198
| 27
| 182
| 4.814815
| 0.555556
| 0.215385
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018405
| 0.104396
| 182
| 8
| 68
| 22.75
| 0.779141
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
bcb715d3c6b6fa6d597e8d6b17001f53b6a1ae57
| 11,165
|
py
|
Python
|
sdk/lusid/models/__init__.py
|
mneedham/lusid-sdk-python
|
edabec16b357ba3fc48a53f3faacb4f94b18843e
|
[
"MIT"
] | null | null | null |
sdk/lusid/models/__init__.py
|
mneedham/lusid-sdk-python
|
edabec16b357ba3fc48a53f3faacb4f94b18843e
|
[
"MIT"
] | null | null | null |
sdk/lusid/models/__init__.py
|
mneedham/lusid-sdk-python
|
edabec16b357ba3fc48a53f3faacb4f94b18843e
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# flake8: noqa
"""
LUSID API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 0.11.2808
Contact: info@finbourne.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
# import models into model package
from lusid.models.access_controlled_action import AccessControlledAction
from lusid.models.access_controlled_resource import AccessControlledResource
from lusid.models.action_id import ActionId
from lusid.models.adjust_holding import AdjustHolding
from lusid.models.adjust_holding_request import AdjustHoldingRequest
from lusid.models.annul_quotes_response import AnnulQuotesResponse
from lusid.models.change import Change
from lusid.models.complete_portfolio import CompletePortfolio
from lusid.models.complete_relation import CompleteRelation
from lusid.models.constituents_adjustment_header import ConstituentsAdjustmentHeader
from lusid.models.create_cut_label_definition_request import CreateCutLabelDefinitionRequest
from lusid.models.create_derived_transaction_portfolio_request import CreateDerivedTransactionPortfolioRequest
from lusid.models.create_portfolio_details import CreatePortfolioDetails
from lusid.models.create_portfolio_group_request import CreatePortfolioGroupRequest
from lusid.models.create_property_definition_request import CreatePropertyDefinitionRequest
from lusid.models.create_reference_portfolio_request import CreateReferencePortfolioRequest
from lusid.models.create_relation_definition_request import CreateRelationDefinitionRequest
from lusid.models.create_relation_request import CreateRelationRequest
from lusid.models.create_transaction_portfolio_request import CreateTransactionPortfolioRequest
from lusid.models.currency_and_amount import CurrencyAndAmount
from lusid.models.cut_label_definition import CutLabelDefinition
from lusid.models.cut_local_time import CutLocalTime
from lusid.models.data_type import DataType
from lusid.models.date_range import DateRange
from lusid.models.delete_instrument_response import DeleteInstrumentResponse
from lusid.models.delete_relation_request import DeleteRelationRequest
from lusid.models.deleted_entity_response import DeletedEntityResponse
from lusid.models.error_detail import ErrorDetail
from lusid.models.execution_request import ExecutionRequest
from lusid.models.expanded_group import ExpandedGroup
from lusid.models.file_response import FileResponse
from lusid.models.get_instruments_response import GetInstrumentsResponse
from lusid.models.get_quotes_response import GetQuotesResponse
from lusid.models.get_reference_portfolio_constituents_response import GetReferencePortfolioConstituentsResponse
from lusid.models.holding_adjustment import HoldingAdjustment
from lusid.models.holdings_adjustment import HoldingsAdjustment
from lusid.models.holdings_adjustment_header import HoldingsAdjustmentHeader
from lusid.models.i_unit_definition_dto import IUnitDefinitionDto
from lusid.models.id_selector_definition import IdSelectorDefinition
from lusid.models.identifier_part_schema import IdentifierPartSchema
from lusid.models.instrument import Instrument
from lusid.models.instrument_definition import InstrumentDefinition
from lusid.models.instrument_id_type_descriptor import InstrumentIdTypeDescriptor
from lusid.models.instrument_id_value import InstrumentIdValue
from lusid.models.label_value_set import LabelValueSet
from lusid.models.legal_entity import LegalEntity
from lusid.models.link import Link
from lusid.models.lusid_instrument import LusidInstrument
from lusid.models.lusid_problem_details import LusidProblemDetails
from lusid.models.lusid_validation_problem_details import LusidValidationProblemDetails
from lusid.models.metric_value import MetricValue
from lusid.models.model_property import ModelProperty
from lusid.models.output_transaction import OutputTransaction
from lusid.models.paged_resource_list_of_cut_label_definition import PagedResourceListOfCutLabelDefinition
from lusid.models.paged_resource_list_of_instrument import PagedResourceListOfInstrument
from lusid.models.paged_resource_list_of_legal_entity import PagedResourceListOfLegalEntity
from lusid.models.paged_resource_list_of_portfolio_group_search_result import PagedResourceListOfPortfolioGroupSearchResult
from lusid.models.paged_resource_list_of_portfolio_search_result import PagedResourceListOfPortfolioSearchResult
from lusid.models.paged_resource_list_of_property_definition_search_result import PagedResourceListOfPropertyDefinitionSearchResult
from lusid.models.perpetual_property import PerpetualProperty
from lusid.models.portfolio import Portfolio
from lusid.models.portfolio_cash_flow import PortfolioCashFlow
from lusid.models.portfolio_details import PortfolioDetails
from lusid.models.portfolio_group import PortfolioGroup
from lusid.models.portfolio_group_properties import PortfolioGroupProperties
from lusid.models.portfolio_group_search_result import PortfolioGroupSearchResult
from lusid.models.portfolio_holding import PortfolioHolding
from lusid.models.portfolio_properties import PortfolioProperties
from lusid.models.portfolio_reconciliation_request import PortfolioReconciliationRequest
from lusid.models.portfolio_search_result import PortfolioSearchResult
from lusid.models.portfolios_reconciliation_request import PortfoliosReconciliationRequest
from lusid.models.processed_command import ProcessedCommand
from lusid.models.property_definition import PropertyDefinition
from lusid.models.property_definition_search_result import PropertyDefinitionSearchResult
from lusid.models.property_interval import PropertyInterval
from lusid.models.property_value import PropertyValue
from lusid.models.quote import Quote
from lusid.models.quote_id import QuoteId
from lusid.models.quote_series_id import QuoteSeriesId
from lusid.models.realised_gain_loss import RealisedGainLoss
from lusid.models.reconciliation_break import ReconciliationBreak
from lusid.models.reference_portfolio_constituent import ReferencePortfolioConstituent
from lusid.models.reference_portfolio_constituent_request import ReferencePortfolioConstituentRequest
from lusid.models.relation import Relation
from lusid.models.relation_definition import RelationDefinition
from lusid.models.resource_id import ResourceId
from lusid.models.resource_list_of_access_controlled_resource import ResourceListOfAccessControlledResource
from lusid.models.resource_list_of_change import ResourceListOfChange
from lusid.models.resource_list_of_constituents_adjustment_header import ResourceListOfConstituentsAdjustmentHeader
from lusid.models.resource_list_of_data_type import ResourceListOfDataType
from lusid.models.resource_list_of_holdings_adjustment_header import ResourceListOfHoldingsAdjustmentHeader
from lusid.models.resource_list_of_i_unit_definition_dto import ResourceListOfIUnitDefinitionDto
from lusid.models.resource_list_of_instrument_id_type_descriptor import ResourceListOfInstrumentIdTypeDescriptor
from lusid.models.resource_list_of_portfolio import ResourceListOfPortfolio
from lusid.models.resource_list_of_portfolio_cash_flow import ResourceListOfPortfolioCashFlow
from lusid.models.resource_list_of_portfolio_group import ResourceListOfPortfolioGroup
from lusid.models.resource_list_of_portfolio_search_result import ResourceListOfPortfolioSearchResult
from lusid.models.resource_list_of_processed_command import ResourceListOfProcessedCommand
from lusid.models.resource_list_of_property_definition import ResourceListOfPropertyDefinition
from lusid.models.resource_list_of_property_interval import ResourceListOfPropertyInterval
from lusid.models.resource_list_of_quote import ResourceListOfQuote
from lusid.models.resource_list_of_reconciliation_break import ResourceListOfReconciliationBreak
from lusid.models.resource_list_of_relation import ResourceListOfRelation
from lusid.models.resource_list_of_scope_definition import ResourceListOfScopeDefinition
from lusid.models.scope_definition import ScopeDefinition
from lusid.models.side_configuration_data import SideConfigurationData
from lusid.models.stream import Stream
from lusid.models.target_tax_lot import TargetTaxLot
from lusid.models.target_tax_lot_request import TargetTaxLotRequest
from lusid.models.transaction import Transaction
from lusid.models.transaction_configuration_data import TransactionConfigurationData
from lusid.models.transaction_configuration_data_request import TransactionConfigurationDataRequest
from lusid.models.transaction_configuration_movement_data import TransactionConfigurationMovementData
from lusid.models.transaction_configuration_movement_data_request import TransactionConfigurationMovementDataRequest
from lusid.models.transaction_configuration_type_alias import TransactionConfigurationTypeAlias
from lusid.models.transaction_price import TransactionPrice
from lusid.models.transaction_property_mapping import TransactionPropertyMapping
from lusid.models.transaction_property_mapping_request import TransactionPropertyMappingRequest
from lusid.models.transaction_query_parameters import TransactionQueryParameters
from lusid.models.transaction_request import TransactionRequest
from lusid.models.transaction_set_configuration_data import TransactionSetConfigurationData
from lusid.models.update_cut_label_definition_request import UpdateCutLabelDefinitionRequest
from lusid.models.update_instrument_identifier_request import UpdateInstrumentIdentifierRequest
from lusid.models.update_portfolio_group_request import UpdatePortfolioGroupRequest
from lusid.models.update_portfolio_request import UpdatePortfolioRequest
from lusid.models.update_property_definition_request import UpdatePropertyDefinitionRequest
from lusid.models.upsert_instrument_properties_response import UpsertInstrumentPropertiesResponse
from lusid.models.upsert_instrument_property_request import UpsertInstrumentPropertyRequest
from lusid.models.upsert_instruments_response import UpsertInstrumentsResponse
from lusid.models.upsert_legal_entity_request import UpsertLegalEntityRequest
from lusid.models.upsert_portfolio_executions_response import UpsertPortfolioExecutionsResponse
from lusid.models.upsert_portfolio_transactions_response import UpsertPortfolioTransactionsResponse
from lusid.models.upsert_quote_request import UpsertQuoteRequest
from lusid.models.upsert_quotes_response import UpsertQuotesResponse
from lusid.models.upsert_reference_portfolio_constituents_request import UpsertReferencePortfolioConstituentsRequest
from lusid.models.upsert_reference_portfolio_constituents_response import UpsertReferencePortfolioConstituentsResponse
from lusid.models.upsert_transaction_properties_response import UpsertTransactionPropertiesResponse
from lusid.models.user import User
from lusid.models.version import Version
from lusid.models.version_summary_dto import VersionSummaryDto
from lusid.models.versioned_resource_list_of_output_transaction import VersionedResourceListOfOutputTransaction
from lusid.models.versioned_resource_list_of_portfolio_holding import VersionedResourceListOfPortfolioHolding
from lusid.models.versioned_resource_list_of_transaction import VersionedResourceListOfTransaction
| 69.347826
| 131
| 0.915092
| 1,242
| 11,165
| 7.94525
| 0.219002
| 0.130422
| 0.217369
| 0.044285
| 0.255371
| 0.147953
| 0.081273
| 0.008715
| 0
| 0
| 0
| 0.00114
| 0.056964
| 11,165
| 160
| 132
| 69.78125
| 0.936081
| 0.020331
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
bcb7571f4904175d49f4329483b1baabf1cc4fce
| 214
|
py
|
Python
|
timeline/dbindexes.py
|
asrashley/ieee-802-11-timeline
|
b4375dbde023dee214642e18c09318e9383a2bcf
|
[
"Apache-2.0"
] | null | null | null |
timeline/dbindexes.py
|
asrashley/ieee-802-11-timeline
|
b4375dbde023dee214642e18c09318e9383a2bcf
|
[
"Apache-2.0"
] | null | null | null |
timeline/dbindexes.py
|
asrashley/ieee-802-11-timeline
|
b4375dbde023dee214642e18c09318e9383a2bcf
|
[
"Apache-2.0"
] | 1
|
2020-06-01T07:46:12.000Z
|
2020-06-01T07:46:12.000Z
|
from models import DenormalizedProjectBallots
from dbindexer.lookups import StandardLookup
from dbindexer.api import register_index
#register_index(DenormalizedProjectBallots, {'project__pk': StandardLookup(),
#})
| 35.666667
| 77
| 0.85514
| 21
| 214
| 8.52381
| 0.571429
| 0.145251
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074766
| 214
| 6
| 78
| 35.666667
| 0.90404
| 0.364486
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
bcbb11d9e39ab6341c66fb1dc0f07d1556cf7992
| 8,570
|
py
|
Python
|
raiden_contracts/tests/test_channel_close.py
|
anmolshl/raiden-contracts
|
fd752a1e7ef2a77ce90a1a6cf87cebcca66d5038
|
[
"MIT"
] | null | null | null |
raiden_contracts/tests/test_channel_close.py
|
anmolshl/raiden-contracts
|
fd752a1e7ef2a77ce90a1a6cf87cebcca66d5038
|
[
"MIT"
] | null | null | null |
raiden_contracts/tests/test_channel_close.py
|
anmolshl/raiden-contracts
|
fd752a1e7ef2a77ce90a1a6cf87cebcca66d5038
|
[
"MIT"
] | null | null | null |
import pytest
from eth_tester.exceptions import TransactionFailed
from raiden_contracts.constants import (
TEST_SETTLE_TIMEOUT_MIN,
EVENT_CHANNEL_CLOSED,
CHANNEL_STATE_NONEXISTENT,
CHANNEL_STATE_SETTLED,
CHANNEL_STATE_OPENED,
CHANNEL_STATE_CLOSED,
)
from raiden_contracts.utils.events import check_channel_closed
from .fixtures.config import fake_bytes, fake_hex
def test_close_nonexistent_channel(
token_network,
get_accounts,
):
(A, B) = get_accounts(2)
(_, settle_block_number, state) = token_network.functions.getChannelInfo(A, B).call()
assert state == CHANNEL_STATE_NONEXISTENT
assert settle_block_number == 0
with pytest.raises(TransactionFailed):
token_network.functions.closeChannel(
B,
fake_bytes(32),
0,
fake_bytes(32),
fake_bytes(64),
).transact({'from': A})
def test_close_settled_channel(
web3,
token_network,
create_channel,
channel_deposit,
get_accounts,
):
(A, B) = get_accounts(2)
create_channel(A, B, TEST_SETTLE_TIMEOUT_MIN)
channel_deposit(A, 5, B)
(_, _, state) = token_network.functions.getChannelInfo(A, B).call()
assert state == CHANNEL_STATE_OPENED
token_network.functions.closeChannel(
B,
fake_bytes(32),
0,
fake_bytes(32),
fake_bytes(64),
).transact({'from': A})
web3.testing.mine(TEST_SETTLE_TIMEOUT_MIN)
token_network.functions.settleChannel(
A,
0,
0,
fake_bytes(32),
B,
0,
0,
fake_bytes(32),
).transact({'from': A})
(_, settle_block_number, state) = token_network.functions.getChannelInfo(A, B).call()
assert state == CHANNEL_STATE_SETTLED
assert settle_block_number == 0
with pytest.raises(TransactionFailed):
token_network.functions.closeChannel(
B,
fake_bytes(32),
0,
fake_bytes(32),
fake_bytes(64),
).transact({'from': A})
def test_close_wrong_signature(
token_network,
create_channel,
channel_deposit,
get_accounts,
create_balance_proof,
):
(A, B, C) = get_accounts(3)
deposit_A = 6
transferred_amount = 5
nonce = 3
locksroot = fake_hex(32, '03')
channel_identifier = create_channel(A, B)[0]
channel_deposit(A, deposit_A, B)
# Create balance proofs
balance_proof = create_balance_proof(
channel_identifier,
C,
transferred_amount,
0,
nonce,
locksroot,
)
with pytest.raises(TransactionFailed):
token_network.functions.closeChannel(B, *balance_proof).transact({'from': A})
def test_close_call_twice_fail(
token_network,
create_channel,
channel_deposit,
get_accounts,
):
(A, B) = get_accounts(2)
create_channel(A, B)
channel_deposit(A, 5, B)
token_network.functions.closeChannel(
B,
fake_bytes(32),
0,
fake_bytes(32),
fake_bytes(64),
).transact({'from': A})
with pytest.raises(TransactionFailed):
token_network.functions.closeChannel(
B,
fake_bytes(32),
0,
fake_bytes(32),
fake_bytes(64),
).transact({'from': A})
def test_close_wrong_sender(
token_network,
create_channel,
channel_deposit,
get_accounts,
):
(A, B, C) = get_accounts(3)
create_channel(A, B)
channel_deposit(A, 5, B)
with pytest.raises(TransactionFailed):
token_network.functions.closeChannel(
B,
fake_bytes(32),
0,
fake_bytes(32),
fake_bytes(64),
).transact({'from': C})
def test_close_first_argument_is_for_partner_transfer(
token_network,
create_channel,
get_accounts,
create_balance_proof,
):
(A, B) = get_accounts(2)
# Create channel
channel_identifier = create_channel(A, B, settle_timeout=TEST_SETTLE_TIMEOUT_MIN)[0]
# Create balance proofs
balance_proof = create_balance_proof(
channel_identifier,
B,
)
# closeChannel fails, if the provided balance proof is from the same participant who closes
with pytest.raises(TransactionFailed):
token_network.functions.closeChannel(B, *balance_proof).transact({'from': B})
# Else, closeChannel works with this balance proof
token_network.functions.closeChannel(B, *balance_proof).transact({'from': A})
def test_close_first_participant_can_close(
token_network,
create_channel,
get_accounts,
):
(A, B) = get_accounts(2)
create_channel(A, B)
token_network.functions.closeChannel(
B,
fake_bytes(32),
0,
fake_bytes(32),
fake_bytes(64),
).transact({'from': A})
def test_close_second_participant_can_close(
token_network,
create_channel,
get_accounts,
):
(A, B) = get_accounts(2)
create_channel(A, B)
token_network.functions.closeChannel(
A,
fake_bytes(32),
0,
fake_bytes(32),
fake_bytes(64),
).transact({'from': B})
def test_close_channel_state(
token_network,
create_channel,
channel_deposit,
get_accounts,
get_block,
create_balance_proof,
):
(A, B) = get_accounts(2)
settle_timeout = TEST_SETTLE_TIMEOUT_MIN
deposit_A = 20
transferred_amount = 5
nonce = 3
locksroot = fake_hex(32, '03')
# Create channel and deposit
channel_identifier = create_channel(A, B, settle_timeout)[0]
channel_deposit(A, deposit_A, B)
# Create balance proofs
balance_proof = create_balance_proof(
channel_identifier,
B,
transferred_amount,
0,
nonce,
locksroot,
)
(_, settle_block_number, state) = token_network.functions.getChannelInfo(A, B).call()
assert settle_block_number == settle_timeout
assert state == CHANNEL_STATE_OPENED
(
_, _,
A_is_the_closer,
A_balance_hash,
A_nonce,
) = token_network.functions.getChannelParticipantInfo(A, B).call()
assert A_is_the_closer is False
assert A_balance_hash == fake_bytes(32)
assert A_nonce == 0
txn_hash = token_network.functions.closeChannel(B, *balance_proof).transact({'from': A})
(_, settle_block_number, state) = token_network.functions.getChannelInfo(A, B).call()
assert settle_block_number == settle_timeout + get_block(txn_hash)
assert state == CHANNEL_STATE_CLOSED
(
_, _,
A_is_the_closer,
A_balance_hash,
A_nonce,
) = token_network.functions.getChannelParticipantInfo(A, B).call()
assert A_is_the_closer is True
assert A_balance_hash == fake_bytes(32)
assert A_nonce == 0
(
_, _,
B_is_the_closer,
B_balance_hash,
B_nonce,
) = token_network.functions.getChannelParticipantInfo(B, A).call()
assert B_is_the_closer is False
assert B_balance_hash == balance_proof[0]
assert B_nonce == nonce
def test_close_channel_event_no_offchain_transfers(
get_accounts,
token_network,
create_channel,
event_handler,
):
ev_handler = event_handler(token_network)
(A, B) = get_accounts(2)
channel_identifier = create_channel(A, B)[0]
# No off-chain transfers have occurred
# There is no signature data here, because it was never provided to A
txn_hash = token_network.functions.closeChannel(
B,
fake_bytes(32),
0,
fake_bytes(32),
fake_bytes(64),
).transact({'from': A})
ev_handler.add(txn_hash, EVENT_CHANNEL_CLOSED, check_channel_closed(channel_identifier, A))
ev_handler.check()
def test_close_channel_event(
get_accounts,
token_network,
create_channel,
channel_deposit,
create_balance_proof,
event_handler,
):
ev_handler = event_handler(token_network)
(A, B) = get_accounts(2)
deposit_A = 10
channel_identifier = create_channel(A, B)[0]
channel_deposit(A, deposit_A, B)
balance_proof = create_balance_proof(channel_identifier, B, 5, 0, 3)
txn_hash = token_network.functions.closeChannel(B, *balance_proof).transact({'from': A})
ev_handler.add(txn_hash, EVENT_CHANNEL_CLOSED, check_channel_closed(channel_identifier, A))
ev_handler.check()
| 25.58209
| 95
| 0.639907
| 1,022
| 8,570
| 5.021526
| 0.118395
| 0.084178
| 0.094115
| 0.090023
| 0.802611
| 0.755651
| 0.712977
| 0.691738
| 0.632892
| 0.623539
| 0
| 0.019542
| 0.265578
| 8,570
| 334
| 96
| 25.658683
| 0.795837
| 0.04084
| 0
| 0.774074
| 0
| 0
| 0.007795
| 0
| 0
| 0
| 0
| 0
| 0.066667
| 1
| 0.040741
| false
| 0
| 0.018519
| 0
| 0.059259
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
4c7190701e0b1bec12530d62480fcec0b3bbdc47
| 32
|
py
|
Python
|
brotab/tests/mocks.py
|
craigevil/brotab
|
9d21332af716b73a8bc5dee90b7ea26baeb99a7c
|
[
"MIT"
] | 3
|
2022-03-30T01:29:07.000Z
|
2022-03-30T10:41:36.000Z
|
brotab/tests/mocks.py
|
craigevil/brotab
|
9d21332af716b73a8bc5dee90b7ea26baeb99a7c
|
[
"MIT"
] | null | null | null |
brotab/tests/mocks.py
|
craigevil/brotab
|
9d21332af716b73a8bc5dee90b7ea26baeb99a7c
|
[
"MIT"
] | null | null | null |
class BrowserPortMock:
pass
| 10.666667
| 22
| 0.75
| 3
| 32
| 8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.21875
| 32
| 2
| 23
| 16
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
4c7801deb524a1c11b2e33ff460041710f97750c
| 199
|
py
|
Python
|
tests/__init__.py
|
ekhtiar/airflow
|
9410715c81a2a16dcd04e7cce56d75747bb19ff6
|
[
"Apache-2.0"
] | null | null | null |
tests/__init__.py
|
ekhtiar/airflow
|
9410715c81a2a16dcd04e7cce56d75747bb19ff6
|
[
"Apache-2.0"
] | null | null | null |
tests/__init__.py
|
ekhtiar/airflow
|
9410715c81a2a16dcd04e7cce56d75747bb19ff6
|
[
"Apache-2.0"
] | 1
|
2019-12-12T06:44:14.000Z
|
2019-12-12T06:44:14.000Z
|
from __future__ import absolute_import
from .configuration import *
from .core import *
from .jobs import *
from .models import *
from .operators import *
from .contrib import *
from .utils import *
| 22.111111
| 38
| 0.768844
| 26
| 199
| 5.692308
| 0.423077
| 0.472973
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.160804
| 199
| 8
| 39
| 24.875
| 0.886228
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
d5e4ed175f3d8cf00270349bde7092b3e63d44da
| 43
|
py
|
Python
|
models/cells/PC2001Miyasho/__init__.py
|
HarshKhilawala/cerebmodels
|
d2a2f2ef947ef9dc23ddce6e55159240cd3233cb
|
[
"BSD-3-Clause"
] | null | null | null |
models/cells/PC2001Miyasho/__init__.py
|
HarshKhilawala/cerebmodels
|
d2a2f2ef947ef9dc23ddce6e55159240cd3233cb
|
[
"BSD-3-Clause"
] | 9
|
2020-03-24T17:09:03.000Z
|
2021-05-17T16:11:17.000Z
|
models/cells/PC2001Miyasho/__init__.py
|
myHBPwork/cerebmodels
|
371ea7f1bbe388f1acade17c7128b8ca6ab8fb7a
|
[
"BSD-3-Clause"
] | 1
|
2021-05-21T03:08:41.000Z
|
2021-05-21T03:08:41.000Z
|
# ~/models/cells/PC2001Miyasho/__init__.py
| 21.5
| 42
| 0.790698
| 5
| 43
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097561
| 0.046512
| 43
| 1
| 43
| 43
| 0.634146
| 0.930233
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
910f33eefb3f8b53f4490be32eb382248cee0c0c
| 3,913
|
py
|
Python
|
ai/causalcell/models/translation_models.py
|
Bertinus/causal_cell_embedding
|
417b55749130fc7b7832fd3ee4c49feff4a04593
|
[
"MIT"
] | null | null | null |
ai/causalcell/models/translation_models.py
|
Bertinus/causal_cell_embedding
|
417b55749130fc7b7832fd3ee4c49feff4a04593
|
[
"MIT"
] | null | null | null |
ai/causalcell/models/translation_models.py
|
Bertinus/causal_cell_embedding
|
417b55749130fc7b7832fd3ee4c49feff4a04593
|
[
"MIT"
] | null | null | null |
from ai.causalcell.models.utils import *
import ai.causalcell.utils.register as register
import torch
from ai.causalcell.models.autoencoder import VariationalAutoEncoder
import torch.nn as nn
import copy
@register.setmodelname('env_trans_VAE')
class TranslationVariationalAutoEncoder(VariationalAutoEncoder):
"""
VAE that adapts to each environment by translating one of the latent variables.
The prior in latent space depends on the translation (after the translation is applied)
"""
def __init__(self, enc_layers, dec_layers, aux_layers, optimizer_params,
beta=1, dropout=0, norm='none', softmax=True, temperature=1):
"""
:param softmax: if True, a softmax is used to normalize env_mu so that the absolute values of env_mu sum to 1
:param temperature: temperature of the softmax
"""
self.enc_layers = copy.deepcopy(enc_layers)
self.dec_layers = copy.deepcopy(dec_layers)
self.enc_layers[0] += aux_layers[0] # The encoder has to take the fingerprint as input
super(TranslationVariationalAutoEncoder, self).__init__(self.enc_layers, self.dec_layers, optimizer_params,
beta=beta, dropout=dropout, norm=norm)
if softmax:
self.softmax = nn.Softmax(dim=1)
self.temperature = temperature
else:
if temperature != 1:
print("If Softmax is False, Temperature is set to 1")
self.temperature = 1
self.softmax = Dummy()
self.env_prior_mu = LinearLayers(layers=aux_layers, dropout=dropout, norm=norm, activate_final=False)
def forward(self, x, fingerprint, compound=0, line=0):
mu, logvar = self.embed(torch.cat((x, fingerprint), dim=1))
env_mu = self.env_prior_mu(fingerprint)
alpha = self.softmax(1 / self.temperature * torch.abs(env_mu))
env_mu = alpha * env_mu # Make mu sparse
z = self.reparameterize(mu, logvar)
x_prime = self.decoder(z + env_mu)
return {'z': z, 'x_prime': x_prime, 'x': x, 'mu': mu, 'logvar': logvar,
'env_mu': env_mu}
@register.setmodelname('no_env_input_trans_VAE')
class NoEnvInputTranslationVariationalAutoEncoder(VariationalAutoEncoder):
"""
VAE that adapts to each environment by translating one of the latent variables.
The prior in latent space depends on the translation (after the translation is applied)
"""
def __init__(self, enc_layers, dec_layers, aux_layers, beta=1, dropout=0, norm='none', softmax=True,
temperature=1):
"""
:param softmax: if True, a softmax is used to normalize env_mu so that the absolute values of env_mu sum to 1
:param temperature: temperature of the softmax
"""
super(NoEnvInputTranslationVariationalAutoEncoder, self).__init__(enc_layers, dec_layers,
beta=beta, dropout=dropout, norm=norm)
if softmax:
self.softmax = nn.Softmax(dim=1)
self.temperature = temperature
else:
if temperature != 1:
print("If Softmax is False, Temperature is set to 1")
self.temperature = 1
self.softmax = Dummy()
self.env_prior_mu = LinearLayers(layers=aux_layers, dropout=dropout, norm=norm, activate_final=False)
def forward(self, x, fingerprint, compound=0, line=0):
mu, logvar = self.embed(x)
env_mu = self.env_prior_mu(fingerprint)
alpha = self.softmax(1 / self.temperature * torch.abs(env_mu))
env_mu = alpha * env_mu # Make mu sparse
z = self.reparameterize(mu, logvar)
x_prime = self.decoder(z + env_mu)
return {'z': z, 'x_prime': x_prime, 'x': x, 'mu': mu, 'logvar': logvar,
'env_mu': env_mu}
| 46.035294
| 117
| 0.639152
| 494
| 3,913
| 4.908907
| 0.206478
| 0.037113
| 0.039588
| 0.036289
| 0.745567
| 0.727423
| 0.727423
| 0.727423
| 0.727423
| 0.727423
| 0
| 0.008729
| 0.268081
| 3,913
| 84
| 118
| 46.583333
| 0.837989
| 0.186302
| 0
| 0.631579
| 0
| 0
| 0.057449
| 0.007141
| 0
| 0
| 0
| 0
| 0
| 1
| 0.070175
| false
| 0
| 0.105263
| 0
| 0.245614
| 0.122807
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
911cb0825001751b3234282d0109171076d7c383
| 2,574
|
py
|
Python
|
pytils/test/test_utils.py
|
yamnikov-oleg/pytils
|
d09ecdfca203aa67814487fb07792a000995b072
|
[
"MIT"
] | null | null | null |
pytils/test/test_utils.py
|
yamnikov-oleg/pytils
|
d09ecdfca203aa67814487fb07792a000995b072
|
[
"MIT"
] | null | null | null |
pytils/test/test_utils.py
|
yamnikov-oleg/pytils
|
d09ecdfca203aa67814487fb07792a000995b072
|
[
"MIT"
] | 1
|
2022-02-19T08:36:07.000Z
|
2022-02-19T08:36:07.000Z
|
# -*- coding: utf-8 -*-
"""
Unit-tests for pytils.utils
"""
import decimal
import unittest
import pytils
class ChecksTestCase(unittest.TestCase):
"""
Test case for check_* utils
"""
def testCheckLength(self):
"""
Unit-test for pytils.utils.check_length
"""
self.assertEqual(pytils.utils.check_length("var", 3), None)
self.assertRaises(ValueError, pytils.utils.check_length, "var", 4)
self.assertRaises(ValueError, pytils.utils.check_length, "var", 2)
self.assertRaises(ValueError, pytils.utils.check_length, (1,2), 3)
def testCheckPositive(self):
"""
Unit-test for pytils.utils.check_positive
"""
self.assertEqual(pytils.utils.check_positive(0), None)
self.assertEqual(pytils.utils.check_positive(1), None)
self.assertEqual(pytils.utils.check_positive(1, False), None)
self.assertEqual(pytils.utils.check_positive(1, strict=False), None)
self.assertEqual(pytils.utils.check_positive(1, True), None)
self.assertEqual(pytils.utils.check_positive(1, strict=True), None)
self.assertEqual(pytils.utils.check_positive(decimal.Decimal("2.0")), None)
self.assertEqual(pytils.utils.check_positive(2.0), None)
self.assertRaises(ValueError, pytils.utils.check_positive, -2)
self.assertRaises(ValueError, pytils.utils.check_positive, -2.0)
self.assertRaises(ValueError, pytils.utils.check_positive, decimal.Decimal("-2.0"))
self.assertRaises(ValueError, pytils.utils.check_positive, 0, True)
class SplitValuesTestCase(unittest.TestCase):
def testClassicSplit(self):
"""
Unit-test for pytils.utils.split_values, classic split
"""
self.assertEqual(("Раз", "Два", "Три"), pytils.utils.split_values("Раз,Два,Три"))
self.assertEqual(("Раз", "Два", "Три"), pytils.utils.split_values("Раз, Два,Три"))
self.assertEqual(("Раз", "Два", "Три"), pytils.utils.split_values(" Раз, Два, Три "))
self.assertEqual(("Раз", "Два", "Три"), pytils.utils.split_values(" Раз, \nДва,\n Три "))
def testEscapedSplit(self):
"""
Unit-test for pytils.utils.split_values, split with escaping
"""
self.assertEqual(("Раз,Два", "Три,Четыре", "Пять,Шесть"), pytils.utils.split_values("Раз\,Два,Три\,Четыре,Пять\,Шесть"))
self.assertEqual(("Раз, Два", "Три", "Четыре"), pytils.utils.split_values("Раз\, Два, Три, Четыре"))
if __name__ == '__main__':
unittest.main()
| 38.41791
| 128
| 0.651127
| 306
| 2,574
| 5.362745
| 0.179739
| 0.180987
| 0.175503
| 0.190128
| 0.801341
| 0.776356
| 0.703839
| 0.631322
| 0.287629
| 0.130408
| 0
| 0.011111
| 0.195804
| 2,574
| 66
| 129
| 39
| 0.781643
| 0.107226
| 0
| 0.090909
| 0
| 0
| 0.100321
| 0.014659
| 0
| 0
| 0
| 0
| 0.666667
| 1
| 0.121212
| false
| 0
| 0.090909
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
912e06e33a6a27013d2009e2907af76625e51bb0
| 28
|
py
|
Python
|
src/LuauAST/types/nodes.py
|
Roblox-py/roblox-py
|
64d6d129c5d6a8edb43410242333fe539d34a1d9
|
[
"MIT"
] | 2
|
2021-01-08T20:43:36.000Z
|
2021-05-24T14:31:20.000Z
|
src/LuauAST/types/nodes.py
|
Roblox-py/roblox-py
|
64d6d129c5d6a8edb43410242333fe539d34a1d9
|
[
"MIT"
] | null | null | null |
src/LuauAST/types/nodes.py
|
Roblox-py/roblox-py
|
64d6d129c5d6a8edb43410242333fe539d34a1d9
|
[
"MIT"
] | null | null | null |
import luau from 'LuauAST'
| 9.333333
| 26
| 0.75
| 4
| 28
| 5.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.178571
| 28
| 2
| 27
| 14
| 0.913043
| 0
| 0
| 0
| 0
| 0
| 0.259259
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 1
| null | null | 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
e66b274323ce013e38bd7639b7d4c83f71b1e91f
| 120
|
py
|
Python
|
bin/CoverView.py
|
RahmanTeamDevelopment/CoverView
|
ece798725ac9ff7dfd74fcf1daee62fb6aca0f69
|
[
"MIT"
] | 2
|
2018-04-10T07:57:09.000Z
|
2018-04-23T09:11:39.000Z
|
bin/CoverView.py
|
RahmanTeamDevelopment/CoverView
|
ece798725ac9ff7dfd74fcf1daee62fb6aca0f69
|
[
"MIT"
] | 22
|
2017-05-12T15:02:04.000Z
|
2020-06-17T09:14:28.000Z
|
bin/CoverView.py
|
RahmanTeamDevelopment/CoverView
|
ece798725ac9ff7dfd74fcf1daee62fb6aca0f69
|
[
"MIT"
] | null | null | null |
#!env/bin/python
import sys
import coverview_.main
if __name__ == "__main__":
coverview_.main.main(sys.argv[1:])
| 13.333333
| 38
| 0.708333
| 17
| 120
| 4.411765
| 0.647059
| 0.346667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009709
| 0.141667
| 120
| 8
| 39
| 15
| 0.718447
| 0.125
| 0
| 0
| 0
| 0
| 0.076923
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
e67bf8add2ad41c33a8611539dc4145ca9b9a12c
| 286
|
py
|
Python
|
pyvi/tests/utils.py
|
Julian/PyVi
|
5e9e087146e0baffbe791bacccbfd4b840cdeb5f
|
[
"MIT"
] | 3
|
2018-07-26T09:52:31.000Z
|
2019-07-02T14:29:31.000Z
|
pyvi/tests/utils.py
|
Julian/PyVi
|
5e9e087146e0baffbe791bacccbfd4b840cdeb5f
|
[
"MIT"
] | null | null | null |
pyvi/tests/utils.py
|
Julian/PyVi
|
5e9e087146e0baffbe791bacccbfd4b840cdeb5f
|
[
"MIT"
] | null | null | null |
class StubCursor(object):
column = 0
row = 0
def __iter__(self):
return iter([self.row, self.column])
@property
def coords(self):
return self.row, self.column
@coords.setter
def coords(self, coords):
self.row, self.column = coords
| 19.066667
| 44
| 0.601399
| 36
| 286
| 4.666667
| 0.361111
| 0.125
| 0.196429
| 0.303571
| 0.27381
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009804
| 0.286713
| 286
| 14
| 45
| 20.428571
| 0.813725
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.272727
| false
| 0
| 0
| 0.181818
| 0.727273
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
e699117a41937afdb0b4f4577b801c50bd7a19ec
| 11,349
|
py
|
Python
|
scripts/heatmap_fig/pathdip.py
|
hui2000ji/scETM
|
0a34c345d70b262ebc38e033bae683fa4929ed3e
|
[
"BSD-3-Clause"
] | 24
|
2021-07-09T12:59:31.000Z
|
2022-03-04T22:31:41.000Z
|
scripts/heatmap_fig/pathdip.py
|
hui2000ji/scETM
|
0a34c345d70b262ebc38e033bae683fa4929ed3e
|
[
"BSD-3-Clause"
] | 3
|
2021-09-07T11:14:19.000Z
|
2022-02-15T01:38:09.000Z
|
scripts/heatmap_fig/pathdip.py
|
hui2000ji/scETM
|
0a34c345d70b262ebc38e033bae683fa4929ed3e
|
[
"BSD-3-Clause"
] | 3
|
2021-12-02T23:44:37.000Z
|
2022-02-11T16:46:45.000Z
|
# Note: Works with Python 3 and up
import urllib.request, urllib.parse
import traceback  # needed for traceback.print_exc() in sendPost's error handler
# ########################################################################################################
# class library pathDIP_Http                                                                            #
# ########################################################################################################
class pathDIP_Http:
url = "http://ophid.utoronto.ca/pathDIP/Http_API"
map = {} # results will be here
def __init__(self):
return
def searchOnUniprot_IDs(self, IDs, component, sources):
self.sendPost('Uniprot ID', IDs, component, sources)
return
def searchOnGenesymbols(self, IDs, component, sources):
self.sendPost('Gene Symbol', IDs, component, sources)
return
def searchOnEntrez_IDs(self, IDs, component, sources):
self.sendPost('Egid', IDs, component, sources)
return
# .. serve POST request
def sendPost(self, typeChoice, IDs, component, sources):
params = {
'typeChoice' : typeChoice,
'IDs' : IDs,
'TableName' : component,
'DataSet' : sources}
params = bytes( urllib.parse.urlencode( params ).encode() )
response = ''
try:
handler = urllib.request.urlopen(self.url, params)
except Exception:
traceback.print_exc()
else:
self.response = handler.read().decode('utf-8')
## print(self.response)
self.makeMap()
return
def makeMap(self):
ENTRY_DEL = 0x01
KEY_DEL = 0x02
arr = self.response.split(chr(ENTRY_DEL))
for str in arr:
arrKeyValue = str.split(chr(KEY_DEL));
if len(arrKeyValue) > 1:
self.map[arrKeyValue[0]] = arrKeyValue[1]
return
def getGeneratedAt(self):
if "GeneratedAt" in self.map:
return self.map["GeneratedAt"]
else:
return ''
def getIDs(self):
if "IDs" in self.map:
return self.map["IDs"]
else:
return ''
def getDataComponent(self):
if "TableName" in self.map:
return self.map["TableName"]
else:
return ''
def getSources(self):
if "DataSet" in self.map:
return self.map["DataSet"]
else:
return ''
def getPathwayAnalysisSize(self):
if "SummarySize" in self.map:
return self.map["SummarySize"]
else:
return ''
def getPathwayAnalysis(self):
if "Summary" in self.map:
return self.map["Summary"]
else:
return ''
def getDetailsSize(self):
if "DetailsSize" in self.map:
return self.map["DetailsSize"]
else:
return ''
def getDetails(self):
if "Details" in self.map:
return self.map["Details"]
else:
return ''
#########################################################################################################
# Pick-up right sample for your search #
#########################################################################################################
# Note: Adjust 'ophid.utoronto.ca' if you are using our development server.
# ###########################################
# Example of search on Uniprot IDs #
# ###########################################
# Uniprot IDs
# - Comma delimited.
# - Mind case.
IDs = "O15379,P15692,P13236,P13236,P13236,Q96P48,O00468"
# Data component
# - Use only one of these five:
# Literature curated (core) pathway memberships
# Extended pathway associations. Protein interaction set: Experimentally detected PPIsMinimum confidence level for predicted associations: 0.99
# Extended pathway associations. Protein interaction set: Experimentally detected PPIsMinimum confidence level for predicted associations: 0.95
# Extended pathway associations. Protein interaction set: Experimentally detected and computationally predicted PPIs (full IID)Minimum confidence level for predicted associations: 0.99
# Extended pathway associations. Protein interaction set: Experimentally detected and computationally predicted PPIs (full IID)Minimum confidence level for predicted associations: 0.95
# - Mind exact spelling and spaces.
component = "Literature curated (core) pathway memberships"
# Data sources (Note: Data source list was updated for full list of 22 available sources)
# - Use some or all of those:
# ACSN2,BioCarta,EHMN,HumanCyc,INOH,IPAVS,KEGG,NetPath,OntoCancro,Panther_Pathway,PharmGKB,PID,RB-Pathways,REACTOME,stke,systems-biology.org,SignaLink2.0,SIGNOR2.0,SMPDB,Spike,UniProt_Pathways,WikiPathways
# - Comma delimited.
# - Mind exact spelling.
sources = "ACSN2,BioCarta,EHMN,HumanCyc,INOH,IPAVS,KEGG,NetPath,OntoCancro,Panther_Pathway,PharmGKB,PID,RB-Pathways,REACTOME,stke,systems-biology.org,SignaLink2.0,SIGNOR2.0,SMPDB,Spike,UniProt_Pathways,WikiPathways"
o = pathDIP_Http()
o.searchOnUniprot_IDs(IDs, component, sources)
# print results
print("\r\n Search on Uniprot IDs: \r\n")
print("Generated at: " + o.getGeneratedAt())
print("IDs: " + o.getIDs())
print("DataComponent: " + o.getDataComponent())
print("Sources: " + o.getSources())
print()
print("Pathway enrichment analysis results size: " + o.getPathwayAnalysisSize())
print("Pathway enrichment analysis results ('q-value (FDR: BH-method) less than 0.05'): \r\n" + o.getPathwayAnalysis()) # formatted as tab-delimited spreadsheet
print("Detailed table size: " + o.getDetailsSize())
print("Pathway annotations for the full input list: \r\n" + o.getDetails()) # formatted as tab-delimited spreadsheet
# ###########################################
# Example of search on Gene Symbols #
# ###########################################
# Gene Symbols
# - Use HUGO Gene nomenclature
# - Comma delimited.
# - Mind case.
IDs = "HDAC3, VEGFA, CCL4, DP141L, ARAP1, MARK4, ZZZ3, AGRN, MAPRKE1, CRABP1, HIST1H1C, RACGAPI"
# Data component
# - Use only one of these five:
# Literature curated (core) pathway memberships
# Extended pathway associations. Protein interaction set: Experimentally detected PPIsMinimum confidence level for predicted associations: 0.99
# Extended pathway associations. Protein interaction set: Experimentally detected PPIsMinimum confidence level for predicted associations: 0.95
# Extended pathway associations. Protein interaction set: Experimentally detected and computationally predicted PPIs (full IID)Minimum confidence level for predicted associations: 0.99
# Extended pathway associations. Protein interaction set: Experimentally detected and computationally predicted PPIs (full IID)Minimum confidence level for predicted associations: 0.95
# - Mind exact spelling and spaces.
component = "Literature curated (core) pathway memberships"
# Data sources (Note: Data source list was updated for full list of 22 available sources)
# - Use some or all of those:
# ACSN2,BioCarta,EHMN,HumanCyc,INOH,IPAVS,KEGG,NetPath,OntoCancro,Panther_Pathway,PharmGKB,PID,RB-Pathways,REACTOME,stke,systems-biology.org,SignaLink2.0,SIGNOR2.0,SMPDB,Spike,UniProt_Pathways,WikiPathways
# - Comma delimited.
# - Mind exact spelling.
sources = "ACSN2,BioCarta,EHMN,HumanCyc,INOH,IPAVS,KEGG,NetPath,OntoCancro,Panther_Pathway,PharmGKB,PID,RB-Pathways,REACTOME,stke,systems-biology.org,SignaLink2.0,SIGNOR2.0,SMPDB,Spike,UniProt_Pathways,WikiPathways"
o = pathDIP_Http()
o.searchOnGenesymbols(IDs, component, sources)
# print results
print("\r\n Search on Gene Symbols: \r\n")
print("Generated at: " + o.getGeneratedAt())
print("IDs: " + o.getIDs())
print("DataComponent: " + o.getDataComponent())
print("Sources: " + o.getSources())
print()
print("Pathway enrichment analysis results size: " + o.getPathwayAnalysisSize())
print("Pathway enrichment analysis results ('q-value (FDR: BH-method) less than 0.05'): \r\n" + o.getPathwayAnalysis()) # formatted as tab-delimited spreadsheet
print("Detailed table size: " + o.getDetailsSize())
print("Pathway annotations for the full input list: \r\n" + o.getDetails()) # formatted as tab-delimited spreadsheet
# ###########################################
# Example of search on Entrez Gene IDs #
# ###########################################
# Entrez Gene ID
# - Comma delimited.
# - Mind case.
IDs = "375790,8841,6351,7422"
# Data component
# - Use only one of these five:
# Literature curated (core) pathway memberships
# Extended pathway associations. Protein interaction set: Experimentally detected PPIsMinimum confidence level for predicted associations: 0.99
# Extended pathway associations. Protein interaction set: Experimentally detected PPIsMinimum confidence level for predicted associations: 0.95
# Extended pathway associations. Protein interaction set: Experimentally detected and computationally predicted PPIs (full IID)Minimum confidence level for predicted associations: 0.99
# Extended pathway associations. Protein interaction set: Experimentally detected and computationally predicted PPIs (full IID)Minimum confidence level for predicted associations: 0.95
# - Mind exact spelling and spaces.
component = "Extended pathway associations. Protein interaction set: Experimentally detected PPIsMinimum confidence level for predicted associations: 0.99"
# Data sources (Note: Data source list was updated for full list of 22 available sources)
# - Use some or all of those:
# ACSN2,BioCarta,EHMN,HumanCyc,INOH,IPAVS,KEGG,NetPath,OntoCancro,Panther_Pathway,PharmGKB,PID,RB-Pathways,REACTOME,stke,systems-biology.org,SignaLink2.0,SIGNOR2.0,SMPDB,Spike,UniProt_Pathways,WikiPathways
# - Comma delimited.
# - Mind exact spelling.
sources = "ACSN2,BioCarta,EHMN,HumanCyc,INOH,IPAVS,KEGG,NetPath,OntoCancro,Panther_Pathway,PharmGKB,PID,RB-Pathways,REACTOME,stke,systems-biology.org,SignaLink2.0,SIGNOR2.0,SMPDB,Spike,UniProt_Pathways,WikiPathways"
o = pathDIP_Http()
o.searchOnEntrez_IDs(IDs, component, sources)
# print results
print("\r\n Search on Entrez Gene ID: \r\n")
print("Generated at: " + o.getGeneratedAt())
print("IDs: " + o.getIDs())
print("DataComponent: " + o.getDataComponent())
print("Sources: " + o.getSources())
print()
print("Pathway enrichment analysis results size: " + o.getPathwayAnalysisSize())
print("Pathway enrichment analysis results ('q-value (FDR: BH-method) less than 0.05'): \r\n" + o.getPathwayAnalysis()) # formatted as tab-delimited spreadsheet
print("Details Size: " + o.getDetailsSize())
print("Pathway annotations for the full input list: \r\n" + o.getDetails()) # formatted as tab-delimited spreadsheet
| 40.532143
| 215
| 0.627456
| 1,215
| 11,349
| 5.835391
| 0.1893
| 0.016784
| 0.049506
| 0.062341
| 0.779972
| 0.761495
| 0.731735
| 0.721016
| 0.721016
| 0.721016
| 0
| 0.018005
| 0.22187
| 11,349
| 280
| 216
| 40.532143
| 0.784849
| 0.413164
| 0
| 0.416
| 0
| 0.056
| 0.34706
| 0.114896
| 0
| 0
| 0.001356
| 0
| 0
| 1
| 0.112
| false
| 0
| 0.008
| 0.008
| 0.32
| 0.248
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
fc150a4185036a1af01114a91bef1afb12d1ddb4
| 96
|
py
|
Python
|
Str_Repr/test2.py
|
DSNR/snippets
|
12006dd083be60c6444d8b5ca48fd917005e081b
|
[
"MIT"
] | null | null | null |
Str_Repr/test2.py
|
DSNR/snippets
|
12006dd083be60c6444d8b5ca48fd917005e081b
|
[
"MIT"
] | null | null | null |
Str_Repr/test2.py
|
DSNR/snippets
|
12006dd083be60c6444d8b5ca48fd917005e081b
|
[
"MIT"
] | null | null | null |
a = [1,2,3,4]
b = 'sample string'
print(str(a))    # [1, 2, 3, 4]
print(repr(a))   # [1, 2, 3, 4]
print(str(b))    # sample string
print(repr(b))   # 'sample string'
| 12
| 20
| 0.583333
| 20
| 96
| 2.8
| 0.55
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.054795
| 0.239583
| 96
| 8
| 21
| 12
| 0.712329
| 0
| 0
| 0
| 0
| 0
| 0.144444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
fc9957732fd3e2ee4f929fdf84f63035260fd64c
| 5,341
|
py
|
Python
|
code/G2/cfg.py
|
HJHjiahao/FCGP
|
fcb511157e9014d1ead778fc402900bd101c6425
|
[
"MIT"
] | null | null | null |
code/G2/cfg.py
|
HJHjiahao/FCGP
|
fcb511157e9014d1ead778fc402900bd101c6425
|
[
"MIT"
] | null | null | null |
code/G2/cfg.py
|
HJHjiahao/FCGP
|
fcb511157e9014d1ead778fc402900bd101c6425
|
[
"MIT"
] | null | null | null |
'''
Function:
Configuration file
'''
import os
'''Font'''
FONTPATH = os.path.join(os.getcwd(), 'G2/resources/font/font.ttf')
'''Images'''
BULLET_IMAGE_PATHS = {
'up': os.path.join(os.getcwd(), 'G2/resources/images/bullet/bullet_up.png'),
'down': os.path.join(os.getcwd(), 'G2/resources/images/bullet/bullet_down.png'),
'left': os.path.join(os.getcwd(), 'G2/resources/images/bullet/bullet_left.png'),
'right': os.path.join(os.getcwd(), 'G2/resources/images/bullet/bullet_right.png')
}
ENEMY_TANK_IMAGE_PATHS = {
'1': [os.path.join(os.getcwd(), 'G2/resources/images/enemyTank/enemy_1_0.png'),
os.path.join(os.getcwd(), 'G2/resources/images/enemyTank/enemy_1_1.png'),
os.path.join(os.getcwd(), 'G2/resources/images/enemyTank/enemy_1_2.png'),
os.path.join(os.getcwd(), 'G2/resources/images/enemyTank/enemy_1_3.png')],
'2': [os.path.join(os.getcwd(), 'G2/resources/images/enemyTank/enemy_2_0.png'),
os.path.join(os.getcwd(), 'G2/resources/images/enemyTank/enemy_2_1.png'),
os.path.join(os.getcwd(), 'G2/resources/images/enemyTank/enemy_2_2.png'),
os.path.join(os.getcwd(), 'G2/resources/images/enemyTank/enemy_2_3.png')],
'3': [os.path.join(os.getcwd(), 'G2/resources/images/enemyTank/enemy_3_0.png'),
os.path.join(os.getcwd(), 'G2/resources/images/enemyTank/enemy_3_1.png'),
os.path.join(os.getcwd(), 'G2/resources/images/enemyTank/enemy_3_2.png'),
os.path.join(os.getcwd(), 'G2/resources/images/enemyTank/enemy_3_3.png')],
'4': [os.path.join(os.getcwd(), 'G2/resources/images/enemyTank/enemy_4_0.png'),
os.path.join(os.getcwd(), 'G2/resources/images/enemyTank/enemy_4_1.png'),
os.path.join(os.getcwd(), 'G2/resources/images/enemyTank/enemy_4_2.png'),
os.path.join(os.getcwd(), 'G2/resources/images/enemyTank/enemy_4_3.png')]
}
PLAYER_TANK_IMAGE_PATHS = {
'player1': [os.path.join(os.getcwd(), 'G2/resources/images/playerTank/tank_T1_0.png'),
os.path.join(os.getcwd(), 'G2/resources/images/playerTank/tank_T1_1.png'),
os.path.join(os.getcwd(), 'G2/resources/images/playerTank/tank_T1_2.png')],
'player2': [os.path.join(os.getcwd(), 'G2/resources/images/playerTank/tank_T2_0.png'),
os.path.join(os.getcwd(), 'G2/resources/images/playerTank/tank_T2_1.png'),
os.path.join(os.getcwd(), 'G2/resources/images/playerTank/tank_T2_2.png')]
}
FOOD_IMAGE_PATHS = {
'boom': os.path.join(os.getcwd(), 'G2/resources/images/food/food_boom.png'),
'clock': os.path.join(os.getcwd(), 'G2/resources/images/food/food_clock.png'),
'gun': os.path.join(os.getcwd(), 'G2/resources/images/food/food_gun.png'),
'iron': os.path.join(os.getcwd(), 'G2/resources/images/food/food_iron.png'),
'protect': os.path.join(os.getcwd(), 'G2/resources/images/food/food_protect.png'),
'star': os.path.join(os.getcwd(), 'G2/resources/images/food/food_star.png'),
'tank': os.path.join(os.getcwd(), 'G2/resources/images/food/food_tank.png')
}
HOME_IMAGE_PATHS = [os.path.join(os.getcwd(), 'G2/resources/images/home/home1.png'),
os.path.join(os.getcwd(), 'G2/resources/images/home/home_destroyed.png')]
SCENE_IMAGE_PATHS = {
    'brick': os.path.join(os.getcwd(), 'G2/resources/images/scene/brick.png'),
'ice': os.path.join(os.getcwd(), 'G2/resources/images/scene/ice.png'),
'iron': os.path.join(os.getcwd(), 'G2/resources/images/scene/iron.png'),
'river1': os.path.join(os.getcwd(), 'G2/resources/images/scene/river1.png'),
'river2': os.path.join(os.getcwd(), 'G2/resources/images/scene/river2.png'),
'tree': os.path.join(os.getcwd(), 'G2/resources/images/scene/tree.png')
}
OTHER_IMAGE_PATHS = {
'appear': os.path.join(os.getcwd(), 'G2/resources/images/others/appear.png'),
'background': os.path.join(os.getcwd(), 'G2/resources/images/others/background.png'),
'boom_dynamic': os.path.join(os.getcwd(), 'G2/resources/images/others/boom_dynamic.png'),
'boom_static': os.path.join(os.getcwd(), 'G2/resources/images/others/boom_static.png'),
'gameover': os.path.join(os.getcwd(), 'G2/resources/images/others/gameover.png'),
'logo': os.path.join(os.getcwd(), 'G2/resources/images/others/logo.png'),
'mask': os.path.join(os.getcwd(), 'G2/resources/images/others/mask.png'),
'protect': os.path.join(os.getcwd(), 'G2/resources/images/others/protect.png'),
'tip': os.path.join(os.getcwd(), 'G2/resources/images/others/tip.png'),
'gamebar': os.path.join(os.getcwd(), 'G2/resources/images/others/gamebar.png')
}
'''Audio'''
AUDIO_PATHS = {
'add': os.path.join(os.getcwd(), 'G2/resources/audios/add.wav'),
'bang': os.path.join(os.getcwd(), 'G2/resources/audios/bang.wav'),
'blast': os.path.join(os.getcwd(), 'G2/resources/audios/blast.wav'),
'fire': os.path.join(os.getcwd(), 'G2/resources/audios/fire.wav'),
'Gunfire': os.path.join(os.getcwd(), 'G2/resources/audios/Gunfire.wav'),
'hit': os.path.join(os.getcwd(), 'G2/resources/audios/hit.wav'),
'start': os.path.join(os.getcwd(), 'G2/resources/audios/start.wav')
}
'''Screen'''
WIDTH = 630
HEIGHT = 630
BORDER_LEN = 3
GRID_SIZE = 24
PANEL_WIDTH = 150
TITLE = '坦克大战'  # "Tank Battle"
'''Levels'''
LEVELFILEDIR = os.path.join(os.getcwd(), 'G2/modules/levels')
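# Minimal usage sketch (assumption: these paths are consumed by a pygame-based game loop;
# pygame is not a dependency of this config module itself, so the example stays commented out):
#   import pygame
#   pygame.init()
#   font = pygame.font.Font(FONTPATH, 24)
#   bullet_up = pygame.image.load(BULLET_IMAGE_PATHS['up'])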
| 58.054348
| 95
| 0.662048
| 800
| 5,341
| 4.32
| 0.11625
| 0.104167
| 0.173611
| 0.208333
| 0.782986
| 0.782986
| 0.771412
| 0.763021
| 0.69213
| 0.536169
| 0
| 0.027148
| 0.124134
| 5,341
| 92
| 96
| 58.054348
| 0.711629
| 0.004119
| 0
| 0
| 0
| 0
| 0.473006
| 0.432089
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.012346
| 0
| 0.012346
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
5d9d2fb6d86b823a1620b3c1acd96472abd70e42
| 4,094
|
py
|
Python
|
tapiriik/testing/stddevice.py
|
Decathlon/exercisync
|
e9df9d4f2210fff8cfc8b34e2e5f9d09d84bddef
|
[
"Apache-2.0"
] | null | null | null |
tapiriik/testing/stddevice.py
|
Decathlon/exercisync
|
e9df9d4f2210fff8cfc8b34e2e5f9d09d84bddef
|
[
"Apache-2.0"
] | null | null | null |
tapiriik/testing/stddevice.py
|
Decathlon/exercisync
|
e9df9d4f2210fff8cfc8b34e2e5f9d09d84bddef
|
[
"Apache-2.0"
] | null | null | null |
from unittest import TestCase
from tapiriik.services.devices import Device
from tapiriik.services.Decathlon.decathlon import DecathlonService
class StdDeviceTest(TestCase):
def test_fit_manufacturer_with_no_fit_device_return_provided_manufacturer_none_product(self):
undefined_fit_device_and_decathlon_fit_manufacturer_std_device = {
"@context": "/v2/contexts/UserDevice",
"@id": "/v2/user_devices/eu23218ff9b8010d294e",
"@type": "UserDevice",
"id": "eu23218ff9b8010d294e",
"serial": "30:67:71:B8:DB:02",
"fitManufacturer": 310,
"fitDevice": None,
"model": "/v2/device_models/99",
"firmware": "/v2/firmware/9928",
"user": "/v2/users/eu200a4d76c4eab29015",
}
# When
hubDevice = DecathlonService.convertStdDeviceToHubDevice(undefined_fit_device_and_decathlon_fit_manufacturer_std_device)
# Then
self.assertIsNotNone(hubDevice)
self.assertIsInstance(hubDevice, Device)
self.assertEqual(hubDevice.Manufacturer, "decathlon")
self.assertIsNone(hubDevice.Product)
def test_fit_manufacturer_with_fit_device_return_provided_manufacturer_and_provided_product(self):
undefined_fit_device_and_decathlon_fit_manufacturer_std_device = {
"@context": "/v2/contexts/UserDevice",
"@id": "/v2/user_devices/eu23218ff9b8010d294e",
"@type": "UserDevice",
"id": "eu23218ff9b8010d294e",
"serial": "30:67:71:B8:DB:02",
"fitManufacturer": 23,
"fitDevice": 15,
"model": "/v2/device_models/18",
"firmware": "/v2/firmware/9928",
"user": "/v2/users/eu200a4d76c4eab29015",
}
# When
hubDevice = DecathlonService.convertStdDeviceToHubDevice(undefined_fit_device_and_decathlon_fit_manufacturer_std_device)
# Then
self.assertIsNotNone(hubDevice)
self.assertIsInstance(hubDevice, Device)
self.assertEqual(hubDevice.Manufacturer, "suunto")
self.assertEqual(hubDevice.Product, 15)
def test_undefined_fit_manufacturer_and_fit_device_with_model_return_default_manufacturer_and_model_id_as_product(self):
undefined_fit_device_and_fit_manufacturer_but_model_std_device = {
"@context": "/v2/contexts/UserDevice",
"@id": "/v2/user_devices/eu23218ff9b8010d294e",
"@type": "UserDevice",
"id": "eu23218ff9b8010d294e",
"serial": "30:67:71:B8:DB:02",
"fitManufacturer": None,
"fitDevice": None,
"model": "/v2/device_models/99",
"firmware": "/v2/firmware/9928",
"user": "/v2/users/eu200a4d76c4eab29015"
}
# When
hubDevice = DecathlonService.convertStdDeviceToHubDevice(undefined_fit_device_and_fit_manufacturer_but_model_std_device)
# Then
self.assertIsNotNone(hubDevice)
self.assertIsInstance(hubDevice, Device)
self.assertEqual(hubDevice.Manufacturer, "decathlon")
self.assertEqual(hubDevice.Product, 99)
    def test_unknown_manufacturer_with_fit_device_return_developer_manufacturer_and_fit_product(self):
unknown_manufacturer_std_device = {
"@context": "/v2/contexts/UserDevice",
"@id": "/v2/user_devices/eu23218ff9b8010d294e",
"@type": "UserDevice",
"id": "eu23218ff9b8010d294e",
"serial": "30:67:71:B8:DB:02",
"fitManufacturer": 294,
"fitDevice": 5,
"model": "/v2/device_models/99",
"firmware": "/v2/firmware/9928",
"user": "/v2/users/eu200a4d76c4eab29015"
}
# When
hubDevice = DecathlonService.convertStdDeviceToHubDevice(unknown_manufacturer_std_device)
# Then
self.assertIsNotNone(hubDevice)
self.assertIsInstance(hubDevice, Device)
self.assertIsNone(hubDevice.Manufacturer)
self.assertIsNone(hubDevice.Product)
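# These cases can be run with the standard unittest runner, e.g.
# (assuming the tapiriik package is importable from the current path):
#   python -m unittest tapiriik.testing.stddevice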
| 38.622642
| 128
| 0.648999
| 378
| 4,094
| 6.730159
| 0.187831
| 0.053066
| 0.042453
| 0.049528
| 0.775943
| 0.727987
| 0.723664
| 0.723664
| 0.723664
| 0.723664
| 0
| 0.079729
| 0.243283
| 4,094
| 106
| 129
| 38.622642
| 0.741446
| 0.010503
| 0
| 0.644737
| 0
| 0
| 0.242394
| 0.089043
| 0
| 0
| 0
| 0
| 0.210526
| 1
| 0.052632
| false
| 0
| 0.039474
| 0
| 0.105263
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
5dc888ea421123a6cedbb0e30ce1181f1fb47307
| 146
|
py
|
Python
|
encode/tests/__init__.py
|
Ircam-Web/django-encode
|
2c1c9d843865ec99fb5b45631d6f08a9c7cb86ce
|
[
"MIT"
] | 11
|
2015-03-11T20:48:13.000Z
|
2021-12-14T14:17:39.000Z
|
encode/tests/__init__.py
|
Ircam-Web/django-encode
|
2c1c9d843865ec99fb5b45631d6f08a9c7cb86ce
|
[
"MIT"
] | 2
|
2015-11-24T22:10:06.000Z
|
2017-05-26T09:27:02.000Z
|
encode/tests/__init__.py
|
Ircam-Web/django-encode
|
2c1c9d843865ec99fb5b45631d6f08a9c7cb86ce
|
[
"MIT"
] | 2
|
2019-08-09T17:29:41.000Z
|
2020-08-31T16:47:27.000Z
|
# Copyright Collab 2012-2016
# See LICENSE for details.
"""
Tests for the :py:mod:`encode` project.
"""
from .celery import app # flake8: noqa
| 16.222222
| 39
| 0.691781
| 21
| 146
| 4.809524
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075
| 0.178082
| 146
| 8
| 40
| 18.25
| 0.766667
| 0.719178
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
5deda6cdf80bdf5091832ea3ef23ddc56fbe2e17
| 361
|
py
|
Python
|
dev_tools/test_manager.py
|
SocialSisterYi/Alconna
|
3e1d986ca5486dfd3c7bd80118a75364ab6831b8
|
[
"MIT"
] | null | null | null |
dev_tools/test_manager.py
|
SocialSisterYi/Alconna
|
3e1d986ca5486dfd3c7bd80118a75364ab6831b8
|
[
"MIT"
] | null | null | null |
dev_tools/test_manager.py
|
SocialSisterYi/Alconna
|
3e1d986ca5486dfd3c7bd80118a75364ab6831b8
|
[
"MIT"
] | null | null | null |
from dev_tools.test_alconna_1 import *
from dev_tools.test_alconna_2 import *
print("\n\n## ------------- Test Manager -------------## \n\n")
print(command_manager.all_command_help(max_length=6, page=3, pages="[%d/%d]"))
print("\n")
print(command_manager.broadcast("cmd.北京天气"))
print(command_manager.require("/pip"))
print(command_manager.command_help("/pip"))
| 40.111111
| 78
| 0.698061
| 54
| 361
| 4.407407
| 0.481481
| 0.201681
| 0.319328
| 0.134454
| 0.193277
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011765
| 0.058172
| 361
| 9
| 79
| 40.111111
| 0.688235
| 0
| 0
| 0
| 0
| 0
| 0.218232
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.25
| 0
| 0.25
| 0.75
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
b90048453095acbe52948585025b7df36f6ee00b
| 75
|
py
|
Python
|
7_kyu/sum_of_two_lowest_positive_integers.py
|
nik4nd/codewars
|
efae95f1f9fbd5f31fc62b1b4f5a7d1ee511ced0
|
[
"MIT"
] | 40
|
2016-03-09T12:26:20.000Z
|
2022-03-23T08:44:51.000Z
|
7_kyu/sum_of_two_lowest_positive_integers.py
|
nik4nd/codewars
|
efae95f1f9fbd5f31fc62b1b4f5a7d1ee511ced0
|
[
"MIT"
] | null | null | null |
7_kyu/sum_of_two_lowest_positive_integers.py
|
nik4nd/codewars
|
efae95f1f9fbd5f31fc62b1b4f5a7d1ee511ced0
|
[
"MIT"
] | 36
|
2016-11-07T19:59:58.000Z
|
2022-03-31T11:18:27.000Z
|
def sum_two_smallest_numbers(numbers):
return sum(sorted(numbers)[:2])
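# Quick sanity check (illustrative only; not part of the original kata file):
if __name__ == "__main__":
    assert sum_two_smallest_numbers([19, 5, 42, 2, 77]) == 7  # the two lowest values are 2 and 5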
| 25
| 38
| 0.76
| 11
| 75
| 4.909091
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014925
| 0.106667
| 75
| 2
| 39
| 37.5
| 0.791045
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
f8db7637e59854334ce81efa34272b135f36f33f
| 49,872
|
py
|
Python
|
octavia/tests/unit/network/drivers/neutron/test_allowed_address_pairs.py
|
zjchao/octavia
|
e07031fa78604568c6e2112cb4cb147661bc57d7
|
[
"Apache-2.0"
] | null | null | null |
octavia/tests/unit/network/drivers/neutron/test_allowed_address_pairs.py
|
zjchao/octavia
|
e07031fa78604568c6e2112cb4cb147661bc57d7
|
[
"Apache-2.0"
] | null | null | null |
octavia/tests/unit/network/drivers/neutron/test_allowed_address_pairs.py
|
zjchao/octavia
|
e07031fa78604568c6e2112cb4cb147661bc57d7
|
[
"Apache-2.0"
] | 1
|
2021-12-27T13:18:38.000Z
|
2021-12-27T13:18:38.000Z
|
# Copyright 2015 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from neutronclient.common import exceptions as neutron_exceptions
from novaclient.client import exceptions as nova_exceptions
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from oslo_utils import uuidutils
from octavia.common import clients
from octavia.common import constants
from octavia.common import data_models
from octavia.common import exceptions
from octavia.network import base as network_base
from octavia.network import data_models as network_models
from octavia.network.drivers.neutron import allowed_address_pairs
from octavia.network.drivers.neutron import base as neutron_base
from octavia.tests.common import constants as t_constants
from octavia.tests.common import data_model_helpers as dmh
from octavia.tests.unit import base
class TestAllowedAddressPairsDriver(base.TestCase):
k_session = None
driver = None
SUBNET_ID_1 = "5"
SUBNET_ID_2 = "8"
FIXED_IP_ID_1 = "6"
FIXED_IP_ID_2 = "8"
NETWORK_ID_1 = "7"
NETWORK_ID_2 = "10"
IP_ADDRESS_1 = "10.0.0.2"
IP_ADDRESS_2 = "12.0.0.2"
AMPHORA_ID = "1"
LB_ID = "2"
COMPUTE_ID = "3"
ACTIVE = "ACTIVE"
LB_NET_IP = "10.0.0.2"
LB_NET_PORT_ID = "6"
HA_PORT_ID = "8"
HA_IP = "12.0.0.2"
PORT_ID = uuidutils.generate_uuid()
DEVICE_ID = uuidutils.generate_uuid()
def setUp(self):
super(TestAllowedAddressPairsDriver, self).setUp()
with mock.patch('octavia.common.clients.neutron_client.Client',
autospec=True) as neutron_client:
with mock.patch('stevedore.driver.DriverManager.driver',
autospec=True):
client = neutron_client(clients.NEUTRON_VERSION)
client.list_extensions.return_value = {
'extensions': [
{'alias': allowed_address_pairs.AAP_EXT_ALIAS},
{'alias': neutron_base.SEC_GRP_EXT_ALIAS}
]
}
self.k_session = mock.patch(
'keystoneauth1.session.Session').start()
self.driver = allowed_address_pairs.AllowedAddressPairsDriver()
@mock.patch('octavia.network.drivers.neutron.base.BaseNeutronDriver.'
'_check_extension_enabled', return_value=False)
def test_check_aap_loaded(self, mock_check_ext):
self.assertRaises(network_base.NetworkException,
self.driver._check_aap_loaded)
def test_get_interfaces_to_unplug(self):
if1 = network_models.Interface()
if1.network_id = 'if1-net'
if1.port_id = 'if1-port'
if1.fixed_ips = [network_models.FixedIP(ip_address='10.0.0.1')]
if2 = network_models.Interface()
if2.network_id = 'if2-net'
if2.port_id = 'if2-port'
if2.fixed_ips = [network_models.FixedIP(ip_address='11.0.0.1')]
interfaces = [if1, if2]
unpluggers = self.driver._get_interfaces_to_unplug(
interfaces, 'if1-net')
self.assertEqual([if1], unpluggers)
unpluggers = self.driver._get_interfaces_to_unplug(
interfaces, 'if1-net', ip_address='10.0.0.1')
self.assertEqual([if1], unpluggers)
unpluggers = self.driver._get_interfaces_to_unplug(
interfaces, 'if1-net', ip_address='11.0.0.1')
self.assertEqual([], unpluggers)
unpluggers = self.driver._get_interfaces_to_unplug(
interfaces, 'if3-net')
self.assertEqual([], unpluggers)
def test_deallocate_vip(self):
lb = dmh.generate_load_balancer_tree()
lb.vip.load_balancer = lb
vip = lb.vip
sec_grp_id = 'lb-sec-grp1'
show_port = self.driver.neutron_client.show_port
show_port.return_value = {'port': {
'device_owner': allowed_address_pairs.OCTAVIA_OWNER}}
delete_port = self.driver.neutron_client.delete_port
delete_sec_grp = self.driver.neutron_client.delete_security_group
list_security_groups = self.driver.neutron_client.list_security_groups
security_groups = {
'security_groups': [
{'id': sec_grp_id}
]
}
list_security_groups.return_value = security_groups
self.driver.deallocate_vip(vip)
calls = [mock.call(vip.port_id)]
for amp in lb.amphorae:
calls.append(mock.call(amp.vrrp_port_id))
delete_port.assert_has_calls(calls, any_order=True)
delete_sec_grp.assert_called_once_with(sec_grp_id)
def test_deallocate_vip_no_port(self):
lb = dmh.generate_load_balancer_tree()
lb.vip.load_balancer = lb
vip = lb.vip
sec_grp_id = 'lb-sec-grp1'
show_port = self.driver.neutron_client.show_port
port = {'port': {
'device_owner': allowed_address_pairs.OCTAVIA_OWNER}}
show_port.side_effect = [port, Exception]
list_security_groups = self.driver.neutron_client.list_security_groups
security_groups = {
'security_groups': [
{'id': sec_grp_id}
]
}
list_security_groups.return_value = security_groups
self.driver.deallocate_vip(vip)
self.driver.neutron_client.update_port.assert_not_called()
def test_deallocate_vip_port_deleted(self):
lb = dmh.generate_load_balancer_tree()
lb.vip.load_balancer = lb
vip = lb.vip
sec_grp_id = 'lb-sec-grp1'
show_port = self.driver.neutron_client.show_port
show_port.return_value = {'port': {
'device_owner': allowed_address_pairs.OCTAVIA_OWNER}}
delete_port = self.driver.neutron_client.delete_port
delete_port.side_effect = neutron_exceptions.NotFound
delete_sec_grp = self.driver.neutron_client.delete_security_group
list_security_groups = self.driver.neutron_client.list_security_groups
security_groups = {
'security_groups': [
{'id': sec_grp_id}
]
}
list_security_groups.return_value = security_groups
self.driver.deallocate_vip(vip)
calls = [mock.call(vip.port_id)]
for amp in lb.amphorae:
calls.append(mock.call(amp.vrrp_port_id))
delete_port.assert_has_calls(calls, any_order=True)
delete_sec_grp.assert_called_once_with(sec_grp_id)
def test_deallocate_vip_no_sec_group(self):
lb = dmh.generate_load_balancer_tree()
lb.vip.load_balancer = lb
vip = lb.vip
show_port = self.driver.neutron_client.show_port
show_port.return_value = {'port': {
'device_owner': allowed_address_pairs.OCTAVIA_OWNER}}
delete_port = self.driver.neutron_client.delete_port
delete_sec_grp = self.driver.neutron_client.delete_security_group
list_security_groups = self.driver.neutron_client.list_security_groups
security_groups = {
'security_groups': []
}
list_security_groups.return_value = security_groups
self.driver.deallocate_vip(vip)
delete_port.assert_called_with(vip.port_id)
delete_sec_grp.assert_not_called()
def test_deallocate_vip_when_delete_port_fails(self):
lb = dmh.generate_load_balancer_tree()
vip = data_models.Vip(port_id='1')
vip.load_balancer = lb
show_port = self.driver.neutron_client.show_port
show_port.return_value = {'port': {
'device_owner': allowed_address_pairs.OCTAVIA_OWNER}}
delete_port = self.driver.neutron_client.delete_port
delete_port.side_effect = [None, None, TypeError]
self.assertRaises(network_base.DeallocateVIPException,
self.driver.deallocate_vip, vip)
def test_deallocate_vip_when_secgrp_has_allocated_ports(self):
max_retries = 1
conf = oslo_fixture.Config(cfg.CONF)
conf.config(group="networking", max_retries=max_retries)
lb = dmh.generate_load_balancer_tree()
lb.vip.load_balancer = lb
vip = lb.vip
show_port = self.driver.neutron_client.show_port
show_port.return_value = {'port': {
'device_owner': allowed_address_pairs.OCTAVIA_OWNER}}
delete_port = self.driver.neutron_client.delete_port
list_ports = self.driver.neutron_client.list_ports
list_security_groups = self.driver.neutron_client.list_security_groups
delete_sec_grp = self.driver.neutron_client.delete_security_group
security_groups = {
'security_groups': [
{'id': t_constants.MOCK_SECURITY_GROUP_ID}
]
}
list_security_groups.return_value = security_groups
delete_grp_results = [
network_base.DeallocateVIPException
for _ in range(max_retries + 1)] # Total tries = max_retries + 1
delete_grp_results.append(None)
delete_sec_grp.side_effect = delete_grp_results
list_ports.side_effect = [{
"ports": [t_constants.MOCK_NEUTRON_PORT['port'],
t_constants.MOCK_NEUTRON_PORT2['port']]}]
self.driver.deallocate_vip(vip)
# First we expect the amp's ports to be deleted
dp_calls = [mock.call(amp.vrrp_port_id) for amp in lb.amphorae]
# Then after the SG delete fails, extra hanging-on ports are removed
dp_calls.append(mock.call(t_constants.MOCK_PORT_ID))
# Lastly we remove the vip port
dp_calls.append(mock.call(vip.port_id))
self.assertEqual(len(dp_calls), delete_port.call_count)
delete_port.assert_has_calls(dp_calls)
dsg_calls = [mock.call(t_constants.MOCK_SECURITY_GROUP_ID)
for _ in range(max_retries + 2)] # Max fail + one success
self.assertEqual(len(dsg_calls), delete_sec_grp.call_count)
delete_sec_grp.assert_has_calls(dsg_calls)
def test_deallocate_vip_when_port_not_found(self):
lb = dmh.generate_load_balancer_tree()
vip = data_models.Vip(port_id='1')
vip.load_balancer = lb
show_port = self.driver.neutron_client.show_port
show_port.side_effect = neutron_exceptions.PortNotFoundClient
self.driver.deallocate_vip(vip)
def test_deallocate_vip_when_port_not_found_for_update(self):
lb = dmh.generate_load_balancer_tree()
vip = data_models.Vip(port_id='1')
vip.load_balancer = lb
show_port = self.driver.neutron_client.show_port
show_port.return_value = {'port': {
'device_owner': allowed_address_pairs.OCTAVIA_OWNER}}
update_port = self.driver.neutron_client.update_port
update_port.side_effect = neutron_exceptions.PortNotFoundClient
self.driver.deallocate_vip(vip)
def test_deallocate_vip_when_port_not_owned_by_octavia(self):
lb = dmh.generate_load_balancer_tree()
lb.vip.load_balancer = lb
vip = lb.vip
sec_grp_id = 'lb-sec-grp1'
show_port = self.driver.neutron_client.show_port
show_port.return_value = {'port': {
'id': vip.port_id,
'device_owner': 'neutron:LOADBALANCERV2',
'security_groups': [sec_grp_id]}}
update_port = self.driver.neutron_client.update_port
delete_sec_grp = self.driver.neutron_client.delete_security_group
list_security_groups = self.driver.neutron_client.list_security_groups
security_groups = {
'security_groups': [
{'id': sec_grp_id}
]
}
list_security_groups.return_value = security_groups
self.driver.deallocate_vip(vip)
expected_port_update = {'port': {'security_groups': []}}
update_port.assert_called_once_with(vip.port_id, expected_port_update)
delete_sec_grp.assert_called_once_with(sec_grp_id)
def test_deallocate_vip_when_vip_port_not_found(self):
lb = dmh.generate_load_balancer_tree()
vip = data_models.Vip(port_id='1')
vip.load_balancer = lb
admin_project_id = 'octavia'
session_mock = mock.MagicMock()
session_mock.get_project_id.return_value = admin_project_id
self.k_session.return_value = session_mock
show_port = self.driver.neutron_client.show_port
show_port.side_effect = neutron_exceptions.PortNotFoundClient
self.driver.deallocate_vip(vip)
def test_plug_vip_errors_when_nova_cant_find_network_to_attach(self):
lb = dmh.generate_load_balancer_tree()
show_subnet = self.driver.neutron_client.show_subnet
show_subnet.return_value = {
'subnet': {
'id': lb.vip.subnet_id
}
}
list_security_groups = self.driver.neutron_client.list_security_groups
lsc_side_effect = [
None, {
'security_groups': [
{'id': 'lb-sec-grp1'}
]
}
]
list_security_groups.side_effect = lsc_side_effect
network_attach = self.driver.compute.attach_network_or_port
network_attach.side_effect = nova_exceptions.NotFound(404, "Network")
self.assertRaises(network_base.PlugVIPException,
self.driver.plug_vip, lb, lb.vip)
def test_plug_vip_errors_when_neutron_cant_find_port_to_update(self):
lb = dmh.generate_load_balancer_tree()
show_subnet = self.driver.neutron_client.show_subnet
show_subnet.return_value = {
'subnet': {
'id': lb.vip.subnet_id
}
}
list_security_groups = self.driver.neutron_client.list_security_groups
lsc_side_effect = [
None, {
'security_groups': [
{'id': 'lb-sec-grp1'}
]
}
]
list_security_groups.side_effect = lsc_side_effect
network_attach = self.driver.compute.attach_network_or_port
network_attach.return_value = t_constants.MOCK_NOVA_INTERFACE
update_port = self.driver.neutron_client.update_port
update_port.side_effect = neutron_exceptions.PortNotFoundClient
self.assertRaises(network_base.PortNotFound,
self.driver.plug_vip, lb, lb.vip)
def test_plug_vip(self):
lb = dmh.generate_load_balancer_tree()
show_subnet = self.driver.neutron_client.show_subnet
show_subnet.return_value = {
'subnet': {
'id': t_constants.MOCK_VIP_SUBNET_ID,
'network_id': t_constants.MOCK_VIP_NET_ID
}
}
list_ports = self.driver.neutron_client.list_ports
port1 = t_constants.MOCK_MANAGEMENT_PORT1['port']
port2 = t_constants.MOCK_MANAGEMENT_PORT2['port']
list_ports.side_effect = [{'ports': [port1]}, {'ports': [port2]}]
network_attach = self.driver.compute.attach_network_or_port
network_attach.side_effect = [t_constants.MOCK_VRRP_INTERFACE1,
t_constants.MOCK_VRRP_INTERFACE2]
list_security_groups = self.driver.neutron_client.list_security_groups
list_security_groups.return_value = {
'security_groups': [
{'id': 'lb-sec-grp1'}
]
}
update_port = self.driver.neutron_client.update_port
expected_aap = {'port': {'allowed_address_pairs':
[{'ip_address': lb.vip.ip_address}]}}
amps = self.driver.plug_vip(lb, lb.vip)
self.assertEqual(5, update_port.call_count)
for amp in amps:
update_port.assert_any_call(amp.vrrp_port_id, expected_aap)
self.assertIn(amp.vrrp_ip, [t_constants.MOCK_VRRP_IP1,
t_constants.MOCK_VRRP_IP2])
self.assertEqual(lb.vip.ip_address, amp.ha_ip)
def _set_safely(self, obj, name, value):
if isinstance(obj, dict):
current = obj.get(name)
self.addCleanup(obj.update, {name: current})
obj.update({name: value})
else:
current = getattr(obj, name)
self.addCleanup(setattr, obj, name, current)
setattr(obj, name, value)
def test_plug_vip_on_mgmt_net(self):
lb = dmh.generate_load_balancer_tree()
lb.vip.subnet_id = t_constants.MOCK_MANAGEMENT_SUBNET_ID
show_subnet = self.driver.neutron_client.show_subnet
show_subnet.return_value = {
'subnet': {
'id': t_constants.MOCK_MANAGEMENT_SUBNET_ID,
'network_id': t_constants.MOCK_MANAGEMENT_NET_ID
}
}
list_ports = self.driver.neutron_client.list_ports
port1 = t_constants.MOCK_MANAGEMENT_PORT1['port']
port2 = t_constants.MOCK_MANAGEMENT_PORT2['port']
self._set_safely(t_constants.MOCK_MANAGEMENT_FIXED_IPS1[0],
'ip_address', lb.amphorae[0].lb_network_ip)
self._set_safely(t_constants.MOCK_MANAGEMENT_FIXED_IPS2[0],
'ip_address', lb.amphorae[1].lb_network_ip)
list_ports.side_effect = [{'ports': [port1]}, {'ports': [port2]}]
network_attach = self.driver.compute.attach_network_or_port
self._set_safely(t_constants.MOCK_VRRP_INTERFACE1,
'net_id', t_constants.MOCK_MANAGEMENT_NET_ID)
self._set_safely(t_constants.MOCK_VRRP_FIXED_IPS1[0],
'subnet_id', t_constants.MOCK_MANAGEMENT_SUBNET_ID)
self._set_safely(t_constants.MOCK_VRRP_INTERFACE2,
'net_id', t_constants.MOCK_MANAGEMENT_NET_ID)
self._set_safely(t_constants.MOCK_VRRP_FIXED_IPS2[0],
'subnet_id', t_constants.MOCK_MANAGEMENT_SUBNET_ID)
network_attach.side_effect = [t_constants.MOCK_VRRP_INTERFACE1,
t_constants.MOCK_VRRP_INTERFACE2]
list_security_groups = self.driver.neutron_client.list_security_groups
list_security_groups.return_value = {
'security_groups': [
{'id': 'lb-sec-grp1'}
]
}
update_port = self.driver.neutron_client.update_port
expected_aap = {'port': {'allowed_address_pairs':
[{'ip_address': lb.vip.ip_address}]}}
amps = self.driver.plug_vip(lb, lb.vip)
self.assertEqual(5, update_port.call_count)
for amp in amps:
update_port.assert_any_call(amp.vrrp_port_id, expected_aap)
self.assertIn(amp.vrrp_ip, [t_constants.MOCK_VRRP_IP1,
t_constants.MOCK_VRRP_IP2])
self.assertEqual(lb.vip.ip_address, amp.ha_ip)
def test_allocate_vip_when_port_already_provided(self):
show_port = self.driver.neutron_client.show_port
show_port.return_value = t_constants.MOCK_NEUTRON_PORT
fake_lb_vip = data_models.Vip(
port_id=t_constants.MOCK_PORT_ID,
subnet_id=t_constants.MOCK_SUBNET_ID)
fake_lb = data_models.LoadBalancer(id='1', vip=fake_lb_vip)
vip = self.driver.allocate_vip(fake_lb)
self.assertIsInstance(vip, data_models.Vip)
self.assertEqual(t_constants.MOCK_IP_ADDRESS, vip.ip_address)
self.assertEqual(t_constants.MOCK_SUBNET_ID, vip.subnet_id)
self.assertEqual(t_constants.MOCK_PORT_ID, vip.port_id)
self.assertEqual(fake_lb.id, vip.load_balancer_id)
def test_allocate_vip_when_port_creation_fails(self):
fake_lb_vip = data_models.Vip(
subnet_id=t_constants.MOCK_SUBNET_ID)
fake_lb = data_models.LoadBalancer(id='1', vip=fake_lb_vip)
create_port = self.driver.neutron_client.create_port
create_port.side_effect = Exception
self.assertRaises(network_base.AllocateVIPException,
self.driver.allocate_vip, fake_lb)
@mock.patch('octavia.network.drivers.neutron.base.BaseNeutronDriver.'
'_check_extension_enabled', return_value=True)
def test_allocate_vip_when_no_port_provided(self, mock_check_ext):
port_create_dict = copy.deepcopy(t_constants.MOCK_NEUTRON_PORT)
port_create_dict['port']['device_owner'] = (
allowed_address_pairs.OCTAVIA_OWNER)
port_create_dict['port']['device_id'] = 'lb-1'
create_port = self.driver.neutron_client.create_port
create_port.return_value = port_create_dict
show_subnet = self.driver.neutron_client.show_subnet
show_subnet.return_value = {'subnet': {
'id': t_constants.MOCK_SUBNET_ID,
'network_id': t_constants.MOCK_NETWORK_ID
}}
fake_lb_vip = data_models.Vip(subnet_id=t_constants.MOCK_SUBNET_ID,
network_id=t_constants.MOCK_NETWORK_ID)
fake_lb = data_models.LoadBalancer(id='1', vip=fake_lb_vip,
project_id='test-project')
vip = self.driver.allocate_vip(fake_lb)
exp_create_port_call = {
'port': {
'name': 'octavia-lb-1',
'network_id': t_constants.MOCK_NETWORK_ID,
'device_id': 'lb-1',
'device_owner': allowed_address_pairs.OCTAVIA_OWNER,
'admin_state_up': False,
'project_id': 'test-project',
'fixed_ips': [{'subnet_id': t_constants.MOCK_SUBNET_ID}]
}
}
create_port.assert_called_once_with(exp_create_port_call)
self.assertIsInstance(vip, data_models.Vip)
self.assertEqual(t_constants.MOCK_IP_ADDRESS, vip.ip_address)
self.assertEqual(t_constants.MOCK_SUBNET_ID, vip.subnet_id)
self.assertEqual(t_constants.MOCK_PORT_ID, vip.port_id)
self.assertEqual(fake_lb.id, vip.load_balancer_id)
@mock.patch('octavia.network.drivers.neutron.base.BaseNeutronDriver.'
'_check_extension_enabled', return_value=False)
def test_allocate_vip_when_no_port_provided_tenant(self, mock_check_ext):
port_create_dict = copy.deepcopy(t_constants.MOCK_NEUTRON_PORT)
port_create_dict['port']['device_owner'] = (
allowed_address_pairs.OCTAVIA_OWNER)
port_create_dict['port']['device_id'] = 'lb-1'
create_port = self.driver.neutron_client.create_port
create_port.return_value = port_create_dict
show_subnet = self.driver.neutron_client.show_subnet
show_subnet.return_value = {'subnet': {
'id': t_constants.MOCK_SUBNET_ID,
'network_id': t_constants.MOCK_NETWORK_ID
}}
fake_lb_vip = data_models.Vip(subnet_id=t_constants.MOCK_SUBNET_ID,
network_id=t_constants.MOCK_NETWORK_ID)
fake_lb = data_models.LoadBalancer(id='1', vip=fake_lb_vip,
project_id='test-project')
vip = self.driver.allocate_vip(fake_lb)
exp_create_port_call = {
'port': {
'name': 'octavia-lb-1',
'network_id': t_constants.MOCK_NETWORK_ID,
'device_id': 'lb-1',
'device_owner': allowed_address_pairs.OCTAVIA_OWNER,
'admin_state_up': False,
'tenant_id': 'test-project',
'fixed_ips': [{'subnet_id': t_constants.MOCK_SUBNET_ID}]
}
}
create_port.assert_called_once_with(exp_create_port_call)
self.assertIsInstance(vip, data_models.Vip)
self.assertEqual(t_constants.MOCK_IP_ADDRESS, vip.ip_address)
self.assertEqual(t_constants.MOCK_SUBNET_ID, vip.subnet_id)
self.assertEqual(t_constants.MOCK_PORT_ID, vip.port_id)
self.assertEqual(fake_lb.id, vip.load_balancer_id)
def test_unplug_vip_errors_when_update_port_cant_find_port(self):
lb = dmh.generate_load_balancer_tree()
list_ports = self.driver.neutron_client.list_ports
show_subnet = self.driver.neutron_client.show_subnet
show_subnet.return_value = t_constants.MOCK_SUBNET
port1 = t_constants.MOCK_NEUTRON_PORT['port']
port2 = {
'id': '4', 'network_id': '3', 'fixed_ips':
[{'ip_address': '10.0.0.2'}]
}
list_ports.return_value = {'ports': [port1, port2]}
update_port = self.driver.neutron_client.update_port
update_port.side_effect = neutron_exceptions.PortNotFoundClient
self.assertRaises(network_base.UnplugVIPException,
self.driver.unplug_vip, lb, lb.vip)
def test_unplug_vip_errors_when_update_port_fails(self):
lb = dmh.generate_load_balancer_tree()
show_subnet = self.driver.neutron_client.show_subnet
show_subnet.return_value = t_constants.MOCK_SUBNET
port1 = t_constants.MOCK_NEUTRON_PORT['port']
port2 = {
'id': '4', 'network_id': '3', 'fixed_ips':
[{'ip_address': '10.0.0.2'}]
}
list_ports = self.driver.neutron_client.list_ports
list_ports.return_value = {'ports': [port1, port2]}
update_port = self.driver.neutron_client.update_port
update_port.side_effect = TypeError
self.assertRaises(network_base.UnplugVIPException,
self.driver.unplug_vip, lb, lb.vip)
def test_unplug_vip_errors_when_vip_subnet_not_found(self):
lb = dmh.generate_load_balancer_tree()
show_subnet = self.driver.neutron_client.show_subnet
show_subnet.side_effect = neutron_exceptions.NotFound
self.assertRaises(network_base.PluggedVIPNotFound,
self.driver.unplug_vip, lb, lb.vip)
def test_unplug_vip(self):
lb = dmh.generate_load_balancer_tree()
show_subnet = self.driver.neutron_client.show_subnet
show_subnet.return_value = t_constants.MOCK_SUBNET
update_port = self.driver.neutron_client.update_port
port1 = t_constants.MOCK_NEUTRON_PORT['port']
port2 = {
'id': '4', 'network_id': '3', 'fixed_ips':
[{'ip_address': '10.0.0.2'}]
}
list_ports = self.driver.neutron_client.list_ports
list_ports.return_value = {'ports': [port1, port2]}
get_port = self.driver.neutron_client.get_port
get_port.side_effect = neutron_exceptions.NotFound
self.driver.unplug_vip(lb, lb.vip)
self.assertEqual(len(lb.amphorae), update_port.call_count)
clear_aap = {'port': {'allowed_address_pairs': []}}
update_port.assert_has_calls([mock.call(port1.get('id'), clear_aap),
mock.call(port1.get('id'), clear_aap)])
def test_plug_network_when_compute_instance_cant_be_found(self):
net_id = t_constants.MOCK_NOVA_INTERFACE.net_id
network_attach = self.driver.compute.attach_network_or_port
network_attach.side_effect = nova_exceptions.NotFound(
404, message='Instance not found')
self.assertRaises(network_base.AmphoraNotFound,
self.driver.plug_network,
t_constants.MOCK_COMPUTE_ID, net_id)
def test_plug_network_when_network_cant_be_found(self):
net_id = t_constants.MOCK_NOVA_INTERFACE.net_id
network_attach = self.driver.compute.attach_network_or_port
network_attach.side_effect = nova_exceptions.NotFound(
404, message='Network not found')
self.assertRaises(network_base.NetworkException,
self.driver.plug_network,
t_constants.MOCK_COMPUTE_ID, net_id)
def test_plug_network_when_interface_attach_fails(self):
net_id = t_constants.MOCK_NOVA_INTERFACE.net_id
network_attach = self.driver.compute.attach_network_or_port
network_attach.side_effect = TypeError
self.assertRaises(network_base.PlugNetworkException,
self.driver.plug_network,
t_constants.MOCK_COMPUTE_ID, net_id)
def test_plug_network(self):
net_id = t_constants.MOCK_NOVA_INTERFACE.net_id
network_attach = self.driver.compute.attach_network_or_port
network_attach.return_value = t_constants.MOCK_NOVA_INTERFACE
oct_interface = self.driver.plug_network(
t_constants.MOCK_COMPUTE_ID, net_id)
exp_ips = [fixed_ip.get('ip_address')
for fixed_ip in t_constants.MOCK_NOVA_INTERFACE.fixed_ips]
actual_ips = [fixed_ip.ip_address
for fixed_ip in oct_interface.fixed_ips]
self.assertEqual(exp_ips, actual_ips)
self.assertEqual(t_constants.MOCK_COMPUTE_ID,
oct_interface.compute_id)
self.assertEqual(net_id, oct_interface.network_id)
def test_unplug_network_when_compute_port_cant_be_found(self):
net_id = t_constants.MOCK_NOVA_INTERFACE.net_id
list_ports = self.driver.neutron_client.list_ports
list_ports.return_value = {'ports': []}
self.assertRaises(network_base.NetworkNotFound,
self.driver.unplug_network,
t_constants.MOCK_COMPUTE_ID, net_id)
def test_unplug_network_when_list_ports_fails(self):
net_id = t_constants.MOCK_NOVA_INTERFACE.net_id
list_ports = self.driver.neutron_client.list_ports
list_ports.side_effect = Exception
self.assertRaises(network_base.NetworkException,
self.driver.unplug_network,
t_constants.MOCK_COMPUTE_ID, net_id)
def test_unplug_network(self):
list_ports = self.driver.neutron_client.list_ports
port1 = t_constants.MOCK_NEUTRON_PORT['port']
port2 = {
'id': '4', 'network_id': '3', 'fixed_ips':
[{'ip_address': '10.0.0.2'}]
}
list_ports.return_value = {'ports': [port1, port2]}
port_detach = self.driver.compute.detach_port
self.driver.unplug_network(t_constants.MOCK_COMPUTE_ID,
port2.get('network_id'))
port_detach.assert_called_once_with(
compute_id=t_constants.MOCK_COMPUTE_ID, port_id=port2.get('id'))
def test_update_vip(self):
listeners = [data_models.Listener(protocol_port=80, peer_port=1024,
protocol=constants.PROTOCOL_TCP),
data_models.Listener(protocol_port=443, peer_port=1025,
protocol=constants.PROTOCOL_TCP),
data_models.Listener(protocol_port=50, peer_port=1026,
protocol=constants.PROTOCOL_UDP)]
vip = data_models.Vip(ip_address='10.0.0.2')
lb = data_models.LoadBalancer(id='1', listeners=listeners, vip=vip)
list_sec_grps = self.driver.neutron_client.list_security_groups
list_sec_grps.return_value = {'security_groups': [{'id': 'secgrp-1'}]}
fake_rules = {
'security_group_rules': [
{'id': 'rule-80', 'port_range_max': 80, 'protocol': 'tcp'},
{'id': 'rule-22', 'port_range_max': 22, 'protocol': 'tcp'}
]
}
list_rules = self.driver.neutron_client.list_security_group_rules
list_rules.return_value = fake_rules
delete_rule = self.driver.neutron_client.delete_security_group_rule
create_rule = self.driver.neutron_client.create_security_group_rule
self.driver.update_vip(lb)
delete_rule.assert_called_once_with('rule-22')
expected_create_rule_1 = {
'security_group_rule': {
'security_group_id': 'secgrp-1',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 1024,
'port_range_max': 1024,
'ethertype': 'IPv4'
}
}
expected_create_rule_udp_peer = {
'security_group_rule': {
'security_group_id': 'secgrp-1',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 1026,
'port_range_max': 1026,
'ethertype': 'IPv4'
}
}
expected_create_rule_2 = {
'security_group_rule': {
'security_group_id': 'secgrp-1',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 1025,
'port_range_max': 1025,
'ethertype': 'IPv4'
}
}
expected_create_rule_3 = {
'security_group_rule': {
'security_group_id': 'secgrp-1',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 443,
'port_range_max': 443,
'ethertype': 'IPv4'
}
}
expected_create_rule_udp = {
'security_group_rule': {
'security_group_id': 'secgrp-1',
'direction': 'ingress',
'protocol': 'udp',
'port_range_min': 50,
'port_range_max': 50,
'ethertype': 'IPv4'
}
}
create_rule.assert_has_calls([mock.call(expected_create_rule_1),
mock.call(expected_create_rule_udp_peer),
mock.call(expected_create_rule_2),
mock.call(expected_create_rule_3),
mock.call(expected_create_rule_udp)],
any_order=True)
def test_update_vip_when_listener_deleted(self):
listeners = [data_models.Listener(protocol_port=80,
protocol=constants.PROTOCOL_TCP),
data_models.Listener(
protocol_port=443,
protocol=constants.PROTOCOL_TCP,
provisioning_status=constants.PENDING_DELETE),
data_models.Listener(
protocol_port=50, protocol=constants.PROTOCOL_UDP,
provisioning_status=constants.PENDING_DELETE)]
vip = data_models.Vip(ip_address='10.0.0.2')
lb = data_models.LoadBalancer(id='1', listeners=listeners, vip=vip)
list_sec_grps = self.driver.neutron_client.list_security_groups
list_sec_grps.return_value = {'security_groups': [{'id': 'secgrp-1'}]}
fake_rules = {
'security_group_rules': [
{'id': 'rule-80', 'port_range_max': 80, 'protocol': 'tcp'},
{'id': 'rule-22', 'port_range_max': 443, 'protocol': 'tcp'},
{'id': 'rule-udp-50', 'port_range_max': 50, 'protocol': 'tcp'}
]
}
list_rules = self.driver.neutron_client.list_security_group_rules
list_rules.return_value = fake_rules
delete_rule = self.driver.neutron_client.delete_security_group_rule
create_rule = self.driver.neutron_client.create_security_group_rule
self.driver.update_vip(lb)
delete_rule.assert_has_calls(
[mock.call('rule-22'), mock.call('rule-udp-50')])
self.assertTrue(create_rule.called)
def test_update_vip_when_no_listeners(self):
listeners = []
vip = data_models.Vip(ip_address='10.0.0.2')
lb = data_models.LoadBalancer(id='1', listeners=listeners, vip=vip)
list_sec_grps = self.driver.neutron_client.list_security_groups
list_sec_grps.return_value = {'security_groups': [{'id': 'secgrp-1'}]}
fake_rules = {
'security_group_rules': [
{'id': 'all-egress', 'protocol': None, 'direction': 'egress'},
{'id': 'ssh-rule', 'protocol': 'tcp', 'port_range_max': 22}
]
}
list_rules = self.driver.neutron_client.list_security_group_rules
list_rules.return_value = fake_rules
delete_rule = self.driver.neutron_client.delete_security_group_rule
self.driver.update_vip(lb)
delete_rule.assert_called_once_with('ssh-rule')
def test_update_vip_when_security_group_rule_deleted(self):
listeners = []
vip = data_models.Vip(ip_address='10.0.0.2')
lb = data_models.LoadBalancer(id='1', listeners=listeners, vip=vip)
list_sec_grps = self.driver.neutron_client.list_security_groups
list_sec_grps.return_value = {'security_groups': [{'id': 'secgrp-1'}]}
fake_rules = {
'security_group_rules': [
{'id': 'all-egress', 'protocol': None, 'direction': 'egress'},
{'id': 'ssh-rule', 'protocol': 'tcp', 'port_range_max': 22}
]
}
list_rules = self.driver.neutron_client.list_security_group_rules
list_rules.return_value = fake_rules
delete_rule = self.driver.neutron_client.delete_security_group_rule
delete_rule.side_effect = neutron_exceptions.NotFound
self.driver.update_vip(lb)
delete_rule.assert_called_once_with('ssh-rule')
def test_update_vip_when_security_group_missing(self):
listeners = []
vip = data_models.Vip(ip_address='10.0.0.2')
lb = data_models.LoadBalancer(id='1', listeners=listeners, vip=vip)
list_sec_grps = self.driver.neutron_client.list_security_groups
list_sec_grps.return_value = {'security_groups': []}
self.assertRaises(exceptions.MissingVIPSecurityGroup,
self.driver.update_vip,
lb)
@mock.patch('octavia.network.drivers.neutron.allowed_address_pairs.'
'AllowedAddressPairsDriver._update_security_group_rules')
def test_update_vip_for_delete_when_security_group_missing(self,
update_rules):
listeners = []
vip = data_models.Vip(ip_address='10.0.0.2')
lb = data_models.LoadBalancer(id='1', listeners=listeners, vip=vip)
list_sec_grps = self.driver.neutron_client.list_security_groups
list_sec_grps.return_value = {'security_groups': []}
self.driver.update_vip(lb, for_delete=True)
update_rules.assert_not_called()
def test_failover_preparation(self):
original_dns_integration_state = self.driver.dns_integration_enabled
self.driver.dns_integration_enabled = False
ports = {"ports": [
{"fixed_ips": [{"subnet_id": self.SUBNET_ID_1,
"ip_address": self.IP_ADDRESS_1}],
"id": self.FIXED_IP_ID_1, "network_id": self.NETWORK_ID_1},
{"fixed_ips": [{"subnet_id": self.SUBNET_ID_2,
"ip_address": self.IP_ADDRESS_2}],
"id": self.FIXED_IP_ID_2, "network_id": self.NETWORK_ID_2}]}
self.driver.neutron_client.list_ports.return_value = ports
self.driver.neutron_client.show_port = mock.Mock(
side_effect=self._failover_show_port_side_effect)
port_update = self.driver.neutron_client.update_port
amphora = data_models.Amphora(
id=self.AMPHORA_ID, load_balancer_id=self.LB_ID,
compute_id=self.COMPUTE_ID, status=self.ACTIVE,
lb_network_ip=self.LB_NET_IP, ha_port_id=self.HA_PORT_ID,
ha_ip=self.HA_IP)
self.driver.failover_preparation(amphora)
self.assertFalse(port_update.called)
self.driver.dns_integration_enabled = original_dns_integration_state
def test_failover_preparation_dns_integration(self):
ports = {"ports": [
{"fixed_ips": [{"subnet_id": self.SUBNET_ID_1,
"ip_address": self.IP_ADDRESS_1}],
"id": self.FIXED_IP_ID_1, "network_id": self.NETWORK_ID_1},
{"fixed_ips": [{"subnet_id": self.SUBNET_ID_2,
"ip_address": self.IP_ADDRESS_2}],
"id": self.FIXED_IP_ID_2, "network_id": self.NETWORK_ID_2}]}
original_dns_integration_state = self.driver.dns_integration_enabled
self.driver.dns_integration_enabled = True
self.driver.neutron_client.list_ports.return_value = ports
self.driver.neutron_client.show_port = mock.Mock(
side_effect=self._failover_show_port_side_effect)
port_update = self.driver.neutron_client.update_port
amphora = data_models.Amphora(
id=self.AMPHORA_ID, load_balancer_id=self.LB_ID,
compute_id=self.COMPUTE_ID, status=self.ACTIVE,
lb_network_ip=self.LB_NET_IP, ha_port_id=self.HA_PORT_ID,
ha_ip=self.HA_IP)
self.driver.failover_preparation(amphora)
port_update.assert_called_once_with(ports['ports'][1].get('id'),
{'port': {'dns_name': ''}})
self.driver.dns_integration_enabled = original_dns_integration_state
def _failover_show_port_side_effect(self, port_id):
if port_id == self.LB_NET_PORT_ID:
return {"fixed_ips": [{"subnet_id": self.SUBNET_ID_1,
"ip_address": self.IP_ADDRESS_1}],
"id": self.FIXED_IP_ID_1, "network_id": self.NETWORK_ID_1}
if port_id == self.HA_PORT_ID:
return {"fixed_ips": [{"subnet_id": self.SUBNET_ID_2,
"ip_address": self.IP_ADDRESS_2}],
"id": self.FIXED_IP_ID_2, "network_id": self.NETWORK_ID_2}
def test_plug_port(self):
port = mock.MagicMock()
port.id = self.PORT_ID
network_attach = self.driver.compute.attach_network_or_port
network_attach.return_value = t_constants.MOCK_NOVA_INTERFACE
amphora = data_models.Amphora(
id=self.AMPHORA_ID, load_balancer_id=self.LB_ID,
compute_id=self.COMPUTE_ID, status=self.ACTIVE,
lb_network_ip=self.LB_NET_IP, ha_port_id=self.HA_PORT_ID,
ha_ip=self.HA_IP)
self.driver.plug_port(amphora, port)
network_attach.assert_called_once_with(compute_id=amphora.compute_id,
network_id=None,
ip_address=None,
port_id=self.PORT_ID)
# NotFound cases
network_attach.side_effect = nova_exceptions.NotFound(
1, message='Instance')
self.assertRaises(network_base.AmphoraNotFound,
self.driver.plug_port,
amphora,
port)
network_attach.side_effect = nova_exceptions.NotFound(
1, message='Network')
self.assertRaises(network_base.NetworkNotFound,
self.driver.plug_port,
amphora,
port)
network_attach.side_effect = nova_exceptions.NotFound(
1, message='bogus')
self.assertRaises(network_base.PlugNetworkException,
self.driver.plug_port,
amphora,
port)
# Already plugged case should not raise an exception
network_attach.side_effect = nova_exceptions.Conflict(1)
self.driver.plug_port(amphora, port)
# Unknown error case
network_attach.side_effect = TypeError
self.assertRaises(network_base.PlugNetworkException,
self.driver.plug_port,
amphora,
port)
def test_get_network_configs(self):
amphora_mock = mock.MagicMock()
load_balancer_mock = mock.MagicMock()
vip_mock = mock.MagicMock()
amphora_mock.status = constants.DELETED
load_balancer_mock.amphorae = [amphora_mock]
show_port = self.driver.neutron_client.show_port
show_port.return_value = t_constants.MOCK_NEUTRON_PORT
fake_subnet = {'subnet': {
'id': t_constants.MOCK_SUBNET_ID,
'gateway_ip': t_constants.MOCK_IP_ADDRESS,
'cidr': t_constants.MOCK_CIDR}}
show_subnet = self.driver.neutron_client.show_subnet
show_subnet.return_value = fake_subnet
configs = self.driver.get_network_configs(load_balancer_mock)
self.assertEqual({}, configs)
vip_mock.port_id = 1
amphora_mock.id = 222
amphora_mock.status = constants.ACTIVE
amphora_mock.vrrp_port_id = 2
amphora_mock.vrrp_ip = "10.0.0.1"
amphora_mock.ha_port_id = 3
amphora_mock.ha_ip = "10.0.0.2"
load_balancer_mock.amphorae = [amphora_mock]
configs = self.driver.get_network_configs(load_balancer_mock)
self.assertEqual(1, len(configs))
config = configs[222]
# TODO(ptoohill): find a way to return different items for multiple
# calls to the same method. Right now every call to show_subnet
# returns the same values, even when a method calls it multiple times
# for different subnets. We should be able to verify that different
# requests get different expected data.
expected_port_id = t_constants.MOCK_NEUTRON_PORT['port']['id']
self.assertEqual(expected_port_id, config.ha_port.id)
self.assertEqual(expected_port_id, config.vrrp_port.id)
expected_subnet_id = fake_subnet['subnet']['id']
self.assertEqual(expected_subnet_id, config.ha_subnet.id)
self.assertEqual(expected_subnet_id, config.vrrp_subnet.id)
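    # A hypothetical way to address the TODO above (sketch only, not used by
    # the tests in this file): unittest.mock accepts a callable side_effect,
    # so show_subnet can build its fake reply from whichever subnet id it is
    # asked for, letting assertions distinguish per-subnet requests.
    def _show_subnet_side_effect(self, subnet_id):
        # Echo the requested id back so different subnets yield different data.
        return {'subnet': {'id': subnet_id,
                           'gateway_ip': t_constants.MOCK_IP_ADDRESS,
                           'cidr': t_constants.MOCK_CIDR}}
    # usage sketch: show_subnet.side_effect = self._show_subnet_side_effect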
@mock.patch('time.sleep')
def test_wait_for_port_detach(self, mock_sleep):
amphora = data_models.Amphora(
id=self.AMPHORA_ID, load_balancer_id=self.LB_ID,
compute_id=self.COMPUTE_ID, status=self.ACTIVE,
lb_network_ip=self.LB_NET_IP, ha_port_id=self.HA_PORT_ID,
ha_ip=self.HA_IP)
ports = {"ports": [
{"fixed_ips": [{"subnet_id": self.SUBNET_ID_1,
"ip_address": self.IP_ADDRESS_1}],
"id": self.FIXED_IP_ID_1, "network_id": self.NETWORK_ID_1},
{"fixed_ips": [{"subnet_id": self.SUBNET_ID_2,
"ip_address": self.IP_ADDRESS_2}],
"id": self.FIXED_IP_ID_2, "network_id": self.NETWORK_ID_2}]}
show_port_1_without_device_id = {"fixed_ips": [
{"subnet_id": self.SUBNET_ID_1, "ip_address": self.IP_ADDRESS_1}],
"id": self.FIXED_IP_ID_1, "network_id": self.NETWORK_ID_1,
"device_id": ''}
show_port_2_with_device_id = {"fixed_ips": [
{"subnet_id": self.SUBNET_ID_2, "ip_address": self.IP_ADDRESS_2}],
"id": self.FIXED_IP_ID_2, "network_id": self.NETWORK_ID_2,
"device_id": self.DEVICE_ID}
show_port_2_without_device_id = {"fixed_ips": [
{"subnet_id": self.SUBNET_ID_2, "ip_address": self.IP_ADDRESS_2}],
"id": self.FIXED_IP_ID_2, "network_id": self.NETWORK_ID_2,
"device_id": None}
self.driver.neutron_client.list_ports.return_value = ports
port_mock = mock.MagicMock()
port_mock.get = mock.Mock(
side_effect=[show_port_1_without_device_id,
show_port_2_with_device_id,
show_port_2_with_device_id,
show_port_2_without_device_id])
self.driver.neutron_client.show_port.return_value = port_mock
self.driver.wait_for_port_detach(amphora)
self.assertEqual(1, mock_sleep.call_count)
@mock.patch('time.time')
@mock.patch('time.sleep')
def test_wait_for_port_detach_timeout(self, mock_sleep, mock_time):
mock_time.side_effect = [1, 2, 6]
conf = oslo_fixture.Config(cfg.CONF)
conf.config(group="networking", port_detach_timeout=5)
amphora = data_models.Amphora(
id=self.AMPHORA_ID, load_balancer_id=self.LB_ID,
compute_id=self.COMPUTE_ID, status=self.ACTIVE,
lb_network_ip=self.LB_NET_IP, ha_port_id=self.HA_PORT_ID,
ha_ip=self.HA_IP)
ports = {"ports": [
{"fixed_ips": [{"subnet_id": self.SUBNET_ID_1,
"ip_address": self.IP_ADDRESS_1}],
"id": self.FIXED_IP_ID_1, "network_id": self.NETWORK_ID_1},
{"fixed_ips": [{"subnet_id": self.SUBNET_ID_2,
"ip_address": self.IP_ADDRESS_2}],
"id": self.FIXED_IP_ID_2, "network_id": self.NETWORK_ID_2}]}
show_port_1_with_device_id = {"fixed_ips": [
{"subnet_id": self.SUBNET_ID_2, "ip_address": self.IP_ADDRESS_2}],
"id": self.FIXED_IP_ID_2, "network_id": self.NETWORK_ID_2,
"device_id": self.DEVICE_ID}
self.driver.neutron_client.list_ports.return_value = ports
port_mock = mock.MagicMock()
port_mock.get = mock.Mock(
return_value=show_port_1_with_device_id)
self.driver.neutron_client.show_port.return_value = port_mock
self.assertRaises(network_base.TimeoutException,
self.driver.wait_for_port_detach,
amphora)
| 47.497143
| 79
| 0.634424
| 6,105
| 49,872
| 4.787551
| 0.061097
| 0.054742
| 0.052929
| 0.071609
| 0.811824
| 0.770973
| 0.738299
| 0.707746
| 0.67473
| 0.660018
| 0
| 0.01238
| 0.271174
| 49,872
| 1,049
| 80
| 47.542421
| 0.791735
| 0.023159
| 0
| 0.606029
| 0
| 0
| 0.079467
| 0.011091
| 0
| 0
| 0
| 0.000953
| 0.087318
| 1
| 0.047817
| false
| 0
| 0.018711
| 0
| 0.090437
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
5d0548637a72123ee6c5ee2b4ec9a20c5d0fd264
| 63
|
py
|
Python
|
wpiformat/wpiformat/test/__init__.py
|
prateekma/styleguide
|
962c6cd6e316b71156d80e5751e8a76a01b60668
|
[
"BSD-3-Clause"
] | 19
|
2018-08-23T06:42:33.000Z
|
2022-01-31T05:09:19.000Z
|
wpiformat/wpiformat/test/__init__.py
|
prateekma/styleguide
|
962c6cd6e316b71156d80e5751e8a76a01b60668
|
[
"BSD-3-Clause"
] | 34
|
2016-08-22T20:38:35.000Z
|
2021-12-20T20:15:39.000Z
|
wpiformat/wpiformat/test/__init__.py
|
prateekma/styleguide
|
962c6cd6e316b71156d80e5751e8a76a01b60668
|
[
"BSD-3-Clause"
] | 12
|
2016-08-19T07:07:58.000Z
|
2021-12-08T06:21:30.000Z
|
import pytest
pytest.register_assert_rewrite("test.tasktest")
| 15.75
| 47
| 0.84127
| 8
| 63
| 6.375
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.063492
| 63
| 3
| 48
| 21
| 0.864407
| 0
| 0
| 0
| 0
| 0
| 0.206349
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
5d34639467c1896ea95c4a819c169800445ee5c3
| 108
|
py
|
Python
|
test/test_cover.py
|
NicolasAbroad/wnscraper
|
87d5aa8e3a26aa0846a289d378848e1eb1d13304
|
[
"Apache-2.0"
] | null | null | null |
test/test_cover.py
|
NicolasAbroad/wnscraper
|
87d5aa8e3a26aa0846a289d378848e1eb1d13304
|
[
"Apache-2.0"
] | null | null | null |
test/test_cover.py
|
NicolasAbroad/wnscraper
|
87d5aa8e3a26aa0846a289d378848e1eb1d13304
|
[
"Apache-2.0"
] | null | null | null |
from unittest import TestCase
class TestCoverGeneration(TestCase):
def test_cover(self):
pass
| 15.428571
| 36
| 0.731481
| 12
| 108
| 6.5
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.212963
| 108
| 6
| 37
| 18
| 0.917647
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0.25
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
5d5f5dded1ca55810114fc5874e044aa5a04d3f1
| 52
|
py
|
Python
|
kivymd/uix/pickers/colorpicker/__init__.py
|
marvelous-benji/KivyMD
|
4ab8dd339902597eaa9f8a4f9a80d8a6eb7d6053
|
[
"MIT"
] | 1,111
|
2015-07-15T02:31:09.000Z
|
2022-03-29T17:22:02.000Z
|
kivymd/uix/pickers/colorpicker/__init__.py
|
marvelous-benji/KivyMD
|
4ab8dd339902597eaa9f8a4f9a80d8a6eb7d6053
|
[
"MIT"
] | 706
|
2015-06-10T22:24:13.000Z
|
2022-03-31T16:22:39.000Z
|
kivymd/uix/pickers/colorpicker/__init__.py
|
marvelous-benji/KivyMD
|
4ab8dd339902597eaa9f8a4f9a80d8a6eb7d6053
|
[
"MIT"
] | 561
|
2015-07-15T04:57:23.000Z
|
2022-03-31T17:14:31.000Z
|
from .colorpicker import MDColorPicker # NOQA F401
| 26
| 51
| 0.807692
| 6
| 52
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068182
| 0.153846
| 52
| 1
| 52
| 52
| 0.886364
| 0.173077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
5d70d76958a440aaf73c049d1471abebb73bf4f7
| 9,242
|
py
|
Python
|
backends/ubpf/tests/ptf/ipv4_actions_test.py
|
anasyrmia/p4c-1
|
2bf2f615fdaaf4efed1f2f8ab0b3f3261cface60
|
[
"Apache-2.0"
] | 487
|
2016-12-22T03:33:27.000Z
|
2022-03-29T06:36:45.000Z
|
backends/ubpf/tests/ptf/ipv4_actions_test.py
|
anasyrmia/p4c-1
|
2bf2f615fdaaf4efed1f2f8ab0b3f3261cface60
|
[
"Apache-2.0"
] | 2,114
|
2016-12-18T11:36:27.000Z
|
2022-03-31T22:33:23.000Z
|
backends/ubpf/tests/ptf/ipv4_actions_test.py
|
anasyrmia/p4c-1
|
2bf2f615fdaaf4efed1f2f8ab0b3f3261cface60
|
[
"Apache-2.0"
] | 456
|
2016-12-20T14:01:11.000Z
|
2022-03-30T19:26:05.000Z
|
#!/usr/bin/env python
# Copyright 2019 Orange
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ptf.mask import Mask
from ptf.packet import TCP, IP, Ether
from base_test import P4rtOVSBaseTest
from ptf.testutils import send_packet, verify_packets, simple_ip_packet
class Ipv4Test(P4rtOVSBaseTest):
def setUp(self):
P4rtOVSBaseTest.setUp(self)
self.del_flows()
self.unload_bpf_program()
self.load_bpf_program(path_to_program="build/test-ipv4-actions.o")
self.add_bpf_prog_flow(1,2)
self.add_bpf_prog_flow(2,1)
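# Reading of the map entries used by the subclasses below (inferred, not
# confirmed by the original source): the key "1 1 168 192" appears to be the
# IPv4 source address 192.168.1.1 written as little-endian bytes, e.g.
#     ' '.join(str(b) for b in reversed([192, 168, 1, 1]))  # -> "1 1 168 192"
# and the first four bytes of each value appear to select the action id
# (0 = set version, 1 = set IHL, 2 = set diffserv, ...), with the remaining
# bytes carrying that action's parameters.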
class Ipv4SetVersionTest(Ipv4Test):
def setUp(self):
Ipv4Test.setUp(self)
self.update_bpf_map(map_id=0, key="1 1 168 192", value="0 0 0 0 5 0 0 0 0 0 0 0")
def runTest(self):
pkt = Ether() / IP(src="192.168.1.1", version=4) / TCP() / "Ala a un chat"
exp_pkt = Ether() / IP(src="192.168.1.1", version=5) / TCP() / "Ala a un chat"
mask = Mask(exp_pkt)
mask.set_do_not_care_scapy(IP, 'chksum')
send_packet(self, (0, 1), pkt)
verify_packets(self, mask, device_number=0, ports=[2])
class Ipv4SetIhlTest(Ipv4Test):
def setUp(self):
Ipv4Test.setUp(self)
self.update_bpf_map(map_id=0, key="1 1 168 192", value="1 0 0 0 15 0 0 0 0 0 0 0")
def runTest(self):
pkt = simple_ip_packet(ip_src="192.168.1.1", ip_ihl=10)
exp_pkt = simple_ip_packet(ip_src="192.168.1.1", ip_ihl=15)
mask = Mask(exp_pkt)
mask.set_do_not_care_scapy(IP, 'chksum')
send_packet(self, (0, 1), pkt)
verify_packets(self, mask, device_number=0, ports=[2])
class Ipv4SetDiffservTest(Ipv4Test):
def setUp(self):
Ipv4Test.setUp(self)
self.update_bpf_map(map_id=0, key="1 1 168 192", value="2 0 0 0 255 0 0 0 0 0 0 0")
def runTest(self):
pkt = simple_ip_packet(ip_src="192.168.1.1", ip_tos=10)
exp_pkt = simple_ip_packet(ip_src="192.168.1.1", ip_tos=255)
mask = Mask(exp_pkt)
mask.set_do_not_care_scapy(IP, 'chksum')
send_packet(self, (0, 1), pkt)
verify_packets(self, mask, device_number=0, ports=[2])
class Ipv4SetIdentificationTest(Ipv4Test):
def setUp(self):
Ipv4Test.setUp(self)
self.update_bpf_map(map_id=0, key="1 1 168 192", value="3 0 0 0 211 0 0 0 0 0 0 0")
def runTest(self):
pkt = simple_ip_packet(ip_src="192.168.1.1", ip_id=10)
exp_pkt = simple_ip_packet(ip_src="192.168.1.1", ip_id=211)
mask = Mask(exp_pkt)
mask.set_do_not_care_scapy(IP, 'chksum')
send_packet(self, (0, 1), pkt)
verify_packets(self, mask, device_number=0, ports=[2])
class Ipv4SetFlagsTest(Ipv4Test):
def setUp(self):
Ipv4Test.setUp(self)
self.update_bpf_map(map_id=0, key="1 1 168 192", value="4 0 0 0 7 0 0 0 0 0 0 0")
def runTest(self):
pkt = Ether() / IP(src="192.168.1.1", flags=0) / TCP() / "Ala a un chat"
exp_pkt = Ether() / IP(src="192.168.1.1", flags=7) / TCP() / "Ala a un chat"
mask = Mask(exp_pkt)
mask.set_do_not_care_scapy(IP, 'chksum')
send_packet(self, (0, 1), pkt)
verify_packets(self, mask, device_number=0, ports=[2])
class Ipv4SetFragOffsetTest(Ipv4Test):
def setUp(self):
Ipv4Test.setUp(self)
self.update_bpf_map(map_id=0, key="1 1 168 192", value="5 0 0 0 13 0 0 0 0 0 0 0")
def runTest(self):
pkt = Ether() / IP(src="192.168.1.1", frag=0) / TCP() / "Ala ma kota"
exp_pkt = Ether() / IP(src="192.168.1.1", frag=13) / TCP() / "Ala ma kota"
mask = Mask(exp_pkt)
mask.set_do_not_care_scapy(IP, 'chksum')
send_packet(self, (0, 1), pkt)
verify_packets(self, mask, device_number=0, ports=[2])
class Ipv4SetTtlTest(Ipv4Test):
def setUp(self):
Ipv4Test.setUp(self)
self.update_bpf_map(map_id=0, key="1 1 168 192", value="6 0 0 0 60 0 0 0 0 0 0 0")
def runTest(self):
pkt = simple_ip_packet(ip_src="192.168.1.1", ip_ttl=64)
exp_pkt = simple_ip_packet(ip_src="192.168.1.1", ip_ttl=60)
mask = Mask(exp_pkt)
mask.set_do_not_care_scapy(IP, 'chksum')
send_packet(self, (0, 1), pkt)
verify_packets(self, mask, device_number=0, ports=[2])
class Ipv4SetProtocolTest(Ipv4Test):
def setUp(self):
Ipv4Test.setUp(self)
self.update_bpf_map(map_id=0, key="1 1 168 192", value="7 0 0 0 55 0 0 0 0 0 0 0")
def runTest(self):
pkt = Ether() / IP(src="192.168.1.1") / TCP() / "Ala ma kota"
exp_pkt = Ether() / IP(src="192.168.1.1", proto=55) / TCP() / "Ala ma kota"
mask = Mask(exp_pkt)
mask.set_do_not_care_scapy(IP, 'chksum')
send_packet(self, (0, 1), pkt)
verify_packets(self, mask, device_number=0, ports=[2])
class Ipv4SetSrcTest(Ipv4Test):
def setUp(self):
Ipv4Test.setUp(self)
self.update_bpf_map(map_id=0, key="1 1 168 192", value="8 0 0 0 2 2 168 192 0 0 0 0")
def runTest(self):
pkt = simple_ip_packet(ip_src="192.168.1.1")
exp_pkt = simple_ip_packet(ip_src="192.168.2.2")
mask = Mask(exp_pkt)
mask.set_do_not_care_scapy(IP, 'chksum')
send_packet(self, (0, 1), pkt)
verify_packets(self, mask, device_number=0, ports=[2])
class Ipv4SetDstTest(Ipv4Test):
def setUp(self):
Ipv4Test.setUp(self)
self.update_bpf_map(map_id=0, key="1 1 168 192", value="9 0 0 0 2 2 168 192 0 0 0 0")
def runTest(self):
pkt = simple_ip_packet(ip_src="192.168.1.1", ip_dst="192.168.1.2")
exp_pkt = simple_ip_packet(ip_src="192.168.1.1", ip_dst="192.168.2.2")
mask = Mask(exp_pkt)
mask.set_do_not_care_scapy(IP, 'chksum')
send_packet(self, (0, 1), pkt)
verify_packets(self, mask, device_number=0, ports=[2])
class Ipv4SetSrcDstTest(Ipv4Test):
def setUp(self):
Ipv4Test.setUp(self)
self.update_bpf_map(map_id=0, key="1 1 168 192", value="10 0 0 0 10 10 10 10 10 10 10 10")
def runTest(self):
pkt = simple_ip_packet(ip_src="192.168.1.1", ip_dst="192.168.1.2")
exp_pkt = simple_ip_packet(ip_src="10.10.10.10", ip_dst="10.10.10.10")
mask = Mask(exp_pkt)
mask.set_do_not_care_scapy(IP, 'chksum')
send_packet(self, (0, 1), pkt)
verify_packets(self, mask, device_number=0, ports=[2])
class Ipv4SetIhlDiffservTest(Ipv4Test):
def setUp(self):
Ipv4Test.setUp(self)
self.update_bpf_map(map_id=0, key="1 1 168 192", value="11 0 0 0 15 26 0 0 0 0 0 0")
def runTest(self):
pkt = simple_ip_packet(ip_src="192.168.1.1", ip_ihl=10, ip_tos=0)
exp_pkt = simple_ip_packet(ip_src="192.168.1.1", ip_ihl=15, ip_tos=26)
mask = Mask(exp_pkt)
mask.set_do_not_care_scapy(IP, 'chksum')
send_packet(self, (0, 1), pkt)
verify_packets(self, mask, device_number=0, ports=[2])
class Ipv4SetFragmentOffsetFlagTest(Ipv4Test):
def setUp(self):
Ipv4Test.setUp(self)
self.update_bpf_map(map_id=0, key="1 1 168 192", value="12 0 0 0 13 0 7 0 0 0 0 0")
def runTest(self):
pkt = Ether() / IP(src="192.168.1.1", frag=0, flags=0) / TCP() / "Ala ma kota"
exp_pkt = Ether() / IP(src="192.168.1.1", frag=13, flags=7) / TCP() / "Ala ma kota"
mask = Mask(exp_pkt)
mask.set_do_not_care_scapy(IP, 'chksum')
send_packet(self, (0, 1), pkt)
verify_packets(self, mask, device_number=0, ports=[2])
class Ipv4SetFlagsTtlTest(Ipv4Test):
def setUp(self):
Ipv4Test.setUp(self)
self.update_bpf_map(map_id=0, key="1 1 168 192", value="13 0 0 0 7 50 0 0 0 0 0 0")
def runTest(self):
pkt = Ether() / IP(src="192.168.1.1", flags=0, ttl=64) / TCP() / "Ala ma kota"
exp_pkt = Ether() / IP(src="192.168.1.1", flags=7, ttl=50) / TCP() / "Ala ma kota"
mask = Mask(exp_pkt)
mask.set_do_not_care_scapy(IP, 'chksum')
send_packet(self, (0, 1), pkt)
verify_packets(self, mask, device_number=0, ports=[2])
class Ipv4SetFragOffsetSrcTest(Ipv4Test):
def setUp(self):
Ipv4Test.setUp(self)
self.update_bpf_map(map_id=0, key="1 1 168 192", value="14 0 0 0 255 31 0 0 255 255 255 255")
def runTest(self):
pkt = Ether() / IP(src="192.168.1.1", frag=0) / TCP() / "Ala ma kota"
exp_pkt = Ether() / IP(src="255.255.255.255", frag=8191) / TCP() / "Ala ma kota"
mask = Mask(exp_pkt)
mask.set_do_not_care_scapy(IP, 'chksum')
mask.set_do_not_care_scapy(TCP, 'chksum')
send_packet(self, (0, 1), pkt)
verify_packets(self, mask, device_number=0, ports=[2])
| 30.301639
| 101
| 0.623134
| 1,572
| 9,242
| 3.494275
| 0.108142
| 0.03641
| 0.038777
| 0.031313
| 0.777535
| 0.769161
| 0.765338
| 0.761697
| 0.761697
| 0.754597
| 0
| 0.114241
| 0.235663
| 9,242
| 304
| 102
| 30.401316
| 0.663364
| 0.061026
| 0
| 0.617978
| 0
| 0
| 0.140235
| 0.002886
| 0
| 0
| 0
| 0
| 0
| 1
| 0.174157
| false
| 0
| 0.022472
| 0
| 0.286517
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
538411c050ddde830cdd9c0c658ed57c0d8116ee
| 106
|
py
|
Python
|
route/file.py
|
dubovinszky/route-calculator
|
e2da6e351a25fcf4ebf98dc05b1d651ed291b7e8
|
[
"MIT"
] | null | null | null |
route/file.py
|
dubovinszky/route-calculator
|
e2da6e351a25fcf4ebf98dc05b1d651ed291b7e8
|
[
"MIT"
] | null | null | null |
route/file.py
|
dubovinszky/route-calculator
|
e2da6e351a25fcf4ebf98dc05b1d651ed291b7e8
|
[
"MIT"
] | null | null | null |
def get_file_contents(file_path):
with open(file_path, 'r') as content:
return content.read()
| 26.5
| 41
| 0.688679
| 16
| 106
| 4.3125
| 0.75
| 0.231884
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.198113
| 106
| 3
| 42
| 35.333333
| 0.811765
| 0
| 0
| 0
| 0
| 0
| 0.009434
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
539ece508c5d9bf18f86fbbe46df243f8857393d
| 67
|
py
|
Python
|
code/__init__.py
|
nju-websoft/DRESSED
|
52d22630213ea1389c3f3663812afde1b833c328
|
[
"Apache-2.0"
] | 9
|
2019-12-06T15:05:54.000Z
|
2022-03-11T06:25:54.000Z
|
code/__init__.py
|
nju-websoft/DRESSED
|
52d22630213ea1389c3f3663812afde1b833c328
|
[
"Apache-2.0"
] | null | null | null |
code/__init__.py
|
nju-websoft/DRESSED
|
52d22630213ea1389c3f3663812afde1b833c328
|
[
"Apache-2.0"
] | 5
|
2020-03-18T15:11:09.000Z
|
2022-03-11T06:25:57.000Z
|
'''
@file: __init__.py.py
@author: qxLiu
@time: 2020/3/14 9:37
'''
| 11.166667
| 21
| 0.61194
| 12
| 67
| 3.083333
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.172414
| 0.134328
| 67
| 5
| 22
| 13.4
| 0.465517
| 0.865672
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
53b7abacea5bf83e18f9b16489c5fd5f74b2de38
| 110
|
py
|
Python
|
project/cloudmesh/cluster/providers/kubernetes.py
|
cybertraining-dsc/fa19-516-153
|
e6e3952a74f8a711221ea8e1c461567526267d51
|
[
"Apache-2.0"
] | null | null | null |
project/cloudmesh/cluster/providers/kubernetes.py
|
cybertraining-dsc/fa19-516-153
|
e6e3952a74f8a711221ea8e1c461567526267d51
|
[
"Apache-2.0"
] | null | null | null |
project/cloudmesh/cluster/providers/kubernetes.py
|
cybertraining-dsc/fa19-516-153
|
e6e3952a74f8a711221ea8e1c461567526267d51
|
[
"Apache-2.0"
] | 1
|
2019-09-20T02:13:45.000Z
|
2019-09-20T02:13:45.000Z
|
from .meta_cluster import MetaCluster
class KubernetesCluster(metaclass=MetaCluster):
def add(self):
pass
| 18.333333
| 47
| 0.809091
| 13
| 110
| 6.769231
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.118182
| 110
| 6
| 48
| 18.333333
| 0.907216
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0.25
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
53edc9b4ed1c50ded5cb107d5d5785efa2cd75be
| 162
|
py
|
Python
|
imagepy/core/roi/__init__.py
|
BioinfoTongLI/imagepy
|
b86f33f20e872ee8b86471a9ddfbd5ad064fd64d
|
[
"BSD-4-Clause"
] | 2
|
2020-04-17T12:57:55.000Z
|
2020-04-17T12:57:57.000Z
|
imagepy/core/roi/__init__.py
|
BioinfoTongLI/imagepy
|
b86f33f20e872ee8b86471a9ddfbd5ad064fd64d
|
[
"BSD-4-Clause"
] | null | null | null |
imagepy/core/roi/__init__.py
|
BioinfoTongLI/imagepy
|
b86f33f20e872ee8b86471a9ddfbd5ad064fd64d
|
[
"BSD-4-Clause"
] | null | null | null |
from .lineroi import LineRoi
from .ovalroi import OvalRoi
from .pointroi import PointRoi
from .polygonroi import PolygonRoi
from .rectangleroi import RectangleRoi
| 32.4
| 38
| 0.851852
| 20
| 162
| 6.9
| 0.35
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117284
| 162
| 5
| 38
| 32.4
| 0.965035
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
53f861359ac0ab1c41de871cc265263a28577a95
| 46
|
py
|
Python
|
ML.1/src/lab3.py
|
jfnavarro/old_python_courses
|
fb500e8eeae6c5d10bf77e1ff52725627527222a
|
[
"MIT"
] | 1
|
2018-02-20T03:26:35.000Z
|
2018-02-20T03:26:35.000Z
|
ML.1/src/lab3.py
|
jfnavarro/BioInfo_ML_courses
|
fb500e8eeae6c5d10bf77e1ff52725627527222a
|
[
"MIT"
] | null | null | null |
ML.1/src/lab3.py
|
jfnavarro/BioInfo_ML_courses
|
fb500e8eeae6c5d10bf77e1ff52725627527222a
|
[
"MIT"
] | null | null | null |
'''
Created on Oct 1, 2011
@author: jose
'''
| 7.666667
| 22
| 0.586957
| 7
| 46
| 3.857143
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.138889
| 0.217391
| 46
| 5
| 23
| 9.2
| 0.611111
| 0.804348
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
54de6899bf45aff4c7554c74bbd7016c3880c7c0
| 75
|
py
|
Python
|
init_db.py
|
Armalon/BaseFlask
|
53679da1099cc2bfc310905651fa7fa9fa77ff35
|
[
"MIT"
] | null | null | null |
init_db.py
|
Armalon/BaseFlask
|
53679da1099cc2bfc310905651fa7fa9fa77ff35
|
[
"MIT"
] | 2
|
2021-10-06T19:53:48.000Z
|
2022-02-13T17:56:22.000Z
|
init_db.py
|
Armalon/BaseFlask
|
53679da1099cc2bfc310905651fa7fa9fa77ff35
|
[
"MIT"
] | null | null | null |
# Init Database from chat_schema.sql
from server import init_db
init_db()
| 15
| 36
| 0.8
| 13
| 75
| 4.384615
| 0.692308
| 0.210526
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.146667
| 75
| 4
| 37
| 18.75
| 0.890625
| 0.453333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
070d76276f82b26642f6f27d090a142d69125400
| 1,065
|
py
|
Python
|
ucb_cs61A/lab/lab12/tests/smallest-int.py
|
tavaresdong/courses-notes
|
7fb89103bca679f5ef9b14cbc777152daac1402e
|
[
"MIT"
] | null | null | null |
ucb_cs61A/lab/lab12/tests/smallest-int.py
|
tavaresdong/courses-notes
|
7fb89103bca679f5ef9b14cbc777152daac1402e
|
[
"MIT"
] | 1
|
2017-07-31T08:15:26.000Z
|
2017-07-31T08:15:26.000Z
|
ucb_cs61A/lab/lab12/tests/smallest-int.py
|
tavaresdong/courses-notes
|
7fb89103bca679f5ef9b14cbc777152daac1402e
|
[
"MIT"
] | 1
|
2019-10-06T16:52:31.000Z
|
2019-10-06T16:52:31.000Z
|
test = {
'name': 'smallest-int',
'points': 0,
'suites': [
{
'cases': [
{
'code': r"""
sqlite> SELECT * FROM smallest_int;
11/11/2015 10:01:03|7
11/11/2015 13:53:36|7
11/11/2015 14:52:07|7
11/11/2015 15:36:00|7
11/11/2015 15:46:03|7
11/11/2015 16:11:56|7
11/11/2015 17:42:09|7
11/11/2015 11:49:59|8
11/12/2015 14:30:09|8
11/11/2015 9:57:49|9
11/11/2015 10:29:15|10
11/11/2015 11:18:22|10
11/11/2015 16:56:15|10
11/11/2015 10:04:51|11
11/11/2015 10:27:47|11
11/11/2015 11:04:43|11
11/11/2015 12:27:14|11
11/11/2015 12:52:33|11
11/11/2015 13:05:03|11
11/11/2015 13:48:29|11
""",
'hidden': False,
'locked': False
}
],
'ordered': False,
'scored': True,
'setup': r"""
sqlite> .read lab12.sql
""",
'teardown': '',
'type': 'sqlite'
}
]
}
| 24.204545
| 45
| 0.43662
| 172
| 1,065
| 2.697674
| 0.366279
| 0.215517
| 0.327586
| 0.135776
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0.482972
| 0.393427
| 1,065
| 44
| 46
| 24.204545
| 0.235294
| 0
| 0
| 0.045455
| 0
| 0
| 0.776735
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
071b1c765565478df1dd406b50fb44be86335821
| 31
|
py
|
Python
|
socfaker/__init__.py
|
priamai/soc-faker
|
51b587f0cec52212136905280406e915006d2afc
|
[
"MIT"
] | 122
|
2020-02-21T16:06:54.000Z
|
2022-03-21T13:53:03.000Z
|
socfaker/__init__.py
|
priamai/soc-faker
|
51b587f0cec52212136905280406e915006d2afc
|
[
"MIT"
] | 13
|
2020-01-29T16:37:05.000Z
|
2022-01-27T21:30:10.000Z
|
socfaker/__init__.py
|
priamai/soc-faker
|
51b587f0cec52212136905280406e915006d2afc
|
[
"MIT"
] | 20
|
2020-04-10T11:59:29.000Z
|
2022-02-10T09:20:26.000Z
|
from .socfaker import SocFaker
| 15.5
| 30
| 0.83871
| 4
| 31
| 6.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 31
| 1
| 31
| 31
| 0.962963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
07487c7a0365aa5c7fa0fd09d78c5b10eed24166
| 134
|
py
|
Python
|
robosuite/devices/__init__.py
|
clj003/mod_surreal2
|
1c91ed1d85acdb8b82dae46c49153d547301d2d9
|
[
"MIT"
] | null | null | null |
robosuite/devices/__init__.py
|
clj003/mod_surreal2
|
1c91ed1d85acdb8b82dae46c49153d547301d2d9
|
[
"MIT"
] | null | null | null |
robosuite/devices/__init__.py
|
clj003/mod_surreal2
|
1c91ed1d85acdb8b82dae46c49153d547301d2d9
|
[
"MIT"
] | null | null | null |
# Takes away spacemouse since not a mac
from .device import Device
from .keyboard import Keyboard
#from .spacemouse import SpaceMouse
| 26.8
| 39
| 0.813433
| 19
| 134
| 5.736842
| 0.578947
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.149254
| 134
| 4
| 40
| 33.5
| 0.95614
| 0.529851
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ab2236b24a74fa00d1828dadec89603cf1ec0fc7
| 145
|
py
|
Python
|
Feature/Feature.py
|
NunoXu/UnbabelChallenge2016
|
680131bb80e1cb0a8d52033b3e75d0ed0e0eb70a
|
[
"MIT"
] | null | null | null |
Feature/Feature.py
|
NunoXu/UnbabelChallenge2016
|
680131bb80e1cb0a8d52033b3e75d0ed0e0eb70a
|
[
"MIT"
] | 5
|
2021-03-31T18:23:08.000Z
|
2022-01-13T00:38:59.000Z
|
Feature/Feature.py
|
NunoXu/UnbabelChallenge2016
|
680131bb80e1cb0a8d52033b3e75d0ed0e0eb70a
|
[
"MIT"
] | null | null | null |
from abc import ABCMeta, abstractmethod
class Feature(metaclass=ABCMeta):
@abstractmethod
def evaluate(self, sentence):
pass
| 14.5
| 39
| 0.710345
| 15
| 145
| 6.866667
| 0.866667
| 0.407767
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.22069
| 145
| 9
| 40
| 16.111111
| 0.911504
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0.2
| 0.2
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
ab2fb75563fa16e8f35357f6b2a5ff4c87cfa2a9
| 83
|
py
|
Python
|
src/kernel-graphql/riotapi/views.py
|
pseudonym117/kernel-graphql
|
0c0fc05ca84c525f1515953d77d853455db14fb1
|
[
"MIT"
] | 1
|
2021-03-17T16:35:09.000Z
|
2021-03-17T16:35:09.000Z
|
src/kernel-graphql/riotapi/views.py
|
pseudonym117/kernel-graphql
|
0c0fc05ca84c525f1515953d77d853455db14fb1
|
[
"MIT"
] | 1
|
2021-06-02T00:14:18.000Z
|
2021-06-02T00:14:18.000Z
|
src/kernel-graphql/riotapi/views.py
|
pseudonym117/kernel-graphql
|
0c0fc05ca84c525f1515953d77d853455db14fb1
|
[
"MIT"
] | null | null | null |
from . import riotapi
@riotapi.route('/')
def index():
return 'hello world!'
| 11.857143
| 25
| 0.638554
| 10
| 83
| 5.3
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.192771
| 83
| 6
| 26
| 13.833333
| 0.791045
| 0
| 0
| 0
| 0
| 0
| 0.158537
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.25
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
ab33ac6effdd8027526afc44891f3d0dd3ae6976
| 713
|
py
|
Python
|
test/test_model_statistics.py
|
entn-at/BrnoLM
|
9f8c62523382098809c1c0967f62a67d151eafe0
|
[
"MIT"
] | 17
|
2020-02-04T16:42:40.000Z
|
2021-11-11T14:37:32.000Z
|
test/test_model_statistics.py
|
entn-at/BrnoLM
|
9f8c62523382098809c1c0967f62a67d151eafe0
|
[
"MIT"
] | null | null | null |
test/test_model_statistics.py
|
entn-at/BrnoLM
|
9f8c62523382098809c1c0967f62a67d151eafe0
|
[
"MIT"
] | 4
|
2020-02-04T12:59:04.000Z
|
2021-05-30T14:10:54.000Z
|
from unittest import TestCase
from brnolm.runtime.model_statistics import scaled_int_str
class ScaledIntRepreTests(TestCase):
def test_order_0(self):
self.assertEqual(scaled_int_str(0), '0')
def test_order_1(self):
self.assertEqual(scaled_int_str(10), '10')
def test_order_2(self):
self.assertEqual(scaled_int_str(210), '210')
def test_order_3(self):
self.assertEqual(scaled_int_str(3210), '3.2k')
def test_order_4(self):
self.assertEqual(scaled_int_str(43210), '43.2k')
def test_order_5(self):
self.assertEqual(scaled_int_str(543210), '543.2k')
def test_order_6(self):
self.assertEqual(scaled_int_str(6543210), '6.5M')
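# A minimal reference sketch consistent with the expectations above; the real
# brnolm.runtime.model_statistics.scaled_int_str may be implemented differently.
def _reference_scaled_int_str(value):
    # Render small counts verbatim, thousands with a 'k' suffix, millions with 'M'.
    if value < 1000:
        return str(value)
    if value < 1000000:
        return '{:.1f}k'.format(value / 1000.0)
    return '{:.1f}M'.format(value / 1000000.0)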
| 26.407407
| 58
| 0.695652
| 104
| 713
| 4.471154
| 0.336538
| 0.154839
| 0.206452
| 0.376344
| 0.466667
| 0.466667
| 0
| 0
| 0
| 0
| 0
| 0.089347
| 0.183731
| 713
| 26
| 59
| 27.423077
| 0.709622
| 0
| 0
| 0
| 0
| 0
| 0.035063
| 0
| 0
| 0
| 0
| 0
| 0.411765
| 1
| 0.411765
| false
| 0
| 0.117647
| 0
| 0.588235
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|