hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
57476587984e17ece720d64d289aa21890dba64a
| 3,520
|
py
|
Python
|
ReportGenerator.py
|
taarruunnnn/VAPT-Report-Generator-Vulnerability
|
8d618c7ddac4f6fe0cedd9fa39ff61805e06fa38
|
[
"MIT"
] | 1
|
2020-11-30T18:09:40.000Z
|
2020-11-30T18:09:40.000Z
|
ReportGenerator.py
|
taarruunnnn/VAPT-Report-Generator-Vulnerability
|
8d618c7ddac4f6fe0cedd9fa39ff61805e06fa38
|
[
"MIT"
] | null | null | null |
ReportGenerator.py
|
taarruunnnn/VAPT-Report-Generator-Vulnerability
|
8d618c7ddac4f6fe0cedd9fa39ff61805e06fa38
|
[
"MIT"
] | 1
|
2020-09-16T20:51:18.000Z
|
2020-09-16T20:51:18.000Z
|
import os
from docx import Document
from docx.shared import Inches
from docx import section
from docx.enum.text import WD_ALIGN_PARAGRAPH
from docx.shared import Pt
from docx.shared import Cm
from docx.shared import RGBColor
import docx
class Print_document():
    """Assembles a VAPT (Vulnerability Assessment & Penetration Testing)
    report as a Word document via python-docx.

    Lifecycle: start_doc() (or reinitialize_doc() to resume from the
    temporary 'Temp.docx'), initialize_doc() for page setup, then the
    per-vulnerability set*() methods, and finally Savedoc()/Savereport().
    """
    def start_doc(self):
        """Begin a brand-new, empty document."""
        self.document = Document()
    def reinitialize_doc(self):
        """Reload the in-progress document from the temp file on disk."""
        self.document = Document('Temp.docx')
    def initialize_doc(self):
        """Set page margins, base fonts, and the branded header/footer."""
        for section in self.document.sections:
            # 2.54 cm == 1 inch margin on every side.
            section.top_margin = Cm(2.54)
            section.bottom_margin = Cm(2.54)
            section.left_margin = Cm(2.54)
            section.right_margin = Cm(2.54)
        style = self.document.styles['Normal']
        font = style.font
        font.name = 'Times New Roman'
        font.size = Pt(14)
        style = self.document.styles['Heading 2']
        font1 = style.font
        font1.name = 'TimesNewRoman'
        font1.size = Pt(16)
        # Header: company logo image expected beside the script.
        header = self.document.sections[0].header
        ht0 = header.add_paragraph()
        kh = ht0.add_run()
        kh.add_picture('Pristine.png', width=Inches(2))
        # NOTE(review): python-docx Run objects expose no 'alignment'
        # property; this assignment likely has no visible effect — confirm.
        kh.alignment = WD_ALIGN_PARAGRAPH.LEFT
        footer = self.document.sections[0].footer
        f = footer.add_paragraph('All Rights Reserved by Pristine InfoSolutions Pvt. Ltd.')
        f.alignment = WD_ALIGN_PARAGRAPH.CENTER
        f.style = self.document.styles['Normal']
        # NOTE(review): 'bold'/'size' belong to run fonts, not paragraphs;
        # these assignments likely have no visible effect — confirm.
        f.bold = True
        f.size = Pt(16)
    def setVname(self, Vname):
        """Add the 'Vulnerability Name' heading plus the given name."""
        self.document.add_heading('Vulnerability Name:', 2)
        p = self.document.add_paragraph(Vname)
        p.style = self.document.styles['Normal']
    def setTitle(self):
        """Add the report category label ("Network")."""
        # Bug fix: original read 'self.documeny', which raised
        # AttributeError whenever setTitle() was called.
        self.document.add_paragraph("Network")
    def setVSeverity(self, severity):
        """Add the 'Severity' heading and the severity text."""
        p = self.document.add_heading('Severity', 2)
        p.style = self.document.styles['Heading 2']
        # NOTE(review): bold/size/name are font (run) attributes; setting
        # them on the heading paragraph likely has no effect — confirm.
        p.bold = True
        p.size = Pt(16)
        p.name = 'TimesNewRoman'
        p = self.document.add_paragraph(severity)
        p.style = self.document.styles['Normal']
    def SetVdesc(self, VDesc):
        """Add the 'Vulnerability Description' heading and body text."""
        self.document.add_heading('Vulnerability Description:', 2)
        self.document.add_paragraph(VDesc)
    def setVurl(self, Vurl):
        """Add the 'Vulnerable URL' heading and the URL text."""
        self.document.add_heading('Vulnerable URL: ', 2)
        p = self.document.add_paragraph(Vurl)
        p.style = self.document.styles['Normal']
    def setImg(self, Img):
        """Add the 'Proof of Concept' heading and screenshots.

        Img is expected to be a sequence whose first element is a list of
        image file paths (presumably — confirm against the caller).
        """
        self.document.add_heading('Proof of Concept: ', 2)
        if Img:
            # Iterate the paths directly instead of index arithmetic.
            for image_path in Img[0]:
                self.document.add_picture(image_path, width=Cm(15.95))
    def setImpact(self, VImpact):
        """Add the 'Impact' heading and the impact text."""
        self.document.add_heading('Impact: ', 2)
        p = self.document.add_paragraph(VImpact)
        p.style = self.document.styles['Normal']
    def setVremed(self, Vrem):
        """Add the 'Remediation' heading and the remediation text."""
        self.document.add_heading('Remediation', 2)
        p = self.document.add_paragraph(Vrem)
        p.style = self.document.styles['Normal']
    def setConclusion(self, Conclusion):
        """Add the 'Conclusion' heading and the conclusion text."""
        self.document.add_heading('Conclusion', 2)
        p = self.document.add_paragraph(Conclusion)
        p.style = self.document.styles['Normal']
    def pageBreak(self):
        """Insert a page break."""
        self.document.add_page_break()
    def Savedoc(self, name):
        """Save the final report as '<name[0]>.docx'."""
        self.document.save(name[0] + '.docx')
    def Savereport(self):
        """Save work-in-progress to the temporary 'Temp.docx'."""
        self.document.save('Temp.docx')
| 31.711712
| 92
| 0.609091
| 432
| 3,520
| 4.877315
| 0.266204
| 0.19364
| 0.121025
| 0.10916
| 0.321785
| 0.197437
| 0.093972
| 0
| 0
| 0
| 0
| 0.018039
| 0.275568
| 3,520
| 111
| 93
| 31.711712
| 0.808235
| 0
| 0
| 0.070588
| 0
| 0
| 0.093814
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.176471
| false
| 0
| 0.105882
| 0
| 0.294118
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5750825ae1de9236544f8dff0657979e541dfed6
| 764
|
py
|
Python
|
Season 06 - Files in Python/Episode 02 - Copying Files.py/Episode 02 - Copying Files.py
|
Pythobit/Python-tutorial
|
b0743eaa9c237c3578131ead1b3f2c295f11b7ee
|
[
"MIT"
] | 3
|
2021-02-19T18:33:00.000Z
|
2021-08-03T14:56:50.000Z
|
Season 06 - Files in Python/Episode 02 - Copying Files.py/Episode 02 - Copying Files.py
|
barawalojas/Python-tutorial
|
3f4b2b073e421888b3d62ff634658317d9abcb9b
|
[
"MIT"
] | 1
|
2021-07-10T14:37:57.000Z
|
2021-07-20T09:51:39.000Z
|
Season 06 - Files in Python/Episode 02 - Copying Files.py/Episode 02 - Copying Files.py
|
barawalojas/Python-tutorial
|
3f4b2b073e421888b3d62ff634658317d9abcb9b
|
[
"MIT"
] | 1
|
2021-08-02T05:39:38.000Z
|
2021-08-02T05:39:38.000Z
|
# Copying files
# Ask user for a list of 3 friends.
# for each friend, we'll tell user whether they're nearby.
# for each nearby friend, we'll save their name to `nearby_friends.txt`.
friends = input('Enter three friends name(separated by commas): ').split(',')
# Read the known-nearby people, one name per line. Using `with` guarantees
# the file is closed even on error (the original open()/close() pair leaked
# the handle if an exception occurred mid-read), and iterating the file
# object directly avoids the redundant readlines() list.
with open('people.txt', 'r') as people:
    people_nearby = [line.strip() for line in people]
# Making set of friends and peoples: set intersection yields exactly the
# friends who are also nearby.
friends_set = set(friends)
people_nearby_set = set(people_nearby)
friends_nearby_set = friends_set.intersection(people_nearby_set)
with open('nearby_friends.txt', 'w') as nearby_friends_file:
    for friend in friends_nearby_set:
        print(f'{friend} is nearby.! Meet up with them.')
        nearby_friends_file.write(f'{friend}\n')
| 27.285714
| 77
| 0.743455
| 119
| 764
| 4.605042
| 0.462185
| 0.142336
| 0.093066
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001517
| 0.137435
| 764
| 27
| 78
| 28.296296
| 0.830046
| 0.27356
| 0
| 0
| 0
| 0
| 0.232601
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5750d5afb4b68c06b08670b53610fc887297a148
| 722
|
py
|
Python
|
beginner_contest/167/C.py
|
FGtatsuro/myatcoder
|
25a3123be6a6311e7d1c25394987de3e35575ff4
|
[
"MIT"
] | null | null | null |
beginner_contest/167/C.py
|
FGtatsuro/myatcoder
|
25a3123be6a6311e7d1c25394987de3e35575ff4
|
[
"MIT"
] | null | null | null |
beginner_contest/167/C.py
|
FGtatsuro/myatcoder
|
25a3123be6a6311e7d1c25394987de3e35575ff4
|
[
"MIT"
] | null | null | null |
import sys
input = sys.stdin.readline
sys.setrecursionlimit(10 ** 7)
# AtCoder ABC167 C: pick a subset of the n books so that every one of the
# m skills reaches level >= x, minimising the total price. Brute force over
# all 2**n subsets (n is small).
n, m, x = map(int, input().split())
books = [list(map(int, input().split())) for _ in range(n)]
INF = 10 ** 10
best = INF
# Bit j of `mask` set means book j is EXCLUDED, matching the original's
# convention of subtracting flagged books from the running totals; here we
# equivalently add only the non-flagged books.
for mask in range(1 << n):
    totals = [0] * (m + 1)
    for j in range(n):
        if not mask & (1 << j):
            # totals[0] accumulates cost, totals[1:] the skill levels.
            for k in range(m + 1):
                totals[k] += books[j][k]
    if all(level >= x for level in totals[1:]):
        best = min(best, totals[0])
# INF still in `best` means no subset satisfied every skill.
print(-1 if best == INF else best)
| 21.235294
| 56
| 0.49723
| 128
| 722
| 2.71875
| 0.34375
| 0.100575
| 0.091954
| 0.091954
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.051335
| 0.325485
| 722
| 33
| 57
| 21.878788
| 0.663244
| 0
| 0
| 0
| 0
| 0
| 0.006925
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.033333
| 0
| 0.033333
| 0.066667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
575730cc1be427336b55d40ef3a3e2821b465a72
| 1,210
|
py
|
Python
|
Unit 7/Ai bot/test bots/SlightlySmartSue.py
|
KevinBoxuGao/ICS3UI
|
2091a7c0276b888dd88f2063e6acd6e7ff7fb6fa
|
[
"MIT"
] | null | null | null |
Unit 7/Ai bot/test bots/SlightlySmartSue.py
|
KevinBoxuGao/ICS3UI
|
2091a7c0276b888dd88f2063e6acd6e7ff7fb6fa
|
[
"MIT"
] | null | null | null |
Unit 7/Ai bot/test bots/SlightlySmartSue.py
|
KevinBoxuGao/ICS3UI
|
2091a7c0276b888dd88f2063e6acd6e7ff7fb6fa
|
[
"MIT"
] | 1
|
2020-03-09T16:22:33.000Z
|
2020-03-09T16:22:33.000Z
|
from random import *
#STRATEGY SUMMARY: DON'T DUCK IF THE OPPONENT HAS NO SNOWBALLS. OTHERWISE, PICK RANDOMLY.
def getMove( myScore, mySnowballs, myDucksUsed, myMovesSoFar,
             oppScore, oppSnowballs, oppDucksUsed, oppMovesSoFar ):
    """Pick the next move for the snowball game.

    Strategy: never duck when the opponent has no snowballs; otherwise
    choose randomly among the moves still legal for us.
    """
    # At the 10-snowball cap, throwing is forced.
    if mySnowballs == 10:
        return "THROW"
    if oppSnowballs > 0:
        # Opponent is armed — every legal option is on the table.
        if mySnowballs == 0:
            if myDucksUsed == 5:
                # No ammo and no ducks left: only reloading remains.
                return "RELOAD"
            return choice([ "DUCK", "RELOAD" ])
        if myDucksUsed == 5:
            return choice([ "THROW", "RELOAD" ])
        return choice([ "THROW", "DUCK", "RELOAD" ])
    # Opponent is out of snowballs, so ducking would be wasted.
    if mySnowballs == 0:
        return "RELOAD"
    return choice([ "RELOAD", "THROW" ])
| 31.842105
| 99
| 0.565289
| 139
| 1,210
| 4.920863
| 0.395683
| 0.02924
| 0.030702
| 0.02924
| 0.035088
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011321
| 0.342975
| 1,210
| 37
| 100
| 32.702703
| 0.849057
| 0.356198
| 0
| 0.4
| 0
| 0
| 0.083117
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.05
| 0
| 0.45
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
57580cabba2c7dce9e5d8666af96b5e694af9738
| 5,370
|
py
|
Python
|
pysoa/test/plan/grammar/directives/expects_values.py
|
zetahernandez/pysoa
|
006e55ba877196a42c64f2ff453583d366082d55
|
[
"Apache-2.0"
] | 91
|
2017-05-08T22:41:33.000Z
|
2022-02-09T11:37:07.000Z
|
pysoa/test/plan/grammar/directives/expects_values.py
|
zetahernandez/pysoa
|
006e55ba877196a42c64f2ff453583d366082d55
|
[
"Apache-2.0"
] | 63
|
2017-06-14T20:08:49.000Z
|
2021-06-16T23:08:25.000Z
|
pysoa/test/plan/grammar/directives/expects_values.py
|
zetahernandez/pysoa
|
006e55ba877196a42c64f2ff453583d366082d55
|
[
"Apache-2.0"
] | 26
|
2017-10-13T23:23:13.000Z
|
2022-01-11T16:58:17.000Z
|
"""
Expect action directives
"""
from __future__ import (
absolute_import,
unicode_literals,
)
from pyparsing import (
CaselessLiteral,
LineEnd,
Literal,
Optional,
Suppress,
)
from pysoa.test.plan.grammar.assertions import (
assert_not_expected,
assert_not_present,
assert_subset_structure,
)
from pysoa.test.plan.grammar.data_types import (
DataTypeGrammar,
get_parsed_data_type_value,
)
from pysoa.test.plan.grammar.directive import (
ActionDirective,
VarNameGrammar,
VarValueGrammar,
register_directive,
)
from pysoa.test.plan.grammar.tools import path_put
class ActionExpectsFieldValueDirective(ActionDirective):
    """
    Set expectations for values to be in the service call response.
    Using the ``not`` qualifier in the test will check to make sure that the field has any value other than the one
    specified.
    """
    @classmethod
    def name(cls):
        # Identifier under which this directive is registered/looked up.
        return 'expect_value'
    @classmethod
    def get_full_grammar(cls):
        # Fixture-line grammar:
        #   <base grammar> expect [<data type>]: [not] attribute value: <name>: <value>
        # Literal('not')('not') names the matched token 'not' so that
        # ingest_from_parsed_test_fixture can detect its presence.
        return (
            super(ActionExpectsFieldValueDirective, cls).get_full_grammar() +
            Literal('expect') +
            Optional(DataTypeGrammar) +
            ':' +
            Optional(Literal('not')('not')) +
            Literal('attribute value') +
            ':' +
            VarNameGrammar +
            ':' +
            VarValueGrammar
        )
    def ingest_from_parsed_test_fixture(self, action_case, test_case, parse_results, file_name, line_number):
        # Record the parsed expectation on the action case under
        # 'expects.<variable>' — or 'not_expects.<variable>' when the
        # 'not' qualifier was present on the fixture line.
        variable_name = parse_results.variable_name
        path = 'expects'
        # getattr() is required here because 'not' is a Python keyword, so
        # plain attribute syntax (parse_results.not) would not compile.
        if getattr(parse_results, 'not', None):
            path = 'not_expects'
        path_put(
            action_case,
            '{}.{}'.format(path, variable_name),
            get_parsed_data_type_value(parse_results, parse_results.value),
        )
    def assert_test_case_action_results(
        self,
        action_name,
        action_case,
        test_case,
        test_fixture,
        action_response,
        job_response,
        msg=None,
        **kwargs
    ):
        # Positive expectations: the recorded structure must appear as a
        # subset of the action response body.
        if 'expects' in action_case:
            assert_subset_structure(
                action_case.get('expects', {}),
                action_response.body,
                False,
                msg,
            )
        # Negative ('not') expectations: the recorded values must NOT match.
        if 'not_expects' in action_case:
            assert_not_expected(
                action_case['not_expects'],
                action_response.body,
                msg,
            )
class ActionExpectsAnyDirective(ActionExpectsFieldValueDirective):
    """
    Set expectations for values to be in the service call response where any value for the given data type will be
    accepted.
    """
    @classmethod
    def name(cls):
        return 'expect_any_value'
    @classmethod
    def get_full_grammar(cls):
        # Deliberately calls super() with the PARENT class
        # (ActionExpectsFieldValueDirective) as the first argument, so MRO
        # resolution skips the parent's grammar and builds on the
        # grandparent's base grammar instead — this directive replaces the
        # parent's token sequence wholesale ('expect any ...', no value).
        return (
            super(ActionExpectsFieldValueDirective, cls).get_full_grammar() +
            Literal('expect') +
            Literal('any')('any') +
            Optional(DataTypeGrammar) +
            ':' +
            Literal('attribute value') +
            ':' +
            VarNameGrammar +
            Optional(~Suppress(LineEnd()) + ':')
        )
class ActionExpectsNoneDirective(ActionExpectsFieldValueDirective):
    """
    Set expectations for values to be in the service call response where ``None`` value is expected.
    """
    @classmethod
    def name(cls):
        return 'expect_none'
    @classmethod
    def get_full_grammar(cls):
        # As in ActionExpectsAnyDirective, super() is anchored at the parent
        # class on purpose so the grandparent's base grammar is extended.
        # CaselessLiteral('None')('data_type') captures the 'None' keyword
        # as the data_type token, which downstream parsing treats as a
        # None-valued expectation.
        return (
            super(ActionExpectsFieldValueDirective, cls).get_full_grammar() +
            Literal('expect') +
            CaselessLiteral('None')('data_type') +
            ':' +
            Literal('attribute value') +
            ':' +
            VarNameGrammar +
            Optional(~Suppress(LineEnd()) + ':')
        )
class ActionExpectsNotPresentDirective(ActionDirective):
    """
    Set expectation that the given field will not be present (even as a key) in the response.
    """
    @classmethod
    def name(cls):
        return 'expect_not_present'
    @classmethod
    def get_full_grammar(cls):
        # Fixture-line grammar:
        #   <base grammar> expect not present: attribute value: <name> [:]
        # No value token — only the field name matters for this check.
        return (
            super(ActionExpectsNotPresentDirective, cls).get_full_grammar() +
            Literal('expect not present') +
            ':' +
            Literal('attribute value') +
            ':' +
            VarNameGrammar +
            Optional(~Suppress(LineEnd()) + ':')
        )
    def ingest_from_parsed_test_fixture(self, action_case, test_case, parse_results, file_name, line_number):
        # NOTE(review): the grammar above captures no value token, yet
        # parse_results.value is passed here — presumably it resolves to an
        # empty/placeholder value; confirm against DataTypeGrammar handling.
        path_put(
            action_case,
            'expects_not_present.{}'.format(parse_results.variable_name),
            get_parsed_data_type_value(parse_results, parse_results.value),
        )
    def assert_test_case_action_results(
        self,
        action_name,
        action_case,
        test_case,
        test_fixture,
        action_response,
        job_response,
        msg=None,
        **kwargs
    ):
        # The recorded field paths must be entirely absent (not even
        # present as keys) from the action response body.
        if 'expects_not_present' in action_case:
            assert_not_present(
                action_case['expects_not_present'],
                action_response.body,
                msg,
            )
# Register the directives so the test-plan parser recognizes their grammars.
register_directive(ActionExpectsFieldValueDirective)
register_directive(ActionExpectsAnyDirective)
register_directive(ActionExpectsNoneDirective)
register_directive(ActionExpectsNotPresentDirective)
| 26.716418
| 115
| 0.60298
| 503
| 5,370
| 6.176938
| 0.214712
| 0.038622
| 0.036048
| 0.021886
| 0.550048
| 0.473125
| 0.420985
| 0.402317
| 0.348246
| 0.348246
| 0
| 0
| 0.309125
| 5,370
| 200
| 116
| 26.85
| 0.837466
| 0.096834
| 0
| 0.545455
| 0
| 0
| 0.065409
| 0.004612
| 0
| 0
| 0
| 0
| 0.058442
| 1
| 0.077922
| false
| 0
| 0.045455
| 0.051948
| 0.201299
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
575a4a3127b8298acd5fe22aa043d391fe755667
| 1,821
|
py
|
Python
|
tests/test_qml.py
|
phil65/PrettyQt
|
26327670c46caa039c9bd15cb17a35ef5ad72e6c
|
[
"MIT"
] | 7
|
2019-05-01T01:34:36.000Z
|
2022-03-08T02:24:14.000Z
|
tests/test_qml.py
|
phil65/PrettyQt
|
26327670c46caa039c9bd15cb17a35ef5ad72e6c
|
[
"MIT"
] | 141
|
2019-04-16T11:22:01.000Z
|
2021-04-14T15:12:36.000Z
|
tests/test_qml.py
|
phil65/PrettyQt
|
26327670c46caa039c9bd15cb17a35ef5ad72e6c
|
[
"MIT"
] | 5
|
2019-04-17T11:48:19.000Z
|
2021-11-21T10:30:19.000Z
|
"""Tests for `prettyqt` package."""
import pathlib
import pytest
from prettyqt import core, qml
from prettyqt.utils import InvalidParamError
# def test_jsvalue():
# val = qml.JSValue(2)
# val["test"] = 1
# assert val["test"].toInt() == 1
# assert "test" in val
# assert val.get_value() == 2
def test_jsengine():
    """JSEngine: install the translation extension and eval an empty script."""
    engine = qml.JSEngine()
    engine.install_extensions("translation")
    engine.eval("")
def test_qmlengine():
    """QmlEngine: object-ownership get/set validation and path helpers."""
    engine = qml.QmlEngine()
    obj = core.Object()
    engine.set_object_ownership(obj, "javascript")
    # An unrecognized ownership string must be rejected.
    with pytest.raises(InvalidParamError):
        engine.set_object_ownership(obj, "test")
    assert engine.get_object_ownership(obj) == "javascript"
    # Smoke-test the path accessors with empty paths.
    engine.add_plugin_path("")
    engine.add_import_path("")
    engine.get_plugin_paths()
    engine.get_import_paths()
def test_qmlapplicationengine(qtlog):
    """QmlApplicationEngine: construct, iterate, and load a QML file."""
    # qtlog.disabled() suppresses Qt log output during construction
    # (presumably engine startup emits warnings — see pytest-qt docs).
    with qtlog.disabled():
        engine = qml.QmlApplicationEngine()
    # The engine supports iteration; just walk it.
    for item in engine:
        pass
    path = pathlib.Path.cwd() / "tests" / "qmltest.qml"
    engine.load_data(path.read_text())
def test_qmlcomponent():
    """QmlComponent: a fresh component reports 'null' status."""
    comp = qml.QmlComponent()
    assert comp.get_status() == "null"
    # comp.load_url("", mode="asynchronous")
    comp.get_url()
def test_jsvalue():
    """JSValue: wrapping, array-style item access, and from_object coercion."""
    val = qml.JSValue(1)
    assert val.get_error_type() is None
    assert val.get_value() == 1
    repr(val)
    engine = qml.JSEngine()
    # JS arrays support mapping-style item access from Python.
    val = engine.new_array(2)
    val["test1"] = 1
    val["test2"] = 2
    assert val["test1"] == 1
    assert "test2" in val
    assert len(val) == 2
    del val["test2"]
    # Iteration yields (key, value) pairs; just exercise the protocol.
    for n, v in val:
        pass
    # from_object should accept None, numbers, lists and dicts.
    val = qml.JSValue.from_object(None, engine)
    val = qml.JSValue.from_object(1, engine)
    val = qml.JSValue.from_object(["test"], engine)
    val = qml.JSValue.from_object(dict(a="b"), engine)
| 24.945205
| 59
| 0.641406
| 235
| 1,821
| 4.821277
| 0.319149
| 0.03707
| 0.068844
| 0.060018
| 0.19241
| 0.124448
| 0
| 0
| 0
| 0
| 0
| 0.011947
| 0.218561
| 1,821
| 72
| 60
| 25.291667
| 0.784259
| 0.124657
| 0
| 0.081633
| 0
| 0
| 0.053729
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 1
| 0.102041
| false
| 0.040816
| 0.122449
| 0
| 0.22449
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9386838c937de37405273fac5771d31ccf1a0479
| 2,550
|
py
|
Python
|
demo.py
|
HsienYu/tree_demo
|
aa2fa6c016b3ea5c1e768baa8ce4ea319c727bfc
|
[
"Artistic-2.0"
] | null | null | null |
demo.py
|
HsienYu/tree_demo
|
aa2fa6c016b3ea5c1e768baa8ce4ea319c727bfc
|
[
"Artistic-2.0"
] | null | null | null |
demo.py
|
HsienYu/tree_demo
|
aa2fa6c016b3ea5c1e768baa8ce4ea319c727bfc
|
[
"Artistic-2.0"
] | null | null | null |
# Simple test for NeoPixels on Raspberry Pi
import time
import board
import neopixel
# Choose an open pin connected to the Data In of the NeoPixel strip, i.e. board.D18
# NeoPixels must be connected to D10, D12, D18 or D21 to work.
pixel_pin = board.D18
# The number of NeoPixels
num_pixels = 30
# The order of the pixel colors - RGB or GRB. Some NeoPixels have red and green reversed!
# For RGBW NeoPixels, simply change the ORDER to RGBW or GRBW.
ORDER = neopixel.GRB
# auto_write=False: the strip only updates on explicit pixels.show() calls,
# so whole frames can be composed before being pushed to the hardware.
pixels = neopixel.NeoPixel(pixel_pin, num_pixels, brightness=0.2, auto_write=False,
                           pixel_order=ORDER)
def wheel(pos):
    # Input a value 0 to 255 to get a color value.
    # The colours are a transition r - g - b - back to r.
    if pos < 0 or pos > 255:
        # Out-of-range positions map to black.
        r = g = b = 0
    elif pos < 85:
        # First third: red ramps up, green ramps down.
        r = int(pos*2)
        g = int(255 - pos*2)
        b = 0
    elif pos < 170:
        # Second third: red ramps down, blue ramps up.
        pos -= 85
        r = int(255 - pos*2)
        g = 0
        b = int(pos*2)
    else:
        # Final third: green ramps up, blue ramps down.
        pos -= 170
        r = 0
        g = int(pos*2)
        b = int(255 - pos*2)
    # NOTE(review): the 3-channel (RGB/GRB) branch returns the hard-coded
    # (255, 155, b), discarding the r and g computed above — the stock
    # Adafruit example returns (r, g, b) here. Possibly a deliberate colour
    # tweak for this tree demo; confirm intent before "fixing".
    # 4-tuple branch adds a zero white channel for RGBW strips.
    return (255, 155, b) if ORDER == neopixel.RGB or ORDER == neopixel.GRB else (r, g, b, 0)
def rainbow_cycle(wait):
    """Sweep one full rainbow across the strip.

    Each of the 255 frames shifts every pixel's hue by one step and then
    sleeps for `wait` seconds.
    """
    for offset in range(255):
        for idx in range(num_pixels):
            # Spread the 256 wheel positions evenly over the strip, then
            # rotate by the current frame offset (wrapped to 0..255).
            hue = (idx * 256 // num_pixels) + offset
            pixels[idx] = wheel(hue & 255)
        pixels.show()
        time.sleep(wait)
def white_breath():
    """One white 'breath': fade the whole strip up to near-white, then back down."""
    x = 0
    interval_time = 0.007
    time.sleep(1)
    # Each `while` guard here fires exactly once: the inner for-loop leaves
    # x at 254 (resp. 1), so the condition is false on re-test. The guards
    # effectively act as one-shot "run the ramp" gates.
    while x == 0:
        # Ramp brightness 0 -> 254.
        for i in range(255):
            x = i
            pixels.fill((x, x, x))
            pixels.show()
            time.sleep(interval_time)
    while x == 254:
        # Ramp brightness 255 -> 1 (never reaches full black here).
        for i in range(255, 0, -1):
            x = i
            pixels.fill((i, i, i))
            pixels.show()
            time.sleep(interval_time)
def repeat_fun(times, f, *args):
    """Invoke f(*args) exactly `times` times, discarding any return value."""
    for _ in range(times):
        f(*args)
# Main demo loop: alternate white breathing and rainbow cycling forever,
# until the user stops the script with Ctrl-C.
try:
    while True:
        print("light start")
        repeat_fun(5, white_breath)
        # rainbow cycle with 1ms delay per step
        repeat_fun(3, rainbow_cycle, 0.01)
        # white_breath()
        # for i in range(num_pixels):
        #     for r in range(255):
        #         pixels[i] = (r, 0, 0)
        #         pixels.show()
        #         time.sleep(0.001)
        #     j = i - 1
        #     for y in range(255):
        #         pixels[j] = (y, y, y)
        #         pixels.show()
        #         time.sleep(0.001)
        #     time.sleep(0.01)
except KeyboardInterrupt:
    print("KeyboardInterrupt has been caught.")
| 25.757576
| 92
| 0.533333
| 376
| 2,550
| 3.558511
| 0.329787
| 0.041854
| 0.037369
| 0.041106
| 0.13154
| 0.110613
| 0
| 0
| 0
| 0
| 0
| 0.069113
| 0.358824
| 2,550
| 98
| 93
| 26.020408
| 0.749235
| 0.304314
| 0
| 0.118644
| 0
| 0
| 0.025685
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.067797
| false
| 0
| 0.050847
| 0
| 0.135593
| 0.033898
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93881978c162edde4ca5dd970ae7fc5d1d4dfecc
| 1,861
|
py
|
Python
|
rptk/query/__init__.py
|
wolcomm/rptk
|
fe6c1b597741ff14e4c89519458bb0950f0aa955
|
[
"Apache-2.0"
] | 15
|
2017-11-30T01:28:11.000Z
|
2021-08-12T09:17:36.000Z
|
rptk/query/__init__.py
|
wolcomm/rptk
|
fe6c1b597741ff14e4c89519458bb0950f0aa955
|
[
"Apache-2.0"
] | 71
|
2018-06-22T09:54:50.000Z
|
2020-10-21T07:10:54.000Z
|
rptk/query/__init__.py
|
wolcomm/rptk
|
fe6c1b597741ff14e4c89519458bb0950f0aa955
|
[
"Apache-2.0"
] | 2
|
2019-08-31T20:45:19.000Z
|
2019-10-02T18:26:58.000Z
|
# Copyright (c) 2018 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the Apache License version 2.0
# (the "License"); you may not use this file except in compliance with the
# License.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""rptk module.query module."""
from __future__ import print_function
from __future__ import unicode_literals
from rptk.base import BaseObject
# Python 2/3 compatibility shims: under Python 3 the py2-only names
# `basestring` and `unicode` don't exist, so alias them to `str`.
# EAFP: probing the name and catching NameError avoids a version check.
try:
    basestring
except NameError:
    basestring = str
try:
    unicode
except NameError:
    unicode = str
class BaseQuery(BaseObject):
    """Base class for the definition of query execution classes."""
    # Subclasses may flip this when their query mechanism only works on
    # POSIX platforms (presumably checked by the framework — confirm).
    posix_only = False
    def __init__(self, **opts):
        """Initialise new object."""
        super(BaseQuery, self).__init__()
        self.log_init()
        # Raw options kept on the instance; the `opts` read by the
        # properties below is presumably exposed by BaseObject from this
        # attribute — confirm against rptk.base.
        self._opts = opts
        self.log_init_done()
    def query(self, *objects):
        """Check the object name type."""
        # Generator: validates each name is string-like, coerces it to
        # text, and yields it lazily for the caller to consume.
        self.log_method_enter(method=self.current_method)
        for obj in objects:
            if not isinstance(obj, basestring):
                self.raise_type_error(arg=obj, cls=basestring)
            obj = unicode(obj)
            yield obj
    @property
    def host(self):
        """Get the configured IRR server hostname."""
        return self.opts["host"]
    @property
    def port(self):
        """Get the configured IRR server port."""
        # Coerced to int so string-valued configuration still works.
        return int(self.opts["port"])
    @property
    def target(self):
        """Construct a hostname:port pair for the IRR server."""
        return "{}:{}".format(self.host, self.port)
| 27.776119
| 79
| 0.667383
| 239
| 1,861
| 5.079498
| 0.531381
| 0.041186
| 0.024712
| 0.032949
| 0.047776
| 0.047776
| 0
| 0
| 0
| 0
| 0
| 0.004237
| 0.239119
| 1,861
| 66
| 80
| 28.19697
| 0.853107
| 0.431488
| 0
| 0.205882
| 0
| 0
| 0.012846
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.147059
| false
| 0
| 0.088235
| 0
| 0.382353
| 0.029412
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9389cb7a39d34434b205d05068e576faba98ddc7
| 1,639
|
py
|
Python
|
legacy/tests/test_complete_tdf.py
|
solar464/TDF_deterministic_encryption
|
ff9dceacb37ce7727a8205cc72a4d928d37cce6f
|
[
"MIT"
] | null | null | null |
legacy/tests/test_complete_tdf.py
|
solar464/TDF_deterministic_encryption
|
ff9dceacb37ce7727a8205cc72a4d928d37cce6f
|
[
"MIT"
] | null | null | null |
legacy/tests/test_complete_tdf.py
|
solar464/TDF_deterministic_encryption
|
ff9dceacb37ce7727a8205cc72a4d928d37cce6f
|
[
"MIT"
] | null | null | null |
import unittest
import pickle
from array import array
import complete_tdf
from floodberry.floodberry_ed25519 import GE25519
from tdf_strucs import TDFMatrix, TDFError
from complete_tdf import CTDFCodec as Codec, CTDFCipherText as CipherText
from utils import int_lst_to_bitarr
# Paths for on-disk (pickled) test fixtures; the PACK_* files are written
# and re-read by the packing round-trip test below.
TEST_DIR = "legacy/tests/"
PACK_TEST_KEY_FILE = TEST_DIR + "ctdf_pack_test_keys.p"
PACK_TEST_CT_FILE = TEST_DIR + "ctdf_pack_test_ct.p"
TDF_KEY_FILE = TEST_DIR + "ctdf_test_keys.p"
TDF_CT_FILE = TEST_DIR + "ctdf_test_ct.p"
# The triple-quoted string below is a no-op expression statement kept as a
# usage example of the codec API.
"""
x = [0,1,2]
ctdf = CTDFCodec(len(x)*8)
u = ctdf.encode(x)
result = ctdf.decode(u)
"""
# Shared fixtures loaded once at import: a deserialized codec (TDF), a
# known-good ciphertext (CT), and the plaintext bit-array it encodes (X).
TDF = Codec.deserialize(TDF_KEY_FILE)
CT = CipherText.deserialize(TDF_CT_FILE)
X = int_lst_to_bitarr([0,1,2], 3)
class TestCTDF(unittest.TestCase):
    """Tests for the complete-TDF codec: serialization round-trips,
    encode/decode against stored fixtures, and length handling."""
    def test_packing(self):
        """Serialize codec and ciphertext to disk and compare the reloads."""
        tdf = Codec(16)
        u = tdf.encode(array('i',[1, 2]))
        tdf.serialize(PACK_TEST_KEY_FILE)
        u.serialize(PACK_TEST_CT_FILE)
        tdf1 = Codec.deserialize(PACK_TEST_KEY_FILE)
        u1 = CipherText.deserialize(PACK_TEST_CT_FILE)
        #call to_affine on all GE objects in codec
        # (equality is only meaningful after normalizing group elements
        # to affine form — presumably serialization does the same.)
        self.assertEqual(u.all_to_affine(), u1)
        self.assertEqual(tdf.all_to_affine(), tdf1)
    def test_encode(self):
        """Encoding the fixture plaintext reproduces the stored ciphertext."""
        ct = TDF.encode(X)
        self.assertEqual(ct.all_to_affine(), CT.all_to_affine())
    def test_decode(self):
        """Decoding the stored ciphertext recovers the fixture plaintext."""
        result = TDF.decode(CT)
        self.assertEqual(X, result)
    def test_different_length_encode_decode(self):
        """Shorter inputs round-trip; over-long inputs raise TDFError."""
        ct_short = TDF.encode([2], c=3)
        self.assertEqual(TDF.decode(ct_short), int_lst_to_bitarr([2], 3))
        self.assertRaises(TDFError, TDF.encode, [3] * 100)
| 29.267857
| 73
| 0.700427
| 252
| 1,639
| 4.269841
| 0.281746
| 0.05948
| 0.040892
| 0.055762
| 0.087361
| 0.042751
| 0
| 0
| 0
| 0
| 0
| 0.025602
| 0.18975
| 1,639
| 55
| 74
| 29.8
| 0.784639
| 0.025015
| 0
| 0
| 0
| 0
| 0.055777
| 0.013944
| 0
| 0
| 0
| 0
| 0.166667
| 1
| 0.111111
| false
| 0
| 0.222222
| 0
| 0.361111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
939056f893dc7a63b3b4c5c9d0f8b92f4cb9205c
| 7,652
|
py
|
Python
|
utils/utils_convert2hdf5.py
|
jiyeonkim127/PSI
|
5c525d5304fb756c9314ea3e225bbb180e521b9a
|
[
"Xnet",
"X11"
] | 138
|
2020-04-18T19:32:12.000Z
|
2022-03-31T06:58:33.000Z
|
utils/utils_convert2hdf5.py
|
jiyeonkim127/PSI
|
5c525d5304fb756c9314ea3e225bbb180e521b9a
|
[
"Xnet",
"X11"
] | 19
|
2020-04-21T18:24:20.000Z
|
2022-03-12T00:25:11.000Z
|
utils/utils_convert2hdf5.py
|
jiyeonkim127/PSI
|
5c525d5304fb756c9314ea3e225bbb180e521b9a
|
[
"Xnet",
"X11"
] | 19
|
2020-04-22T01:32:25.000Z
|
2022-03-24T02:52:01.000Z
|
import numpy as np
import scipy.io as sio
import os, glob, sys
import h5py_cache as h5c
# Machine-specific source tree for the project imports below.
sys.path.append('/home/yzhang/workspaces/smpl-env-gen-3d-internal')
sys.path.append('/home/yzhang/workspaces/smpl-env-gen-3d-internal/source')
from batch_gen_hdf5 import BatchGeneratorWithSceneMeshMatfile
import torch
'''
In this script, we put all mat files into a hdf5 file, so as to speed up the data loading process.
'''
# Input .mat snapshot directory and output hdf5 location (machine-specific).
dataset_path = '/mnt/hdd/PROX/snapshot_realcams_v3'
outfilename = 'realcams.hdf5'
h5file_path = os.path.join('/home/yzhang/Videos/PROXE', outfilename)
batch_gen = BatchGeneratorWithSceneMeshMatfile(dataset_path=dataset_path,
                                               scene_verts_path = '/home/yzhang/Videos/PROXE/scenes_downsampled',
                                               scene_sdf_path = '/home/yzhang/Videos/PROXE/scenes_sdf',
                                               device=torch.device('cuda'))
### create the dataset used in the hdf5 file
with h5c.File(h5file_path, mode='w',chunk_cache_mem_size=1024**2*128) as hdf5_file:
    # First pass: fetch a single valid batch only to discover the per-field
    # array shapes needed to declare the resizable hdf5 datasets.
    while batch_gen.has_next_batch():
        train_data = batch_gen.next_batch(1)
        if train_data is None:
            continue
        # Last element of train_data is the filename list; drop it here.
        train_data_np = [x.detach().cpu().numpy() for x in train_data[:-1]]
        break
    [depth_batch, seg_batch, body_batch,
     cam_ext_batch, cam_int_batch, max_d_batch,
     s_verts_batch, s_faces_batch,
     s_grid_min_batch, s_grid_max_batch,
     s_grid_dim_batch, s_grid_sdf_batch] = train_data_np
    n_samples = batch_gen.n_samples
    print('-- n_samples={:d}'.format(n_samples))
    # One resizable dataset per field; the first axis grows by one per sample
    # (maxshape=(None, ...)). Scene-mesh fields are intentionally commented out.
    hdf5_file.create_dataset("sceneid", shape=(1,), chunks=True, dtype=np.float32, maxshape=(None,) )
    hdf5_file.create_dataset("depth", shape=(1,)+tuple(depth_batch.shape[1:]) ,chunks = True, dtype=np.float32, maxshape=(None,)+tuple(depth_batch.shape[1:]) )
    hdf5_file.create_dataset("seg", shape=(1,)+tuple(seg_batch.shape[1:]) ,chunks = True, dtype=np.float32, maxshape=(None,)+tuple(seg_batch.shape[1:]) )
    hdf5_file.create_dataset("body", shape=(1,)+tuple(body_batch.shape[1:]) ,chunks = True, dtype=np.float32, maxshape=(None,)+tuple(body_batch.shape[1:]) )
    hdf5_file.create_dataset("cam_ext", shape=(1,)+tuple(cam_ext_batch.shape[1:]) ,chunks = True, dtype=np.float32, maxshape=(None,)+tuple(cam_ext_batch.shape[1:]) )
    hdf5_file.create_dataset("cam_int", shape=(1,)+tuple(cam_int_batch.shape[1:]) ,chunks = True, dtype=np.float32, maxshape=(None,)+tuple(cam_int_batch.shape[1:]) )
    hdf5_file.create_dataset("max_d", shape=(1,)+tuple(max_d_batch.shape[1:]) ,chunks = True, dtype=np.float32, maxshape=(None,)+tuple(max_d_batch.shape[1:]) )
    # hdf5_file.create_dataset("s_verts", shape=(1,)+tuple(s_verts_batch.shape[1:]) ,chunks = True, dtype=np.float32, maxshape=(None,)+tuple(s_verts_batch.shape[1:]) )
    # hdf5_file.create_dataset("s_faces", shape=(1,)+tuple(s_faces_batch.shape[1:]) ,chunks = True, dtype=np.float32, maxshape=(None,)+tuple(s_faces_batch.shape[1:]) )
    # hdf5_file.create_dataset("s_grid_min", shape=(1,)+tuple(s_grid_min_batch.shape[1:]) ,chunks = True, dtype=np.float32, maxshape=(None,)+tuple(s_grid_min_batch.shape[1:]))
    # hdf5_file.create_dataset("s_grid_max", shape=(1,)+tuple(s_grid_max_batch.shape[1:]) ,chunks = True, dtype=np.float32, maxshape=(None,)+tuple(s_grid_max_batch.shape[1:]))
    # hdf5_file.create_dataset("s_grid_dim", shape=(1,)+tuple(s_grid_dim_batch.shape[1:]) ,chunks = True, dtype=np.float32, maxshape=(None,)+tuple(s_grid_dim_batch.shape[1:]))
    # hdf5_file.create_dataset("s_grid_sdf", shape=(1,)+tuple(s_grid_sdf_batch.shape[1:]) ,chunks = True, dtype=np.float32, maxshape=(None,)+tuple(s_grid_sdf_batch.shape[1:]))
    batch_gen.reset()
    # Scene-name -> integer-id mapping. Order matters: stored "sceneid" values
    # are indices into this exact list.
    scene_list = ['BasementSittingBooth','MPH1Library', 'MPH8', 'MPH11', 'MPH16',
                  'MPH112', 'N0SittingBooth', 'N0Sofa', 'N3Library', 'N3Office',
                  'N3OpenArea', 'Werkraum'] # !!!! important!cat
    ### create the dataset used in the hdf5 file
    idx = -1
    # Second pass: iterate the whole dataset and append each valid sample.
    while batch_gen.has_next_batch():
        train_data = batch_gen.next_batch(1)
        if train_data is None:
            continue
        [depth_batch, seg_batch, body_batch,
         cam_ext_batch, cam_int_batch, max_d_batch,
         s_verts_batch, s_faces_batch,
         s_grid_min_batch, s_grid_max_batch,
         s_grid_dim_batch, s_grid_sdf_batch, filename_list] = train_data
        ## check unavaliable prox fitting
        # body_batch[:,2] is treated as the body depth component here --
        # presumably the camera-frame z coordinate; TODO confirm in batch_gen.
        body_z_batch = body_batch[:,2]
        # Skip fits whose body depth magnitude reaches/exceeds the scene max depth.
        if body_z_batch.abs().max() >= max_d_batch.abs().max():
            print('-- encountered bad prox fitting. Skip it')
            continue
        # Skip fits with non-positive depth (body at or behind the camera plane).
        if body_z_batch.min() <=0:
            print('-- encountered bad prox fitting. Skip it')
            continue
        idx = idx+1
        print('-- processing batch idx {:d}'.format(idx))
        # Derive scene id from the path layout .../<SceneName>_<suffix>/<file>.mat
        filename = filename_list[0]
        scenename = filename.split('/')[-2].split('_')[0]
        sid = [scene_list.index(scenename)]
        # Grow each dataset by one row along axis 0 and write the new sample.
        hdf5_file["sceneid"].resize((hdf5_file["sceneid"].shape[0]+1, ))
        hdf5_file["sceneid"][-1,...] = sid[0]
        hdf5_file["depth"].resize((hdf5_file["depth"].shape[0]+1, )+hdf5_file["depth"].shape[1:])
        hdf5_file["depth"][-1,...] = depth_batch[0].detach().cpu().numpy()
        hdf5_file["seg"].resize((hdf5_file["seg"].shape[0]+1, )+hdf5_file["seg"].shape[1:])
        hdf5_file["seg"][-1,...] = seg_batch[0].detach().cpu().numpy()
        hdf5_file["body"].resize((hdf5_file["body"].shape[0]+1, )+hdf5_file["body"].shape[1:])
        hdf5_file["body"][-1,...] = body_batch[0].detach().cpu().numpy()
        hdf5_file["cam_ext"].resize((hdf5_file["cam_ext"].shape[0]+1, )+hdf5_file["cam_ext"].shape[1:])
        hdf5_file["cam_ext"][-1,...] = cam_ext_batch[0].detach().cpu().numpy()
        hdf5_file["cam_int"].resize((hdf5_file["cam_int"].shape[0]+1, )+hdf5_file["cam_int"].shape[1:])
        hdf5_file["cam_int"][-1,...] = cam_int_batch[0].detach().cpu().numpy()
        hdf5_file["max_d"].resize((hdf5_file["max_d"].shape[0]+1, )+hdf5_file["max_d"].shape[1:])
        hdf5_file["max_d"][-1,...] = max_d_batch[0].detach().cpu().numpy()
        # hdf5_file["s_verts"].resize((hdf5_file["s_verts"].shape[0]+1, )+hdf5_file["s_verts"].shape[1:])
        # hdf5_file["s_verts"][-1,...] = s_verts_batch[0].detach().cpu().numpy()
        # hdf5_file["s_faces"].resize((hdf5_file["s_faces"].shape[0]+1, )+hdf5_file["s_faces"].shape[1:])
        # hdf5_file["s_faces"][-1,...] = s_faces_batch[0].detach().cpu().numpy()
        # hdf5_file["s_grid_min"].resize((hdf5_file["s_grid_min"].shape[0]+1, )+hdf5_file["s_grid_min"].shape[1:])
        # hdf5_file["s_grid_min"][-1,...] = s_grid_min_batch[0].detach().cpu().numpy()
        # hdf5_file["s_grid_max"].resize((hdf5_file["s_grid_max"].shape[0]+1, )+hdf5_file["s_grid_max"].shape[1:])
        # hdf5_file["s_grid_max"][-1,...] = s_grid_max_batch[0].detach().cpu().numpy()
        # hdf5_file["s_grid_dim"].resize((hdf5_file["s_grid_dim"].shape[0]+1, )+hdf5_file["s_grid_dim"].shape[1:])
        # hdf5_file["s_grid_dim"][-1,...] = s_grid_dim_batch[0].detach().cpu().numpy()
        # hdf5_file["s_grid_sdf"].resize((hdf5_file["s_grid_sdf"].shape[0]+1, )+hdf5_file["s_grid_sdf"].shape[1:])
        # hdf5_file["s_grid_sdf"][-1,...] = s_grid_sdf_batch[0].detach().cpu().numpy()
print('--file converting finish')
| 49.688312
| 176
| 0.627418
| 1,114
| 7,652
| 4.01526
| 0.137343
| 0.121619
| 0.072435
| 0.071987
| 0.671138
| 0.583501
| 0.501453
| 0.426559
| 0.334898
| 0.262911
| 0
| 0.038189
| 0.18897
| 7,652
| 153
| 177
| 50.013072
| 0.682565
| 0.29012
| 0
| 0.25974
| 0
| 0
| 0.1322
| 0.045703
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.077922
| 0
| 0.077922
| 0.064935
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93938181b040ac3ac5f94151cbff662943eef747
| 3,324
|
py
|
Python
|
tests/test_names.py
|
fabiocaccamo/python-fontbro
|
2ed7ef0d3d1ed4d91387278cfb5f7fd63324451b
|
[
"MIT"
] | 11
|
2021-11-17T23:51:55.000Z
|
2022-03-17T20:38:14.000Z
|
tests/test_names.py
|
fabiocaccamo/python-fontbro
|
2ed7ef0d3d1ed4d91387278cfb5f7fd63324451b
|
[
"MIT"
] | 4
|
2022-02-21T02:16:06.000Z
|
2022-03-28T02:18:16.000Z
|
tests/test_names.py
|
fabiocaccamo/python-fontbro
|
2ed7ef0d3d1ed4d91387278cfb5f7fd63324451b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from fontbro import Font
from tests import AbstractTestCase
class NamesTestCase(AbstractTestCase):
    """
    Test case for the methods related to the font names.
    """

    def test_get_name_by_id(self):
        # Look the family name up via its numeric name-table identifier.
        font = self._get_font("/Roboto_Mono/static/RobotoMono-Regular.ttf")
        self.assertEqual(font.get_name(Font.NAME_FAMILY_NAME), "Roboto Mono")

    def test_get_name_by_key(self):
        # Look the family name up via its string key.
        font = self._get_font("/Roboto_Mono/static/RobotoMono-Regular.ttf")
        self.assertEqual(font.get_name("family_name"), "Roboto Mono")

    def test_get_name_by_invalid_type(self):
        # An argument that is neither id nor key must raise TypeError.
        font = self._get_font("/Roboto_Mono/static/RobotoMono-Regular.ttf")
        with self.assertRaises(TypeError):
            font.get_name(font)

    def test_get_name_by_invalid_key(self):
        # An unknown string key must raise KeyError.
        font = self._get_font("/Roboto_Mono/static/RobotoMono-Regular.ttf")
        with self.assertRaises(KeyError):
            font.get_name("invalid_key")

    def test_get_name_by_invalid_id(self):
        # An unknown numeric id yields None instead of raising.
        font = self._get_font("/Roboto_Mono/static/RobotoMono-Regular.ttf")
        self.assertEqual(font.get_name(999999999), None)

    def test_get_names(self):
        font = self._get_font("/Roboto_Mono/static/RobotoMono-Regular.ttf")
        names = font.get_names()
        expected_keys = [
            "copyright_notice",
            "designer",
            "designer_url",
            "family_name",
            "full_name",
            "license_description",
            "license_info_url",
            "postscript_name",
            "subfamily_name",
            "trademark",
            "unique_identifier",
            "vendor_url",
            "version",
        ]
        # Every expected key must be present in the returned mapping.
        self.assertTrue(all(key in names for key in expected_keys))
        self.assertEqual(names["family_name"], "Roboto Mono")
        self.assertEqual(names["subfamily_name"], "Regular")
        self.assertEqual(names["full_name"], "Roboto Mono Regular")
        self.assertEqual(names["postscript_name"], "RobotoMono-Regular")

    def test_set_name(self):
        # Renaming via a name id must be reflected by a subsequent lookup.
        font = self._get_font("/Roboto_Mono/static/RobotoMono-Regular.ttf")
        font.set_name(Font.NAME_FAMILY_NAME, "Roboto Mono Renamed")
        self.assertEqual(font.get_name(Font.NAME_FAMILY_NAME), "Roboto Mono Renamed")

    def test_set_name_by_invalid_key(self):
        # Setting through an unknown key must raise KeyError.
        font = self._get_font("/Roboto_Mono/static/RobotoMono-Regular.ttf")
        with self.assertRaises(KeyError):
            font.set_name("invalid_family_name_key", "Roboto Mono Renamed")

    def test_set_names(self):
        # Bulk renaming via a dict must update every listed record.
        font = self._get_font("/Roboto_Mono/static/RobotoMono-Regular.ttf")
        font.set_names(
            {
                Font.NAME_FAMILY_NAME: "Roboto Mono Renamed",
                Font.NAME_SUBFAMILY_NAME: "Regular Renamed",
            }
        )
        self.assertEqual(font.get_name(Font.NAME_FAMILY_NAME), "Roboto Mono Renamed")
        self.assertEqual(font.get_name(Font.NAME_SUBFAMILY_NAME), "Regular Renamed")
| 38.206897
| 85
| 0.65343
| 404
| 3,324
| 5.054455
| 0.160891
| 0.088149
| 0.052889
| 0.066112
| 0.621939
| 0.580313
| 0.523996
| 0.471107
| 0.432909
| 0.432909
| 0
| 0.003943
| 0.237064
| 3,324
| 86
| 86
| 38.651163
| 0.801262
| 0.038508
| 0
| 0.223881
| 0
| 0
| 0.263373
| 0.12618
| 0
| 0
| 0
| 0
| 0.208955
| 1
| 0.134328
| false
| 0
| 0.029851
| 0
| 0.179104
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
939786f9e786e13e34a09c07c33b9d33a5fb6c2c
| 1,273
|
py
|
Python
|
core-python/Core_Python/file/RemoveTempDirs.py
|
theumang100/tutorials-1
|
497f54c2adb022c316530319a168fca1c007d4b1
|
[
"MIT"
] | 9
|
2020-04-23T05:24:19.000Z
|
2022-02-17T16:37:51.000Z
|
core-python/Core_Python/file/RemoveTempDirs.py
|
theumang100/tutorials-1
|
497f54c2adb022c316530319a168fca1c007d4b1
|
[
"MIT"
] | 5
|
2020-10-01T05:08:37.000Z
|
2020-10-12T03:18:10.000Z
|
core-python/Core_Python/file/RemoveTempDirs.py
|
theumang100/tutorials-1
|
497f54c2adb022c316530319a168fca1c007d4b1
|
[
"MIT"
] | 9
|
2020-04-28T14:06:41.000Z
|
2021-10-19T18:32:28.000Z
|
import os
from pathlib import Path
from shutil import rmtree
# change your parent dir accordingly
try:
    directory = "TempDir"
    parent_dir = "E:/GitHub/1) Git_Tutorials_Repo_Projects/core-python/Core_Python/"
    # NOTE(review): td1 and td2 are both "TempA" -- td2 was possibly meant to
    # be "TempB"; confirm the intended nested directory layout.
    td1, td2 = "TempA", "TempA"
    path = os.path.join(parent_dir, directory)
    # NOTE(review): os.sep is concatenated twice throughout; a doubled
    # separator is tolerated by Windows paths but looks unintentional.
    temp_mul_dirs = os.path.join(path + os.sep + os.sep, td1 + os.sep + os.sep + td2)
    ''' This methods used to remove single file. all three methods used to delete symlink too'''
    os.remove(path +os.sep+os.sep+"TempFile.txt")
    os.unlink(path +os.sep+os.sep+td1+os.sep+os.sep+"TempFilea.txt")
    ''' we can also use this syntax pathlib.Path(path +os.sep+os.sep+"TempFile.txt").unlink() '''
    f_path = Path(temp_mul_dirs +os.sep+os.sep+"TempFileb.txt")
    f_path.unlink();
    ''' both methods for delete empty dir if single dir we can use rmdir if nested the
    removedirs'''
    # os.remove(path)
    # os.removedirs(path+os.sep+os.sep+td1)
    print("List of dirs before remove : ",os.listdir(path))
    ''' For remove non empty directory we have to use shutil.rmtree and pathlib.Path(path),rmdir()'''
    rmtree(path+os.sep+os.sep+td1)
    Path(path).rmdir()
    print("List of dirs after remove : ",os.listdir(parent_dir))
except Exception as e:
    # Broad catch keeps the demo script from crashing; the error is printed.
    print(e)
| 47.148148
| 101
| 0.683425
| 207
| 1,273
| 4.140097
| 0.362319
| 0.105018
| 0.073512
| 0.105018
| 0.161027
| 0.161027
| 0.121354
| 0.063011
| 0.063011
| 0.063011
| 0
| 0.007634
| 0.176748
| 1,273
| 27
| 102
| 47.148148
| 0.810115
| 0.069128
| 0
| 0
| 0
| 0
| 0.222362
| 0.065327
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.157895
| 0
| 0.157895
| 0.157895
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
939b63bdfc91f71662536be6efe59324a01bcaa9
| 587
|
py
|
Python
|
code/python/echomesh/color/WheelColor_test.py
|
silky/echomesh
|
2fe5a00a79c215b4aca4083e5252fcdcbd0507aa
|
[
"MIT"
] | 1
|
2019-06-27T11:34:13.000Z
|
2019-06-27T11:34:13.000Z
|
code/python/echomesh/color/WheelColor_test.py
|
silky/echomesh
|
2fe5a00a79c215b4aca4083e5252fcdcbd0507aa
|
[
"MIT"
] | null | null | null |
code/python/echomesh/color/WheelColor_test.py
|
silky/echomesh
|
2fe5a00a79c215b4aca4083e5252fcdcbd0507aa
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import, division, print_function, unicode_literals
from echomesh.color import WheelColor
from echomesh.util.TestCase import TestCase
# Reference outputs of WheelColor.wheel_color for inputs 0.0, 0.1, ..., 1.0.
# Each row is a 3-component color triple -- presumably RGB; first and last
# rows are identical, i.e. the wheel wraps around at 1.0.
EXPECTED = [
    [ 0., 1., 0.],
    [ 0.3, 0.7, 0. ],
    [ 0.6, 0.4, 0. ],
    [ 0.9, 0.1, 0. ],
    [ 0. , 0.2, 0.8],
    [ 0. , 0.5, 0.5],
    [ 0. , 0.8, 0.2],
    [ 0.9, 0. , 0.1],
    [ 0.6, 0. , 0.4],
    [ 0.3, 0. , 0.7],
    [ 0., 1., 0.]]
class TestWheelColor(TestCase):
    def test_several(self):
        """wheel_color sampled at 0.0, 0.1, ..., 1.0 must match EXPECTED."""
        samples = [WheelColor.wheel_color(step / 10.0) for step in range(11)]
        self.assertArrayEquals(samples, EXPECTED)
| 25.521739
| 82
| 0.558773
| 96
| 587
| 3.322917
| 0.416667
| 0.062696
| 0.037618
| 0.025078
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.126126
| 0.243612
| 587
| 22
| 83
| 26.681818
| 0.592342
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.052632
| 1
| 0.052632
| false
| 0
| 0.157895
| 0
| 0.263158
| 0.052632
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
939e7757a3e174c6114642e42e77179f804882a6
| 779
|
py
|
Python
|
notebook/demo/src/multifuns.py
|
marketmodelbrokendown/1
|
587283fd972d0060815dde82a57667e74765c9ae
|
[
"MIT"
] | 2
|
2019-03-13T15:34:42.000Z
|
2019-03-13T15:34:47.000Z
|
notebook/demo/src/multifuns.py
|
hervey-su/home
|
655b9e7b8180592742a132832795170a00debb47
|
[
"MIT"
] | 1
|
2020-11-18T21:55:20.000Z
|
2020-11-18T21:55:20.000Z
|
notebook/demo/src/multifuns.py
|
marketmodelbrokendown/1
|
587283fd972d0060815dde82a57667e74765c9ae
|
[
"MIT"
] | null | null | null |
from ctypes import cdll,c_int,c_double,POINTER
# Load the native multifuns shared library; the relative path means the
# script must be run from the directory containing ./demo.
_lib = cdll.LoadLibrary('./demo/bin/libmultifuns.dll')
# double dprod(double *x, int n)
def dprod(x):
    """Return the product of the doubles in *x* via the native dprod()."""
    fn = _lib.dprod
    fn.argtypes = [POINTER(c_double), c_int]
    fn.restype = c_double
    count = len(x)
    # Marshal the Python sequence into a C double[count] array.
    c_values = (c_double * count)(*x)
    return fn(c_values, int(count))
# int factorial(int n)
def factorial(n):
    """Compute n! through the native factorial() routine."""
    fn = _lib.factorial
    fn.argtypes = [c_int]
    fn.restype = c_int
    return fn(n)
# int isum(int array[], int size);
def isum(x):
    """Sum the integers in *x* using the native sum() routine."""
    fn = _lib.sum
    fn.argtypes = [POINTER(c_int), c_int]
    fn.restype = c_int
    count = len(x)
    # Marshal the Python sequence into a C int[count] array.
    c_values = (c_int * count)(*x)
    return fn(c_values, int(count))
| 26.862069
| 59
| 0.658537
| 131
| 779
| 3.748092
| 0.267176
| 0.065173
| 0.04277
| 0.04888
| 0.207739
| 0.207739
| 0.207739
| 0.207739
| 0.207739
| 0.207739
| 0
| 0
| 0.206675
| 779
| 28
| 60
| 27.821429
| 0.794498
| 0.24647
| 0
| 0.111111
| 0
| 0
| 0.046552
| 0.046552
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.055556
| 0
| 0.388889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93a10bd2227db590b05aec0efe907cfefee1e40e
| 843
|
py
|
Python
|
api/nivo_api/cli/database.py
|
RemiDesgrange/nivo
|
e13dcd7c00d1fbc41c23d51c9004901d7704b498
|
[
"MIT"
] | 2
|
2019-05-07T20:23:59.000Z
|
2020-04-26T11:18:38.000Z
|
api/nivo_api/cli/database.py
|
RemiDesgrange/nivo
|
e13dcd7c00d1fbc41c23d51c9004901d7704b498
|
[
"MIT"
] | 89
|
2019-08-06T12:47:50.000Z
|
2022-03-28T04:03:25.000Z
|
api/nivo_api/cli/database.py
|
RemiDesgrange/nivo
|
e13dcd7c00d1fbc41c23d51c9004901d7704b498
|
[
"MIT"
] | 1
|
2020-06-23T10:07:38.000Z
|
2020-06-23T10:07:38.000Z
|
from nivo_api.core.db.connection import metadata, create_database_connections
from sqlalchemy.engine import Engine
from sqlalchemy.exc import ProgrammingError
def is_postgis_installed(engine: Engine) -> bool:
    """Return True when the PostGIS extension answers postgis_version().

    A ProgrammingError from the probe query means the extension is absent.
    """
    try:
        engine.execute("SELECT postgis_version()")
    except ProgrammingError:
        return False
    return True
def create_schema_and_table(drop: bool) -> None:
    """Create the bra/nivo/flowcapt schemas and all mapped tables.

    Installs the PostGIS extension on first run. When *drop* is True, all
    known tables and the three schemas are dropped first.

    :param drop: drop existing tables and schemas before creating them.
    """
    schemas = ["bra", "nivo", "flowcapt"]
    db_con = create_database_connections()
    # PostGIS must be present before any geometry-typed tables are created.
    if not is_postgis_installed(db_con.engine):
        db_con.engine.execute("CREATE EXTENSION postgis")
    if drop:
        metadata.drop_all(db_con.engine)
        # Plain loops instead of side-effect-only list comprehensions.
        for schema in schemas:
            db_con.engine.execute(f"DROP SCHEMA IF EXISTS {schema} CASCADE")
    for schema in schemas:
        db_con.engine.execute(f"CREATE SCHEMA IF NOT EXISTS {schema}")
    metadata.create_all(db_con.engine)
| 32.423077
| 85
| 0.720047
| 116
| 843
| 5.043103
| 0.396552
| 0.059829
| 0.112821
| 0.092308
| 0.133333
| 0.099145
| 0.099145
| 0
| 0
| 0
| 0
| 0
| 0.185053
| 843
| 25
| 86
| 33.72
| 0.851528
| 0
| 0
| 0
| 0
| 0
| 0.150652
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0.157895
| 0
| 0.368421
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93a32ef6fddce5cbc92f060b72225c59adf371f7
| 515
|
py
|
Python
|
sources/pysimplegui/simpleeventloop.py
|
kantel/pythoncuriosa
|
4dfb92b443cbe0acf8d8efa5c54efbf13e834620
|
[
"MIT"
] | null | null | null |
sources/pysimplegui/simpleeventloop.py
|
kantel/pythoncuriosa
|
4dfb92b443cbe0acf8d8efa5c54efbf13e834620
|
[
"MIT"
] | null | null | null |
sources/pysimplegui/simpleeventloop.py
|
kantel/pythoncuriosa
|
4dfb92b443cbe0acf8d8efa5c54efbf13e834620
|
[
"MIT"
] | null | null | null |
import PySimpleGUI as sg

# Minimal PySimpleGUI demo: ask for a name and greet the user.
layout = [
    [sg.Text("Wie heißt Du?")],
    [sg.Input(key = "-INPUT-")],
    [sg.Text(size = (40, 1), key = "-OUTPUT-")],
    [sg.Button("Okay"), sg.Button("Quit")]
]

window = sg.Window("Hallo PySimpleGUI", layout)

# Event loop: read events until the window is closed or "Quit" is pressed.
keep_going = True
while keep_going:
    event, values = window.read()
    if event == sg.WINDOW_CLOSED or event == "Quit":
        # BUG FIX: stop without touching the window again. In the original,
        # the update() below ran unconditionally; after WINDOW_CLOSED,
        # `values` is None, so values["-INPUT-"] raised a TypeError.
        keep_going = False
    else:
        window["-OUTPUT-"].update("Hallöchen " + values["-INPUT-"] + "!")
window.close()
| 24.52381
| 69
| 0.557282
| 62
| 515
| 4.564516
| 0.548387
| 0.095406
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007752
| 0.248544
| 515
| 21
| 70
| 24.52381
| 0.723514
| 0
| 0
| 0
| 0
| 0
| 0.160853
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.066667
| 0
| 0.066667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93a5a387bf24ca83ae37f5241ea161f3010ef4cf
| 3,247
|
py
|
Python
|
datasets/fusiongallery.py
|
weshoke/UV-Net
|
9e833df6868695a2cea5c5b79a0b613b224eacf2
|
[
"MIT"
] | null | null | null |
datasets/fusiongallery.py
|
weshoke/UV-Net
|
9e833df6868695a2cea5c5b79a0b613b224eacf2
|
[
"MIT"
] | null | null | null |
datasets/fusiongallery.py
|
weshoke/UV-Net
|
9e833df6868695a2cea5c5b79a0b613b224eacf2
|
[
"MIT"
] | null | null | null |
import numpy as np
import pathlib
from torch.utils.data import Dataset, DataLoader
import dgl
import torch
from dgl.data.utils import load_graphs
import json
from datasets import util
from tqdm import tqdm
class FusionGalleryDataset(Dataset):
    """Fusion Gallery segmentation dataset of per-solid DGL face graphs.

    Each sample is a DGL graph whose node data carries the face features
    ("x") and per-face segmentation labels ("y").
    """

    @staticmethod
    def num_classes():
        """Number of face segmentation classes in the dataset."""
        return 8

    def __init__(
        self,
        root_dir,
        split="train",
        center_and_scale=True,
    ):
        """
        Load the Fusion Gallery dataset from:
        Joseph G. Lambourne, Karl D. D. Willis, Pradeep Kumar Jayaraman, Aditya Sanghi,
        Peter Meltzer, Hooman Shayani. "BRepNet: A topological message passing system
        for solid models," CVPR 2021.

        :param root_dir: Root path to the dataset
        :param split: string Whether train, val or test set
        :param center_and_scale: whether to normalize the per-node "x" features
        """
        path = pathlib.Path(root_dir)
        assert split in ("train", "val", "test")
        with open(str(path.joinpath("train_test.json")), "r") as read_file:
            filelist = json.load(read_file)
        # NOTE: Using a held out out validation set may be better.
        # But it's not easy to perform stratified sampling on some rare classes
        # which only show up on a few solids.
        if split in ("train", "val"):
            split_filelist = filelist["train"]
        else:
            split_filelist = filelist["test"]
        self.center_and_scale = center_and_scale
        # Load graphs and store their filenames for loading labels next
        all_files = []
        for fn in split_filelist:
            all_files.append(path.joinpath("graph").joinpath(fn + ".bin"))
        # Load labels from the .seg files in the "breps" subfolder
        self.files = []
        self.graphs = []
        print(f"Loading {split} data...")
        for fn in tqdm(all_files):
            if not fn.exists():
                continue
            graph = load_graphs(str(fn))[0][0]
            # FIX: `np.int` was removed in NumPy 1.24; it was an alias for the
            # builtin `int`, which is the exact drop-in replacement.
            label = np.loadtxt(
                path.joinpath("breps").joinpath(fn.stem + ".seg"), dtype=int, ndmin=1
            )
            if label.size != graph.number_of_nodes():
                # Skip files where the number of faces and labels don't match
                continue
            self.files.append(fn)
            graph.ndata["y"] = torch.tensor(label).long()
            self.graphs.append(graph)
        if self.center_and_scale:
            for i in range(len(self.graphs)):
                self.graphs[i].ndata["x"] = util.center_and_scale_uvsolid(
                    self.graphs[i].ndata["x"]
                )

    def __len__(self):
        """Number of usable solids in the split."""
        return len(self.graphs)

    def __getitem__(self, idx):
        """Return the DGL graph for the solid at *idx*."""
        graph = self.graphs[idx]
        return graph

    def _collate(self, batch):
        """Collate a list of DGL graphs into one batched graph."""
        bg = dgl.batch(batch)
        return bg

    def get_dataloader(self, batch_size=128, shuffle=True):
        """Return a DataLoader over this dataset using DGL graph batching."""
        return DataLoader(
            self,
            batch_size=batch_size,
            shuffle=shuffle,
            collate_fn=self._collate,
            num_workers=0,  # Can be set to non-zero on Linux
            drop_last=True,
        )
| 32.47
| 128
| 0.57838
| 408
| 3,247
| 4.477941
| 0.428922
| 0.038314
| 0.038314
| 0.01642
| 0.042693
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005489
| 0.326763
| 3,247
| 99
| 129
| 32.79798
| 0.830284
| 0.248537
| 0
| 0.058824
| 0
| 0
| 0.039713
| 0
| 0
| 0
| 0
| 0
| 0.014706
| 1
| 0.088235
| false
| 0
| 0.132353
| 0.044118
| 0.308824
| 0.014706
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93a73833278709acd49bb46a9f2c8ae73acf367a
| 3,690
|
py
|
Python
|
mpa/modules/models/heads/custom_ssd_head.py
|
openvinotoolkit/model_preparation_algorithm
|
8d36bf5944837b7a3d22fc2c3a4cb93423619fc2
|
[
"Apache-2.0"
] | null | null | null |
mpa/modules/models/heads/custom_ssd_head.py
|
openvinotoolkit/model_preparation_algorithm
|
8d36bf5944837b7a3d22fc2c3a4cb93423619fc2
|
[
"Apache-2.0"
] | null | null | null |
mpa/modules/models/heads/custom_ssd_head.py
|
openvinotoolkit/model_preparation_algorithm
|
8d36bf5944837b7a3d22fc2c3a4cb93423619fc2
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
from mmdet.models.builder import HEADS, build_loss
from mmdet.models.losses import smooth_l1_loss
from mmdet.models.dense_heads.ssd_head import SSDHead
@HEADS.register_module()
class CustomSSDHead(SSDHead):
    """SSD detection head with a configurable classification loss and optional
    re-weighting of the background class.

    NOTE(review): extends mmdet's SSDHead; assumes train_cfg provides
    ``neg_pos_ratio`` and ``smoothl1_beta`` -- confirm against the config.
    """

    def __init__(
        self,
        *args,
        bg_loss_weight=-1.0,  # negative value disables background re-weighting
        loss_cls=dict(
            type='CrossEntropyLoss',
            use_sigmoid=False,
            reduction='none',
            loss_weight=1.0
        ),
        **kwargs
    ):
        super().__init__(*args, **kwargs)
        # Replace the parent's classification loss with the configured one.
        self.loss_cls = build_loss(loss_cls)
        self.bg_loss_weight = bg_loss_weight

    def loss_single(self, cls_score, bbox_pred, anchor, labels, label_weights,
                    bbox_targets, bbox_weights, num_total_samples):
        """Compute loss of a single image.
        Args:
            cls_score (Tensor): Box scores for each image
                Has shape (num_total_anchors, num_classes).
            bbox_pred (Tensor): Box energies / deltas for each image
                level with shape (num_total_anchors, 4).
            anchors (Tensor): Box reference for each scale level with shape
                (num_total_anchors, 4).
            labels (Tensor): Labels of each anchors with shape
                (num_total_anchors,).
            label_weights (Tensor): Label weights of each anchor with shape
                (num_total_anchors,)
            bbox_targets (Tensor): BBox regression targets of each anchor wight
                shape (num_total_anchors, 4).
            bbox_weights (Tensor): BBox regression loss weights of each anchor
                with shape (num_total_anchors, 4).
            num_total_samples (int): If sampling, num total samples equal to
                the number of total anchors; Otherwise, it is the number of
                positive anchors.
        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        # FG cat_id: [0, num_classes -1], BG cat_id: num_classes
        pos_inds = ((labels >= 0) &
                    (labels < self.num_classes)).nonzero().reshape(-1)
        neg_inds = (labels == self.num_classes).nonzero().view(-1)
        # Re-weighting BG loss: override the weight of every background anchor
        # with bg_loss_weight when the feature is enabled (>= 0).
        label_weights = label_weights.reshape(-1)
        if self.bg_loss_weight >= 0.0:
            neg_indices = (labels == self.num_classes)
            label_weights = label_weights.clone()
            label_weights[neg_indices] = self.bg_loss_weight
        loss_cls_all = self.loss_cls(cls_score, labels, label_weights)
        # With reduction='none' the loss may keep a trailing class dimension;
        # collapse it to one value per anchor.
        if len(loss_cls_all.shape) > 1:
            loss_cls_all = loss_cls_all.sum(-1)
        # Hard negative mining: keep at most neg_pos_ratio * #positives of the
        # highest-loss negative anchors.
        num_pos_samples = pos_inds.size(0)
        num_neg_samples = self.train_cfg.neg_pos_ratio * num_pos_samples
        if num_neg_samples > neg_inds.size(0):
            num_neg_samples = neg_inds.size(0)
        topk_loss_cls_neg, _ = loss_cls_all[neg_inds].topk(num_neg_samples)
        loss_cls_pos = loss_cls_all[pos_inds].sum()
        loss_cls_neg = topk_loss_cls_neg.sum()
        loss_cls = (loss_cls_pos + loss_cls_neg) / num_total_samples
        if self.reg_decoded_bbox:
            # When the regression loss (e.g. `IouLoss`, `GIouLoss`)
            # is applied directly on the decoded bounding boxes, it
            # decodes the already encoded coordinates to absolute format.
            bbox_pred = self.bbox_coder.decode(anchor, bbox_pred)
        loss_bbox = smooth_l1_loss(
            bbox_pred,
            bbox_targets,
            bbox_weights,
            beta=self.train_cfg.smoothl1_beta,
            avg_factor=num_total_samples)
        return loss_cls[None], loss_bbox
| 40.108696
| 79
| 0.622493
| 478
| 3,690
| 4.497908
| 0.301255
| 0.058605
| 0.042326
| 0.065116
| 0.163721
| 0.10186
| 0.091163
| 0.04
| 0.04
| 0
| 0
| 0.011525
| 0.29458
| 3,690
| 91
| 80
| 40.549451
| 0.814445
| 0.352575
| 0
| 0
| 0
| 0
| 0.009062
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0.06
| 0
| 0.14
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93a84d645ccedf01c50e4963b06e5f5cf6720d08
| 2,918
|
py
|
Python
|
Python/ml_converter.py
|
daduz11/ios-facenet-id
|
0ec634cf7f4f12c2bfa6334a72d5f2ab0a4afde4
|
[
"Apache-2.0"
] | 2
|
2021-07-22T07:35:48.000Z
|
2022-03-03T05:48:08.000Z
|
Python/ml_converter.py
|
daduz11/ios-facenet-id
|
0ec634cf7f4f12c2bfa6334a72d5f2ab0a4afde4
|
[
"Apache-2.0"
] | null | null | null |
Python/ml_converter.py
|
daduz11/ios-facenet-id
|
0ec634cf7f4f12c2bfa6334a72d5f2ab0a4afde4
|
[
"Apache-2.0"
] | 2
|
2021-03-11T14:50:05.000Z
|
2021-04-18T14:58:24.000Z
|
"""
Copyright 2020 daduz11
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Firstly this script is used for the conversion of the freezed inference graph (pb format) into a CoreML model.
Moreover the same script takes the CoreML model at 32bit precision to carries out the quantization from 16 to 1 bit.
"""
import argparse
import sys
import tfcoreml
import coremltools
from coremltools.models.neural_network import quantization_utils
def main(args):
    """Convert a frozen TF graph (.pb) to a CoreML model, or quantize an
    existing .mlmodel, depending on ``args.type``.

    FLOAT32 -> tfcoreml conversion of a .pb file; FLOAT16 -> fp16 weight
    conversion; INT8..INT1 -> linear weight quantization to that bit width.

    :param args: argparse namespace with model_dir, output_file and type.
    """
    if args.type == 'FLOAT32':
        if args.model_dir[-3:] != '.pb':
            print("Error: the model type must be .pb file")
            return
        tfcoreml.convert(
            tf_model_path=args.model_dir,
            mlmodel_path=args.output_file,
            input_name_shape_dict = {'input':[1,160,160,3]},
            output_feature_names=["embeddings"],
            minimum_ios_deployment_target = '13'
        )
    else:
        if args.model_dir[-8:] != '.mlmodel':
            print("Error: the model type must be .mlmodel")
            return
        if args.type == 'FLOAT16':
            model_spec = coremltools.utils.load_spec(args.model_dir)
            model_fp16_spec = coremltools.utils.convert_neural_network_spec_weights_to_fp16(model_spec)
            coremltools.utils.save_spec(model_fp16_spec,args.output_file)
        else:
            # args.type is INTn; the trailing digit is the quantization width.
            model = coremltools.models.MLModel(args.model_dir)
            bit = int(args.type[-1])
            print("quantization in INT" + str(bit))
            quantized_model = quantization_utils.quantize_weights(model, bit, "linear")
            quantized_model.save(args.output_file)
    # BUG FIX: in the original, every branch returned before this line, so the
    # success message was unreachable. It now prints after a successful save.
    print('File correctly saved in:', args.output_file)
def parse_arguments(argv):
    """Build the CLI parser and parse *argv* (argv excludes the program name)."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'model_dir',
        type=str,
        help='This argument will be: .pb file for FLOAT32, .mlmodel otherwise (model quantization)',
    )
    parser.add_argument(
        'output_file',
        type=str,
        help='Filename for the converted coreml model (.mlmodel)',
    )
    precision_choices = ['FLOAT32', 'FLOAT16', 'INT8', 'INT6', 'INT4', 'INT3', 'INT2', 'INT1']
    parser.add_argument(
        '--type',
        type=str,
        choices=precision_choices,
        help="embeddings' type",
        default='FLOAT32',
    )
    return parser.parse_args(argv)
# Script entry point: parse CLI arguments (excluding the program name) and run.
if __name__ == '__main__':
    main(parse_arguments(sys.argv[1:]))
| 38.906667
| 160
| 0.660384
| 376
| 2,918
| 4.973404
| 0.428191
| 0.032086
| 0.032086
| 0.017112
| 0.029947
| 0.029947
| 0.029947
| 0
| 0
| 0
| 0
| 0.024015
| 0.24366
| 2,918
| 74
| 161
| 39.432432
| 0.82329
| 0.187457
| 0
| 0.173913
| 0
| 0
| 0.188751
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0.108696
| 0
| 0.282609
| 0.086957
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93a90aa96a7060708343be286a46a3cbad16b9b8
| 628
|
py
|
Python
|
pizza_utils/stringutils.py
|
ILikePizza555/py-pizza-utils
|
f336fc2c391430f5d901d85dfda50974d9f8aba7
|
[
"MIT"
] | null | null | null |
pizza_utils/stringutils.py
|
ILikePizza555/py-pizza-utils
|
f336fc2c391430f5d901d85dfda50974d9f8aba7
|
[
"MIT"
] | null | null | null |
pizza_utils/stringutils.py
|
ILikePizza555/py-pizza-utils
|
f336fc2c391430f5d901d85dfda50974d9f8aba7
|
[
"MIT"
] | null | null | null |
def find_from(string, subs, start = None, end = None):
    """
    Returns a tuple of the lowest index where a substring in the iterable "subs" was found, and the substring.
    If multiple substrings are found, it will return the first one.
    If nothing is found, it will return (-1, None)

    Note: the returned index is relative to the [start:end] slice, not to
    the original string.
    """
    haystack = string[start:end]
    best_index = len(haystack)
    best_sub = None
    for candidate in subs:
        position = haystack.find(candidate)
        if position != -1 and position < best_index:
            best_index = position
            best_sub = candidate
    if best_index == len(haystack):
        return (-1, None)
    return (best_index, best_sub)
| 27.304348
| 110
| 0.598726
| 90
| 628
| 4.111111
| 0.433333
| 0.121622
| 0.059459
| 0.091892
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006928
| 0.31051
| 628
| 22
| 111
| 28.545455
| 0.847575
| 0.345541
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93aa7bc7eef6be2b816f51dac8d5aa561ac4c490
| 4,844
|
py
|
Python
|
lab/experiment_futures.py
|
ajmal017/ta_scanner
|
21f12bfd8b5936d1d1977a32c756715539b0d97c
|
[
"BSD-3-Clause"
] | 16
|
2020-06-22T05:24:20.000Z
|
2022-02-15T11:41:14.000Z
|
lab/experiment_futures.py
|
ajmal017/ta_scanner
|
21f12bfd8b5936d1d1977a32c756715539b0d97c
|
[
"BSD-3-Clause"
] | 24
|
2020-07-07T04:22:03.000Z
|
2021-01-03T07:21:02.000Z
|
lab/experiment_futures.py
|
ajmal017/ta_scanner
|
21f12bfd8b5936d1d1977a32c756715539b0d97c
|
[
"BSD-3-Clause"
] | 3
|
2020-06-21T12:12:14.000Z
|
2021-09-01T04:46:59.000Z
|
# todos
# - [ ] all dates and date deltas are in time, not integers
from loguru import logger
from typing import Dict
import sys
import datetime
from datetime import timedelta
import numpy as np
from ta_scanner.data.data import load_and_cache, db_data_fetch_between, aggregate_bars
from ta_scanner.data.ib import IbDataFetcher
from ta_scanner.experiments.simple_experiment import SimpleExperiment
from ta_scanner.indicators import (
IndicatorSmaCrossover,
IndicatorEmaCrossover,
IndicatorParams,
)
from ta_scanner.signals import Signal
from ta_scanner.filters import FilterCumsum, FilterOptions, FilterNames
from ta_scanner.reports import BasicReport
from ta_scanner.models import gen_engine
# --- Experiment configuration (module-level; read by the helpers below) ---
ib_data_fetcher = IbDataFetcher()  # Interactive Brokers data source
instrument_symbol = "/NQ"  # futures symbol passed to load_and_cache — presumably Nasdaq-100; confirm
rth = False  # forwarded as use_rth in gen_params — presumably "regular trading hours only"; confirm
interval = 1  # bar aggregation size (groupby_minutes) used when loading/querying
field_name = "ema_cross"  # signal column name shared by the indicator and the filter
slow_sma = 25  # fixed slow EMA period
fast_sma_min = 5  # fast EMA search range lower bound (inclusive)
fast_sma_max = 20  # fast EMA search range upper bound (exclusive, via range())
filter_inverse = True  # when True, FilterCumsum.apply is called with inverse=1 — confirm semantics
win_pts = 75  # FilterOptions.win_points threshold
loss_pts = 30  # FilterOptions.loss_points threshold
trade_interval = 12  # FilterOptions.threshold_intervals value
test_total_pnl = 0.0  # running PnL accumulated by the walk-forward loop below
test_total_count = 0  # running trade count accumulated by the loop below
all_test_results = []  # one row per evaluated day: [date, pnl, count, avg, median]
engine = gen_engine()  # DB engine used by query_data
logger.remove()  # drop loguru's default sink, then log INFO+ to stderr
logger.add(sys.stderr, level="INFO")
def gen_params(sd, ed) -> Dict:
    """Build the keyword arguments shared by the data-loading helpers.

    ``rth`` and ``interval`` come from the module-level configuration.
    """
    return {
        "start_date": sd,
        "end_date": ed,
        "use_rth": rth,
        "groupby_minutes": interval,
    }
def run_cross(original_df, fast_sma: int, slow_sma: int):
    """Apply an EMA-crossover indicator plus cumsum filter and report PnL.

    :param original_df: bar dataframe; never modified (a copy is processed).
    :param fast_sma: fast EMA period.
    :param slow_sma: slow EMA period.
    :return: ``(pnl, count, avg, median)`` as produced by BasicReport.analyze.
    """
    df = original_df.copy()

    # Indicator setup: annotate the copy with crossover signals in field_name.
    indicator = IndicatorEmaCrossover(field_name, {
        IndicatorParams.fast_ema: fast_sma,
        IndicatorParams.slow_ema: slow_sma,
    })
    indicator.apply(df)

    # Filter setup: win/loss/interval thresholds come from module config.
    sfilter = FilterCumsum(field_name, {
        FilterOptions.win_points: win_pts,
        FilterOptions.loss_points: loss_pts,
        FilterOptions.threshold_intervals: trade_interval,
    })

    # The filter's return value was never used downstream (the report reads
    # ``df`` directly — apply presumably annotates df in place), so it is not
    # bound to a variable anymore.
    if filter_inverse:
        sfilter.apply(df, inverse=1)
    else:
        sfilter.apply(df)

    # Aggregate PnL over the filtered signals.
    pnl, count, avg, median = BasicReport().analyze(df, field_name)
    return pnl, count, avg, median
def run_cross_range(df, slow_sma: int, fast_sma_min, fast_sma_max):
    """Sweep the fast EMA period over ``[fast_sma_min, fast_sma_max)``.

    :return: one list per candidate: ``[fast_sma, pnl, count, avg, median]``.
    """
    return [
        [candidate, *run_cross(df, candidate, slow_sma)]
        for candidate in range(fast_sma_min, fast_sma_max)
    ]
def fetch_data():
    """Download and cache bars for the configured symbol over a fixed window."""
    window_start = datetime.date(2020, 7, 1)
    window_end = datetime.date(2020, 8, 15)
    params = gen_params(window_start, window_end)
    load_and_cache(instrument_symbol, ib_data_fetcher, **params)
def query_data(engine, symbol, sd, ed, groupby_minutes):
    """Fetch bars between ``sd`` and ``ed`` and aggregate them into
    ``groupby_minutes``-sized buckets, keeping ``ts`` both as index and column."""
    raw = db_data_fetch_between(engine, symbol, sd, ed)
    raw.set_index("ts", inplace=True)
    aggregated = aggregate_bars(raw, groupby_minutes=groupby_minutes)
    aggregated["ts"] = aggregated.index
    return aggregated
# fetch_data()  # one-off: populate the local bar cache before running the loop

# Walk-forward evaluation over ~33 calendar days: fit the fast EMA period on
# the preceding week of data, then evaluate it on the following day.
for i in range(0, 33):
    initial = datetime.date(2020, 7, 10) + timedelta(days=i)
    test_start, test_end = initial, initial  # NOTE(review): never read afterwards
    # Skip weekends (weekday 5 = Saturday, 6 = Sunday).
    if initial.weekday() in [5, 6]:
        continue
    # fetch training data: five days up to (and excluding) the evaluation day
    train_sd = initial - timedelta(days=5)
    train_ed = initial - timedelta(days=1)
    df_train = query_data(engine, instrument_symbol, train_sd, train_ed, interval)
    # for training data, let's find results for a range of SMA
    results = run_cross_range(
        df_train,
        slow_sma=slow_sma,
        fast_sma_min=fast_sma_min,
        fast_sma_max=fast_sma_max,
    )
    # Smooth PnL over a +/-2 neighbourhood of each candidate fast SMA so a
    # lone spike does not win; rows are [fast_sma, smoothed_pnl, raw_pnl].
    fast_sma_pnl = []
    for resultindex in range(2, len(results) - 3):
        fast_sma = results[resultindex][0]
        pnl = results[resultindex][1]
        result_set = results[resultindex - 2 : resultindex + 3]
        total_pnl = sum([x[1] for x in result_set])
        fast_sma_pnl.append([fast_sma, total_pnl, pnl])
    arr = np.array(fast_sma_pnl, dtype=float)
    # NOTE(review): argmax with axis=None scans the *flattened* array, so the
    # fast_sma and raw-pnl columns compete with the smoothed-pnl column.  If
    # column 1 (smoothed pnl) should drive the pick, this wants
    # np.argmax(arr[:, 1]) — confirm intent.
    max_tuple = np.unravel_index(np.argmax(arr, axis=None), arr.shape)
    optimal_fast_sma = int(arr[(max_tuple[0], 0)])
    optimal_fast_sma_pnl = [x[2] for x in fast_sma_pnl if x[0] == optimal_fast_sma][0]
    # logger.info(f"Selected fast_sma={optimal_fast_sma}. PnL={optimal_fast_sma_pnl}")
    # Evaluate the selected parameter on the (out-of-sample) next day.
    test_sd = initial
    test_ed = initial + timedelta(days=1)
    df_test = query_data(engine, instrument_symbol, test_sd, test_ed, interval)
    test_results = run_cross(df_test, optimal_fast_sma, slow_sma)
    all_test_results.append([initial] + list(test_results))
    logger.info(
        f"Test Results. pnl={test_results[0]}, count={test_results[1]}, avg={test_results[2]}, median={test_results[3]}"
    )
    test_total_pnl += test_results[0]
    test_total_count += test_results[1]
    logger.info(
        f"--- CumulativePnL={test_total_pnl}. Trades Count={test_total_count}. After={initial}"
    )

# Dump one row per evaluated day for offline analysis.
import csv

with open("simple_results.csv", "w") as csvfile:
    spamwriter = csv.writer(csvfile)
    for row in all_test_results:
        spamwriter.writerow(row)
| 28
| 120
| 0.706441
| 698
| 4,844
| 4.637536
| 0.266476
| 0.06055
| 0.032129
| 0.021007
| 0.061477
| 0.033982
| 0
| 0
| 0
| 0
| 0
| 0.015861
| 0.193022
| 4,844
| 172
| 121
| 28.162791
| 0.812228
| 0.061519
| 0
| 0.016949
| 0
| 0.008475
| 0.051158
| 0.032635
| 0
| 0
| 0
| 0.005814
| 0
| 1
| 0.042373
| false
| 0
| 0.127119
| 0.008475
| 0.20339
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93ade385d6ee900f8bf10af83edfd79ce2a15da9
| 841
|
py
|
Python
|
01.Hello_tkinter.py
|
amitdev101/learning-tkinter
|
1f7eabe1ac958c83c8bbe70e15682ecd4f7b5de5
|
[
"MIT"
] | null | null | null |
01.Hello_tkinter.py
|
amitdev101/learning-tkinter
|
1f7eabe1ac958c83c8bbe70e15682ecd4f7b5de5
|
[
"MIT"
] | 1
|
2020-11-15T15:43:03.000Z
|
2020-11-15T15:43:16.000Z
|
01.Hello_tkinter.py
|
amitdev101/learning-tkinter
|
1f7eabe1ac958c83c8bbe70e15682ecd4f7b5de5
|
[
"MIT"
] | null | null | null |
# Minimal "hello world" Tkinter demo: inspect the module, build a root window
# with a single Label, and enter the event loop.
import tkinter as tk
import os

# Quick sanity prints: the module object, its public names, the Tk version in
# use, and the current working directory.
print(tk)
print(dir(tk))
print(tk.TkVersion)
print(os.getcwd())
'''To initialize tkinter, we have to create a Tk root widget, which is a window with a title bar and
other decoration provided by the window manager. The root widget has to be created before any other widgets and
there can only be one root widget.'''
root = tk.Tk()
'''The next line of code contains the Label widget.
The first parameter of the Label call is the name of the parent window, in our case "root".
So our Label widget is a child of the root widget. The keyword parameter "text" specifies the text to be shown: '''
w = tk.Label(root,text='Hello world')
'''The pack method tells Tk to fit the size of the window to the given text. '''
w.pack()
'''The window won't appear until we enter the Tkinter event loop'''
root.mainloop()
| 36.565217
| 115
| 0.737218
| 153
| 841
| 4.052288
| 0.503268
| 0.064516
| 0.041935
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.178359
| 841
| 22
| 116
| 38.227273
| 0.89725
| 0
| 0
| 0
| 0
| 0
| 0.060773
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0.4
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93aee3614d8d0959902e63d0a0a8aa33c102d4fd
| 14,700
|
py
|
Python
|
myscrumy/remiljscrumy/views.py
|
mikkeyiv/Django-App
|
b1114e9e53bd673119a38a1acfefb7a9fd9f172e
|
[
"MIT"
] | null | null | null |
myscrumy/remiljscrumy/views.py
|
mikkeyiv/Django-App
|
b1114e9e53bd673119a38a1acfefb7a9fd9f172e
|
[
"MIT"
] | null | null | null |
myscrumy/remiljscrumy/views.py
|
mikkeyiv/Django-App
|
b1114e9e53bd673119a38a1acfefb7a9fd9f172e
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render,redirect,get_object_or_404
from remiljscrumy.models import ScrumyGoals,GoalStatus,ScrumyHistory,User
from django.http import HttpResponse,Http404,HttpResponseRedirect
from .forms import SignupForm,CreateGoalForm,MoveGoalForm,DevMoveGoalForm,AdminChangeGoalForm,QAChangeGoalForm,QAChangegoal
from django.contrib.auth import authenticate,login
from django.contrib.auth.models import User,Group
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.urls import reverse
#from django.core.exceptions import ObjectDoesNotExist
# Create your views here.
def index(request):
    """Sign-up view: register a user, log them in, add them to the
    'Developer' group, and redirect to the board."""
    # scrumygoals = ScrumyGoals.objects.all()
    # return HttpResponse(scrumygoals)
    if request.method == 'POST':
        # POST: the browser submitted the registration form
        form = SignupForm(request.POST)
        # creates the form instance and binds the submitted data to it
        if form .is_valid():  # validate the submitted credentials
            # add_goal = form.save(commit=False)  # save an object bound in the form
            form.save()
            username = form.cleaned_data.get('username')
            raw_password = form.cleaned_data.get('password1')
            # password2 = form.cleaned_data.get('password1')
            # if password2 != raw_password:
            #     raise form.Http404('password must match')
            # Authenticate against the just-created account and log in.
            user = authenticate(username=username, password=raw_password)
            # NOTE(review): every self-registered account gets staff status
            # and Developer membership — confirm this is intended.
            user.is_staff=True
            login(request,user)
            g = Group.objects.get(name='Developer')
            g.user_set.add(request.user)
            user.save()
            return redirect('home')
    else:
        form = SignupForm()  # creates an unbound form with empty data
    return render(request, 'remiljscrumy/index.html', {'form': form})
def filterArg(request):
    """Debug endpoint: dump the ScrumyGoals rows named 'Learn Django'."""
    matching_goals = ScrumyGoals.objects.filter(goal_name='Learn Django')
    return HttpResponse(matching_goals)
def move_goal(request, goal_id):
    """Move a goal between board columns, enforcing per-group permissions.

    Developers may move only their own goals (DevMoveGoalForm); Admins and
    Owners may change any status (AdminChangeGoalForm); Quality Assurance may
    move their own goals (QAChangegoal) and, for other users' goals, only
    those currently in "Verify Goal" status (QAChangeGoalForm).
    """
    verifygoal = GoalStatus.objects.get(status_name="Verify Goal")
    # Anonymous users are bounced to login and returned here afterwards.
    if not request.user.is_authenticated:
        return redirect('%s?next=%s' % (settings.LOGIN_URL, request.path))
    current = request.user
    # NOTE(review): assumes every user belongs to at least one group;
    # groups.all()[0] raises IndexError otherwise.
    group = current.groups.all()[0]
    try:
        goal = ScrumyGoals.objects.get(goal_id=goal_id)
    except ObjectDoesNotExist:
        notexist = 'A record with that goal id does not exist'
        context = {'not_exist': notexist}
        return render(request, 'remiljscrumy/exception.html', context)
    # Developer moving their own goal: restricted status choices.
    if group == Group.objects.get(name='Developer') and current == goal.user:
        form = DevMoveGoalForm()
        if request.method == 'POST':
            form = DevMoveGoalForm(request.POST)
            if form.is_valid():
                selected_status = form.save(commit=False)
                selected = form.cleaned_data['goal_status']
                get_status = selected_status.status_name
                choice = GoalStatus.objects.get(id=int(selected))
                goal.goal_status = choice
                goal.save()
                return HttpResponseRedirect(reverse('home'))
            else:
                form = DevMoveGoalForm()
        return render(request, 'remiljscrumy/movegoal.html', {'form': form, 'goal': goal, 'current_user': current, 'group': group})
    # Developer touching someone else's goal: rejected with an error page.
    if group == Group.objects.get(name='Developer') and current != goal.user:
        form = DevMoveGoalForm()
        if request.method == 'GET':
            notexist = 'YOU DO NO NOT HAVE THE PERMISSION TO CHANGE OTHER USERS GOAL'
            context = {'not_exist': notexist}
            return render(request, 'remiljscrumy/exception.html', context)
    # Admin: may set any status on any goal.
    if group == Group.objects.get(name='Admin'):
        form = AdminChangeGoalForm()
        if request.method == 'GET':
            return render(request, 'remiljscrumy/movegoal.html', {'form': form, 'goal': goal, 'currentuser': current, 'group': group})
        if request.method == 'POST':
            form = AdminChangeGoalForm(request.POST)
            if form.is_valid():
                selected_status = form.save(commit=False)
                get_status = selected_status.goal_status
                goal.goal_status = get_status
                goal.save()
                return HttpResponseRedirect(reverse('home'))
            else:
                form = AdminChangeGoalForm()
        return render(request, 'remiljscrumy/movegoal.html', {'form': form, 'goal': goal, 'current_user': current, 'group': group})
    # Owner moving their own goal: same form and flow as Admin.
    if group == Group.objects.get(name='Owner') and current == goal.user:
        form = AdminChangeGoalForm()
        if request.method == 'GET':
            return render(request, 'remiljscrumy/movegoal.html', {'form': form, 'goal': goal, 'currentuser': current, 'group': group})
        if request.method == 'POST':
            form = AdminChangeGoalForm(request.POST)
            if form.is_valid():
                selected_status = form.save(commit=False)
                get_status = selected_status.goal_status
                goal.goal_status = get_status
                goal.save()
                return HttpResponseRedirect(reverse('home'))
            else:
                form = AdminChangeGoalForm()
        return render(request, 'remiljscrumy/movegoal.html',{'form': form, 'goal': goal, 'current_user': current, 'group': group})
    # else:
    #     notexist = 'You cannot move other users goals'
    #     context = {'not_exist': notexist}
    #     return render(request, 'maleemmyscrumy/exception.html', context)
    # QA moving their own goal: unrestricted QA form.
    if group == Group.objects.get(name='Quality Assurance') and current == goal.user:
        form = QAChangegoal()
        if request.method == 'GET':
            return render(request, 'remiljscrumy/movegoal.html', {'form': form, 'goal': goal, 'currentuser': current, 'group': group})
        if request.method == 'POST':
            form = QAChangegoal(request.POST)
            if form.is_valid():
                selected_status = form.save(commit=False)
                selected = form.cleaned_data['goal_status']
                get_status = selected_status.status_name
                choice = GoalStatus.objects.get(id=int(selected))
                goal.goal_status = choice
                goal.save()
                return HttpResponseRedirect(reverse('home'))
            else:
                form = QAChangegoal()
        return render(request, 'remiljscrumy/movegoal.html',{'form': form, 'goal': goal, 'currentuser': current, 'group': group})
    # QA moving another user's goal: only allowed out of "Verify Goal".
    if group == Group.objects.get(name='Quality Assurance') and current != goal.user and goal.goal_status == verifygoal:
        form = QAChangeGoalForm()
        if request.method == 'GET':
            return render(request, 'remiljscrumy/movegoal.html', {'form': form, 'goal': goal, 'currentuser': current, 'group': group})
        if request.method == 'POST':
            form = QAChangeGoalForm(request.POST)
            if form.is_valid():
                selected_status = form.save(commit=False)
                selected = form.cleaned_data['goal_status']
                get_status = selected_status.status_name
                choice = GoalStatus.objects.get(id=int(selected))
                goal.goal_status = choice
                goal.save()
                return HttpResponseRedirect(reverse('home'))
            else:
                form = QAChangeGoalForm()
        return render(request, 'remiljscrumy/movegoal.html',{'form': form, 'goal': goal, 'currentuser': current, 'group': group})
    else:
        notexist = 'You can only move goal from verify goals to done goals'
        context = {'not_exist': notexist}
        return render(request, 'remiljscrumy/exception.html', context)
# def move_goal(request, goal_id):
# #response = ScrumyGoals.objects.get(goal_id=goal_id)
# # try:
# #goal = ScrumyGoals.objects.get(goal_id=goal_id)
# # except ScrumyGoals.DoesNotExist:
# # raise Http404 ('A record with that goal id does not exist')
# instance = get_object_or_404(ScrumyGoals,goal_id=goal_id)
# form = MoveGoalForm(request.POST or None, instance=instance)
# if form. is_valid():
# instance = form.save(commit=False)
# instance.save()
# return redirect('home')
# context={
# 'goal_id': instance.goal_id,
# 'user': instance.user,
# 'goal_status': instance.goal_status,
# 'form':form,
# }
# return render(request, 'remiljscrumy/exception.html', context)
#move_goal = form.save(commit=False)
# move_goal =
# form.save()
# # goal_name = form.cleaned_data.get('goal_name')
# # ScrumyGoals.objects.get(goal_name)
# return redirect('home')
# def form_valid(self, form):
# form.instance.goal_status = self.request.user
# return super(addgoalForm, self).form_valid(form)
# }
# return render(request, 'remiljscrumy/exception.html', context=gdict)
#return HttpResponse(response)
# return HttpResponse('%s is the response at the record of goal_id %s' % (response, goal_id))'''
from random import randint
def add_goal(request):
    """Render the goal-creation page and handle submissions.

    Bug fix: the original body was entirely commented out, so ``form`` was
    never defined and every request raised ``NameError`` at the final
    ``render`` call.  The form handling is restored following the pattern of
    the commented-out draft (and the ``home`` view).
    """
    form = CreateGoalForm()
    if request.method == 'POST':
        bound_form = CreateGoalForm(request.POST)
        if bound_form.is_valid():
            bound_form.save()
            return redirect('home')
        # Re-render with the bound form so validation errors are shown.
        form = bound_form
    return render(request, 'remiljscrumy/addgoal.html', {'form': form})
def home(request):
    """Scrum-board dashboard: group goals by status column and, for
    Developer/QA/Owner users, handle creation of a new weekly goal.

    Context always includes the four status querysets, the current user,
    their groups, and the four Group objects used by the template to decide
    what to show.
    """
    # form = CreateGoalForm
    # if request.method == 'POST':
    #     form = CreateGoalForm(request.POST)
    #     if form .is_valid():
    #         add_goal = form.save(commit=True)
    #         add_goal = form.save()
    #         # #form.save()
    #     return redirect('home')
    current = request.user
    # Status rows are addressed by hard-coded primary keys — assumes fixture
    # order pk 1..4 = weekly, daily, verify, done; TODO confirm.
    week = GoalStatus.objects.get(pk=1)
    day = GoalStatus.objects.get(pk=2)
    verify = GoalStatus.objects.get(pk=3)
    done = GoalStatus.objects.get(pk=4)
    user = User.objects.all()
    weeklygoal = ScrumyGoals.objects.filter(goal_status=week)
    dailygoal = ScrumyGoals.objects.filter(goal_status=day)
    verifygoal = ScrumyGoals.objects.filter(goal_status=verify)
    donegoal = ScrumyGoals.objects.filter(goal_status=done)
    groups = current.groups.all()
    dev = Group.objects.get(name='Developer')
    owner = Group.objects.get(name='Owner')
    admin = Group.objects.get(name='Admin')
    qa = Group.objects.get(name='Quality Assurance')
    # Anonymous users are bounced to login; past this point the user is
    # authenticated, so the is_authenticated check below is always true.
    if not request.user.is_authenticated:
        return redirect('%s?next=%s' % (settings.LOGIN_URL, request.path))
    if current.is_authenticated:
        if dev in groups or qa in groups or owner in groups:
            # if request.method == 'GET':
            #     return render(request, 'remiljscrumy/home.html', context)
            form = CreateGoalForm()
            context = {'user': user, 'weeklygoal': weeklygoal, 'dailygoal': dailygoal, 'verifygoal': verifygoal,
                       'donegoal': donegoal, 'form': form, 'current': current, 'groups': groups,'dev': dev,'owner':owner,'admin':admin,'qa':qa}
            if request.method == 'POST':
                form = CreateGoalForm(request.POST)
                if form.is_valid():
                    # NOTE(review): the goal_status/user assignments on the
                    # commit=False instance are followed by form.save(), which
                    # re-saves via the form — confirm the manual assignments
                    # actually persist.
                    post = form.save(commit=False)
                    status_name = GoalStatus(id=1)
                    post.goal_status = status_name
                    post.user = current
                    post = form.save()
        elif admin in groups:
            # Admins see the board without the goal-creation form.
            context = {'user': user, 'weeklygoal': weeklygoal, 'dailygoal': dailygoal, 'verifygoal': verifygoal,
                       'donegoal': donegoal,'current': current, 'groups': groups,'dev': dev,'owner':owner,'admin':admin,'qa':qa}
    # NOTE(review): if the user belongs to none of the four groups, ``context``
    # is never bound and this raises NameError — confirm group membership is
    # guaranteed at sign-up.
    return render(request, 'remiljscrumy/home.html', context)
# else:
# form = WeekOnlyAddGoalForm()
# return HttpResponseRedirect(reverse('ayooluwaoyewoscrumy:homepage'))
# if group == 'Admin':
# context ={
# 'user':User.objects.all(),
# 'weeklygoal':ScrumyGoals.objects.filter(goal_status=week),
# 'dailygoal':ScrumyGoals.objects.filter(goal_status=day),
# 'verifiedgoals':ScrumyGoals.objects.filter(goal_status=verify),
# 'donegoal':ScrumyGoals.objects.filter(goal_status=done),
# 'current':request.user,
# 'groups':request.user.groups.all(),
# 'admin': Group.objects.get(name="Admin"),
# 'owner': Group.objects.get(name='Owner'),
# 'dev': Group.objects.get(name='Developer'),
# 'qa': Group.objects.get(name='Quality Assurance'),}
# return render(request,'remiljscrumy/home.html',context=homedict)
# if request.method == 'GET':
# return render(request, 'remiljscrumy/home.html', context)
#
| 49.328859
| 206
| 0.606531
| 1,590
| 14,700
| 5.515723
| 0.131447
| 0.047891
| 0.045496
| 0.070696
| 0.645382
| 0.601824
| 0.547434
| 0.502166
| 0.46146
| 0.451653
| 0
| 0.00569
| 0.27068
| 14,700
| 298
| 207
| 49.328859
| 0.812331
| 0.319728
| 0
| 0.553571
| 0
| 0
| 0.125942
| 0.040306
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029762
| false
| 0.011905
| 0.059524
| 0.005952
| 0.232143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93b17847a4ea4d1f1c0c385ce9727ab17aed5c27
| 3,088
|
py
|
Python
|
examples/ex03_oscillator_classes.py
|
icemtel/carpet
|
5905e02ab0e44822829a672955dccad3e09eea07
|
[
"MIT"
] | null | null | null |
examples/ex03_oscillator_classes.py
|
icemtel/carpet
|
5905e02ab0e44822829a672955dccad3e09eea07
|
[
"MIT"
] | null | null | null |
examples/ex03_oscillator_classes.py
|
icemtel/carpet
|
5905e02ab0e44822829a672955dccad3e09eea07
|
[
"MIT"
] | null | null | null |
'''
Cilia classes are used to compute fixed points faster.
- Assume symmetry like in an m-twist (make a plot to see it)
- Assume that symmetries is not broken in time -> define classes of symmetry and interactions between them.
Done:
- Create a ring of cilia.
- Define symmetry classes
- Use classes to solve ODE
- Map back to cilia
'''
import numpy as np
import carpet
import carpet.lattice.ring1d as lattice
import carpet.physics.friction_pairwise as physics
import carpet.classes as cc
import carpet.visualize as vis
import matplotlib.pyplot as plt

## Parameters
# Physics
set_name = 'machemer_1'  # hydrodynamic friction coefficients data set
period = 31.25  # [ms] period
freq = 2 * np.pi / period  # [rad/ms] angular frequency
order_g11 = (4, 0)  # order of Fourier expansion of friction coefficients
order_g12 = (4, 4)
# Geometry
N = 6  # number of cilia
a = 18  # [um] lattice spacing
e1 = (1, 0)  # direction of the chain

## Initialize
# Geometry
L1 = lattice.get_domain_size(N, a)
coords, lattice_ids = lattice.get_nodes_and_ids(N, a, e1)  # get cilia (nodes) coordinates
NN, TT = lattice.get_neighbours_list(N, a, e1)  # get list of neighbours and relative positions
e1, e2 = lattice.get_basis(e1)
get_k = lattice.define_get_k(N, a, e1)
get_mtwist = lattice.define_get_mtwist(coords, N, a, e1)
# Physics: friction matrix + generalized forces define the phase ODE.
gmat_glob, q_glob = physics.define_gmat_glob_and_q_glob(set_name, e1, e2, a, NN, TT, order_g11, order_g12, period)
right_side_of_ODE = physics.define_right_side_of_ODE(gmat_glob, q_glob)
solve_cycle = carpet.define_solve_cycle(right_side_of_ODE, 2 * period, phi_global_func=carpet.get_mean_phase)

# k-twist initial condition: phases wind k1 times around the ring.
k1 = 2
phi0 = get_mtwist(k1)
vis.plot_nodes(coords, phi=phi0)  # visualize!
plt.ylim([-L1 / 10, L1 / 10])
plt.show()

## Solve regularly: integrate one full beat cycle with all N oscillators.
tol = 1e-4
sol = solve_cycle(phi0, tol)
phi1 = sol.y.T[-1] - 2 * np.pi  # after one cycle (subtract the full turn)

## Now solve with classes
# Map to classes: cilia sharing the same phase symmetry share a class.
ix_to_class, class_to_ix = cc.get_classes(phi0)
nclass = len(class_to_ix)
# Get classes representatives
# Get one oscillator from each of cilia classes
unique_cilia_ids = cc.get_unique_cilia_ix(
    class_to_ix)  # equivalent to sp.array([class_to_ix[iclass][0] for iclass in range(nclass)], dtype=sp.int64)
# Get connections between the representative cilia only.
N1_class, T1_class = cc.get_neighbours_list_class(unique_cilia_ids, ix_to_class, NN, TT)
# Define physics on the reduced (per-class) system.
gmat_glob_class, q_glob_class = physics.define_gmat_glob_and_q_glob(set_name, e1, e2, a, N1_class, T1_class,
                                                                   order_g11, order_g12, period)
right_side_of_ODE_class = physics.define_right_side_of_ODE(gmat_glob_class, q_glob_class)
solve_cycle_class = carpet.define_solve_cycle(right_side_of_ODE_class, 2 * period, carpet.get_mean_phase)
# Solve ODE on the class representatives only (cheaper than the full ring).
phi0_class = phi0[unique_cilia_ids]
sol = solve_cycle_class(phi0_class, tol)
phi1_class = sol.y.T[-1] - 2 * np.pi
# Map from classes back to cilia
phi1_mapped_from_class = phi1_class[ix_to_class]

## Print how much phase changed
print(phi1_mapped_from_class - phi1)  # difference between two - should be on the order of tolerance or smaller
| 36.761905
| 114
| 0.748381
| 520
| 3,088
| 4.196154
| 0.330769
| 0.021998
| 0.030247
| 0.038497
| 0.188818
| 0.163153
| 0.145738
| 0.135655
| 0.070577
| 0.03758
| 0
| 0.030104
| 0.160946
| 3,088
| 83
| 115
| 37.204819
| 0.812042
| 0.352008
| 0
| 0
| 0
| 0
| 0.005105
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.152174
| 0
| 0.152174
| 0.021739
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93b1f4ae1de1aaae99760a70f835707158943004
| 749
|
py
|
Python
|
cars/donkeycar/sim/Adafruit_PCA9685-pkg/Adafruit_PCA9685/__init__.py
|
kuaikai/kuaikai
|
ca7e7b2d2f6f16b892a21c819ba43201beadf370
|
[
"Apache-2.0"
] | 6
|
2018-03-27T15:46:28.000Z
|
2018-06-23T21:56:15.000Z
|
cars/donkeycar/sim/Adafruit_PCA9685-pkg/Adafruit_PCA9685/__init__.py
|
kuaikai/kuaikai
|
ca7e7b2d2f6f16b892a21c819ba43201beadf370
|
[
"Apache-2.0"
] | 3
|
2018-03-30T15:54:34.000Z
|
2018-07-11T19:44:59.000Z
|
cars/donkeycar/sim/Adafruit_PCA9685-pkg/Adafruit_PCA9685/__init__.py
|
kuaikai/kuaikai
|
ca7e7b2d2f6f16b892a21c819ba43201beadf370
|
[
"Apache-2.0"
] | null | null | null |
"""
SCL <scott@rerobots.net>
2018
"""
import json
import os
import tempfile
import time
class PCA9685:
    """Drop-in simulation of the Adafruit PCA9685 PWM driver.

    Instead of talking to hardware, every call is appended as one JSON line
    (timestamp, function name, arguments) to a temp file under /tmp so a
    simulator can replay the servo/throttle commands.
    """

    def __init__(self):
        # mkstemp returns an already-open fd; wrap it in a text-mode file
        # object so records can be written with json.dumps.
        self._fd, self._pathname = tempfile.mkstemp(prefix='kuaikai_sim_', dir='/tmp', text=True)
        self._fp = os.fdopen(self._fd, 'wt')

    def __del__(self):
        # Bug fix: __del__ can run after a partially-failed __init__ (no _fp
        # attribute) or during interpreter shutdown; guard so teardown never
        # raises.
        fp = getattr(self, '_fp', None)
        if fp is not None:
            try:
                fp.close()
            except Exception:
                pass

    def _record(self, fcn, args):
        # Shared writer: one newline-delimited JSON record per call.  Key
        # order ('time', 'fcn', 'args') matches the original output format.
        self._fp.write(json.dumps({
            'time': time.time(),
            'fcn': fcn,
            'args': args,
        }) + '\n')

    def set_pwm_freq(self, freq_hz):
        """Log a PWM frequency change (Hz)."""
        self._record('set_pwm_freq', {'freq_hz': freq_hz})

    def set_pwm(self, channel, on, off):
        """Log a PWM duty-cycle update for one channel."""
        self._record('set_pwm', {'channel': channel, 'on': on, 'off': off})
| 22.69697
| 97
| 0.534045
| 95
| 749
| 3.936842
| 0.442105
| 0.064171
| 0.048128
| 0.080214
| 0.219251
| 0.219251
| 0.219251
| 0.219251
| 0.219251
| 0.219251
| 0
| 0.014953
| 0.285714
| 749
| 32
| 98
| 23.40625
| 0.684112
| 0.038718
| 0
| 0.272727
| 0
| 0
| 0.115331
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.181818
| false
| 0
| 0.181818
| 0
| 0.409091
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93b2760677f1d106e80a9cb1e7a2b2ab58fbe987
| 2,851
|
py
|
Python
|
bayesian-belief/sequential_bayes.py
|
ichko/interactive
|
6659f81c11c0f180295b758b457343d32323eb35
|
[
"MIT"
] | null | null | null |
bayesian-belief/sequential_bayes.py
|
ichko/interactive
|
6659f81c11c0f180295b758b457343d32323eb35
|
[
"MIT"
] | null | null | null |
bayesian-belief/sequential_bayes.py
|
ichko/interactive
|
6659f81c11c0f180295b758b457343d32323eb35
|
[
"MIT"
] | 1
|
2019-02-05T20:22:08.000Z
|
2019-02-05T20:22:08.000Z
|
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats
def set_ax_range():
    """Pin the data-scatter axes to the fixed weight-space viewport."""
    # The two limit calls are independent; apply y first, then x.
    LEFT_AX.set_ylim(Y_RANGE)
    LEFT_AX.set_xlim(X_RANGE)
def range_plot(ax, f, x_range, y_range):
bins = 50
xi, yi = np.mgrid[
min(x_range):max(x_range):bins*1j,
min(y_range):max(y_range):bins*1j
]
zi = np.array([
f(x, Y_DATA)
for x, Y_DATA in zip(xi.flatten(), yi.flatten())
])
ax.pcolormesh(xi, yi, zi.reshape(xi.shape))
def likelihood(X, Y, w):
    """Gaussian likelihood of targets ``Y`` under the linear model ``X @ w``,
    using the empirical variance of ``Y`` as the shared noise scale."""
    predictions = X @ w
    noise_scale = np.var(Y)
    densities = [scipy.stats.norm(mu, noise_scale).pdf(t) for mu, t in zip(predictions, Y)]
    return np.prod(densities)
class SequentialBayes:
    """Gaussian belief over linear-regression weights, refined with the
    conjugate Bayesian linear-regression update."""

    def __init__(self, mean, var):
        # Current belief: multivariate normal N(prior_mean, prior_var).
        self.prior_mean = mean
        self.prior_var = var

    def pdf(self, x):
        """Density of the current belief at weight vector ``x``."""
        belief = scipy.stats.multivariate_normal(self.prior_mean, self.prior_var)
        return belief.pdf(x)

    def sample(self, size):
        """Draw ``size`` weight vectors from the current belief."""
        return np.random.multivariate_normal(self.prior_mean, self.prior_var, size=size)

    def update(self, X, y):
        """Condition the belief on observations ``(X, y)``.

        Returns the posterior mean and covariance, which also replace the
        stored prior for the next update.
        """
        noise = np.var(y) + 0.01  # small floor keeps the precision finite
        obs_precision = 1 / noise ** 2
        prior_precision = np.linalg.inv(self.prior_var)
        posterior_cov = np.linalg.inv(
            prior_precision + obs_precision * (X.T @ X)
        )
        posterior_mean = posterior_cov @ (
            prior_precision @ self.prior_mean + obs_precision * X.T @ y
        )
        self.prior_mean, self.prior_var = posterior_mean, posterior_cov
        return posterior_mean, posterior_cov
def on_click(event):
    """Mouse handler: record the clicked point as an observation, rebuild the
    posterior from all points, and redraw the three panels."""
    # Clicks outside the axes carry no data coordinates.
    if event.xdata is None:
        return
    LEFT_AX.clear()
    global BELIEF, X_DATA, Y_DATA
    mouse_x, mouse_y = event.xdata, event.ydata
    # Append the new point as a design-matrix row [1, x] (bias + slope feature).
    if len(X_DATA) > 0:
        X_DATA = np.vstack((X_DATA, [1, mouse_x]))
        Y_DATA = np.append(Y_DATA, [mouse_y])
    else:
        X_DATA = np.array([[1, mouse_x]])
        Y_DATA = np.array([mouse_y])
    # NOTE(review): the prior is reset and then updated with the *entire*
    # data set, so this is a batch refit rather than a sequential update.
    BELIEF.prior_mean = np.array([0, 0])
    BELIEF.prior_var = np.diag([1, 1])
    BELIEF.update(X_DATA, Y_DATA)
    # Middle panel: likelihood surface over weight space.
    range_plot(MIDDLE_AX, lambda w1, w2: likelihood(
        X_DATA, Y_DATA, np.array([w1, w2])
    ), X_RANGE, Y_RANGE)
    # Right panel: posterior pdf; left panel: sampled lines plus the data.
    plot_belief(RIGHT_AX)
    plot_belief_sample()
    LEFT_AX.scatter(X_DATA[:, 1], Y_DATA, c='r')
    set_ax_range()
    plt.pause(0.05)
def plot_belief_sample():
    """Overlay ten regression lines sampled from the current weight belief."""
    lo, hi = X_RANGE[0], X_RANGE[1]
    # Evaluating each sampled weight vector at the two x extremes fully
    # determines its line.
    endpoints = np.array([[1, lo], [1, hi]])
    weight_draws = BELIEF.sample(10)
    LEFT_AX.plot([lo, hi], endpoints @ weight_draws.T, 'b--', linewidth=0.4)
def plot_belief(ax):
    """Draw the belief pdf over the weight grid onto ``ax``."""
    def pdf_at(w1, w2):
        return BELIEF.pdf([w1, w2])
    range_plot(ax, pdf_at, X_RANGE, Y_RANGE)
# Three panels: data + sampled lines | likelihood surface | posterior pdf.
FIG, (LEFT_AX, MIDDLE_AX, RIGHT_AX) = plt.subplots(1, 3, figsize=(10, 3))
# Shared viewport for both data space and weight space.
X_RANGE = (-2, 2)
Y_RANGE = (-2, 2)
# Observations collected from mouse clicks (design-matrix rows are [1, x]).
X_DATA = np.array([])
Y_DATA = np.array([])
# Standard-normal prior over [intercept, slope].
BELIEF = SequentialBayes(np.array([0, 0]), np.diag([1, 1]))
set_ax_range()
plot_belief(RIGHT_AX)
plot_belief_sample()
# Every click adds a data point and refreshes all three panels.
FIG.canvas.mpl_connect('button_press_event', on_click)
plt.show()
| 24.791304
| 88
| 0.618029
| 478
| 2,851
| 3.439331
| 0.228033
| 0.040146
| 0.039538
| 0.038929
| 0.156934
| 0.131995
| 0.09854
| 0.09854
| 0
| 0
| 0
| 0.023373
| 0.234655
| 2,851
| 114
| 89
| 25.008772
| 0.730064
| 0
| 0
| 0.072289
| 0
| 0
| 0.007717
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.120482
| false
| 0
| 0.036145
| 0.024096
| 0.228916
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93b33aae2d1691aa0b0588d3a8ea2f43f4819a38
| 9,255
|
py
|
Python
|
cgc/legacy/kmeans.py
|
cffbots/cgc
|
1ea8b6bb6e4e9e728aff493744d8646b4953eaa4
|
[
"Apache-2.0"
] | 11
|
2020-09-04T10:28:48.000Z
|
2022-03-10T13:56:43.000Z
|
cgc/legacy/kmeans.py
|
cffbots/cgc
|
1ea8b6bb6e4e9e728aff493744d8646b4953eaa4
|
[
"Apache-2.0"
] | 40
|
2020-08-19T09:23:15.000Z
|
2022-03-01T16:16:30.000Z
|
cgc/legacy/kmeans.py
|
phenology/geoclustering
|
9b9b6ab8e64cdb62dbed6bdcfe63612e99665fd1
|
[
"Apache-2.0"
] | 4
|
2020-10-03T21:17:18.000Z
|
2022-03-09T14:32:56.000Z
|
import numpy as np
import logging
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from ..results import Results
logger = logging.getLogger(__name__)
class KmeansResults(Results):
    """
    Contains results and metadata of a k-means refinement calculation.
    """

    def reset(self):
        """Clear every computed result back to its unset (None) state."""
        for attribute in ("k_value", "var_list", "cl_mean_centroids"):
            setattr(self, attribute, None)
class Kmeans(object):
    def __init__(self,
                 Z,
                 row_clusters,
                 col_clusters,
                 n_row_clusters,
                 n_col_clusters,
                 k_range,
                 kmean_max_iter=100,
                 var_thres=2.,
                 output_filename=''):
        """
        Set up Kmeans object.

        :param Z: m x n matrix of spatial-temporal data. Usually each row is a
        time-series of a spatial grid.
        :type Z: class:`numpy.ndarray`
        :param row_clusters: m x 1 row cluster array.
        :type row_clusters: class:`numpy.ndarray`
        :param col_clusters: n x 1 column cluster array.
        :type col_clusters: class:`numpy.ndarray`
        :param n_row_clusters: number of row clusters
        :type n_row_clusters: int
        :param n_col_clusters: number of column clusters
        :type n_col_clusters: int
        :param k_range: range of the number of clusters, i.e. value "k"
        :type k_range: range
        :param kmean_max_iter: maximum number of iterations of the KMeans
        :type kmean_max_iter: int
        :param var_thres: threshold of the sum of variance to select k
        :type var_thres: float
        :param output_filename: name of the file where to write the results
        :type output_filename: str
        """
        # Input parameters -----------------
        self.row_clusters = row_clusters
        self.col_clusters = col_clusters
        self.n_row_clusters = n_row_clusters
        self.n_col_clusters = n_col_clusters
        self.k_range = list(k_range)
        self.kmean_max_iter = kmean_max_iter
        self.var_thres = var_thres
        self.output_filename = output_filename
        # Input parameters end -------------
        # store input parameters in results object; Z is assigned *after*
        # this call so the results snapshot holds only the parameters above.
        self.results = KmeansResults(**self.__dict__)
        self.Z = Z
        # Validate that cluster labels fit the declared counts and every
        # candidate k is positive.
        # NOTE(review): max()/min() raise ValueError on empty inputs — assumes
        # non-empty row_clusters/col_clusters/k_range.
        if not max(self.row_clusters) < self.n_row_clusters:
            raise ValueError("row_clusters include labels >= n_row_clusters")
        if not max(self.col_clusters) < self.n_col_clusters:
            raise ValueError("col_clusters include labels >= n_col_clusters")
        if not min(self.k_range) > 0:
            raise ValueError("All k-values in k_range must be > 0")
        # k cannot exceed the number of non-empty co-clusters; values close
        # to that bound leave little room for refinement, so only warn.
        nonempty_row_cl = len(np.unique(self.row_clusters))
        nonempty_col_cl = len(np.unique(self.col_clusters))
        max_k = nonempty_row_cl * nonempty_col_cl
        max_k_input = max(self.k_range)
        if max_k_input > max_k:
            raise ValueError("The maximum k-value exceeds the "
                             "number of (non-empty) co-clusters")
        elif max_k_input > max_k * 0.8:
            logger.warning("k_range includes large k-values (80% "
                           "of the number of co-clusters or more)")
    def compute(self):
        """
        Compute statistics for each co-cluster group, then loop through the
        range of k values and compute the sum of variances for each k.
        Finally select the smallest k whose sum of variances is below the
        threshold ``self.var_thres``.

        :return: k-means result object, also written to
            ``self.output_filename``
        :raises ValueError: if no k-value yields a variance sum below the
            threshold
        """
        # Fill self.stat_measures / self.stat_measures_norm with the
        # per-co-cluster statistics used as KMeans features.
        self._compute_statistic_measures()
        # Search for value k
        var_list = np.array([])  # Sum of variances for each k value
        kmeans_cc_list = []
        for k in self.k_range:
            # Fit KMeans on the normalized statistics for this k
            kmeans_cc = KMeans(n_clusters=k, max_iter=self.kmean_max_iter).fit(
                self.stat_measures_norm)
            var_list = np.hstack((var_list, self._compute_sum_var(kmeans_cc)))
            kmeans_cc_list.append(kmeans_cc)
        idx_var_below_thres, = np.where(var_list < self.var_thres)
        if len(idx_var_below_thres) == 0:
            raise ValueError(f"No k-value has variance below "
                             f"the threshold: {self.var_thres}")
        # Among the candidates below threshold, pick the smallest k
        # (k_range is not necessarily sorted).
        idx_k = min(idx_var_below_thres, key=lambda x: self.k_range[x])
        self.results.var_list = var_list
        self.results.k_value = self.k_range[idx_k]
        self.kmeans_cc = kmeans_cc_list[idx_k]
        del kmeans_cc_list
        # Scale back centroids of the "mean" dimension (statistics column 0)
        # from the normalized [0, 1] range to original units.
        centroids_norm = self.kmeans_cc.cluster_centers_[:, 0]
        stat_max = np.nanmax(self.stat_measures[:, 0])
        stat_min = np.nanmin(self.stat_measures[:, 0])
        mean_centroids = centroids_norm * (stat_max - stat_min) + stat_min
        # Assign centroids to each cluster cell
        cl_mean_centroids = mean_centroids[self.kmeans_cc.labels_]
        # Reshape the centroids of means to the shape of cluster matrix,
        # taking into account non-constructive (empty) row/col clusters:
        # cells of empty clusters stay NaN.
        self.results.cl_mean_centroids = np.full(
            (self.n_row_clusters, self.n_col_clusters), np.nan)
        idx = 0
        for r in np.unique(self.row_clusters):
            for c in np.unique(self.col_clusters):
                self.results.cl_mean_centroids[r, c] = cl_mean_centroids[idx]
                idx = idx + 1
        self.results.write(filename=self.output_filename)
        return self.results
    def _compute_statistic_measures(self):
        """
        Compute 6 statistics for each co-cluster group: mean, standard
        deviation, 5th percentile, 95th percentile, maximum and minimum.
        Normalize each statistic to [0, 1] across co-clusters.

        Fills ``self.stat_measures`` (raw values) and
        ``self.stat_measures_norm`` (normalized values); both have one row
        per non-empty (row cluster, column cluster) pair and 6 columns.
        """
        row_clusters = np.unique(self.row_clusters)
        col_clusters = np.unique(self.col_clusters)
        self.stat_measures = np.zeros(
            (len(row_clusters)*len(col_clusters), 6)
        )
        # Loop over co-clusters
        for ir, r in enumerate(row_clusters):
            idx_rows, = np.where(self.row_clusters == r)
            for ic, c in enumerate(col_clusters):
                idx_cols, = np.where(self.col_clusters == c)
                rr, cc = np.meshgrid(idx_rows, idx_cols)
                Z = self.Z[rr, cc]
                # Flat row index of co-cluster (ir, ic) in stat_measures
                idx = np.ravel_multi_index(
                    (ir, ic),
                    (len(row_clusters), len(col_clusters))
                )
                self.stat_measures[idx, 0] = Z.mean()
                self.stat_measures[idx, 1] = Z.std()
                self.stat_measures[idx, 2] = np.percentile(Z, 5)
                self.stat_measures[idx, 3] = np.percentile(Z, 95)
                self.stat_measures[idx, 4] = Z.max()
                self.stat_measures[idx, 5] = Z.min()
        # Normalize all statistics to [0, 1]
        minimum = self.stat_measures.min(axis=0)
        maximum = self.stat_measures.max(axis=0)
        self.stat_measures_norm = np.divide(
            (self.stat_measures - minimum),
            (maximum - minimum)
        )
        # Set statistics to zero if all its values are identical (max == min):
        # the division above produced NaN for such constant columns.
        self.stat_measures_norm[np.isnan(self.stat_measures_norm)] = 0.
def _compute_sum_var(self, kmeans_cc):
"""
Compute the sum of squared variance of each Kmean cluster
"""
# Compute the sum of variance of all points
var_sum = np.sum((self.stat_measures_norm -
kmeans_cc.cluster_centers_[kmeans_cc.labels_])**2)
return var_sum
def plot_elbow_curve(self, output_plot='./kmean_elbow_curve.png'):
"""
Export elbow curve plot
"""
plt.plot(self.k_range, self.results.var_list, marker='o')
plt.plot([min(self.k_range), max(self.k_range)],
[self.var_thres, self.var_thres],
color='r',
linestyle='--') # Threshold
plt.plot([self.results.k_value, self.results.k_value],
[min(self.results.var_list), max(self.results.var_list)],
color='g',
linestyle='--') # Selected k
x_min, x_max = min(self.k_range), max(self.k_range)
y_min, y_max = min(self.results.var_list), max(self.results.var_list)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.text(0.7,
min(
(self.var_thres-y_min)/(y_max-y_min) + 0.1,
0.9
),
'threshold={}'.format(self.var_thres),
color='r',
fontsize=12,
transform=plt.gca().transAxes)
plt.text(min(
(self.results.k_value-x_min)/(x_max-x_min) + 0.1,
0.9
),
0.7,
'k={}'.format(self.results.k_value),
color='g',
fontsize=12,
transform=plt.gca().transAxes)
plt.xlabel('k value', fontsize=20)
plt.ylabel('Sum of variance', fontsize=20)
plt.savefig(output_plot,
format='png',
transparent=True,
bbox_inches="tight")
| 38.723849
| 79
| 0.581415
| 1,210
| 9,255
| 4.220661
| 0.197521
| 0.051694
| 0.05326
| 0.021147
| 0.173879
| 0.073624
| 0.052085
| 0.02663
| 0.016448
| 0.016448
| 0
| 0.009571
| 0.322636
| 9,255
| 238
| 80
| 38.886555
| 0.805073
| 0.212966
| 0
| 0.092715
| 0
| 0
| 0.058161
| 0.003319
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039735
| false
| 0
| 0.033113
| 0
| 0.099338
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93b68bf304e52b47592144b9352709027d4393ab
| 3,221
|
py
|
Python
|
src/tests/benchmarks/tools/bench/Vellamo3.py
|
VirtualVFix/AndroidTestFramework
|
1feb769c6aca39a78e6daefd6face0a1e4d62cd4
|
[
"MIT"
] | null | null | null |
src/tests/benchmarks/tools/bench/Vellamo3.py
|
VirtualVFix/AndroidTestFramework
|
1feb769c6aca39a78e6daefd6face0a1e4d62cd4
|
[
"MIT"
] | null | null | null |
src/tests/benchmarks/tools/bench/Vellamo3.py
|
VirtualVFix/AndroidTestFramework
|
1feb769c6aca39a78e6daefd6face0a1e4d62cd4
|
[
"MIT"
] | null | null | null |
# All rights reserved by forest fairy.
# You cannot modify or share anything without sacrifice.
# If you don't agree, keep calm and don't look at code bellow!
__author__ = "VirtualV <https://github.com/virtualvfix>"
__date__ = "$Apr 13, 2014 8:47:25 PM$"
import re
from config import CONFIG
from tests.exceptions import ResultsNotFoundError
from tests.benchmarks.tools.base import OnlineBenchmark
class Vellamo3(OnlineBenchmark):
    """
    Vellamo 3 benchmark tool.

    Pulls the HTML result files produced by the benchmark from the device
    and extracts scores from them with regular expressions.
    """

    def __init__(self, attributes, serial):
        """
        :param attributes: benchmark configuration; must contain 'pull'
            (device folder with the HTML results) and 'failed_fields'
            (mapping of field name -> sub-field names to blank on failure)
        :param serial: device serial number
        """
        OnlineBenchmark.__init__(self, attributes, serial)
        self._pull = attributes['pull']
        self.failed_fields = attributes['failed_fields']

    def start(self):
        """Remove stale HTML results from the device, then start the benchmark."""
        try:
            self.sh('rm -r ' + self._pull + '*.html', errors=False, empty=True)
        except Exception:
            # Best-effort cleanup (was a bare except, which also swallowed
            # SystemExit/KeyboardInterrupt); nothing to remove on a clean device.
            pass
        super(Vellamo3, self).start()

    def __pull_logs(self):
        """
        Pull all HTML result files from the device to the local log folder.

        :return: iterable of (file name on device, raw file content) pairs
        :raises ResultsNotFoundError: if no HTML results exist on the device
        """
        raw_res = []
        lslist = [x.replace('\r', '').strip() for x in self.sh('ls {} | grep .html'.format(self._pull)).split('\n')
                  if x.strip() != '\n' and x.strip() != '']
        if len(lslist) == 0:
            raise ResultsNotFoundError('Vellamo HTML results not found.')
        postfix = '_{}_{}.html'.format(CONFIG.CURRENT_CYCLE, CONFIG.LOCAL_CURRENT_CYCLE)
        for x in lslist:
            self.logger.info('Pulling {} log...'.format(x))
            # x[:-5] strips the '.html' extension before appending the postfix
            self.pull(self._pull + x, CONFIG.LOG_PATH + x[:-5] + postfix)
            with open(CONFIG.LOG_PATH + x[:-5] + postfix, 'rb') as file:
                raw_res.append(file.read())
        return zip(lslist, raw_res)

    def collect_results(self, res_doc):
        """
        Parse benchmark scores out of the pulled HTML results and record
        them in *res_doc*, then clean the results off the device.

        NOTE(review): the files are opened in binary mode but matched with
        str regex patterns — confirm this runs under Python 2, or decode
        the content first under Python 3.
        """
        raw_res_list = self.__pull_logs()
        for log, raw_res in raw_res_list:
            match = re.search(r'h2\sstyle.*?>([\w\s/.-]+)<.*?Score:.*?>([\d]+)</span>', raw_res, re.DOTALL | re.I)
            res_doc.add_name(str(match.group(1).split(' ')[2].strip()) + ' [' + log + ']')
            res_doc.add_result(match.group(2))
            rows = re.findall(r'<div\sclass=\'bm\'>(.*?)</div>', raw_res, re.DOTALL | re.I)
            for row in rows:
                match = re.search(r'<span>([\w\s/\(\).-]+)<.*?>([\w.\s]+)</span>', row, re.DOTALL | re.I)
                res_doc.add_name(match.group(1))
                # failed fields: report 'Failed' and blank out the sub-fields
                if match.group(1) in self.failed_fields and match.group(2).strip() in ['0', '']:
                    res_doc.add_result('Failed')
                    for x in self.failed_fields[match.group(1)]:
                        res_doc.add_name(x)
                        res_doc.add_result('')
                else:
                    res_doc.add_result(match.group(2))
                values = re.findall(r'<li\sstyle.*?([\w\s/.-]+):\s([\w.\s]+)</li>', row, re.DOTALL | re.I)
                for value in values:
                    # skip Invalid CPU mode error in result
                    if 'Invalid CPU' in value[1]:
                        continue
                    # skip failed field
                    if value[0].lower() == 'failed':
                        continue
                    res_doc.add_name(value[0])
                    res_doc.add_result(value[1])
        self.sh('rm -r ' + self._pull + '*.html', errors=False)
| 46.681159
| 118
| 0.533064
| 404
| 3,221
| 4.081683
| 0.376238
| 0.036386
| 0.049121
| 0.045482
| 0.173438
| 0.140085
| 0.099454
| 0.06792
| 0.038811
| 0
| 0
| 0.013501
| 0.310152
| 3,221
| 68
| 119
| 47.367647
| 0.728623
| 0.068923
| 0
| 0.072727
| 0
| 0
| 0.128676
| 0.046791
| 0
| 0
| 0
| 0
| 0
| 1
| 0.072727
| false
| 0.018182
| 0.072727
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93b6e5c40e7caecbcb7b62ae060f41d6eac3c44d
| 3,879
|
py
|
Python
|
tests/commands/test_template_image_apply_overlays.py
|
dsoprea/image_template_overlay_apply
|
ce54429e07ac140b33add685d39221b1fb5cadb2
|
[
"MIT"
] | 1
|
2020-05-07T00:24:21.000Z
|
2020-05-07T00:24:21.000Z
|
tests/commands/test_template_image_apply_overlays.py
|
dsoprea/image_template_overlay_apply
|
ce54429e07ac140b33add685d39221b1fb5cadb2
|
[
"MIT"
] | null | null | null |
tests/commands/test_template_image_apply_overlays.py
|
dsoprea/image_template_overlay_apply
|
ce54429e07ac140b33add685d39221b1fb5cadb2
|
[
"MIT"
] | null | null | null |
import sys
import unittest
import os
import tempfile
import shutil
import contextlib
import json
import subprocess
import PIL
import templatelayer.testing_common
# Repository root (two levels above this test file); inserted into sys.path
# below so that 'templatelayer' is importable when running from a checkout.
_APP_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
_SCRIPT_PATH = os.path.join(_APP_PATH, 'templatelayer', 'resources', 'scripts')
# Command-line tool under test
_TOOL_FILEPATH = os.path.join(_SCRIPT_PATH, 'template_image_apply_overlays')
sys.path.insert(0, _APP_PATH)
class TestCommand(unittest.TestCase):
    """End-to-end test for the template_image_apply_overlays command-line
    tool: builds a template and four solid-color component images, runs the
    tool as a subprocess, and checks its console output."""

    def test_run(self):
        # Placeholder layout: two half-width cells on top, two full-width
        # cells below, on a 100x300 template.
        small_config = {
            "placeholders": {
                "top-left": {
                    "left": 0,
                    "top": 0,
                    "width": 50,
                    "height": 100
                },
                "top-right": {
                    "left": 50,
                    "top": 0,
                    "width": 50,
                    "height": 100
                },
                "middle-center": {
                    "left": 0,
                    "top": 100,
                    "width": 100,
                    "height": 100
                },
                "bottom-center": {
                    "left": 0,
                    "top": 200,
                    "width": 100,
                    "height": 100
                }
            }
        }

        # NOTE(review): files below are written with relative paths —
        # assumes temp_path() switches the working directory; confirm.
        with templatelayer.testing_common.temp_path() as temp_path:
            # Template
            template_im = \
                templatelayer.testing_common.get_new_image(
                    100,
                    300,
                    color='blue')

            template_im.save('template.png')

            # Top-Left
            component_topleft_im = \
                templatelayer.testing_common.get_new_image(
                    50,
                    100,
                    color='green')

            component_topleft_im.save('top_left.png')

            # Top-Right
            component_topright_im = \
                templatelayer.testing_common.get_new_image(
                    50,
                    100,
                    color='red')

            component_topright_im.save('top_right.png')

            # Middle-Center
            component_middlecenter_im = \
                templatelayer.testing_common.get_new_image(
                    100,
                    100,
                    color='yellow')

            component_middlecenter_im.save('middle_center.png')

            # Bottom-Center
            component_bottomcenter_im = \
                templatelayer.testing_common.get_new_image(
                    100,
                    100,
                    color='orange')

            component_bottomcenter_im.save('bottom_center.png')

            with open('config.json', 'w') as f:
                json.dump(small_config, f)

            cmd = [
                _TOOL_FILEPATH,
                'config.json',
                '--template-filepath', 'template.png',
                '--component-filepath', 'top-left', 'top_left.png',
                '--component-filepath', 'top-right', 'top_right.png',
                '--component-filepath', 'middle-center', 'middle_center.png',
                '--component-filepath', 'bottom-center', 'bottom_center.png',
                '--output-filepath', 'output.png',
            ]

            try:
                actual = \
                    subprocess.check_output(
                        cmd,
                        stderr=subprocess.STDOUT,
                        universal_newlines=True)
            except subprocess.CalledProcessError as cpe:
                print(cpe.output)
                raise

            expected = """Applying: [top-left] [top_left.png]
Applying: [top-right] [top_right.png]
Applying: [middle-center] [middle_center.png]
Applying: [bottom-center] [bottom_center.png]
Writing.
"""

            # assertEquals is a deprecated alias removed in Python 3.12;
            # use assertEqual.
            self.assertEqual(actual, expected)
| 28.733333
| 80
| 0.467389
| 331
| 3,879
| 5.250755
| 0.283988
| 0.080552
| 0.104718
| 0.080552
| 0.264672
| 0.161105
| 0.13809
| 0.13809
| 0.113924
| 0.113924
| 0
| 0.028855
| 0.428203
| 3,879
| 134
| 81
| 28.947761
| 0.754734
| 0.014179
| 0
| 0.262136
| 0
| 0
| 0.189104
| 0.007596
| 0
| 0
| 0
| 0
| 0.009709
| 1
| 0.009709
| false
| 0
| 0.097087
| 0
| 0.116505
| 0.009709
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93b7d3ab9113fe2fed663ad41fb0b7d4b95f018e
| 3,993
|
py
|
Python
|
src/gpt2/evaluate_model.py
|
alexgQQ/GPT2
|
b2d78965f7cdcfe7dcf475969f4d4cce2b3ee82a
|
[
"Apache-2.0"
] | 94
|
2020-05-05T04:27:05.000Z
|
2022-03-31T01:08:20.000Z
|
src/gpt2/evaluate_model.py
|
seeodm/GPT2
|
366d8517ac0bdf85e45e46adbef10cbe55740ee1
|
[
"Apache-2.0"
] | 7
|
2020-09-11T02:25:30.000Z
|
2021-11-23T16:03:01.000Z
|
src/gpt2/evaluate_model.py
|
seeodm/GPT2
|
366d8517ac0bdf85e45e46adbef10cbe55740ee1
|
[
"Apache-2.0"
] | 24
|
2020-07-14T19:15:39.000Z
|
2022-02-18T05:57:31.000Z
|
import argparse
import torch
import torch.nn as nn
from gpt2.modeling import Transformer
from gpt2.data import Dataset, Vocab, TokenizedCorpus
from gpt2.evaluation import EvaluationSpec, EvaluateConfig, Evaluator
from typing import Dict
class GPT2EvaluationSpec(EvaluationSpec):
    """Evaluation specification for a GPT-2 language model.

    Bundles the corpus/model hyper-parameters and constructs the dataset,
    the Transformer model and the evaluation objective for the Evaluator.
    """

    def __init__(self, eval_corpus: str, vocab_path: str, seq_len: int,
                 layers: int, heads: int, dims: int, rate: int):
        self.eval_corpus = eval_corpus
        self.vocab_path = vocab_path
        self.seq_len = seq_len
        self.layers = layers
        self.heads = heads
        self.dims = dims
        self.rate = rate

    def initialize(self):
        """Load the vocabulary and build the (unreduced) loss criterion."""
        self.vocab = Vocab(vocab_path=self.vocab_path)
        # reduction='none' keeps per-token losses so padding can be masked.
        self.criterion = nn.CrossEntropyLoss(reduction='none')

    def prepare_dataset(self) -> Dataset:
        """Build the tokenized evaluation corpus (single pass, no repeat)."""
        return TokenizedCorpus(corpus_path=self.eval_corpus,
                               vocab=self.vocab,
                               seq_len=self.seq_len,
                               repeat=False)

    def construct_model(self) -> nn.Module:
        """Create the non-bidirectional, dropout-free Transformer."""
        return Transformer(layers=self.layers, pad_idx=self.vocab.pad_idx,
                           words=len(self.vocab), seq_len=self.seq_len,
                           heads=self.heads, dims=self.dims, rate=self.rate,
                           dropout=0, bidirectional=False)

    def eval_objective(self, data: Dict[str, torch.Tensor], model: nn.Module
                       ) -> Dict[str, torch.Tensor]:
        """Compute padding-masked loss and perplexity for one batch."""
        logits, _ = model(data['input'], past=None)
        token_loss = self.criterion(logits.transpose(1, 2), data['output'])

        # Mask out padding positions before averaging.
        pad_mask = (data['output'] != self.vocab.pad_idx).float()
        mask_total = pad_mask.sum()

        loss = (token_loss * pad_mask).sum() / mask_total
        perplexity = (loss.exp() * pad_mask).sum() / mask_total

        return {'loss': loss, 'perplexity': perplexity}
def evaluate_gpt2_model(args: argparse.Namespace):
    """Evaluate a trained GPT-2 model described by parsed CLI arguments."""
    spec = GPT2EvaluationSpec(
        eval_corpus=args.eval_corpus,
        vocab_path=args.vocab_path,
        seq_len=args.seq_len,
        layers=args.layers,
        heads=args.heads,
        dims=args.dims,
        rate=args.rate)
    config = EvaluateConfig(
        batch_eval=args.batch_eval,
        total_steps=args.total_steps,
        use_gpu=args.use_gpu)
    evaluator = Evaluator(spec, config)
    print(evaluator.evaluate(from_model=args.model_path))
def add_subparser(subparsers: argparse._SubParsersAction):
    """Register the 'evaluate' sub-command and its CLI options."""
    parser = subparsers.add_parser('evaluate', help='evaluate GPT-2 model')
    parser.add_argument('--model_path', required=True,
                        help='trained GPT-2 model file path')

    corpus_group = parser.add_argument_group('Corpus and vocabulary')
    corpus_group.add_argument('--eval_corpus', required=True,
                              help='evaluation corpus file path')
    corpus_group.add_argument('--vocab_path', required=True,
                              help='vocabulary file path')

    # All model options are integer-valued; register them table-driven.
    model_group = parser.add_argument_group('Model configurations')
    for flag, default, help_text in (
            ('--seq_len', 64, 'maximum sequence length'),
            ('--layers', 12, 'number of transformer layers'),
            ('--heads', 16, 'number of multi-heads in attention layer'),
            ('--dims', 1024, 'dimension of representation in each layer'),
            ('--rate', 4, 'increase rate of dimensionality in bottleneck')):
        model_group.add_argument(flag, default=default, type=int,
                                 help=help_text)

    eval_group = parser.add_argument_group('Evaluation options')
    eval_group.add_argument('--batch_eval', default=64, type=int,
                            help='number of evaluation batch size')
    eval_group.add_argument('--total_steps', default=-1, type=int,
                            help='number of total evaluation steps')
    eval_group.add_argument('--use_gpu', action='store_true',
                            help='use gpu device in inferencing')

    parser.set_defaults(func=evaluate_gpt2_model)
| 42.478723
| 76
| 0.630604
| 478
| 3,993
| 5.110879
| 0.263598
| 0.063037
| 0.065493
| 0.027835
| 0.103152
| 0.04912
| 0.04912
| 0
| 0
| 0
| 0
| 0.008763
| 0.25695
| 3,993
| 93
| 77
| 42.935484
| 0.814628
| 0
| 0
| 0
| 0
| 0
| 0.146256
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.093333
| false
| 0
| 0.093333
| 0.026667
| 0.24
| 0.013333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93ba2653ba488171fc0c6a50b7e6cee03b9a572c
| 1,332
|
py
|
Python
|
mytrain/my_unpack.py
|
JinkelaCrops/t2t-learning
|
5d9b5a5164af763c24f1cbce9d97561e9f2b772c
|
[
"Apache-2.0"
] | 5
|
2019-03-28T03:52:32.000Z
|
2021-02-24T07:09:26.000Z
|
mytrain/my_unpack.py
|
JinkelaCrops/t2t-learning
|
5d9b5a5164af763c24f1cbce9d97561e9f2b772c
|
[
"Apache-2.0"
] | null | null | null |
mytrain/my_unpack.py
|
JinkelaCrops/t2t-learning
|
5d9b5a5164af763c24f1cbce9d97561e9f2b772c
|
[
"Apache-2.0"
] | 2
|
2018-08-07T03:43:09.000Z
|
2019-12-09T06:41:40.000Z
|
from processutils.textfilter import Unpack
from utils.simplelog import Logger
import argparse
# CLI: split a bilingual corpus file (one "src <sep> tgt" pair per line)
# into <file_prefix>.src and <file_prefix>.tgt files.
parser = argparse.ArgumentParser(description="my_unpack")
parser.add_argument('-f', "--file_prefix", required=True)
parser.add_argument('-sep', "--separator", required=True)
# args = parser.parse_args([
#     "-f", "../test/medicine.sample.data/data.test",
#     "-sep", ' ||| '
# ])
args = parser.parse_args()
# Derived output paths: source and target side of the corpus
args.output_src = args.file_prefix + ".src"
args.output_tgt = args.file_prefix + ".tgt"
log = Logger("my_filter", "my_filter.log").log()
def main(data):
    """
    Split bilingual corpus lines into source and target sides.

    Lines that fail to unpack are logged and skipped.

    :param data: iterable of raw corpus lines
    :return: tuple (src_lines, tgt_lines), each a list of
        newline-terminated strings
    """
    unpack = Unpack(args.separator)
    src_lines = []
    tgt_lines = []
    # The enumerate() index of the original loop was unused; iterate directly.
    for line in data:
        try:
            src, tgt, change_order = unpack.unpack(line)
        except Exception as e:
            log.error(f"unpack error: {e.__class__}, {e.__context__}, ### {line.strip()}")
            continue
        src_lines.append(src + "\n")
        tgt_lines.append(tgt + "\n")
    return src_lines, tgt_lines
if __name__ == '__main__':
    # Read the combined corpus, split it, and write the two sides out.
    with open(args.file_prefix, "r", encoding="utf8") as f:
        data = f.readlines()
    src_lines, tgt_lines = main(data)
    with open(args.output_src, "w", encoding="utf8") as f:
        f.writelines(src_lines)
    with open(args.output_tgt, "w", encoding="utf8") as f:
        f.writelines(tgt_lines)
| 29.6
| 90
| 0.638889
| 177
| 1,332
| 4.570621
| 0.372881
| 0.049444
| 0.051916
| 0.059333
| 0.066749
| 0.066749
| 0.066749
| 0
| 0
| 0
| 0
| 0.00282
| 0.201201
| 1,332
| 44
| 91
| 30.272727
| 0.757519
| 0.075826
| 0
| 0
| 0
| 0
| 0.130506
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032258
| false
| 0
| 0.096774
| 0
| 0.16129
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93baf5e4d83867b7e987a8bdfa95d1e350aa7b07
| 10,173
|
py
|
Python
|
source/api/dataplane/runtime/chalicelib/common.py
|
awslabs/aws-media-replay-engine
|
2c217eff42f8e2c56b43e2ecf593f5aaa92c5451
|
[
"Apache-2.0"
] | 22
|
2021-11-24T01:23:07.000Z
|
2022-03-26T23:24:46.000Z
|
source/api/dataplane/runtime/chalicelib/common.py
|
awslabs/aws-media-replay-engine
|
2c217eff42f8e2c56b43e2ecf593f5aaa92c5451
|
[
"Apache-2.0"
] | null | null | null |
source/api/dataplane/runtime/chalicelib/common.py
|
awslabs/aws-media-replay-engine
|
2c217eff42f8e2c56b43e2ecf593f5aaa92c5451
|
[
"Apache-2.0"
] | 3
|
2021-12-10T09:42:51.000Z
|
2022-02-16T02:22:50.000Z
|
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
import json
import urllib.parse
import boto3
import decimal
from decimal import Decimal
from datetime import datetime
from chalice import Chalice
from chalice import IAMAuthorizer
from chalice import ChaliceViewError, BadRequestError, NotFoundError
from botocore.config import Config
from botocore.client import ClientError
from boto3.dynamodb.conditions import Key, Attr, In
from jsonschema import validate, ValidationError
from chalicelib import replace_decimals
s3_client = boto3.client("s3")
ddb_resource = boto3.resource("dynamodb")
PLUGIN_RESULT_TABLE_NAME = os.environ['PLUGIN_RESULT_TABLE_NAME']
def populate_segment_data_matching(segment_response_data, tracknumber):
    """
    Build the client-facing metadata dict for one segment.

    Computes the optimized-clip durations, generates signed URLs for the
    clip/thumbnail locations matching *tracknumber*, and assembles the
    response payload.

    :param segment_response_data: one segment item from the PluginResult
        DynamoDB table
    :param tracknumber: requested audio track number (compared by string
        form against the keys stored in the item)
    :return: dict of segment metadata
    """
    optoLength = 0
    if 'OptoEnd' in segment_response_data and 'OptoStart' in segment_response_data:
        # By default OptoEnd and OptoStart are maps and have no Keys. Only when they do, we check for TrackNumber's
        if len(segment_response_data['OptoEnd'].keys()) > 0 and len(segment_response_data['OptoStart'].keys()) > 0:
            try:
                optoLength = segment_response_data['OptoEnd'][tracknumber] - segment_response_data['OptoStart'][
                    tracknumber]
            except Exception:
                pass  # Error if the TrackNumber does not exist. Simply Ignore since its a problem with Clip Gen

    # Calculate Opto Clip Duration for each Audio Track
    optoDurationsPerTrack = []
    if 'OptoEnd' in segment_response_data and 'OptoStart' in segment_response_data:
        for k in segment_response_data['OptoStart'].keys():
            try:
                optoDur = {k: segment_response_data['OptoEnd'][k] - segment_response_data['OptoStart'][k]}
                optoDurationsPerTrack.append(optoDur)
            except Exception:
                pass  # Error if the TrackNumber does not exist. Simply Ignore since its a problem with Clip Gen

    optoClipLocation = ''
    if 'OptimizedClipLocation' in segment_response_data:
        # A matching TrackNumber may be absent (a Clip Gen issue). Instead of
        # failing, an empty optoClipLocation is returned.
        for trackNo in segment_response_data['OptimizedClipLocation'].keys():
            if str(trackNo) == str(tracknumber):
                # Index with trackNo (the actual key): the match above is on
                # the string form, so the stored key's type may differ from
                # tracknumber's and indexing with tracknumber could KeyError.
                optoClipLocation = create_signed_url(segment_response_data['OptimizedClipLocation'][trackNo])
                break

    origClipLocation = ''
    if 'OriginalClipLocation' in segment_response_data:
        for trackNo in segment_response_data['OriginalClipLocation'].keys():
            if str(trackNo) == str(tracknumber):
                origClipLocation = create_signed_url(segment_response_data['OriginalClipLocation'][trackNo])
                break

    label = ''
    if 'Label' in segment_response_data:
        label = segment_response_data['Label']
    if str(label) == "":
        label = '<no label plugin configured>'

    result = {
        'OriginalClipLocation': origClipLocation,
        'OriginalThumbnailLocation': create_signed_url(
            segment_response_data[
                'OriginalThumbnailLocation']) if 'OriginalThumbnailLocation' in segment_response_data else '',
        'OptimizedClipLocation': optoClipLocation,
        'OptimizedThumbnailLocation': create_signed_url(
            segment_response_data[
                'OptimizedThumbnailLocation']) if 'OptimizedThumbnailLocation' in segment_response_data else '',
        'StartTime': segment_response_data['Start'],
        'Label': label,
        'FeatureCount': 'TBD',
        'OrigLength': 0 if 'Start' not in segment_response_data else segment_response_data['End'] -
        segment_response_data['Start'],
        'OptoLength': optoLength,
        'OptimizedDurationPerTrack': optoDurationsPerTrack,
        'OptoStartCode': '' if 'OptoStartCode' not in segment_response_data else segment_response_data['OptoStartCode'],
        'OptoEndCode': '' if 'OptoEndCode' not in segment_response_data else segment_response_data['OptoEndCode']
    }
    return result
def create_signed_url(s3_path):
    """
    Generate a presigned GET URL (valid for 24 hours) for an S3 object.

    :param s3_path: full S3 path, e.g. "s3://bucket/key"
    :return: presigned URL string
    :raises Exception: re-raises any error from the S3 client
    """
    bucket, objkey = split_s3_path(s3_path)
    try:
        expires = 86400  # 24 hours
        url = s3_client.generate_presigned_url(
            ClientMethod='get_object',
            Params={
                'Bucket': bucket,
                'Key': objkey
            }, ExpiresIn=expires)
        return url
    except Exception as e:
        print(e)
        # Bare raise keeps the original traceback ("raise e" would reset it).
        raise
def split_s3_path(s3_path):
    """
    Split an "s3://bucket/key" path into its bucket and key parts.

    :param s3_path: full S3 URI
    :return: tuple (bucket, key); key may be empty if the path has no key
    """
    parts = s3_path.replace("s3://", "").split("/")
    bucket, key_parts = parts[0], parts[1:]
    return bucket, "/".join(key_parts)
def get_event_segment_metadata(name, program, classifier, tracknumber):
    """
    Gets the Segment Metadata based on the segments found during the
    Segmentation/Optimization process.

    :param name: event name (URL-encoded)
    :param program: program name (URL-encoded)
    :param classifier: classifier plugin name (URL-encoded)
    :param tracknumber: requested audio track number (URL-encoded)
    :return: dict with a 'Segments' list of clip metadata, Decimals replaced
    :raises ChaliceViewError: on any unexpected failure
    """
    name = urllib.parse.unquote(name)
    program = urllib.parse.unquote(program)
    classifier = urllib.parse.unquote(classifier)
    tracknumber = urllib.parse.unquote(tracknumber)

    try:
        # From the PluginResult table, get all clip items for the event,
        # paging through every result set.
        plugin_table = ddb_resource.Table(PLUGIN_RESULT_TABLE_NAME)

        response = plugin_table.query(
            KeyConditionExpression=Key("PK").eq(f"{program}#{name}#{classifier}"),
            ScanIndexForward=False
        )
        plugin_responses = response['Items']

        while "LastEvaluatedKey" in response:
            response = plugin_table.query(
                ExclusiveStartKey=response["LastEvaluatedKey"],
                KeyConditionExpression=Key("PK").eq(f"{program}#{name}#{classifier}"),
                ScanIndexForward=False
            )
            plugin_responses.extend(response["Items"])

        # Reuse the single-segment formatter instead of duplicating its
        # logic inline; the output shape is identical.
        clip_info = [
            populate_segment_data_matching(res, tracknumber)
            for res in plugin_responses
        ]

        final_response = {'Segments': clip_info}

    except NotFoundError as e:
        print(e)
        print(f"Got chalice NotFoundError: {str(e)}")
        raise
    except Exception as e:
        print(e)
        print(f"Unable to get the Event '{name}' in Program '{program}': {str(e)}")
        raise ChaliceViewError(f"Unable to get the Event '{name}' in Program '{program}': {str(e)}")
    else:
        return replace_decimals(final_response)
| 45.013274
| 132
| 0.626954
| 1,051
| 10,173
| 5.943863
| 0.196004
| 0.076837
| 0.097327
| 0.050424
| 0.62558
| 0.580439
| 0.502481
| 0.435569
| 0.427405
| 0.392668
| 0
| 0.004807
| 0.284282
| 10,173
| 226
| 133
| 45.013274
| 0.85318
| 0.165831
| 0
| 0.417143
| 0
| 0
| 0.183722
| 0.071691
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022857
| false
| 0.022857
| 0.085714
| 0
| 0.131429
| 0.028571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93bc932331d06fe620b9dc241c2d48eeb8fdbdb8
| 9,559
|
py
|
Python
|
oec_sync/sync/oec.py
|
SamnnyWong/OECSynchronizer
|
9b28c96988158f5717bacd47f59cbabb1ce072cd
|
[
"Unlicense",
"MIT"
] | null | null | null |
oec_sync/sync/oec.py
|
SamnnyWong/OECSynchronizer
|
9b28c96988158f5717bacd47f59cbabb1ce072cd
|
[
"Unlicense",
"MIT"
] | null | null | null |
oec_sync/sync/oec.py
|
SamnnyWong/OECSynchronizer
|
9b28c96988158f5717bacd47f59cbabb1ce072cd
|
[
"Unlicense",
"MIT"
] | null | null | null |
from xml.etree import ElementTree as Etree
from model import *
from astro_unit import *
from io import StringIO
import logging
class FieldMeta:
    """
    OEC field metadata: the value type of a field and its physical unit.
    """

    def __init__(self, datatype: str, unit: str = None):
        # unit may be None when the field is dimensionless or carries its
        # unit on the XML element itself.
        self.type = datatype
        self.unit = unit
# Maps field name to FieldMeta (value type and unit).
# Only the following columns will be understood; the commented-out entries
# are known OEC fields that are deliberately not handled here.
PLANET_FIELDS = {
    "semimajoraxis": FieldMeta("number", 'AU'),
    "eccentricity": FieldMeta("number"),  # unit not needed
    "periastron": FieldMeta("number", 'deg'),
    "longitude": FieldMeta("number", 'deg'),
    "ascendingnode": FieldMeta("number", 'deg'),
    "inclination": FieldMeta("number", 'deg'),
    "impactparameter": FieldMeta("number"),  # unit not needed
    "meananomaly": FieldMeta("number", 'deg'),
    "period": FieldMeta("number", 'days'),
    "transittime": FieldMeta("number", 'BJD'),
    "periastrontime": FieldMeta("number", 'BJD'),
    "maximumrvtime": FieldMeta("number", 'BJD'),
    "separation": FieldMeta("number", 'arcsec'),  # unit on xml element
    "mass": FieldMeta("number", 'M_j'),
    "radius": FieldMeta("number", 'R_j'),
    "temperature": FieldMeta("number", 'K'),
    "age": FieldMeta("number", 'Gyr'),
    # "discoverymethod": FieldMeta("discoverymethodtype"),
    # "istransiting": FieldMeta("boolean"),
    # "description": "xs:string",
    "discoveryyear": FieldMeta("number", None),
    # "lastupdate": FieldMeta("lastupdatedef", None),
    # "image",
    # "imagedescription",
    "spinorbitalignment": FieldMeta("number", 'deg'),
    "positionangle": FieldMeta("number", 'deg'),
    # "metallicity": FieldMeta("number"),  # unit not needed
    # "spectraltype": FieldMeta("spectraltypedef"),
    # "magB": FieldMeta("number", None),
    "magH": FieldMeta("number", None),
    "magI": FieldMeta("number", None),
    "magJ": FieldMeta("number", None),
    "magK": FieldMeta("number", None),
    # "magR": FieldMeta("number", None),
    # "magU": FieldMeta("number", None),
    "magV": FieldMeta("number", None)
}
class Adapter:
"""
Reads/writes OEC files.
"""
def __init__(self, schema_file: str=None):
"""
:param schema_file: Schema file (*.xsd)
"""
# process schema
if schema_file:
self._schema_tree = Etree.parse(schema_file).getroot()
    @staticmethod
    def _read_number(field: Etree.Element, fieldmeta: FieldMeta)\
            -> Quantity:
        """
        Build a Quantity from a numeric XML field element.

        The unit comes from the field metadata, falling back to the
        element's own 'unit' attribute. Limit attributes
        ('lowerlimit'/'upperlimit') take precedence over error terms
        ('errorminus'/'errorplus').
        """
        # read unit
        unit = fieldmeta.unit
        if unit is None:  # check if element has unit defined
            unit = field.get('unit')
        # read limits/errors; limits win when either bound is present
        lower, upper = field.get('lowerlimit'), field.get('upperlimit')
        is_limit = bool(lower or upper)
        if not is_limit:
            lower, upper = field.get('errorminus'), field.get('errorplus')
        q = Quantity(field.text, unit, error=(lower, upper), is_limit=is_limit)
        return q
    @staticmethod
    def _read_planet(root: Etree.Element, system_name: str) -> Planet:
        """
        Reads out a planet from an xml element.

        :param root: Must be a planet element.
        :param system_name: System name.
        :return: Planet object.
        :raises NameError: if *root* is not a <planet> element.
        :raises SyntaxError: if the planet has no name.
        """
        if root.tag != 'planet':
            raise NameError('Root must be a planet element')
        default_name = root.find('name').text
        if not default_name:
            raise SyntaxError('Could not find planet name')
        # A planet may carry several <name> aliases
        all_names = set(name.text for name in root.findall('name'))
        planet = Planet(default_name, system_name, all_names=all_names)
        # next(root.iter()) yields the planet element itself; iterating it
        # walks its direct children (the property fields).
        for field in next(root.iter()):
            fieldmeta = PLANET_FIELDS.get(field.tag)
            if fieldmeta:
                try:
                    # Some OEC files are weird
                    # e.g. KOI-12.xml, line 27 is
                    # <mass upperlimit="8.7" />
                    if fieldmeta.type == "number":
                        planet.prop[field.tag] = \
                            Adapter._read_number(field, fieldmeta)
                    else:
                        planet.prop[field.tag] = field.text
                except ValueError as e:
                    # Malformed numeric value: skip the field, keep parsing
                    logging.debug("[%s].[%s]: %s" %
                                  (default_name, field.tag, e))
                except Exception as e:
                    logging.exception(e)
        return planet
def read_system(self, file: str) -> System:
"""
Reads out planets in a system.
:param file: Path to a system xml file.
:return: A list of planets.
"""
tree = Etree.parse(file)
root = tree.getroot()
if root.tag != 'system':
raise NameError('Root must be a system element')
all_names = set(name.text for name in root.findall('name'))
system = System(root.find('name').text, file, all_names=all_names)
for planet_xml in root.iter('planet'):
planet = self._read_planet(planet_xml, system.name)
system.planets.append(planet)
return system
@staticmethod
def _write_number(field: Etree.Element, number: Quantity) -> bool:
# attrib is a dictionary holding the attributes of this element
attrib = field.attrib
# silently clear existing error terms
attrib.pop('errorminus', None)
attrib.pop('errorplus', None)
attrib.pop('lowerlimit', None)
attrib.pop('upperlimit', None)
# set new value
field.text = number.value
# set new error terms
if number.error:
if number.is_limit:
attrib['lowerlimit'], attrib['upperlimit'] = number.error
else:
attrib['errorminus'], attrib['errorplus'] = number.error
return True
@staticmethod
def _write_planet_update(planet: Etree.Element, update: PlanetUpdate) \
-> bool:
succeeded = True
# loop through new values in the update objects
for field, new_value in update.fields.items():
try:
prop_elem = planet.find(field)
if prop_elem is None:
# the original planet does not have this field
logging.debug("Creating new field '%s'" % field)
# create the field under the planet
prop_elem = Etree.SubElement(planet, field)
# write the new value
succeeded &= Adapter._write_number(prop_elem, new_value)
except Exception as e:
logging.exception(e)
succeeded = False
return succeeded
@staticmethod
def _write_system_update(root: Etree.Element,
update: PlanetarySysUpdate) -> bool:
if update.new:
# a new system?
raise NotImplementedError
succeeded = True
for planet_update in update.planets:
if planet_update.new:
succeeded = False
logging.debug('Skipped new planet update %r' % planet_update)
continue
# find planet element with the name
planet_elem = root.find('.//planet[name="%s"]' %
planet_update.name)
# planet does not exist in the file?
# creating a new planet isn't as easy,
# need some info about the host star.
if planet_elem is None:
succeeded = False
logging.debug('Could not find planet <%s>' %
planet_update.name)
continue
# apply the update to the current planet
logging.debug('Updating planet <%s>...' %
planet_update.name)
succeeded &= Adapter._write_planet_update(
planet_elem,
planet_update)
return succeeded
# def validate(self, file: str) -> None:
# Validates an xml using schema defined by OEC.
# Raises an exception if file does not follow the schema.
# :param file: File name.
# """
# return # skip for now, because OEC itself isn't following the schema
# # tree = etree.parse(file)
# # self._schema.assertValid(tree)
def update_str(self, xml_string: str, update: PlanetarySysUpdate) \
-> Tuple[str, bool]:
"""
Apply a system update to an xml string.
Also performs a check afterwards to determine if
the action succeeded.
:param xml_string: containing the xml representation of a system
:param update: Update to be applied to the system
:return: A tuple (content, succeeded) where:
- content is the file content modified
- succeeded indicates whether the update was successful.
"""
tree = Etree.parse(StringIO(xml_string))
ok = Adapter._write_system_update(tree, update)
serialized = Etree.tostring(tree.getroot(), 'unicode', 'xml')
return serialized, ok
def update_file(self, filename: str, update: PlanetarySysUpdate) -> bool:
"""
Apply a system update to an xml file.
:param filename: The system xml file
:param update: Update to be applied to the system
:return: Whether the update was successful
"""
tree = Etree.parse(filename)
succeeded = Adapter._write_system_update(tree, update)
tree.write(filename)
return succeeded
| 37.194553
| 79
| 0.574746
| 1,039
| 9,559
| 5.202117
| 0.245428
| 0.080481
| 0.031637
| 0.012211
| 0.131915
| 0.100833
| 0.072155
| 0.059204
| 0.032562
| 0.032562
| 0
| 0.000919
| 0.316874
| 9,559
| 256
| 80
| 37.339844
| 0.826799
| 0.246679
| 0
| 0.18543
| 0
| 0
| 0.119936
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066225
| false
| 0
| 0.033113
| 0
| 0.165563
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93bd3505c0bee8de6a5685c5e02ee9cbc78b0fdd
| 9,072
|
py
|
Python
|
pyAnVIL/anvil/util/ingest_helper.py
|
anvilproject/client-apis
|
cbd892042e092b0a1dede4c561bcfdde15e9a3ad
|
[
"Apache-2.0"
] | 8
|
2019-07-02T20:41:24.000Z
|
2022-01-12T21:50:21.000Z
|
pyAnVIL/anvil/util/ingest_helper.py
|
mmtmn/client-apis
|
215adae0b7f401b4bf62e7bd79b6a8adfe69cf4f
|
[
"Apache-2.0"
] | 37
|
2019-01-16T17:48:02.000Z
|
2021-08-13T21:35:54.000Z
|
pyAnVIL/anvil/util/ingest_helper.py
|
mmtmn/client-apis
|
215adae0b7f401b4bf62e7bd79b6a8adfe69cf4f
|
[
"Apache-2.0"
] | 7
|
2019-05-13T14:59:27.000Z
|
2022-01-12T21:50:22.000Z
|
"""Validate AnVIL workspace(s)."""
import os
from google.cloud.storage import Client
from google.cloud.storage.blob import Blob
from collections import defaultdict
import ipywidgets as widgets
from ipywidgets import interact
from IPython.display import display
import pandas as pd
import firecloud.api as FAPI
from types import SimpleNamespace
import numpy as np
class NestedNamespace(SimpleNamespace):
    """SimpleNamespace that recursively wraps nested dicts as namespaces."""
    def __init__(self, dictionary, **kwargs):
        """Copy *dictionary* into attributes, recursing into dict values."""
        super().__init__(**kwargs)
        for name in dictionary:
            attr = dictionary[name]
            wrapped = NestedNamespace(attr) if isinstance(attr, dict) else attr
            setattr(self, name, wrapped)
class IngestHelper():
    """Validate workspace from dropdown selections."""
    def __init__(self, workspace_namespace='terra-test-bwalsh', workspace_name='pyAnVIL Notebook', user_project=os.environ.get('GOOGLE_PROJECT', None)) -> None:
        """Retrieve expected schemas."""
        # NOTE(review): the user_project default is read from the environment
        # once, at class-definition time, not per call.
        # AnVIL buckets are Requester Pays: every GCS read must name a billing project.
        assert user_project, "AnVIL buckets use the `Requester Pays` feature. Please include a billing project."
        self.WORKSPACES = FAPI.list_workspaces().json()
        self.schemas_table = FAPI.get_entities(workspace_namespace, workspace_name, 'schema').json()
        # index the schema rows by consortium, then by entity name
        self.schemas = defaultdict(dict)
        for e in self.schemas_table:
            a = e['attributes']
            self.schemas[a['consortium']][a['entity']] = a
        self.consortiums = widgets.Dropdown(options=['Choose...'] + list(self.schemas.keys()))
        self.workspaces = widgets.Dropdown(options=[])
        self.user_project = user_project
        self.client = Client(project=self.user_project)
        self.reference_schema = None
    def validate(self, reference_schema, namespace, workspace_name, check_blobs=True):
        """Check target workspace against reference.

        Returns a flat dict keyed by entity name (plus a few summary keys)
        whose values are 'OK' or a human-readable problem description.
        NOTE(review): check_blobs is currently unused.
        """
        target_entities = FAPI.list_entity_types(namespace=namespace, workspace=workspace_name).json()
        reference = set(reference_schema.keys())
        # 'attributes' describes workspace-level attributes, not an entity type
        reference.remove('attributes')
        target = set(target_entities.keys())
        result = dict(workspace=workspace_name)
        for entity in reference.intersection(target):
            uri = None
            try:
                # 'required' is a comma-separated field list; spaces are cosmetic
                reference_fields = set([f.replace(' ', '') for f in reference_schema[entity]['required'].split(',')])
                if 'bucket_fields' in reference_schema[entity]:
                    reference_fields.update([f.replace(' ', '') for f in reference_schema[entity].get('bucket_fields', '').split(',')])
                target_fields = set(target_entities[entity]['attributeNames'] + [target_entities[entity]['idName']])
                if not reference_fields.issubset(target_fields):
                    msg = f'fields_missing:{reference_fields - target_fields }'
                else:
                    msg = 'OK'
                result[entity] = msg
                # cache bucket_name -> {blob_name: metadata} so each bucket is listed once
                project_buckets = {}
                if 'bucket_fields' in reference_schema[entity]:
                    for bucket_field in reference_schema[entity]['bucket_fields'].split(','):
                        if bucket_field not in target_fields:
                            result[entity] = f"{bucket_field} not found in {entity} schema."
                            continue
                        # verify that every entity row's gs:// uri actually exists
                        for e in FAPI.get_entities(namespace, workspace=workspace_name, etype=entity).json():
                            uri = e['attributes'][bucket_field]
                            blob = Blob.from_string(uri, client=self.client)
                            bucket_name = blob.bucket.name
                            if bucket_name not in project_buckets:
                                print(f"checking {workspace_name} {bucket_name}")
                                bucket = self.client.bucket(bucket_name, user_project=self.user_project)
                                project_buckets[bucket_name] = {}
                                for b in list(bucket.list_blobs()):
                                    project_buckets[bucket_name][b.name] = {'size': b.size, 'etag': b.etag, 'crc32c': b.crc32c, 'time_created': b.time_created, 'name': b.name}
                            if blob.name not in project_buckets[bucket_name]:
                                # stop at the first missing blob for this entity
                                result[entity] = f"{uri} does not exist in project_buckets {bucket_name}"
                                break
                    for bucket_name in project_buckets:
                        print(f"{bucket_name} has {len(project_buckets[bucket_name])} objects")
            except Exception as e:
                # record the failure against this entity rather than aborting the scan
                print(f"{workspace_name} {uri} {e}")
                result[entity] = str(e)
        for entity in reference - target:
            if entity == 'linked_field':
                # pseudo-entity describing a join (see cross_ref), not a real entity
                continue
            result[entity] = 'missing'
        result['unknown_entities'] = f"{','.join(sorted(target - reference))}"
        required_attributes = set([k.replace(' ', '') for k in reference_schema['attributes']['required'].split(',')])
        workspace_attribute_values = FAPI.get_workspace(namespace, workspace_name).json()['workspace']['attributes']
        # 'library:*' attributes arrive nested under the 'library' key
        target_attributes = set(list(workspace_attribute_values.keys()) + [f"library:{e}" for e in workspace_attribute_values.get('library', {}).keys()])
        missing_workspace_keys = sorted(list(required_attributes - target_attributes))
        if len(missing_workspace_keys) == 0:
            result['missing_workspace_keys'] = 'OK'
        else:
            result['missing_workspace_keys'] = ','.join(missing_workspace_keys)
        missing_xrefs = self.cross_ref(reference_schema, namespace, workspace_name)
        result['missing_xrefs'] = ','.join(missing_xrefs)
        return result
    def cross_ref(self, reference_schema, namespace, workspace_name):
        """Evaluate 'join' between two entities.

        Returns the set of left-side keys with no matching right-side key
        (dangling references), or [] if the schema declares no join.
        """
        if 'linked_field' not in reference_schema:
            return []
        def get_property(entity, entity_name, expression):
            # NOTE(review): eval of a schema-provided expression -- acceptable
            # only because schemas come from a trusted workspace; never feed
            # untrusted input through this path.
            return eval(expression, {entity_name: NestedNamespace(entity)})
        item = reference_schema['linked_field']
        join = item['relationship']
        # relationship looks like "left_entity.expr=right_entity.expr" -- TODO confirm
        (left, right) = join.split('=')
        # print(left, right)
        left_entity = left.split('.')[0]
        right_entity = right.split('.')[0]
        left_keys = set([get_property(e, left_entity, left) for e in FAPI.get_entities(namespace, workspace=workspace_name, etype=left_entity).json()])
        right_keys = set([get_property(e, right_entity, right) for e in FAPI.get_entities(namespace, workspace=workspace_name, etype=right_entity).json()])
        return left_keys - right_keys
    def interact(self):
        """Use widgets to display drop downs for consortiums and workspaces, handle user selections."""
        pd.set_option("display.max_rows", None, "display.max_columns", None)
        def update_workspaces(*args):
            # restrict to anvil-datastorage workspaces whose name mentions the consortium
            self.workspaces.options = ['Choose...', 'All workspaces', 'This workspace'] + sorted([w['workspace']['name'] for w in self.WORKSPACES if 'anvil-datastorage' in w['workspace']['namespace'] and self.consortiums.value.lower() in w['workspace']['name'].lower()])
        # Tie the image options to directory value
        self.consortiums.observe(update_workspaces, 'value')
        # Show the images
        def show_workspace(consortium, workspace):
            namespace = 'anvil-datastorage'
            self.reference_schema = self.schemas[consortium]
            reference_df = pd.DataFrame(
                [dict(id=e['name'], **e['attributes']) for e in self.schemas_table]
            ).set_index('id').query(f'consortium == "{consortium}"').replace(np.nan, '', regex=True).style.set_caption("Reference")
            if workspace and workspace == 'All workspaces':
                print("Working...")
                validations = []
                for workspace_name in [w['workspace']['name'] for w in self.WORKSPACES if 'anvil-datastorage' in w['workspace']['namespace'] and self.consortiums.value.lower() in w['workspace']['name'].lower()]:
                    validation = self.validate(self.schemas[consortium], namespace, workspace_name)
                    validations.append(validation)
                df = pd.DataFrame(validations).set_index('workspace').style.set_caption(f"{consortium}/{workspace}")
                display(reference_df)
                display(df)
                return
            if workspace == 'This workspace':
                # resolve the notebook's own workspace from the Terra environment
                workspace = os.environ['WORKSPACE_NAME']
                namespace = os.environ['WORKSPACE_NAMESPACE']
            if workspace and workspace != 'Choose...':
                df = pd.DataFrame([self.validate(self.schemas[consortium], namespace, workspace)])
                df = df.set_index('workspace').style.set_caption(f"{consortium}/{workspace}")
                display(reference_df)
                display(df)
                return
        # this calls ipywidgets.interact (the module-level import), not this method
        _ = interact(show_workspace, consortium=self.consortiums, workspace=self.workspaces)
| 53.680473
| 270
| 0.61541
| 982
| 9,072
| 5.504073
| 0.201629
| 0.048104
| 0.022017
| 0.021277
| 0.211286
| 0.177058
| 0.153747
| 0.121184
| 0.108233
| 0.108233
| 0
| 0.00105
| 0.264991
| 9,072
| 168
| 271
| 54
| 0.809538
| 0.044202
| 0
| 0.096296
| 0
| 0
| 0.141864
| 0.021094
| 0
| 0
| 0
| 0
| 0.007407
| 1
| 0.059259
| false
| 0
| 0.081481
| 0.007407
| 0.2
| 0.02963
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93c35e82e3070b5dcaa7b5ce0646c0a3d9c9b51e
| 5,760
|
py
|
Python
|
hasy.py
|
MartinThoma/cv-datasets
|
f0566839bc2e625274bd2d439114c6665ba1b37e
|
[
"MIT"
] | 1
|
2017-03-11T14:14:12.000Z
|
2017-03-11T14:14:12.000Z
|
hasy.py
|
MartinThoma/cv-datasets
|
f0566839bc2e625274bd2d439114c6665ba1b37e
|
[
"MIT"
] | null | null | null |
hasy.py
|
MartinThoma/cv-datasets
|
f0566839bc2e625274bd2d439114c6665ba1b37e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Utility file for the HASYv2 dataset.
See https://arxiv.org/abs/1701.08380 for details.
"""
from __future__ import absolute_import
from keras.utils.data_utils import get_file
from keras import backend as K
import numpy as np
import scipy.ndimage
import os
import tarfile
import shutil
import csv
from six.moves import cPickle as pickle
n_classes = 369  # number of distinct symbol classes in HASYv2
labels = []  # replaced by load_data() with the LaTeX label per class index
def _load_csv(filepath, delimiter=',', quotechar="'"):
"""
Load a CSV file.
Parameters
----------
filepath : str
Path to a CSV file
delimiter : str, optional
quotechar : str, optional
Returns
-------
list of dicts : Each line of the CSV file is one element of the list.
"""
data = []
csv_dir = os.path.dirname(filepath)
with open(filepath, 'rb') as csvfile:
reader = csv.DictReader(csvfile,
delimiter=delimiter,
quotechar=quotechar)
for row in reader:
for el in ['path', 'path1', 'path2']:
if el in row:
row[el] = os.path.abspath(os.path.join(csv_dir, row[el]))
data.append(row)
return data
def _generate_index(csv_filepath):
    """
    Generate an index 0...k for the k labels.

    Parameters
    ----------
    csv_filepath : str
        Path to 'test.csv' or 'train.csv'

    Returns
    -------
    tuple (dict, list) : the dict maps a symbol_id (as in test.csv /
        train.csv) to an integer in 0...k; the list holds the LaTeX
        label for each index, in index order.
    """
    index_by_symbol = {}
    latex_labels = []
    for row in _load_csv(csv_filepath):
        sid = row['symbol_id']
        if sid not in index_by_symbol:
            # next free index == number of symbols seen so far
            index_by_symbol[sid] = len(index_by_symbol)
            latex_labels.append(row['latex'])
    return index_by_symbol, latex_labels
def load_data():
    """
    Load HASYv2 dataset.

    Downloads and extracts the archive on first use, then caches the parsed
    first fold as a pickle so later calls are fast.

    # Returns
        Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
    """
    # Download if not already done
    fname = 'HASYv2.tar.bz2'
    origin = 'https://zenodo.org/record/259444/files/HASYv2.tar.bz2'
    fpath = get_file(fname, origin=origin, untar=False,
                     md5_hash='fddf23f36e24b5236f6b3a0880c778e3')
    path = os.path.dirname(fpath)
    # Extract content if not already done
    untar_fpath = os.path.join(path, "HASYv2")
    if not os.path.exists(untar_fpath):
        print('Untaring file...')
        tfile = tarfile.open(fpath, 'r:bz2')
        try:
            tfile.extractall(path=untar_fpath)
        except (Exception, KeyboardInterrupt):
            # remove a partial extraction before re-raising
            if os.path.exists(untar_fpath):
                if os.path.isfile(untar_fpath):
                    os.remove(untar_fpath)
                else:
                    shutil.rmtree(untar_fpath)
            raise
        tfile.close()
    # Create pickle if not already done
    pickle_fpath = os.path.join(untar_fpath, "fold1.pickle")
    if not os.path.exists(pickle_fpath):
        # Load mapping from symbol names to indices
        symbol_csv_fpath = os.path.join(untar_fpath, "symbols.csv")
        symbol_id2index, labels = _generate_index(symbol_csv_fpath)
        globals()["labels"] = labels
        # Load first fold
        fold_dir = os.path.join(untar_fpath, "classification-task/fold-1")
        train_csv_fpath = os.path.join(fold_dir, "train.csv")
        test_csv_fpath = os.path.join(fold_dir, "test.csv")
        train_csv = _load_csv(train_csv_fpath)
        test_csv = _load_csv(test_csv_fpath)
        WIDTH = 32
        HEIGHT = 32
        x_train = np.zeros((len(train_csv), 1, WIDTH, HEIGHT), dtype=np.uint8)
        x_test = np.zeros((len(test_csv), 1, WIDTH, HEIGHT), dtype=np.uint8)
        y_train, s_train = [], []
        y_test, s_test = [], []
        # Load training data
        for i, data_item in enumerate(train_csv):
            fname = os.path.join(untar_fpath, data_item['path'])
            s_train.append(fname)
            x_train[i, 0, :, :] = scipy.ndimage.imread(fname,
                                                       flatten=False,
                                                       mode='L')
            label = symbol_id2index[data_item['symbol_id']]
            y_train.append(label)
        y_train = np.array(y_train, dtype=np.int64)
        # Load test data
        for i, data_item in enumerate(test_csv):
            fname = os.path.join(untar_fpath, data_item['path'])
            s_test.append(fname)
            # BUGFIX: this loop previously wrote into x_train, leaving
            # x_test all zeros and corrupting the training images
            x_test[i, 0, :, :] = scipy.ndimage.imread(fname,
                                                      flatten=False,
                                                      mode='L')
            label = symbol_id2index[data_item['symbol_id']]
            y_test.append(label)
        y_test = np.array(y_test, dtype=np.int64)
        data = {'x_train': x_train,
                'y_train': y_train,
                'x_test': x_test,
                'y_test': y_test,
                'labels': labels
                }
        # Store data as pickle to speed up later calls
        with open(pickle_fpath, 'wb') as f:
            pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL)
    else:
        with open(pickle_fpath, 'rb') as f:
            data = pickle.load(f)
        x_train = data['x_train']
        y_train = data['y_train']
        x_test = data['x_test']
        y_test = data['y_test']
        globals()["labels"] = data['labels']
    # label vectors as (n, 1) columns
    y_train = np.reshape(y_train, (len(y_train), 1))
    y_test = np.reshape(y_test, (len(y_test), 1))
    if K.image_dim_ordering() == 'tf':
        # channels-last backends expect (n, H, W, 1)
        x_train = x_train.transpose(0, 2, 3, 1)
        x_test = x_test.transpose(0, 2, 3, 1)
    return (x_train, y_train), (x_test, y_test)
| 31.823204
| 78
| 0.562326
| 744
| 5,760
| 4.170699
| 0.25672
| 0.030938
| 0.029004
| 0.02417
| 0.211086
| 0.168869
| 0.152755
| 0.101837
| 0.087657
| 0.087657
| 0
| 0.022256
| 0.321354
| 5,760
| 180
| 79
| 32
| 0.771553
| 0.161806
| 0
| 0.125
| 0
| 0
| 0.073166
| 0.012372
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026786
| false
| 0
| 0.089286
| 0
| 0.142857
| 0.008929
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93c65b7f60b1d4ed3df0c1dfda29fa877d20e341
| 8,071
|
py
|
Python
|
control/tracking.py
|
oholsen/hagedag
|
4e2881fa1f636228e5cbe76e61fb4b224f0b1e4a
|
[
"Apache-2.0"
] | null | null | null |
control/tracking.py
|
oholsen/hagedag
|
4e2881fa1f636228e5cbe76e61fb4b224f0b1e4a
|
[
"Apache-2.0"
] | null | null | null |
control/tracking.py
|
oholsen/hagedag
|
4e2881fa1f636228e5cbe76e61fb4b224f0b1e4a
|
[
"Apache-2.0"
] | null | null | null |
"""
Based on Extended kalman filter (EKF) localization sample in PythonRobotics by Atsushi Sakai (@Atsushi_twi)
"""
import math
import matplotlib.pyplot as plt
import numpy as np
# Simulation parameters: noise injected into the simulated truth
INPUT_NOISE = np.diag([0.1, np.deg2rad(30.0)]) ** 2  # (v, yaw-rate) input noise
GPS_NOISE = np.diag([0.1, 0.1]) ** 2  # simulated GPS x/y noise
# Covariance for EKF simulation
Q = np.diag([
    0.02, # variance of location on x-axis
    0.02, # variance of location on y-axis
    np.deg2rad(10.0), # variance of yaw angle
    0.1 # variance of velocity
]) ** 2 # predict state covariance
# Observation x,y position covariance, now dynamic from receiver (input stream)
# R = np.diag([0.02, 0.02]) ** 2
def calc_input():
    """Return the constant control input [v, yaw_rate]' as a 2x1 column."""
    velocity = 1.0  # [m/s]
    turn_rate = 0.1  # [rad/s]
    return np.array([[velocity], [turn_rate]])
def simulate(xTrue, u, dt: float):
    """Advance the truth one step; return (new truth, noisy GPS obs, noisy input)."""
    new_true = motion_model(xTrue, u, dt)
    # GPS measurement of (x, y) with additive noise
    z = observation_model(new_true) + GPS_NOISE @ np.random.randn(2, 1)
    # control input as the robot would measure it (with noise)
    ud = u + INPUT_NOISE @ np.random.randn(2, 1)
    return new_true, z, ud
def observation(x_true, xd, u, dt: float):
    """Simulate one step: return (truth, noisy obs, dead-reckoned state, noisy input)."""
    x_true = motion_model(x_true, u, dt)
    z = observation_model(x_true) + GPS_NOISE @ np.random.randn(2, 1)  # noisy gps x-y
    ud = u + INPUT_NOISE @ np.random.randn(2, 1)  # noisy control input
    # dead reckoning integrates the *noisy* input
    xd = motion_model(xd, ud, dt)
    return x_true, z, xd, ud
def motion_model(x, u, dt: float):
    """Propagate state [x, y, yaw, v]' one step under input u = [v, yaw_rate]'."""
    yaw = x[2, 0]
    # velocity is replaced by the input each step (last F row is zero)
    F = np.array([[1.0, 0, 0, 0],
                  [0, 1.0, 0, 0],
                  [0, 0, 1.0, 0],
                  [0, 0, 0, 0]])
    B = np.array([[dt * math.cos(yaw), 0],
                  [dt * math.sin(yaw), 0],
                  [0.0, dt],
                  [1.0, 0.0]])
    return F @ x + B @ u
def observation_model(x):
    """Project the full state onto the measured quantities (x, y position)."""
    H = np.array([[1, 0, 0, 0],
                  [0, 1, 0, 0]])
    return H @ x
def jacob_f(x, u, DT: float):
    """
    Jacobian of the motion model.

    motion model:
        x_{t+1} = x_t + v*dt*cos(yaw)
        y_{t+1} = y_t + v*dt*sin(yaw)
        yaw_{t+1} = yaw_t + omega*dt
        v_{t+1} = v_t
    hence:
        dx/dyaw = -v*dt*sin(yaw),  dx/dv = dt*cos(yaw)
        dy/dyaw =  v*dt*cos(yaw),  dy/dv = dt*sin(yaw)
    """
    yaw, v = x[2, 0], u[0, 0]
    sin_yaw, cos_yaw = math.sin(yaw), math.cos(yaw)
    return np.array([
        [1.0, 0.0, -DT * v * sin_yaw, DT * cos_yaw],
        [0.0, 1.0, DT * v * cos_yaw, DT * sin_yaw],
        [0.0, 0.0, 1.0, 0.0],
        [0.0, 0.0, 0.0, 1.0],
    ])
def jacob_h():
    """Jacobian of the (linear) observation model: position-only measurement."""
    return np.array([[1, 0, 0, 0],
                     [0, 1, 0, 0]])
def ekf_estimation(x_est, P_est, z, u, R, dt: float):
    """One EKF predict/update cycle; returns the new (state, covariance)."""
    # -- predict --
    x_pred = motion_model(x_est, u, dt)
    jF = jacob_f(x_est, u, dt)
    P_pred = jF @ P_est @ jF.T + Q
    # -- update --
    jH = jacob_h()
    innovation = z - observation_model(x_pred)
    S = jH @ P_pred @ jH.T + R  # innovation covariance
    K = P_pred @ jH.T @ np.linalg.inv(S)  # Kalman gain
    x_est = x_pred + K @ innovation
    P_est = (np.eye(len(x_est)) - K @ jH) @ P_pred
    return x_est, P_est
def plot_covariance_ellipse(xEst, PEst): # pragma: no cover
    """Draw the x/y covariance ellipse of PEst around the estimated position."""
    Pxy = PEst[0:2, 0:2]
    eigval, eigvec = np.linalg.eig(Pxy)
    # order so the larger eigenvalue (major axis) comes first
    if eigval[0] >= eigval[1]:
        bigind = 0
        smallind = 1
    else:
        bigind = 1
        smallind = 0
    t = np.arange(0, 2 * math.pi + 0.1, 0.1)
    # semi-axis lengths: std dev along each principal direction
    a = math.sqrt(eigval[bigind])
    b = math.sqrt(eigval[smallind])
    x = [a * math.cos(it) for it in t]
    y = [b * math.sin(it) for it in t]
    # rotate the axis-aligned ellipse onto the major eigenvector
    angle = math.atan2(eigvec[bigind, 1], eigvec[bigind, 0])
    rot = np.array([[math.cos(angle), math.sin(angle)],
                    [-math.sin(angle), math.cos(angle)]])
    fx = rot @ (np.array([x, y]))
    # translate to the estimated position and draw as a dashed red line
    px = np.array(fx[0, :] + xEst[0, 0]).flatten()
    py = np.array(fx[1, :] + xEst[1, 0]).flatten()
    plt.plot(px, py, "--r")
async def read_simulation():
    """Async source of (time, dt, z, ud, hdop) tuples from a seeded simulation."""
    DT = 0.1  # time tick [s]
    SIM_TIME = 50.0  # total simulated duration [s]
    HDOP = 0.1
    np.random.seed(23)  # deterministic noise sequence
    # State Vector [x y yaw v]'
    x_true = np.zeros((4, 1))
    z = np.zeros((2, 1))
    # first item carries only the initial observation
    yield 0, None, z, None, None
    t = 0.0
    while t <= SIM_TIME:
        control = calc_input()
        x_true, z, noisy_control = simulate(x_true, control, DT)
        t += DT
        yield t, DT, z, noisy_control, HDOP
class History:
    """Accumulates 2x1 samples as columns, newest first."""
    def __init__(self, state=None):
        # None until the first sample, then a horizontally growing array
        self.history = state
    def add(self, x):
        """Prepend sample x (stacked before the older samples)."""
        self.history = x if self.history is None else np.hstack((x, self.history))
    def plot(self, fmt):
        """Plot row 0 against row 1 of the accumulated history."""
        plt.plot(self.history[0, :], self.history[1, :], fmt)
    def plot_flatten(self, fmt):
        """Plot row 0 against row 1, flattening nested array entries first."""
        plt.plot(self.history[0, :].flatten(),
                 self.history[1, :].flatten(), fmt)
class DeadReckonTracker:
    """Tracks pose purely by integrating control inputs (no measurements)."""
    def __init__(self, state=None):
        # state vector [x y yaw v]'
        self.state = state
    def init(self, state):
        """Reset the tracked state."""
        self.state = state
    def get_state(self):
        """Return the current state estimate."""
        return self.state
    def update(self, z, u, dt: float):
        """Advance the state by dt under input u; the observation z is ignored."""
        self.state = motion_model(self.state, u, dt)
        return self.state
class ExtendedKalmanFilterTracker:
    """EKF-based tracker over the state vector [x y yaw v]'."""
    def __init__(self, state=None):
        # state vector [x y yaw v]'; None until the first observation arrives
        self.state = state
        self.P = np.eye(4)  # state covariance
    def init(self, state):
        """Reset the state estimate and covariance."""
        self.state = state
        self.P = np.eye(4)
    def get_state(self):
        """Return the current state estimate."""
        return self.state
    def update(self, z, u, R, dt: float):
        """
        Run one EKF step with observation z, input u, and measurement
        covariance R over time step dt. Returns the new state estimate.
        """
        if self.state is None:
            # bootstrap from the first observed position; zero yaw and speed
            self.state = np.array([[z[0][0]], [z[1][0]], [0], [0]])
        self.state, self.P = ekf_estimation(self.state, self.P, z, u, R, dt)
        return self.state
    def update2(self, z, u, hdop: float, dt: float):
        """
        Like update(), but derives the measurement covariance from the GPS
        hdop; each x/y component is ~0.707 * hdop (hdop being a radius).
        Delegates to update() to avoid duplicating the EKF step.
        """
        R = np.diag([0.7 * hdop, 0.7 * hdop]) ** 2
        return self.update(z, u, R, dt)
async def track(stream, yaw=0, speed=0):
    """
    Consume (time, dt, z, ud, hdop) tuples from `stream` and yield the EKF
    state estimate after each item. The first item only initializes the
    trackers (yaw/speed give the initial heading/velocity).
    """
    #show_animation = True
    show_animation = False
    # state vectors [x y yaw v]'
    first = True
    def plot():
        # closure over hz/hekf/ekf, which are bound on the first stream item;
        # NOTE(review): calling plot() before any item arrives would raise NameError
        # plt.gca().invert_xaxis()
        # plt.gca().invert_yaxis()
        plt.axis("equal")
        plt.grid(True)
        hz.plot(".g")
        # hdr.plot_flatten("-k")
        hekf.plot_flatten("-r")
        plot_covariance_ellipse(ekf.state, ekf.P)
        # State Vector [x y yaw v]'
        s = ekf.get_state().flatten()
        # print("STATE", s)
        x = s[0]
        y = s[1]
        yaw = s[2]  # shadows the outer `yaw` parameter, locally only
        # speed = s[3]
        a = 1 # * speed
        plt.arrow(x, y, a * math.cos(yaw), a * math.sin(yaw))
    # async for o in stream: print("track", repr(o))
    async for _, dt, z, ud, hdop in stream:
        # print("TRACK STREAM", dt, z, ud)
        if first:
            # init state with the first observation, using yaw, v = 0
            s = np.array([[z[0][0]], [z[1][0]], [yaw], [speed]])
            dr = DeadReckonTracker(s)
            hdr = History(s)
            ekf = ExtendedKalmanFilterTracker(s)
            hekf = History(s)
            hz = History(z)
            first = False
            yield s
            continue
        # each component is 0.707 * hdop (hdop is radius)
        R = np.diag([0.7 * hdop, 0.7 * hdop]) ** 2
        hdr.add(dr.update(z, ud, dt))
        s = ekf.update(z, ud, R, dt)
        hekf.add(s)
        hz.add(z)
        yield s
        if show_animation:
            plt.cla()
            # for stopping simulation with the esc key.
            plt.gcf().canvas.mpl_connect('key_release_event',
                                         lambda event: [exit(0) if event.key == 'escape' else None])
            plot()
            plt.pause(0.001)
    # final static plot once the stream is exhausted
    plot()
    plt.show()
async def main():
    """Demo entry point: print tracker estimates over simulated GPS data."""
    async for state in track(read_simulation()):
        print(state)
if __name__ == '__main__':
    # run the tracking demo only when executed as a script
    import asyncio
    asyncio.run(main())
| 25.143302
| 107
| 0.525585
| 1,275
| 8,071
| 3.246275
| 0.167059
| 0.024644
| 0.02102
| 0.016429
| 0.303455
| 0.276878
| 0.250544
| 0.225417
| 0.221551
| 0.169123
| 0
| 0.039579
| 0.317557
| 8,071
| 320
| 108
| 25.221875
| 0.711874
| 0.185355
| 0
| 0.215385
| 0
| 0
| 0.006648
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117949
| false
| 0
| 0.020513
| 0.010256
| 0.220513
| 0.005128
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93c70f97a9fcc20d868e2f05ea3a698a7c994530
| 974
|
py
|
Python
|
Lab11/BacktrackingIterative.py
|
alexnaiman/Fundamentals-Of-Programming---Lab-assignments
|
ef066e6036e20b9c686799f507f10e15e50e3285
|
[
"MIT"
] | 4
|
2018-02-19T13:57:38.000Z
|
2022-01-08T04:10:54.000Z
|
Lab11/BacktrackingIterative.py
|
alexnaiman/Fundamentals-Of-Programming---Lab-assignments
|
ef066e6036e20b9c686799f507f10e15e50e3285
|
[
"MIT"
] | null | null | null |
Lab11/BacktrackingIterative.py
|
alexnaiman/Fundamentals-Of-Programming---Lab-assignments
|
ef066e6036e20b9c686799f507f10e15e50e3285
|
[
"MIT"
] | null | null | null |
l = [0, "-", "+"]  # component domain; 0 is the "not yet chosen" sentinel
def backIter():
    """Iterative backtracking over +/- sign choices; prints every solution."""
    candidate = [0]  # candidate solution; 0 marks an unchosen component
    while candidate:
        advanced = False
        # step the last component through the remaining values of l
        while not advanced and l.index(candidate[-1]) < len(l) - 1:
            candidate[-1] = l[l.index(candidate[-1]) + 1]
            advanced = consistent(candidate)
        if not advanced:
            candidate.pop()  # values exhausted; go back one component
        elif solution(candidate):
            solutionFound(candidate)
            candidate.append(0)  # expand candidate solution
        else:
            candidate.append(0)  # expand candidate solution
def consistent(s):
    """A partial candidate is consistent while it has fewer than n components."""
    return n > len(s)
def solution(s):
    """True iff s assigns all n-1 signs and the signed sum of list2 is positive."""
    if len(s) != n - 1:
        return False
    total = list2[0]
    # apply each sign to the following number
    for sign, value in zip(s, list2[1:]):
        total = total - value if sign == "-" else total + value
    return total > 0
def solutionFound(s):
    """Report a discovered solution by printing it."""
    print(s)
# read the problem size and the n numbers, then run the search
n = int(input("Give number"))
list2 = []
for i in range(n):
    list2.append(int(input(str(i) + ":")))
backIter()
print("test")
| 21.173913
| 73
| 0.464066
| 127
| 974
| 3.559055
| 0.354331
| 0.013274
| 0.030973
| 0.035398
| 0.053097
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033389
| 0.38501
| 974
| 45
| 74
| 21.644444
| 0.721202
| 0.096509
| 0
| 0.058824
| 0
| 0
| 0.022892
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117647
| false
| 0
| 0
| 0.029412
| 0.205882
| 0.058824
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93c86f77e89802184faaf894ae457e773562fb59
| 31,674
|
py
|
Python
|
dreadlord_counter_strike.py
|
lorenypsum/dreadlord_counter_strike
|
5f63c97ab28d84f8d7d9ff2f481c5111f0bc2ef1
|
[
"MIT"
] | null | null | null |
dreadlord_counter_strike.py
|
lorenypsum/dreadlord_counter_strike
|
5f63c97ab28d84f8d7d9ff2f481c5111f0bc2ef1
|
[
"MIT"
] | null | null | null |
dreadlord_counter_strike.py
|
lorenypsum/dreadlord_counter_strike
|
5f63c97ab28d84f8d7d9ff2f481c5111f0bc2ef1
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from enum import Enum, auto
from random import randint
from time import sleep
from typing import Optional, Tuple
class GameItem(Enum):
    """Items that can be granted to the player (mostly via dice rolls)."""
    DEATH = auto()
    WOODEN_SWORD = auto()
    SIMPLE_BOW = auto()
    VIOLIN = auto()
    ORDINARY_SWORD = auto()
    STRAHD_SLAYER_SWORD = auto()
    STRAHD_SLAYER_BOW = auto()
class GameStatus(Enum):
    """Possible player states after a roll or encounter."""
    ALIVE = auto()
    DEAD = auto()
    ARREGAO = auto()  # fled successfully but gave up the quest (see flee())
    WINNER = auto()
    HAHA = auto()
def ask_if_yes(input_text: str) -> bool:
    """
    Ask the player a question and return True if they typed a yes-like
    answer (y/yes/s/sim, case-insensitive), False otherwise.
    """
    answer = input(input_text)
    return answer.lower() in ("y", "yes", "s", "sim")
def ask_if_wanna_continue(player_name: str) -> bool:
    """
    This function asks the player if they want to continue the game,
    and returns the answer (True to keep playing, False to quit).
    """
    print("You reached one possible end!!!")
    if ask_if_yes("Wanna change your fate? "):
        sleep(2)
        print("Very well then...")
        sleep(2)
        return True
    else:
        # farewell banter before quitting
        if ask_if_yes(f"{player_name} did you find the treasure I prepared for you? "):
            print("I hope you are not lying, you may leave now!!!")
            sleep(1)
        else:
            print("What a shame! you broke my heart :'(")
            sleep(1)
        return False
def roll_for_item(player_name: str) -> Tuple[Optional[GameItem], GameStatus]:
    """
    This function rolls the dice for the player.
    It returns the item that the player gained (if any),
    and the status of the player after the roll.
    """
    roll = randint(1, 20)
    # Lurin is "inspired" and is offered one reroll
    if player_name.lower() == "lurin":
        print(f"You rolled {roll}!")
        sleep(2)
        if ask_if_yes("Since you are inspired... wanna roll again? "):
            sleep(2)
            roll = randint(1, 20)
            print(f"Now your roll was {roll}")
    # critical failure
    if roll == 1:
        print(f"HAHAHAHAHA, tragic! You got {roll}")
        sleep(2)
        if player_name.lower() != "lurin":
            print(
                f"Unfortunalety {player_name}, you are not Lurin, so you do not have another chance!!!"
            )
            sleep(4)
        else:
            print(
                f"Unfortunalety fake {player_name}, even inspired you got it? You are a joke!!!"
            )
            sleep(4)
            return None, GameStatus.DEAD
        # Snow survives a critical failure by "playing dead"; everyone else dies
        if player_name.lower() == "snow":
            print(f"... you may have this *WONDERFUL DEATH* to help you kill STRAHD...")
            sleep(3)
            print("...the perfect item for you, huh?")
            sleep(2)
            print("...no, it is not a typo or some faulty logic!")
            sleep(2)
            print(
                "It is indeed the perfect item for you... you will play dead (you are used to it)... STRAHD flew away..."
            )
            sleep(4)
            return GameItem.DEATH, GameStatus.ALIVE
        else:
            print(
                f"Well {player_name}, you may have this *DEATH* to help you kill STRAHD..."
            )
            sleep(3)
            print("...since you are not SNOW....")
            sleep(2)
            print("...no, it is not a typo or some faulty logic!")
            sleep(2)
            print("...you are DEAD!")
            sleep(2)
            print("***Bad end!***")
            sleep(1)
            return None, GameStatus.DEAD
    # 2-5: violin (Kaede's signature item)
    elif roll <= 5:
        print(f"You got {roll}")
        if player_name.lower() != "kaede":
            print(
                f"Well {player_name}, you may have this *VIOLIN* to help you kill STRAHD..."
            )
            sleep(3)
            print("...since you are not KAEDE.... gooood luck!")
            sleep(2)
            return GameItem.VIOLIN, GameStatus.ALIVE
        else:
            print(f"Well {player_name}, you may have this ***WONDERFUL VIOLIN***")
            sleep(3)
            print("the perfect item for you, huh?")
            sleep(2)
            return GameItem.VIOLIN, GameStatus.ALIVE
    # 6-10: simple bow (Soren's signature item)
    elif roll <= 10:
        print(f"You got {roll}")
        if player_name.lower() != "soren":
            print(
                f"Well {player_name}, you may have this *SIMPLE BOW* to help you kill STRAHD..."
            )
            sleep(3)
            print("...since you are not Soren... gooood luck!")
            sleep(2)
            return GameItem.SIMPLE_BOW, GameStatus.ALIVE
        else:
            print(f"Well {player_name}, you may have this ***WONDERFUl SIMPLE BOW***")
            sleep(3)
            print("the perfect item for you, huh?")
            sleep(2)
            print("just.. do not kill any cats with this, moron!!!")
            sleep(2)
            return GameItem.SIMPLE_BOW, GameStatus.ALIVE
    # 11-15: ordinary sword (Vis's signature item)
    elif roll <= 15:
        print(f"You got {roll}")
        if player_name.lower() != "vis":
            print(
                f"Well {player_name}, you may have this *ORDINARY SWORD* to help you kill STRAHD..."
            )
            sleep(3)
            print("...since you are not Vis... gooood luck!")
            sleep(2)
            print("and pray it won't fly...")
            sleep(2)
            return GameItem.ORDINARY_SWORD, GameStatus.ALIVE
        else:
            print(
                f"Well {player_name}, you may have this ***FANTASTIC ORDINARY SWORD*** to help you kill STRAHD"
            )
            sleep(3)
            print("the perfect item for you, huh?")
            sleep(2)
            print("if it doesn't fly...")
            sleep(2)
            return GameItem.ORDINARY_SWORD, GameStatus.ALIVE
    # 16-19: legendary sword
    elif roll < 20:
        print(f"You got {roll}")
        sleep(2)
        print(
            f"Well {player_name}, you may have ****STRAHD SLAYER SWORD***, go kill STRAHD, "
        )
        sleep(3)
        print("...the legendary item!!!")
        sleep(2)
        print("...but hope it won't fly!!!")
        sleep(2)
        return GameItem.STRAHD_SLAYER_SWORD, GameStatus.ALIVE
    # natural 20: legendary bow (special flavor for Snow)
    elif roll == 20:
        if player_name.lower() != "snow":
            print(
                f"Well {player_name}, you may have **** STRAHD SLAYER BOW***, go kill STRAHD, special treasures awaits you!!!"
            )
            sleep(3)
            print("...the legendary perfect item!!!")
            sleep(2)
            print("...it doesn't even matter if it will fly!!!")
            sleep(2)
            return GameItem.STRAHD_SLAYER_BOW, GameStatus.ALIVE
        else:
            print(
                f"Well {player_name}, you seduced STRAHD, now you can claim your treasures"
            )
            sleep(2)
            print(f"STRAHD licks you!!!")
            sleep(4)
            return GameItem.STRAHD_SLAYER_BOW, GameStatus.ALIVE
    # fallback: unreachable for rolls 1..20, kept as a safe default
    return None, GameStatus.ALIVE
def flee(player_name: str) -> GameStatus:
    """
    Ask the player whether they want to flee and return the resulting status.

    Fleeing requires typing 'roll stealth' exactly and then rolling above 10
    on a d20; any typo or a low roll means STRAHD notices the player.
    """

    def _narrate(script):
        # Echo each narration line, pausing after it.
        for text, pause in script:
            print(text)
            sleep(pause)

    if not ask_if_yes("Wanna flee now? "):
        return GameStatus.ALIVE

    sleep(2)
    _narrate([
        ("...", 1),
        ("We will see if flee you can... *** MUST ROLL THE DICE ***: ", 2),
        ("Careful!!!", 1),
    ])
    roll_the_dice = input(
        "*** Roll stealth *** (if you type it wrong it means you were not stealth) type: 'roll stealth' "
    )
    sleep(4)
    if roll_the_dice == "roll stealth":
        roll = randint(1, 20)
        if roll <= 10:
            # Failed stealth roll: STRAHD kills the would-be deserter.
            _narrate([
                (f"you rolled {roll}!", 2),
                ("It means STRAHD noticed you!", 2),
                ("...", 2),
                (" You are dead!!! ", 2),
                (" ***Bad end...*** ", 1),
            ])
            return GameStatus.DEAD
        # Successful escape -- still counted as a (cowardly) bad end.
        _narrate([
            (f"you rolled {roll}!!!", 2),
            ("Congratulations, you managed to be stealth!!!", 2),
            ("...", 2),
            ("You may flee but you will continue being poor and weak...", 2),
            ("...", 2),
            ("And remember there are real treasures waiting for you over there...", 4),
            ("***Bad end...***", 1),
        ])
        return GameStatus.ARREGAO

    # Typed the command wrong: automatic failure, with a personalised insult
    # for the campaign regulars.
    if player_name.lower() in ["soren", "kaede", "leandro", "snow", "lurin"]:
        _narrate([
            ("...", 1),
            ("......", 2),
            ("...........", 2),
            ("I told you to be careful!", 2),
            (f"...{player_name} you are such a DOJI!!!", 2),
            ("It means the STRAHD noticed you!", 2),
            ("...", 2),
            (" You are dead!!! ", 2),
            (" ***Bad end...*** ", 1),
        ])
    else:
        _narrate([
            ("I told you to be careful!", 2),
            ("...........", 2),
            (f"...{player_name} you are such a klutz!!!", 2),
            ("It means STRAHD noticed you!", 2),
            ("...", 2),
            (" You are dead!!! ", 2),
            (" ***Bad end...*** ", 1),
        ])
    return GameStatus.DEAD
def attack(player_name: str) -> Tuple[Optional[GameItem], GameStatus]:
    """
    Offer the player the chance to attack STRAHD.

    Returns the item obtained from the dice roll (if any) together with the
    player's resulting status; declining to attack hands control to flee().
    """

    def _narrate(script):
        # Echo each narration line, pausing after it.
        for text, pause in script:
            print(text)
            sleep(pause)

    print("You shall not pass!!!")
    if not ask_if_yes(f"{player_name}, will you attack STRAHD? "):
        # No attack: the only remaining option is to try to run.
        print("If you won't attack STRAHD... then...")
        sleep(2)
        return None, flee(player_name)

    sleep(1)
    _narrate([
        ("I honor your courage!", 2),
        ("therefore...", 1),
        ("I will help you...", 1),
        ("I am giving you a chance to kill STRAHD and reclaim your treasures...", 2),
        ("Roll the dice and have a chance to win the perfect item for you... or even some STRAHD Slayer Shit!!!", 3),
        ("It will increase your chances...", 2),
        ("....or kill you right away if you are as unlucky as Soren using his Sharp Shooting!!!", 2),
    ])
    if ask_if_yes("Wanna roll the dice? "):
        return roll_for_item(player_name)
    if ask_if_yes("Are you sure? "):
        # Refusing the roll twice is fatal.
        sleep(2)
        print("So you have chosen... Death!")
        sleep(2)
        return GameItem.DEATH, GameStatus.DEAD
    sleep(2)
    _narrate([
        ("Glad you changed your mind...", 2),
        ("Good... very good indeed...", 2),
    ])
    return roll_for_item(player_name)
def decide_if_strahd_flies(player_name: str) -> bool:
    """
    Run the stealth approach and return True when STRAHD takes flight.

    The player must type 'roll stealth' exactly and then roll above 10 on a
    d20 to keep STRAHD grounded; anything else gives him the air advantage.
    """

    def _narrate(script):
        # Echo each narration line, pausing after it.
        for text, pause in script:
            print(text)
            sleep(pause)

    _narrate([
        ("This is your chance... STRAHD has his attention captived by his 'vampirish's business'...", 3),
        ("You are approaching him...", 2),
        ("Careful...", 2),
        ("Because vampires... can fly...", 2),
    ])
    print("Roll stealth (if you type it wrong it means you were not stealth)...")
    roll_the_dice = input("type: 'roll stealth' ")
    sleep(2)

    if roll_the_dice != "roll stealth":
        # A typo counts as failed stealth; the regulars get a special insult.
        if player_name.lower() in ["soren", "kaede", "leandro", "snow"]:
            _narrate([
                ("...", 1),
                ("......", 2),
                ("...........", 2),
                ("I told you to be careful!", 2),
                (f"...{player_name} you are such a DOJI, STRAHD flew up...", 2),
                ("Now, you have a huge disavantage", 2),
            ])
        else:
            _narrate([
                ("...", 1),
                ("......", 2),
                ("...........", 2),
                ("I told you to be careful!", 2),
                (f"...{player_name} you are such a KLUTZ, STRAHD flew...", 2),
                ("...STRAHD flew up...", 2),
                ("Now, you have a huge disavantage", 2),
            ])
        return True

    roll = randint(1, 20)
    if roll <= 10:
        _narrate([
            ("...", 1),
            ("Unlucky", 2),
            (f"You rolled {roll}", 2),
            ("STRAHD...", 2),
            ("...flew up", 2),
            ("Now, you have a huge disavantage", 2),
        ])
        return True
    _narrate([
        (f"You rolled {roll}", 2),
        ("Congratulations, you managed to be in stealth!", 2),
    ])
    return False
def calculate_win_probability(
    player_race: str, player_name: str, item: Optional[GameItem], strahd_flying: bool
) -> int:
    """
    Return the percent chance (0-100) that the player defeats STRAHD.

    The odds depend on the held item, whether STRAHD is airborne, and a few
    name/race combinations that get a personal bonus. Returns -1 when the
    item is unrecognised (including None).
    """
    name = player_name.lower()
    race = player_race.lower()

    if item == GameItem.DEATH:
        # Only one specific character can cheat death itself.
        return 90 if (name == "snow" and race == "kalashatar") else 0
    if item == GameItem.WOODEN_SWORD:
        return 5 if strahd_flying else 10
    if item == GameItem.SIMPLE_BOW:
        if name == "soren" and race in ["human", "humano", "elf", "elfo"]:
            return 70
        return 30
    if item == GameItem.VIOLIN:
        return 70 if (name == "kaede" and race == "tiefling") else 30
    if item == GameItem.ORDINARY_SWORD:
        if strahd_flying:
            return 10
        return 80 if (name == "vis" and race == "draconato") else 40
    if item == GameItem.STRAHD_SLAYER_SWORD:
        return 20 if strahd_flying else 100
    if item == GameItem.STRAHD_SLAYER_BOW:
        # The legendary bow wins regardless of flight.
        return 100
    return -1
def roll_for_win(probability: int) -> bool:
    """Return True when a 1-100 roll falls within *probability* percent."""
    roll = randint(1, 100)
    return roll <= probability
def after_battle(player_race: str, player_name: str, did_win: bool) -> GameStatus:
    """
    Conduct the scenario after the battle with STRAHD.

    Returns GameStatus.WINNER on a win and GameStatus.DEAD on a loss, with
    one exception: the 'buiu' loss ending returns GameStatus.HAHA.
    """
    # The original had two consecutive `if did_win:` blocks and a third
    # `if not did_win:` block; they are merged into one branch here, and the
    # five copy-pasted winner endings are driven from a single template.
    if did_win:
        return _win_ending(player_race, player_name)
    return _lose_ending(player_name)


def _say(script):
    """Print each (text, pause_seconds) line of a narration script."""
    for text, pause in script:
        print(text)
        sleep(pause)


# Separator line used by every winner announcement.
_STARS = "***************************************************************************************************************************************"

# Per-winner prize link, epithet and ending title for the standard template.
_WINNER_PRIZES = {
    "soren": ("https://drive.google.com/drive/folders/1FerRt3mmaOm0ohSUXTkO-CmGIAluavXi?usp=sharing",
              "cat killer", "SOREN'S GOOD"),
    "snow": ("https://drive.google.com/drive/folders/16STFQ-_0N_54oNNsVQnMjwjcBgubxgk7?usp=sharing",
             "snow flake", "SNOW'S GOOD"),
    "kaede": ("https://drive.google.com/drive/folders/1XN9sItRxYR4Si4gWFeJtI0HGF39zC29a?usp=sharing",
              "idol", "KAEDE'S GOOD"),
    "leandro": ("https://drive.google.com/drive/folders/1eP552hYwUXImmJ-DIX5o-wlp5VA96Sa0?usp=sharing",
                "only roll 20", "LEANDRO'S GOOD"),
    "vis": ("https://drive.google.com/drive/folders/19GRJJdlB8NbNl3QDXQM1-0ctXSX3mbwS?usp=sharing",
            "iron wall", "VIS'S GOOD"),
}

# Shared tail of the 'lurin' (identity thief) ending, identical win or lose.
_LURIN_TAIL = [
    ("Bitch! ... ", 2),
    (" ... you stole my name...", 2),
    ("You are arrested for identity theft!!!", 2),
    ("...", 1),
    ("del C://LeagueOfLegends", 2),
    ("...", 0.5),
    (".....", 0.5),
    ("......", 0.5),
    (".............", 2),
    ("deletion completed", 2),
    ("***PHONY'S GOOD END***", 2),
]


def _win_ending(player_race: str, player_name: str) -> GameStatus:
    """Narrate the victory and return GameStatus.WINNER."""
    now = datetime.now()
    _say([
        ("A day may come when the courage of men fails…", 2),
        ("but it is not THIS day, SATAN...", 2),
        ("Because... you approached STRAHD...", 2),
        ("Almost invisible to his senses...", 2),
        ("Somehow your weapon hit the weak point of STRAHD's... revealing his true identity", 4),
        ("He was just a bat... who looked like a DREADLORD...", 4),
        ("It was a huge battle...", 2),
        # Weekday of the real-world run, for flavour.
        (f"And it was the most awkward {now.strftime('%A')} you will ever remember.", 2),
    ])
    name = player_name.lower()
    # Two cheat/joke identities short-circuit with their own announcement.
    if player_race.lower() in ["master", "mestre"] and name == "zordnael":
        _say([
            ("...", 1),
            (_STARS, 1),
            (f"Congratulations {player_name}!!! You are the WINNER of this week's challenge, you shall receive 5000 dullas in Anastasia Dungeons Hills Cosmetics!", 4),
            ("link", 5),
            ("***CHEATER GOOD END***", 2),
        ])
        return GameStatus.WINNER
    if player_race.lower() == "racist" and name == "lili":
        _say([
            ("...", 1),
            (_STARS, 1),
            (f"Congratulations {player_name}!!! You are the WINNER of this week's challenge, you shall receive the prizes specially prepared for everybody in dullas from Anastasia Dungeons Hills Cosmetics!", 4),
            ("https://drive.google.com/drive/folders/1Jn8YYdixNNRqCQgIClBmGLiFFxuSCQdc?usp=sharing", 5),
            ("***BEST END***", 2),
        ])
        return GameStatus.WINNER

    _say([("...", 1), (_STARS, 1)])
    if name == "lurin":
        _say([("CONGRATULATIONS!!!!! ", 2)] + _LURIN_TAIL)
    elif name in _WINNER_PRIZES:
        link, epithet, title = _WINNER_PRIZES[name]
        _say([
            (f"Congratulations {player_name}!!! you are the WINNER of this week's challenge, you received a cash prize of five thousand dullas from Anastasia Dungeons Hills Cosmetics!", 4),
            (f"And a prize... prepared specially for you {player_name}", 2),
            ("... I know you doubted me... but here it is:", 2),
            ("...", 1),
            (link, 5),
            (f"...Your motherfuger {epithet} !!!", 2),
            (f"***{title} END***", 2),
        ])
    else:
        _say([
            (f"Congratulations {player_name}!!! you are the WINNER of this week's challenge, you shall receive this link from Anastasia Dungeons Hills Cosmetics!", 4),
            ("https://drive.google.com/drive/folders/0B_sxkSE6-TfETlZoOHF1bTRGTXM?usp=sharing", 5),
            ("***GOOD END***", 2),
        ])
    sleep(1)
    return GameStatus.WINNER


def _lose_ending(player_name: str) -> GameStatus:
    """Narrate the defeat; GameStatus.DEAD, or GameStatus.HAHA for 'buiu'."""
    _say([
        ("You tried to approach the devil carefully...", 2),
        ("... but your hands were trembling...", 2),
        ("...your weapon was not what you expected...", 2),
        ("... It was a shit battle... but", 2),
        ("The journey doesn't end here...", 2),
        ("Death is just another way we have to choose...", 2),
        ("...", 1),
    ])
    name = player_name.lower()
    if name == "vis":
        _say([
            ("I really believed in you...", 2),
            ("...but I guess...", 1),
            ("you shoud have stayed in your bathroom...", 2),
            ("eating lemon pies...", 2),
            ("...", 1),
            (f"YOU DIED {player_name}", 2),
            ("***VIS'S BAD END***", 2),
        ])
    elif name == "soren":
        _say([
            ("I really believed in you..", 2),
            ("...but I guess...", 1),
            ("Did you think it was a cat? ", 2),
            ("Not today Satan!!!", 2),
            ("...", 1),
            (f"You died! {player_name}", 2),
            ("***SOREN'S BAD END***", 2),
        ])
    elif name == "kaede":
        _say([
            ("I really believed in you..", 2),
            ("...but I guess...", 1),
            ("お。。。。", 2),
            ("。。。か わ い い", 2),
            ("。。。。。。こ と", 2),
            ("go play you Violin in Hell...", 2),
            ("...", 1),
            (f"You died! {player_name}", 2),
            ("***KAEDES'S BAD END***", 2),
        ])
    elif name == "snow":
        _say([
            ("I really believed in you..", 2),
            ("...but I guess...", 1),
            ("HAHAHAAHHAHAHA", 2),
            ("It is cute you even tried!", 2),
            ("but I will call you Nori!", 2),
            ("...", 1),
            ("You died! Nori!!!", 2),
            ("***SNOW'S BAD END***", 2),
        ])
    elif name == "lurin":
        _say([
            ("I really believed in you..", 2),
            ("...but I guess...", 2),
        ] + _LURIN_TAIL)
    elif name == "leandro":
        _say([
            ("nice try", 2),
            ("...but I guess...", 2),
            ("Try harder next time...", 2),
            ("...Nicolas Cage Face...", 2),
            ("***LEANDRO'S BAD END***", 2),
        ])
    elif name == "buiu":
        _say([
            ("nice try", 2),
            ("...but I guess...", 2),
            ("Try harder next time...", 2),
            ("Did you really think this would work? Clown!", 2),
            ("***RIDICULOUS BUIU'S END***", 2),
        ])
        # Special mocking status: the main loop stops the game on HAHA.
        return GameStatus.HAHA
    elif name in ["strahd", "dreadlord"]:
        _say([
            ("good try", 2),
            ("...but I guess...", 2),
            ("I never said you were in a cave...", 2),
            ("There is sunlight now...", 2),
            ("You are burning...", 2),
            ("Till Death...", 2),
            ("***RIDICULOUS STRAHD'S END***", 2),
        ])
    else:
        _say([
            ("I really believed in you..", 2),
            ("...but I guess...", 2),
            ("This is a shit meta game...", 2),
            ("Designed for players from a certain 16:20 tabletop Ravenloft campaign", 2),
            (f"Sorry, {player_name}...", 2),
            ("You are dead!!!", 2),
            ("***BAD END***", 2),
        ])
    sleep(1)
    return GameStatus.DEAD
def main():
    """
    Run the full game loop: prompt for race and name, play one round
    (flee / attack / battle), and repeat until the player declines to
    continue (or earns the game-ending HAHA status).
    """
    keep_playing = True
    while keep_playing:
        player_race = input("Your race? ")
        player_name = input("Your name? ")
        status = flee(player_name)
        if status == GameStatus.ALIVE:
            item, status = attack(player_name)
            if status == GameStatus.ALIVE:
                flies = decide_if_strahd_flies(player_name)
                chance = calculate_win_probability(
                    player_race, player_name, item, flies
                )
                status = after_battle(player_race, player_name, roll_for_win(chance))
                if status == GameStatus.WINNER:
                    sleep(5)
                    print(
                        "You are a winner, baby. But there are other possibilities over there..."
                    )
                    keep_playing = ask_if_wanna_continue(player_name)
                elif status == GameStatus.HAHA:
                    # The mocking ending terminates the game unconditionally.
                    keep_playing = False
                else:
                    keep_playing = ask_if_wanna_continue(player_name)
            else:
                keep_playing = ask_if_wanna_continue(player_name)
        elif status == GameStatus.DEAD:
            keep_playing = ask_if_wanna_continue(player_name)
        else:
            # Fled successfully (ARREGAO): shrug and offer another run.
            print("...")
            keep_playing = ask_if_wanna_continue(player_name)
    # Hold the console window open until the player presses Enter.
    input()


main()
| 36.281787
| 210
| 0.46224
| 3,373
| 31,674
| 4.279573
| 0.133709
| 0.074818
| 0.106685
| 0.026602
| 0.613717
| 0.5531
| 0.519778
| 0.460132
| 0.438171
| 0.385313
| 0
| 0.020088
| 0.407495
| 31,674
| 872
| 211
| 36.323395
| 0.748921
| 0.038896
| 0
| 0.630653
| 0
| 0.016332
| 0.333277
| 0.013799
| 0
| 0
| 0
| 0
| 0
| 1
| 0.012563
| false
| 0.001256
| 0.006281
| 0
| 0.096734
| 0.327889
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93c9a643270a43403d7d70db7f672d353ef62da2
| 635
|
py
|
Python
|
backend/helper/mds.py
|
marinaevers/regional-correlations
|
8ca91a5283a92e75f3d99f870c295ca580edb949
|
[
"MIT"
] | null | null | null |
backend/helper/mds.py
|
marinaevers/regional-correlations
|
8ca91a5283a92e75f3d99f870c295ca580edb949
|
[
"MIT"
] | null | null | null |
backend/helper/mds.py
|
marinaevers/regional-correlations
|
8ca91a5283a92e75f3d99f870c295ca580edb949
|
[
"MIT"
] | null | null | null |
import numpy as np
def mds(d, dimensions=3):
    """
    Classical (Torgerson) multidimensional scaling.

    Given an (n, n) matrix of pairwise distances *d*, find low-dimensional
    points with similar interpoint distances.

    Returns a tuple (Y, S): Y is an (n, dimensions) array of coordinates,
    and S the singular values of the double-centred matrix, useful for
    judging how much structure each dimension carries.
    """
    E = -0.5 * d ** 2
    # Double-centre E: subtract row and column means, add back the grand
    # mean (Principles of Multivariate Analysis: A User's Perspective,
    # page 107). keepdims-based broadcasting replaces the deprecated
    # np.mat/np.matrix machinery of the original.
    row_means = E.mean(axis=1, keepdims=True)
    col_means = E.mean(axis=0, keepdims=True)
    F = E - row_means - col_means + E.mean()
    # For Euclidean input F is symmetric PSD, so the SVD doubles as an
    # eigendecomposition and U * sqrt(S) reconstructs the configuration.
    U, S, V = np.linalg.svd(F)
    Y = U * np.sqrt(S)
    return (Y[:, 0:dimensions], S)
| 24.423077
| 80
| 0.601575
| 106
| 635
| 3.603774
| 0.59434
| 0.04712
| 0.054974
| 0.089005
| 0.062827
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021368
| 0.262992
| 635
| 25
| 81
| 25.4
| 0.794872
| 0.451969
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.1
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93c9ac724fdd806412549f0dec59d52778127c89
| 492
|
py
|
Python
|
sm3.py
|
matthewmuccio/InterviewPrepKit
|
13dabeddc3c83866c88bef1c80498c313e4c233e
|
[
"MIT"
] | 2
|
2018-09-19T00:59:09.000Z
|
2022-01-09T18:38:01.000Z
|
sm3.py
|
matthewmuccio/InterviewPrepKit
|
13dabeddc3c83866c88bef1c80498c313e4c233e
|
[
"MIT"
] | null | null | null |
sm3.py
|
matthewmuccio/InterviewPrepKit
|
13dabeddc3c83866c88bef1c80498c313e4c233e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from collections import Counter
# Complete the isValid function below.
def isValid(s):
    """
    Return "YES" when deleting at most one character from *s* makes every
    remaining character occur the same number of times, otherwise "NO".
    """
    counts = sorted(Counter(s).values())
    lowest = counts[0]
    highest = counts[-1]
    # Every character already occurs equally often.
    if counts.count(lowest) == len(counts):
        return "YES"
    # Exactly one character occurs once more than all the others.
    if counts.count(lowest) == len(counts) - 1 and highest - counts[-2] == 1:
        return "YES"
    # Exactly one character occurs a single time; removing it fixes the rest.
    if counts.count(highest) == len(counts) - 1 and lowest == 1:
        return "YES"
    return "NO"
# Script entry point: read one string from stdin and print YES/NO.
if __name__ == "__main__":
    s = input()
    result = isValid(s)
    print(result)
| 25.894737
| 217
| 0.630081
| 72
| 492
| 4.194444
| 0.5
| 0.092715
| 0.168874
| 0.119205
| 0.274834
| 0.178808
| 0
| 0
| 0
| 0
| 0
| 0.027778
| 0.195122
| 492
| 18
| 218
| 27.333333
| 0.734848
| 0.117886
| 0
| 0
| 0
| 0
| 0.030093
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.1
| 0
| 0.3
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93cd3692a60479202468f2712c8bb24c8cc1672a
| 841
|
py
|
Python
|
src/codplayer/__init__.py
|
petli/codplayer
|
172187b91662affd8e89f572c0db9be1c4257627
|
[
"MIT"
] | 14
|
2015-04-27T20:40:46.000Z
|
2019-02-01T09:22:02.000Z
|
src/codplayer/__init__.py
|
petli/codplayer
|
172187b91662affd8e89f572c0db9be1c4257627
|
[
"MIT"
] | 10
|
2015-01-05T18:11:28.000Z
|
2018-09-03T08:42:50.000Z
|
src/codplayer/__init__.py
|
petli/codplayer
|
172187b91662affd8e89f572c0db9be1c4257627
|
[
"MIT"
] | 4
|
2017-03-03T16:59:39.000Z
|
2019-11-08T11:15:06.000Z
|
# codplayer supporting package
#
# Copyright 2013-2014 Peter Liljenberg <peter.liljenberg@gmail.com>
#
# Distributed under an MIT license, please see LICENSE in the top dir.

# Don't include the audio device modules in the list of modules,
# as they may not be available on all systems

from pkg_resources import get_distribution

import os
import time

# Package version as reported by the installed distribution metadata.
version = get_distribution('codplayer').version

# Check what file we are loaded from
# (the module file's mtime doubles as an install-date stamp).
try:
    date = time.ctime(os.stat(__file__).st_mtime)
except OSError as e:
    date = 'unknown ({})'.format(e)


def full_version():
    """Return a human-readable 'codplayer VERSION (installed DATE)' string."""
    return 'codplayer {0} (installed {1})'.format(version, date)


# Public submodules of the package; audio device modules are deliberately
# excluded (see note above).
__all__ = [
    'audio',
    'command',
    'config',
    'db',
    'model',
    'player',
    'rest',
    'rip',
    'serialize',
    'sink',
    'source',
    'state',
    'toc',
    'version'
]
| 19.55814
| 70
| 0.65874
| 110
| 841
| 4.918182
| 0.745455
| 0.055453
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015221
| 0.218787
| 841
| 42
| 71
| 20.02381
| 0.808219
| 0.362664
| 0
| 0
| 0
| 0
| 0.231939
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0
| 0.115385
| 0.038462
| 0.192308
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93d13c525fccba1c9782ed2b28a9ab8aac0b37da
| 339
|
py
|
Python
|
shapesimage.py
|
riddhigupta1318/menu_driven
|
1a3e4a8d3ff3dbcd9cffaa87ab9fbc66868d9eb6
|
[
"Apache-2.0"
] | null | null | null |
shapesimage.py
|
riddhigupta1318/menu_driven
|
1a3e4a8d3ff3dbcd9cffaa87ab9fbc66868d9eb6
|
[
"Apache-2.0"
] | null | null | null |
shapesimage.py
|
riddhigupta1318/menu_driven
|
1a3e4a8d3ff3dbcd9cffaa87ab9fbc66868d9eb6
|
[
"Apache-2.0"
] | null | null | null |
#!/user/bin/python3
# NOTE(review): shebang path is likely a typo -- usually /usr/bin/python3.
# Load an image, draw a line on it and display the result with OpenCV.
import cv2
#loading image
img=cv2.imread("dog.jpeg")
# draw a line from (0, 0) to (200, 114), BGR colour (110, 176, 123), thickness 2
img1=cv2.line(img,(0,0),(200,114),(110,176,123),2)
#print height and width
# (shape is (height, width, channels) for a colour image)
print(img.shape)
#to display that image
cv2.imshow("dogg",img1)
#image window holder activate
#wait key will destroy by pressing q button
# NOTE(review): waitKey(0) actually returns on ANY key press, not only 'q'.
cv2.waitKey(0)
cv2.destroyAllWindows()
| 16.142857
| 50
| 0.719764
| 56
| 339
| 4.357143
| 0.767857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 0.132743
| 339
| 20
| 51
| 16.95
| 0.734694
| 0.439528
| 0
| 0
| 0
| 0
| 0.065574
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0.142857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93d37d046fccd50496fe96e2714742d3c5e3222c
| 2,139
|
py
|
Python
|
RNNS/utils/wrdembdGen.py
|
CenIII/Text-style-transfer-DeleteRetrieve
|
2b7aa017765dcae65b42fc94d3ccaddc57ac8661
|
[
"MIT"
] | null | null | null |
RNNS/utils/wrdembdGen.py
|
CenIII/Text-style-transfer-DeleteRetrieve
|
2b7aa017765dcae65b42fc94d3ccaddc57ac8661
|
[
"MIT"
] | null | null | null |
RNNS/utils/wrdembdGen.py
|
CenIII/Text-style-transfer-DeleteRetrieve
|
2b7aa017765dcae65b42fc94d3ccaddc57ac8661
|
[
"MIT"
] | null | null | null |
# Build a vocabulary index (wordDict) and embedding matrix (word2vec.npy)
# for the dataset, backed by pre-trained Google News word2vec vectors.
import gensim
import fnmatch
import os
import pickle
import numpy as np

# --- Disabled symspell spell-correction pass, kept for reference -----------
# from symspellpy.symspellpy import SymSpell, Verbosity # import the module
# initial_capacity = 83000
# # maximum edit distance per dictionary precalculation
# max_edit_distance_dictionary = 2
# prefix_length = 7
# sym_spell = SymSpell(initial_capacity, max_edit_distance_dictionary,
# prefix_length)
# # load dictionary
# dictionary_path = os.path.join(os.path.dirname(__file__),
# "frequency_dictionary_en_82_765.txt")
# term_index = 0 # column of the term in the dictionary text file
# count_index = 1 # column of the term frequency in the dictionary text file
# if not sym_spell.load_dictionary(dictionary_path, term_index, count_index):
# print("Dictionary file not found")
# max_edit_distance_lookup = 2

# Pre-trained 300-dimensional embeddings (binary word2vec format).
model = gensim.models.KeyedVectors.load_word2vec_format('~/Downloads/GoogleNews-vectors-negative300.bin', binary=True)

# Collect every space-separated token from every file of each dataset dir.
wordlist = []
for dataset in ['yelp/']:
    filelist = os.listdir('../../Data/'+dataset)
    for file in filelist:
        with open('../../Data/'+dataset+file,'r') as f:
            line = f.readline()
            while line:
                # suggestions = sym_spell.lookup_compound(line, max_edit_distance_lookup)
                wordlist += line.split(' ')
                line = f.readline()
# Special tokens expected by the downstream model.
wordlist.append('<unk>')
wordlist.append('<m_end>')
wordlist.append('@@START@@')
wordlist.append('@@END@@')
vocabs = set(wordlist)
print(len(vocabs))

# Index 0 is reserved for the all-zero <PAD> vector.
wordDict = {}
word2vec = []
wastewords = []
word2vec.append(np.zeros(300))
wordDict['<PAD>']=0
cnt=1
for word in vocabs:
    if word in model.wv:
        # In-vocabulary: copy the pre-trained vector.
        word2vec.append(model.wv[word])
        wordDict[word] = cnt
        cnt += 1
    else:
        # wastewords.append(word)
        # Out-of-vocabulary: random uniform vector in [-1, 1).
        word2vec.append(np.random.uniform(-1,1,300))
        wordDict[word] = cnt
        cnt += 1
word2vec = np.array(word2vec)
# with open('./word2vec', "wb") as fp: #Pickling
np.save('word2vec.npy',word2vec)
with open('./wordDict', "wb") as fp: #Pickling
    pickle.dump(wordDict, fp)
# with open('./word2vec', "rb") as fp: #Pickling
# word2vec = pickle.load(fp)
# with open('./wordDict', "rb") as fp: #Pickling
# wordDict = pickle.load(fp)
# pass
| 27.423077
| 118
| 0.694717
| 288
| 2,139
| 5.027778
| 0.385417
| 0.041436
| 0.041436
| 0.03453
| 0.058011
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023464
| 0.16316
| 2,139
| 78
| 119
| 27.423077
| 0.785475
| 0.486209
| 0
| 0.15
| 0
| 0
| 0.123711
| 0.043112
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 0.125
| 0.025
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93d43839068d5fe40ab642bf29baf0d261531656
| 8,611
|
py
|
Python
|
cls_utils/job.py
|
prmurali1leo/Engineering_challenge
|
d73dcba265587c22f0869880bf372cfaa045bfa6
|
[
"MIT"
] | null | null | null |
cls_utils/job.py
|
prmurali1leo/Engineering_challenge
|
d73dcba265587c22f0869880bf372cfaa045bfa6
|
[
"MIT"
] | null | null | null |
cls_utils/job.py
|
prmurali1leo/Engineering_challenge
|
d73dcba265587c22f0869880bf372cfaa045bfa6
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
from hashlib import md5
import datetime
import pyarrow.parquet as pq
import pyarrow as pa
from src.dimension_surrogate_resolver import DimensionSurrogateResolver
def run():
    """Process each monthly weather extract, then print the final report."""
    for extract_dt in ["20160201", "20160301"]:
        create_fact_dimension_tables(extract_dt)
    display_output()
def create_fact_dimension_tables(dt):
    """
    Load one monthly weather CSV (keyed by *dt*, e.g. '20160201') and
    persist its dimension tables, fact partition and daytime aggregate.
    """
    raw = pd.read_csv(f"input_data/weather.{dt}.csv")
    # Dimensions first so the surrogate keys exist for the fact build.
    save_geo_dimension(get_geo_location(raw))
    save_site_dimension(get_site_info(raw))
    fact_df, agg_df = perform_transformations(get_weather_fact(raw))
    save_fact(fact_df, dt)
    save_aggregate(agg_df, dt)
def get_geo_location(data):
    """
    Build the geo-location dimension from the raw readings.

    Returns a frame with unique (Region, Country) pairs (rows with a null
    Country dropped) plus a 'location_id' surrogate key derived from the
    md5 hash of Region + Country.
    """
    dim = data[['Region', 'Country']].copy()
    dim = dim.drop_duplicates(subset=['Region', 'Country'])
    dim = dim[pd.notnull(dim['Country'])]

    def _surrogate(row):
        # Deterministic surrogate: decimal form of md5(Region + Country).
        key = row.Region + row.Country
        return str(int(md5(key.encode('utf-8')).hexdigest(), 16))

    dim['location_id'] = dim.apply(_surrogate, axis=1)
    return dim.reset_index(drop=True)
def get_site_info(data):
    """
    Build the site dimension: unique (code, name, lat, lon) rows plus a
    hashed 'site_id' surrogate, sorted by ForecastSiteCode.
    """
    cols = ['ForecastSiteCode', 'SiteName', 'Latitude', 'Longitude']
    dim = data[cols].copy()
    dim = dim.drop_duplicates(subset=cols)

    def _strip_code_suffix(row):
        # SiteName ends with " (CODE)"; cut off that trailing decoration.
        return row.SiteName[:-1 * (len(str(row.ForecastSiteCode)) + 3)]

    def _surrogate(row):
        # Deterministic surrogate: decimal form of md5(ForecastSiteCode).
        return str(int(md5(str(row.ForecastSiteCode).encode('utf-8')).hexdigest(), 16))

    dim['SiteName'] = dim.apply(_strip_code_suffix, axis=1)
    dim['site_id'] = dim.apply(_surrogate, axis=1)
    return dim.sort_values('ForecastSiteCode').reset_index(drop=True)
def time_conversion(t):
    """Render an integer observation hour (e.g. 8) as 'HH:MM' (e.g. '08:00')."""
    parsed = datetime.datetime.strptime(str(t), '%H')
    return parsed.strftime("%H:%M")
def fill_values(series):
    """
    Fill NaN entries of *series* with its most frequent value.

    A series with no countable values (all-NaN) is returned unchanged.
    """
    counted = series.value_counts()
    if counted.empty:
        return series
    # value_counts sorts by frequency, so index[0] is the mode.
    return series.fillna(counted.index[0])
def get_day_night(time):
    """Classify an observation hour: 'D' for 8-16 inclusive, else 'N'."""
    return 'D' if 8 <= time <= 16 else 'N'
def get_weather_fact(data):
    """
    Clean the raw weather readings into the fact frame.

    Steps: treat -99 as a missing temperature, strip the " (CODE)" suffix
    from SiteName, back-fill Country with the most frequent value per
    Region, interpolate missing temperatures from neighbouring rows, and
    add day/night averages plus formatted date/time columns.
    """
    weather_fact = data.copy()
    # -99 is the sentinel for a missing temperature reading.
    weather_fact.loc[weather_fact['ScreenTemperature'] == -99, 'ScreenTemperature'] = np.nan
    # SiteName embeds " (CODE)" at the end; strip that suffix.
    weather_fact['SiteName'] = weather_fact.apply(lambda row: row.SiteName[:-1 * (len(str(row.ForecastSiteCode)) + 3)],
                                                  axis=1)
    # Fill missing Country values with the most frequent country per Region.
    group_country = weather_fact.groupby('Region')['Country']
    weather_fact.loc[:, 'Country'] = group_country.transform(fill_values)
    # Sort by site and observation moment so adjacent rows are consecutive readings.
    weather_fact = weather_fact.sort_values(['ForecastSiteCode', 'ObservationDate', 'ObservationTime'],
                                            ascending=(True, True, True))
    # Interpolate a missing temperature as the mean of the previous and next row.
    # NOTE(review): shift() crosses site boundaries, so a site's first/last row
    # can borrow a neighbouring site's reading -- confirm this is intended.
    weather_fact.loc[weather_fact['ScreenTemperature'].isnull(), 'ScreenTemperature'] = (
        (weather_fact['ScreenTemperature'].shift() +
         weather_fact['ScreenTemperature'].shift(-1)) / 2
    )
    # Tag each reading as daytime (hours 8-16) or night.
    weather_fact['day_night'] = weather_fact['ObservationTime'].apply(lambda row: get_day_night(row))
    # Per site/date/day-night mean (NaNs coerced to 0 first) and reading count.
    weather_fact['avg_temp'] = weather_fact.fillna(0).groupby(['ForecastSiteCode', 'ObservationDate', 'day_night'])[
        'ScreenTemperature'].transform('mean')
    weather_fact['count_temp'] = weather_fact.groupby(['ForecastSiteCode', 'ObservationDate', 'day_night'])[
        'ScreenTemperature'].transform('count')
    # Groups with zero usable readings get NaN instead of the fillna(0) artefact.
    weather_fact['avg_temp'] = np.where(weather_fact.count_temp == 0, np.nan, weather_fact.avg_temp)
    # Keep only the date part (drops the last 9 chars; assumes a
    # 'YYYY-MM-DDT00:00:00'-style value -- TODO confirm input format).
    weather_fact['ObservationDate'] = weather_fact.ObservationDate.str[:-9]
    # Render the integer hour as 'HH:MM'.
    weather_fact['ObservationTime'] = weather_fact['ObservationTime'].apply(lambda x: time_conversion(x))
    return weather_fact
def write_to_parquet(source, destination):
    """Persist *source* (a DataFrame) to output_data/<destination>.parquet."""
    target = f"output_data/{destination}.parquet"
    return source.to_parquet(target, engine="pyarrow", index=False)
def save_geo_dimension(data):
    """Merge *data* into the persisted geo dimension (create it on first run)."""
    try:
        existing = pd.read_parquet("output_data/geo_dimension.parquet", engine="pyarrow")
    except OSError:
        # Nothing persisted yet: write the new rows as-is.
        write_to_parquet(data, "geo_dimension")
        return
    combined = pd.concat([existing, data]).drop_duplicates().reset_index(drop=True)
    write_to_parquet(combined, "geo_dimension")
    return
def save_site_dimension(data):
    """Merge *data* into the persisted site dimension (create it on first run)."""
    try:
        existing = pd.read_parquet("output_data/site_dimension.parquet", engine="pyarrow")
    except OSError:
        # Nothing persisted yet: write the new rows as-is.
        write_to_parquet(data, "site_dimension")
        return
    combined = pd.concat([existing, data]).drop_duplicates().reset_index(drop=True)
    write_to_parquet(combined, "site_dimension")
    return
def perform_transformations(pdf):
    """
    Split the cleaned weather readings into the fact frame and a daytime
    aggregate frame.

    Returns (df, agg_df): df is the fact table keyed by surrogate FKs and
    sorted by observation moment; agg_df has one row per location/site/date
    with the 08-16 readings collected into a {time: temperature} dict.
    """
    # Columns carried into the fact pipeline; dimension attributes are
    # re-resolved via surrogate keys below.
    columns_to_keep = ["ObservationDate",
                       "ObservationTime",
                       "WindDirection",
                       "WindSpeed",
                       "WindGust",
                       "Visibility",
                       "ScreenTemperature",
                       "Pressure",
                       "SignificantWeatherCode",
                       "ForecastSiteCode",
                       "Region",
                       "Country",
                       "avg_temp",
                       "day_night"]
    df = pdf[columns_to_keep].copy()
    df = add_fk(df)
    # Daytime rows only: gather each day's hourly temperatures per site.
    agg_df = (
        df[['ObservationDate', "ObservationTime", 'avg_temp', "ScreenTemperature", 'fk_location_id', 'fk_site_id', ]][
            df['day_night'] == "D"].copy()
    )
    agg_df = (agg_df.groupby(['fk_location_id', 'fk_site_id', "ObservationDate", 'avg_temp'],
                             as_index=False).apply(
        lambda x: dict(zip(x['ObservationTime'], x['ScreenTemperature']))).reset_index(name='Temperature'))
    # NOTE(review): SiteName/Latitude/Longitude are not in columns_to_keep, so
    # this drop only succeeds if DimensionSurrogateResolver.add_fk merged those
    # columns back in -- confirm; otherwise pandas raises KeyError here.
    df = df.drop(
        columns=["Region", "Country", "ForecastSiteCode", "SiteName", "avg_temp", "Latitude", "Longitude", "day_night"])
    df = df.sort_values(['ObservationDate', 'ObservationTime'], ascending=(True, True))
    return df, agg_df
def add_fk(df):
    """Attach surrogate foreign keys for the geo-location and site dimensions."""
    with_location = DimensionSurrogateResolver.add_fk(
        "geo_location", df, "fk_location_id", {'Region': 'Region', 'Country': 'Country'},
    )
    with_site = DimensionSurrogateResolver.add_fk(
        "site", with_location, "fk_site_id", {'ForecastSiteCode': 'ForecastSiteCode'},
    )
    return with_site
def save_fact(df, dt):
    """
    Write the fact frame for one monthly partition (output_data/weather_fact/
    dt=<dt>/), appending one parquet row group per observation date.
    """
    unique_dates = df['ObservationDate'].unique().tolist()
    print(unique_dates)  # debug: dates present in this partition
    # Converting the full frame up front fixes the schema for the writer below.
    table = pa.Table.from_pandas(df, preserve_index=False)
    # NOTE(review): assumes the dt=<dt> directory already exists -- confirm.
    with pq.ParquetWriter(f"output_data/weather_fact/dt={dt}/weather_fact.parquet", table.schema) as writer:
        for date in unique_dates:
            df1 = df[df['ObservationDate'] == date]
            table = pa.Table.from_pandas(df1, preserve_index=False)
            writer.write_table(table)
def save_aggregate(data, dt):
    """Persist the monthly aggregate, replacing any rows for dt's month.

    dt is expected as YYYYMMDD...; rows whose ObservationDate contains the
    YYYY-MM prefix are dropped before the new data is appended.
    """
    month_prefix = dt[:4] + "-" + dt[4:6]
    try:
        existing = pd.read_parquet("output_data/fact_aggregate.parquet", engine="pyarrow")
        # Discard previously stored rows for this month before appending.
        existing = existing[~existing['ObservationDate'].str.contains(month_prefix)]
    except OSError:
        # No aggregate persisted yet: start it from the incoming rows.
        write_to_parquet(data, "fact_aggregate")
        return
    combined = pd.concat([existing, data]).reset_index(drop=True)
    write_to_parquet(combined, "fact_aggregate")
def display_output():
    """Report the hottest day recorded in the aggregate fact table.

    Joins the aggregate with both dimensions, writes the joined row(s) to
    output_data/final_output.csv and prints a human-readable summary.
    """
    agg = pd.read_parquet("output_data/fact_aggregate.parquet", engine="pyarrow")
    geo_dim = pd.read_parquet("output_data/geo_dimension.parquet", engine="pyarrow")
    site_dim = pd.read_parquet("output_data/site_dimension.parquet", engine="pyarrow")
    hottest = agg[agg.avg_temp == agg.avg_temp.max()]
    merged = pd.merge(hottest, geo_dim, left_on="fk_location_id", right_on="location_id", how='left')
    merged = pd.merge(merged, site_dim, left_on="fk_site_id", right_on="site_id", how='left')
    merged = merged.drop(columns=["fk_location_id", "location_id", "fk_site_id", "site_id"])
    merged.to_csv("output_data/final_output.csv", index=False)
    # BUG FIX: previously whole Series objects were formatted into the
    # messages (printing index/dtype noise). Print the first row's scalar
    # values instead; ties on max temperature are still all in the CSV.
    row = merged.iloc[0]
    print("Highest temperature was recorded on {0}".format(row['ObservationDate']))
    print("The average temperature on that Day from 8am to 4pm was {0}".format(row['avg_temp']))
    print("The temperature from 8am to 4pm was {0}".format(row['Temperature']))
    print("The hottest region was {0}".format(row['Region']))
| 42.004878
| 120
| 0.648124
| 1,042
| 8,611
| 5.082534
| 0.177543
| 0.068542
| 0.018505
| 0.021526
| 0.310612
| 0.223754
| 0.174849
| 0.13784
| 0.121979
| 0.121979
| 0
| 0.010091
| 0.217396
| 8,611
| 204
| 121
| 42.210784
| 0.775783
| 0
| 0
| 0.125
| 0
| 0
| 0.227848
| 0.042388
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0
| 0.041667
| 0.011905
| 0.238095
| 0.029762
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93d52227fd91adf6e2131607d2e901a6c4913898
| 3,294
|
py
|
Python
|
busy_home.py
|
jerr0328/HAP-python
|
87199a1fb7ffc451961948c634e46439cbace370
|
[
"Apache-2.0"
] | 462
|
2017-10-14T16:58:36.000Z
|
2022-03-24T01:40:23.000Z
|
busy_home.py
|
jerr0328/HAP-python
|
87199a1fb7ffc451961948c634e46439cbace370
|
[
"Apache-2.0"
] | 371
|
2017-11-28T14:00:02.000Z
|
2022-03-31T21:44:07.000Z
|
busy_home.py
|
jerr0328/HAP-python
|
87199a1fb7ffc451961948c634e46439cbace370
|
[
"Apache-2.0"
] | 129
|
2017-11-23T20:50:28.000Z
|
2022-03-17T01:26:53.000Z
|
"""Starts a fake fan, lightbulb, garage door and a TemperatureSensor
"""
import logging
import signal
import random
from pyhap.accessory import Accessory, Bridge
from pyhap.accessory_driver import AccessoryDriver
from pyhap.const import (CATEGORY_FAN,
CATEGORY_LIGHTBULB,
CATEGORY_GARAGE_DOOR_OPENER,
CATEGORY_SENSOR)
logging.basicConfig(level=logging.INFO, format="[%(module)s] %(message)s")
class TemperatureSensor(Accessory):
    """Mock temperature sensor publishing a new reading every 3 seconds."""

    category = CATEGORY_SENSOR

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        temp_service = self.add_preload_service('TemperatureSensor')
        self.char_temp = temp_service.configure_char('CurrentTemperature')

    @Accessory.run_at_interval(3)
    async def run(self):
        # Publish a pseudo-random reading in a comfortable indoor range.
        self.char_temp.set_value(random.randint(18, 26))
class FakeFan(Accessory):
    """Fake fan; whatever the client writes is only logged."""

    category = CATEGORY_FAN

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Preload the fan service along with its optional characteristics.
        fan_service = self.add_preload_service(
            'Fan', chars=['RotationSpeed', 'RotationDirection'])
        self.char_rotation_speed = fan_service.configure_char(
            'RotationSpeed', setter_callback=self.set_rotation_speed)
        self.char_rotation_direction = fan_service.configure_char(
            'RotationDirection', setter_callback=self.set_rotation_direction)

    def set_rotation_speed(self, value):
        """Log the rotation speed the client requested."""
        logging.debug("Rotation speed changed: %s", value)

    def set_rotation_direction(self, value):
        """Log the rotation direction the client requested."""
        logging.debug("Rotation direction changed: %s", value)
class LightBulb(Accessory):
    """Fake lightbulb; logs the on/off value the client sets."""

    category = CATEGORY_LIGHTBULB

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        bulb_service = self.add_preload_service('Lightbulb')
        self.char_on = bulb_service.configure_char(
            'On', setter_callback=self.set_bulb)

    def set_bulb(self, value):
        """Log the value written to the On characteristic."""
        logging.info("Bulb value: %s", value)
class GarageDoor(Accessory):
    """Fake garage door that immediately reports the requested state."""

    category = CATEGORY_GARAGE_DOOR_OPENER

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.add_preload_service('GarageDoorOpener')\
            .configure_char(
                'TargetDoorState', setter_callback=self.change_state)

    def change_state(self, value):
        """Mirror the requested TargetDoorState into CurrentDoorState."""
        # BUG FIX: the log line was copy-pasted from LightBulb and said
        # "Bulb value: %s"; log a message that matches this accessory.
        logging.info("Garage door target state: %s", value)
        self.get_service('GarageDoorOpener')\
            .get_characteristic('CurrentDoorState')\
            .set_value(value)
def get_bridge(driver):
    """Assemble a Bridge exposing all four fake accessories."""
    home_bridge = Bridge(driver, 'Bridge')
    accessories = (
        LightBulb(driver, 'Lightbulb'),
        FakeFan(driver, 'Big Fan'),
        GarageDoor(driver, 'Garage'),
        TemperatureSensor(driver, 'Sensor'),
    )
    for accessory in accessories:
        home_bridge.add_accessory(accessory)
    return home_bridge
# Start the accessory driver on the conventional HAP port, persisting
# pairing state to busy_home.state, and shut down cleanly on SIGTERM.
driver = AccessoryDriver(port=51826, persist_file='busy_home.state')
driver.add_accessory(accessory=get_bridge(driver))
signal.signal(signal.SIGTERM, driver.signal_handler)
driver.start()
| 31.371429
| 77
| 0.683667
| 373
| 3,294
| 5.758713
| 0.27882
| 0.037244
| 0.020484
| 0.027933
| 0.164804
| 0.110801
| 0.110801
| 0.110801
| 0.078212
| 0.040968
| 0
| 0.00419
| 0.203097
| 3,294
| 104
| 78
| 31.673077
| 0.814095
| 0.086825
| 0
| 0.153846
| 0
| 0
| 0.110366
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.138462
| false
| 0
| 0.092308
| 0
| 0.369231
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93d680ecf48e6dbb1495bab46f68ebdbe3aea08b
| 574
|
py
|
Python
|
Backend/src/commercial/urls.py
|
ChristianTaborda/Energycorp
|
2447b5af211501450177b0b60852dcb31d6ca12d
|
[
"MIT"
] | 1
|
2020-12-31T00:07:40.000Z
|
2020-12-31T00:07:40.000Z
|
Backend/src/commercial/urls.py
|
ChristianTaborda/Energycorp
|
2447b5af211501450177b0b60852dcb31d6ca12d
|
[
"MIT"
] | null | null | null |
Backend/src/commercial/urls.py
|
ChristianTaborda/Energycorp
|
2447b5af211501450177b0b60852dcb31d6ca12d
|
[
"MIT"
] | null | null | null |
from django.urls import path

from .views import (
    # CRUDS
    CommercialList,
    CommercialDetail,
    CommercialCreate,
    CommercialUpdate,
    # BUG FIX: CommercialDelete was imported twice; kept a single entry.
    CommercialDelete,
    CommercialInactivate,
    # QUERY
)

urlpatterns = [
    # CRUD
    path('', CommercialList.as_view()),
    path('create/', CommercialCreate.as_view()),
    path('<pk>/', CommercialDetail.as_view()),
    path('update/<pk>/', CommercialUpdate.as_view()),
    path('inactivate/<pk>/', CommercialInactivate.as_view()),
    # NOTE(review): unlike the other routes this one lacks a trailing slash
    # after <pk> -- confirm whether that is intentional.
    path('delete/<pk>', CommercialDelete.as_view()),
    # QUERY
]
| 22.076923
| 61
| 0.667247
| 52
| 574
| 7.25
| 0.442308
| 0.095491
| 0.132626
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.182927
| 574
| 25
| 62
| 22.96
| 0.803838
| 0.038328
| 0
| 0.111111
| 0
| 0
| 0.093578
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93d7e71a979233c8c73b2a4018aacf592bc1a08e
| 1,277
|
py
|
Python
|
migrations/versions/6e5e2b4c2433_add_hometasks_for_students.py
|
AnvarGaliullin/LSP
|
ed1f00ddc6346c5c141b421c7a3305e4c9e1b0d1
|
[
"MIT"
] | null | null | null |
migrations/versions/6e5e2b4c2433_add_hometasks_for_students.py
|
AnvarGaliullin/LSP
|
ed1f00ddc6346c5c141b421c7a3305e4c9e1b0d1
|
[
"MIT"
] | null | null | null |
migrations/versions/6e5e2b4c2433_add_hometasks_for_students.py
|
AnvarGaliullin/LSP
|
ed1f00ddc6346c5c141b421c7a3305e4c9e1b0d1
|
[
"MIT"
] | null | null | null |
"""Add Hometasks for Students
Revision ID: 6e5e2b4c2433
Revises: b9acba47fd53
Create Date: 2020-01-10 20:52:40.063133
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '6e5e2b4c2433'       # this migration
down_revision = 'b9acba47fd53'  # parent migration in the chain
branch_labels = None
depends_on = None
def upgrade():
    """Create the student_hometask table linking students to course hometasks."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('student_hometask',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('course_hometask_id', sa.Integer(), nullable=False),
    sa.Column('student_id', sa.Integer(), nullable=False),
    sa.Column('content', sa.String(length=100000), nullable=False),
    sa.Column('created_on', sa.DateTime(), nullable=True),
    sa.Column('updated_on', sa.DateTime(), nullable=True),
    sa.ForeignKeyConstraint(['course_hometask_id'], ['course_hometask.id'], ),
    sa.ForeignKeyConstraint(['student_id'], ['students.id'], ),
    sa.PrimaryKeyConstraint('id'),
    # One submission per (hometask, student) pair.
    sa.UniqueConstraint('course_hometask_id', 'student_id', name='_course_hometask_student_uniq_const')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the student_hometask table (reverse of upgrade)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('student_hometask')
    # ### end Alembic commands ###
| 31.925
| 103
| 0.702428
| 155
| 1,277
| 5.632258
| 0.425806
| 0.054983
| 0.068729
| 0.09622
| 0.270332
| 0.270332
| 0.210767
| 0.100802
| 0
| 0
| 0
| 0.047619
| 0.144871
| 1,277
| 39
| 104
| 32.74359
| 0.751832
| 0.24119
| 0
| 0
| 0
| 0
| 0.252146
| 0.037554
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0
| 0.095238
| 0
| 0.190476
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93d903dc4a4d4fc536ec37d420b4604d14554d90
| 1,759
|
py
|
Python
|
scripts/plotting.py
|
intelligent-soft-robots/o80_roboball2d
|
094d36f870b9c20ef5e05baf92ed8ed5b9a5277c
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/plotting.py
|
intelligent-soft-robots/o80_roboball2d
|
094d36f870b9c20ef5e05baf92ed8ed5b9a5277c
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/plotting.py
|
intelligent-soft-robots/o80_roboball2d
|
094d36f870b9c20ef5e05baf92ed8ed5b9a5277c
|
[
"BSD-3-Clause"
] | null | null | null |
import time
import math
import fyplot
import o80_roboball2d
from functools import partial
def _plot(frontend_robot, frontend_simulation):
    """Build (but do not start) a fyplot window with three subplots:
    observed robot joint angles, desired simulated joint angles, and the
    read frequency of both frontends.
    """
    plt = fyplot.Plot("o80_roboball2d", 50, (2000, 800))

    def get_observed_angle(frontend, dof):
        # Latest observed position of the given degree of freedom.
        return frontend.read().get_observed_states().get(dof).get_position()

    def get_desired_angle(frontend, dof):
        # Latest desired position of the given degree of freedom.
        return frontend.read().get_desired_states().get(dof).get_position()

    def get_frequency(frontend):
        return frontend.read().get_frequency()

    # One curve per degree of freedom (0..2), colored R/G/B.
    robot_plots = ((partial(get_observed_angle, frontend_robot, 0), (255, 0, 0)),
                   (partial(get_observed_angle, frontend_robot, 1), (0, 255, 0)),
                   (partial(get_observed_angle, frontend_robot, 2), (0, 0, 255)))
    sim_plots = ((partial(get_desired_angle, frontend_simulation, 0), (255, 0, 0)),
                 (partial(get_desired_angle, frontend_simulation, 1), (0, 255, 0)),
                 (partial(get_desired_angle, frontend_simulation, 2), (0, 0, 255)))
    frequency_plots = ((partial(get_frequency, frontend_robot), (255, 0, 0)),
                       (partial(get_frequency, frontend_simulation), (0, 255, 0)))
    # (y-range, window length in samples, curves)
    plt.add_subplot((-1.5, 0.2), 300, robot_plots)
    plt.add_subplot((-1.5, 0.2), 300, sim_plots)
    plt.add_subplot((0, 2100), 300, frequency_plots)
    return plt
def run():
    """Start live plotting of the real and mirrored robots until Ctrl-C."""
    real_robot = o80_roboball2d.RealRobotFrontEnd("real-robot")
    sim_robot = o80_roboball2d.MirroringFrontEnd("sim-robot")
    plot = _plot(real_robot, sim_robot)
    plot.start()
    try:
        while True:
            time.sleep(0.1)
    except KeyboardInterrupt:
        pass
    plot.stop()
    # BUG FIX: removed a stray `o80_example.stop_standalone(SEGMENT_ID)`
    # call left over from an example script -- neither `o80_example` nor
    # `SEGMENT_ID` is defined in this module, so it could only raise
    # NameError on shutdown.
# Run the live plot until interrupted when executed as a script.
if __name__ == "__main__":
    run()
| 30.327586
| 84
| 0.651507
| 226
| 1,759
| 4.778761
| 0.261062
| 0.096296
| 0.023148
| 0.088889
| 0.427778
| 0.392593
| 0.303704
| 0.037037
| 0
| 0
| 0
| 0.067587
| 0.217737
| 1,759
| 57
| 85
| 30.859649
| 0.717297
| 0
| 0
| 0
| 0
| 0
| 0.023322
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.128205
| false
| 0.025641
| 0.128205
| 0.076923
| 0.358974
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93d96a3758d5ca27cf2434f779255814b61dd0c7
| 10,099
|
py
|
Python
|
kvm_pirate/elf/structs.py
|
Mic92/kvm-pirate
|
26626db320b385f51ccb88dad76209a812c40ca6
|
[
"MIT"
] | 6
|
2020-12-15T04:26:43.000Z
|
2020-12-15T13:26:09.000Z
|
kvm_pirate/elf/structs.py
|
Mic92/kvm-pirate
|
26626db320b385f51ccb88dad76209a812c40ca6
|
[
"MIT"
] | null | null | null |
kvm_pirate/elf/structs.py
|
Mic92/kvm-pirate
|
26626db320b385f51ccb88dad76209a812c40ca6
|
[
"MIT"
] | null | null | null |
#
# Copyright (C) 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""This file contains ELF C structs and data types."""
import ctypes
from typing import Any
from . import consts
# ELF data types.
# Fixed-width ctypes aliases mirroring the C typedefs used by ELF headers.
Elf32_Addr = ctypes.c_uint32
Elf32_Off = ctypes.c_uint32
Elf32_Half = ctypes.c_uint16
Elf32_Word = ctypes.c_uint32
Elf32_Sword = ctypes.c_int32
Elf64_Addr = ctypes.c_uint64
Elf64_Off = ctypes.c_uint64
Elf64_Half = ctypes.c_uint16
Elf64_Word = ctypes.c_uint32
Elf64_Sword = ctypes.c_int32
Elf64_Xword = ctypes.c_uint64
Elf64_Sxword = ctypes.c_int64
# ELF C structs.
# Base classes shared by every ELF struct definition below.
class CStructure(ctypes.LittleEndianStructure):
    """Little endian C structure base class."""
    pass


class CUnion(ctypes.Union):
    """Native endian C union base class."""
    pass
class _Ehdr(CStructure):
    """ELF header base class.

    Subclasses supply the 32/64-bit field layout; accessors below read the
    class-independent e_ident identification bytes.
    """

    def GetFileClass(self) -> Any:
        """Returns the file class (the e_ident[EI_CLASS] byte)."""
        return self.e_ident[consts.EI_CLASS]

    def GetDataEncoding(self) -> Any:
        """Returns the data encoding of the file (the e_ident[EI_DATA] byte)."""
        return self.e_ident[consts.EI_DATA]
class Elf32_Ehdr(_Ehdr):
    """ELF 32-bit header."""
    # Field order/widths mirror the 32-bit Elf32_Ehdr C struct.
    _fields_ = [
        ("e_ident", ctypes.c_uint8 * consts.EI_NIDENT),
        ("e_type", Elf32_Half),
        ("e_machine", Elf32_Half),
        ("e_version", Elf32_Word),
        ("e_entry", Elf32_Addr),
        ("e_phoff", Elf32_Off),
        ("e_shoff", Elf32_Off),
        ("e_flags", Elf32_Word),
        ("e_ehsize", Elf32_Half),
        ("e_phentsize", Elf32_Half),
        ("e_phnum", Elf32_Half),
        ("e_shentsize", Elf32_Half),
        ("e_shnum", Elf32_Half),
        ("e_shstrndx", Elf32_Half),
    ]


class Elf64_Ehdr(_Ehdr):
    """ELF 64-bit header."""
    # Same field order as the 32-bit header, with 64-bit address/offset types.
    _fields_ = [
        ("e_ident", ctypes.c_uint8 * consts.EI_NIDENT),
        ("e_type", Elf64_Half),
        ("e_machine", Elf64_Half),
        ("e_version", Elf64_Word),
        ("e_entry", Elf64_Addr),
        ("e_phoff", Elf64_Off),
        ("e_shoff", Elf64_Off),
        ("e_flags", Elf64_Word),
        ("e_ehsize", Elf64_Half),
        ("e_phentsize", Elf64_Half),
        ("e_phnum", Elf64_Half),
        ("e_shentsize", Elf64_Half),
        ("e_shnum", Elf64_Half),
        ("e_shstrndx", Elf64_Half),
    ]
class Elf32_Shdr(CStructure):
    """ELF 32-bit section header."""
    _fields_ = [
        ("sh_name", Elf32_Word),
        ("sh_type", Elf32_Word),
        ("sh_flags", Elf32_Word),
        ("sh_addr", Elf32_Addr),
        ("sh_offset", Elf32_Off),
        ("sh_size", Elf32_Word),
        ("sh_link", Elf32_Word),
        ("sh_info", Elf32_Word),
        ("sh_addralign", Elf32_Word),
        ("sh_entsize", Elf32_Word),
    ]


class Elf64_Shdr(CStructure):
    """ELF 64-bit section header."""
    # Note: sh_flags/sh_size/sh_addralign/sh_entsize widen to Xword here.
    _fields_ = [
        ("sh_name", Elf64_Word),
        ("sh_type", Elf64_Word),
        ("sh_flags", Elf64_Xword),
        ("sh_addr", Elf64_Addr),
        ("sh_offset", Elf64_Off),
        ("sh_size", Elf64_Xword),
        ("sh_link", Elf64_Word),
        ("sh_info", Elf64_Word),
        ("sh_addralign", Elf64_Xword),
        ("sh_entsize", Elf64_Xword),
    ]
class Elf32_Dyn(CStructure):
    """ELF 32-bit dynamic section entry."""
    # d_un is a value/pointer union, as in the C definition.
    class _Elf32_Dyn__d_un(CUnion):
        _fields_ = [("d_val", Elf32_Word), ("d_ptr", Elf32_Addr)]
    _fields_ = [("d_tag", Elf32_Sword), ("d_un", _Elf32_Dyn__d_un)]


class Elf64_Dyn(CStructure):
    """ELF 64-bit dynamic section entry."""
    class _Elf64_Dyn__d_un(CUnion):
        _fields_ = [("d_val", Elf64_Xword), ("d_ptr", Elf64_Addr)]
    _fields_ = [("d_tag", Elf64_Sxword), ("d_un", _Elf64_Dyn__d_un)]
class _Sym(CStructure):
    """ELF symbol table entry base class.

    st_info packs the symbol binding in the high 4 bits and the symbol
    type in the low 4 bits (see SetBindingAndType).
    """

    def GetBinding(self) -> Any:
        """Returns the symbol binding (high nibble of st_info)."""
        return self.st_info >> 4

    def GetType(self) -> Any:
        """Returns the symbol type (low nibble of st_info)."""
        return self.st_info & 0xF

    def SetBinding(self, binding: int) -> None:
        """Sets the symbol binding.

        Args:
          binding: An integer specifying the new binding.
        """
        # BUG FIX: previously called self.SetSymbolAndType, which does not
        # exist on this class (that name belongs to the relocation classes);
        # the method defined below is SetBindingAndType.
        self.SetBindingAndType(binding, self.GetType())

    def SetType(self, type_: int) -> None:
        """Sets the symbol type.

        Args:
          type_: An integer specifying the new type.
        """
        # BUG FIX: same wrong-name call as SetBinding; see above.
        self.SetBindingAndType(self.GetBinding(), type_)

    def SetBindingAndType(self, binding: int, type_: int) -> None:
        """Sets the symbol binding and type.

        Args:
          binding: An integer specifying the new binding.
          type_: An integer specifying the new type.
        """
        self.st_info = (binding << 4) | (type_ & 0xF)
class Elf32_Sym(_Sym):
    """ELF 32-bit symbol table entry."""
    _fields_ = [
        ("st_name", Elf32_Word),
        ("st_value", Elf32_Addr),
        ("st_size", Elf32_Word),
        ("st_info", ctypes.c_uint8),
        ("st_other", ctypes.c_uint8),
        ("st_shndx", Elf32_Half),
    ]


class Elf64_Sym(_Sym):
    """ELF 64-bit symbol table entry."""
    # Note the different field order vs. 32-bit: st_info/st_other/st_shndx
    # precede st_value/st_size in the 64-bit layout.
    _fields_ = [
        ("st_name", Elf64_Word),
        ("st_info", ctypes.c_uint8),
        ("st_other", ctypes.c_uint8),
        ("st_shndx", Elf64_Half),
        ("st_value", Elf64_Addr),
        ("st_size", Elf64_Xword),
    ]
class _32_Rel(CStructure):
    """ELF 32-bit relocation table entry base class.

    r_info packs the symbol table index in the high 24 bits and the
    relocation type in the low 8 bits.
    """

    def GetSymbol(self) -> Any:
        """Returns the symbol table index with respect to the relocation.

        Symbol table index with respect to which the relocation must be made.
        """
        return self.r_info >> 8

    def GetType(self) -> Any:
        """Returns the relocation type."""
        return self.r_info & 0xFF

    def SetSymbol(self, symndx: int) -> None:
        """Sets the relocation's symbol table index.

        Args:
          symndx: An integer specifying the new symbol table index.
        """
        self.SetSymbolAndType(symndx, self.GetType())

    def SetType(self, type_: int) -> None:
        """Sets the relocation type.

        Args:
          type_: An integer specifying the new relocation type.
        """
        self.SetSymbolAndType(self.GetSymbol(), type_)

    def SetSymbolAndType(self, symndx: int, type_: int) -> None:
        """Sets the relocation's symbol table index and type.

        Args:
          symndx: An integer specifying the new symbol table index.
          type_: An integer specifying the new relocation type.
        """
        self.r_info = (symndx << 8) | (type_ & 0xFF)
class Elf32_Rel(_32_Rel):
    """ELF 32-bit relocation table entry."""
    _fields_ = [("r_offset", Elf32_Addr), ("r_info", Elf32_Word)]


class Elf32_Rela(_32_Rel):
    """ELF 32-bit relocation table entry with explicit addend."""
    # Identical to Elf32_Rel plus the trailing signed r_addend field.
    _fields_ = [
        ("r_offset", Elf32_Addr),
        ("r_info", Elf32_Word),
        ("r_addend", Elf32_Sword),
    ]
class _64_Rel(CStructure):
    """ELF 64-bit relocation table entry base class.

    r_info packs the symbol table index in the high 32 bits and the
    relocation type in the low 32 bits.
    """

    def GetSymbol(self) -> Any:
        """Returns the symbol table index with respect to the relocation.

        Symbol table index with respect to which the relocation must be made.
        """
        return self.r_info >> 32

    def GetType(self) -> Any:
        """Returns the relocation type."""
        return self.r_info & 0xFFFFFFFF

    def SetSymbol(self, symndx: int) -> None:
        """Sets the relocation's symbol table index.

        Args:
          symndx: An integer specifying the new symbol table index.
        """
        self.SetSymbolAndType(symndx, self.GetType())

    def SetType(self, type_: int) -> None:
        """Sets the relocation type.

        Args:
          type_: An integer specifying the new relocation type.
        """
        self.SetSymbolAndType(self.GetSymbol(), type_)

    def SetSymbolAndType(self, symndx: int, type_: int) -> None:
        """Sets the relocation's symbol table index and type.

        Args:
          symndx: An integer specifying the new symbol table index.
          type_: An integer specifying the new relocation type.
        """
        self.r_info = (symndx << 32) | (type_ & 0xFFFFFFFF)
class Elf64_Rel(_64_Rel):
    """ELF 64-bit relocation table entry."""
    _fields_ = [("r_offset", Elf64_Addr), ("r_info", Elf64_Xword)]


class Elf64_Rela(_64_Rel):
    """ELF 64-bit relocation table entry with explicit addend."""
    # Identical to Elf64_Rel plus the trailing signed r_addend field.
    _fields_ = [
        ("r_offset", Elf64_Addr),
        ("r_info", Elf64_Xword),
        ("r_addend", Elf64_Sxword),
    ]
class Elf32_Phdr(CStructure):
    """ELF 32-bit program header."""
    _fields_ = [
        ("p_type", Elf32_Word),
        ("p_offset", Elf32_Off),
        ("p_vaddr", Elf32_Addr),
        ("p_paddr", Elf32_Addr),
        ("p_filesz", Elf32_Word),
        ("p_memsz", Elf32_Word),
        ("p_flags", Elf32_Word),
        ("p_align", Elf32_Word),
    ]


class Elf64_Phdr(CStructure):
    """ELF 64-bit program header."""
    # Note: p_flags moves up to follow p_type in the 64-bit layout.
    _fields_ = [
        ("p_type", Elf64_Word),
        ("p_flags", Elf64_Word),
        ("p_offset", Elf64_Off),
        ("p_vaddr", Elf64_Addr),
        ("p_paddr", Elf64_Addr),
        ("p_filesz", Elf64_Xword),
        ("p_memsz", Elf64_Xword),
        ("p_align", Elf64_Xword),
    ]
class Elf32_Nhdr(CStructure):
    """ELF 32-bit note header."""
    _fields_ = [
        ("n_namesz", Elf32_Word),
        ("n_descsz", Elf32_Word),
        ("n_type", Elf32_Word),
    ]


class Elf64_Nhdr(CStructure):
    """ELF 64-bit note header."""
    # Same three Word-sized fields as the 32-bit note header.
    _fields_ = [
        ("n_namesz", Elf64_Word),
        ("n_descsz", Elf64_Word),
        ("n_type", Elf64_Word),
    ]
| 26.231169
| 77
| 0.600951
| 1,263
| 10,099
| 4.532067
| 0.160728
| 0.037736
| 0.039832
| 0.046122
| 0.472397
| 0.446366
| 0.391509
| 0.372991
| 0.328616
| 0.307652
| 0
| 0.046951
| 0.266066
| 10,099
| 384
| 78
| 26.299479
| 0.72531
| 0.285573
| 0
| 0.186529
| 0
| 0
| 0.111095
| 0
| 0
| 0
| 0.005063
| 0
| 0
| 1
| 0.088083
| false
| 0.010363
| 0.015544
| 0
| 0.352332
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93db2131f51a021bb76ace2f9993a86a1d6b0e6b
| 469
|
py
|
Python
|
connect-2018/exercises/2018/lr-automation/cbr.py
|
cbcommunity/cb-connect
|
3ccfd1ed51e808f567f9f0fc4e8fe2688ef9ee76
|
[
"MIT"
] | 5
|
2019-06-03T21:02:32.000Z
|
2020-12-01T08:59:50.000Z
|
connect-2018/exercises/2018/lr-automation/cbr.py
|
cbcommunity/cb-connect-2018
|
3ccfd1ed51e808f567f9f0fc4e8fe2688ef9ee76
|
[
"MIT"
] | null | null | null |
connect-2018/exercises/2018/lr-automation/cbr.py
|
cbcommunity/cb-connect-2018
|
3ccfd1ed51e808f567f9f0fc4e8fe2688ef9ee76
|
[
"MIT"
] | 1
|
2019-07-09T20:09:14.000Z
|
2019-07-09T20:09:14.000Z
|
from cbapi.response import *
from lrjob import run_liveresponse
from cbapi.example_helpers import get_cb_response_object, build_cli_parser
def main():
    """Open a Live Response session on the sensor id given on the CLI."""
    cli = build_cli_parser("Cb Response Live Response example")
    cli.add_argument("sensorid", nargs=1)
    parsed = cli.parse_args()
    cb = get_cb_response_object(parsed)
    sensor = cb.select(Sensor, int(parsed.sensorid[0]))
    run_liveresponse(sensor.lr_session())


if __name__ == '__main__':
    main()
| 26.055556
| 74
| 0.742004
| 65
| 469
| 4.984615
| 0.523077
| 0.092593
| 0.080247
| 0.117284
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005051
| 0.15565
| 469
| 18
| 75
| 26.055556
| 0.813131
| 0
| 0
| 0
| 0
| 0
| 0.104255
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.25
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93e13a546c607eee62ff4605caebeeafa51bfb7a
| 6,805
|
py
|
Python
|
pricePrediction/preprocessData/prepareDataMol2Price.py
|
rsanchezgarc/CoPriNet
|
33708a82746278270fd1aa600d4b562ea0f62c1c
|
[
"MIT"
] | null | null | null |
pricePrediction/preprocessData/prepareDataMol2Price.py
|
rsanchezgarc/CoPriNet
|
33708a82746278270fd1aa600d4b562ea0f62c1c
|
[
"MIT"
] | null | null | null |
pricePrediction/preprocessData/prepareDataMol2Price.py
|
rsanchezgarc/CoPriNet
|
33708a82746278270fd1aa600d4b562ea0f62c1c
|
[
"MIT"
] | 1
|
2022-03-02T16:21:16.000Z
|
2022-03-02T16:21:16.000Z
|
import gzip
import os
import re
import sys
import time
from functools import reduce
from itertools import chain
from multiprocessing import cpu_count
import lmdb
import psutil
import joblib
from joblib import Parallel, delayed
import numpy as np
from pricePrediction import config
from pricePrediction.config import USE_MMOL_INSTEAD_GRAM
from pricePrediction.preprocessData.serializeDatapoints import getExampleId, serializeExample
from pricePrediction.utils import tryMakedir, getBucketRanges, search_buckedId, EncodedDirNamesAndTemplates
from .smilesToGraph import smiles_to_graph, compute_nodes_degree, fromPerGramToPerMMolPrice
PER_WORKER_MEMORY_GB = 2
class DataBuilder():
    """Encodes raw "SMILES,price" csv files into per-split lmdb datasets of
    serialized (price, molecular graph) examples, plus dataset metadata."""

    def __init__(self, n_cpus=config.N_CPUS):
        # When no worker count is given, derive one from the available RAM
        # (PER_WORKER_MEMORY_GB per worker), capped at the CPU count and
        # floored at 1 worker.
        if n_cpus is None:
            mem_gib = psutil.virtual_memory().available / (1024. ** 3)
            n_cpus = int(max(1, min(cpu_count(), mem_gib // PER_WORKER_MEMORY_GB)))
        self.n_cpus = n_cpus

    def processOneFileOfSmiles(self, encodedDir, fileNum, datasetSplit, fname):
        """Encodes one csv(.gz) file of SMILES/price rows into an lmdb file.

        Returns a 1-tuple of (lmdb path, node-degree histogram, number of
        examples written, per-price-bucket counts) so worker results can be
        flattened with chain.from_iterable by the caller.
        """
        print("processing %s" % fname)
        # Pick the opener/decoder matching the file's compression.
        if fname.endswith(".csv"):
            open_fun = open
            decode_line = lambda line: line
        elif fname.endswith(".csv.gz"):
            open_fun = gzip.open
            decode_line = lambda line: line.decode('utf-8')
        else:
            raise ValueError("Bad file format")
        cols = ['SMILES', 'price']
        names = EncodedDirNamesAndTemplates(encodedDir)
        outFname_base = datasetSplit + "_" + str(fileNum) + "_lmdb"
        outFname = os.path.join(encodedDir, datasetSplit, outFname_base)
        env = lmdb.open(outFname, map_size=10737418240)  # 10 GiB map size
        degs = compute_nodes_degree(None)  # degree-histogram accumulator
        num_examples = 0
        bucket_ranges = getBucketRanges()
        n_per_bucket = np.zeros(len(bucket_ranges), dtype=np.int64)
        with open_fun(fname) as f_in:
            header = decode_line(f_in.readline()).strip().split(",")
            try:
                # Locate the SMILES/price columns by header name; fall back
                # to the first two columns when the header lacks them.
                smi_index, price_index = [header.index(col) for col in cols]
            except ValueError:
                smi_index, price_index = 0, 1
            with env.begin(write=True) as sink, \
                    gzip.open(names.SELECTED_DATAPOINTS_TEMPLATE % (datasetSplit, fileNum), "wt") as f_out:
                f_out.write("SMILES,price\n")
                cur_time = time.time()
                for i, line in enumerate(f_in):
                    lineArray = decode_line(line).strip().split(",")
                    smi, price = lineArray[smi_index], lineArray[price_index]
                    price = float(price)
                    graph = smiles_to_graph(smi)
                    if graph is None:
                        # Unparseable SMILES: skip the row entirely.
                        continue
                    # Save the original smiles-price
                    f_out.write("%s,%s\n" % (smi, price))
                    # Use the per mmol price
                    if USE_MMOL_INSTEAD_GRAM:
                        price = fromPerGramToPerMMolPrice(price, smi)
                    # Histogram over log-price buckets for later re-weighting.
                    bucketId = search_buckedId(np.log(price), bucket_ranges)
                    n_per_bucket[bucketId] += 1
                    degs += compute_nodes_degree([graph])
                    fileId = getExampleId(outFname_base, num_examples)
                    sink.put(fileId, serializeExample(price, graph))
                    num_examples += 1
                    # Progress line, printed by only one worker in n_cpus to
                    # keep the console readable.
                    if i % 10000 == 0 and fileNum % self.n_cpus == 0:
                        new_time = time.time()
                        print("Current iteration: %d # task: %d (%.2f s) " % (i, fileNum, new_time - cur_time), end="\r")
                        cur_time = new_time
        if fileNum % self.n_cpus == 0:
            print()
        return ((outFname, degs, num_examples, n_per_bucket),)

    def getNFeatures(self):
        """Returns the node/edge feature dimensionality of a probe graph."""
        # Any valid molecule works as a probe; hexanol is used here.
        one_graph = smiles_to_graph("CCCCCCO")
        print(one_graph)
        return dict(nodes_n_features=one_graph["x"].shape[-1], edges_n_features=one_graph["edge_attr"].shape[-1])

    def prepareDataset(self, inputDir=config.DATASET_DIRNAME, encodedDir=config.ENCODED_DIR, datasetSplit="train", **kwargs):
        """Encodes every raw file of *datasetSplit* in parallel and dumps
        metadata (sizes, degree histogram, price buckets) next to the lmdbs.

        Returns the list of produced lmdb file names.
        """
        assert datasetSplit in ["train", "val", "test"]
        print("Computing %s dataset" % datasetSplit)
        print("Using %d workers for data preparation" % self.n_cpus)
        # os.environ["OMP_NUM_THREADS"] = "1"
        # os.environ["MKL_NUM_THREADS"] = "1"
        names = EncodedDirNamesAndTemplates(encodedDir)
        tryMakedir(encodedDir, remove=False)
        tryMakedir(os.path.join(encodedDir, datasetSplit))
        tryMakedir(names.DIR_RAW_DATA_SELECTED)
        # Raw files are matched by suffix pattern and must mention the split
        # name in their file name.
        fnames = [os.path.join(inputDir, fname) for fname in os.listdir(inputDir) if
                  re.match(config.RAW_DATA_FILE_SUFFIX, fname) and datasetSplit in fname]
        assert len(fnames) > 0
        results = Parallel(n_jobs=self.n_cpus, batch_size=1,
                           verbose=10)(delayed(self.processOneFileOfSmiles)(encodedDir, i, datasetSplit, fname)
                                       for i, fname in enumerate(fnames))
        # Each worker returns a 1-tuple; flatten then transpose.
        results = chain.from_iterable(results)
        results = list(results)
        # print( results )
        fnames_list, degrees, sizes_lis, n_per_bucket = zip(*results)
        degrees = reduce(lambda prev, x: prev + x, degrees).numpy().tolist()
        n_per_bucket = reduce(lambda prev, x: prev + x, n_per_bucket)
        metadata_dict = {"name": datasetSplit, "fnames_list": fnames_list, "sizes_list": sizes_lis,
                         "total_size": sum(sizes_lis)}
        metadata_dict.update(self.getNFeatures())
        joblib.dump(metadata_dict,
                    names.DATASET_METADATA_FNAME_TEMPLATE % datasetSplit)
        joblib.dump(degrees, names.DEGREES_FNAME)
        # print(n_per_bucket)
        joblib.dump({"bucket_ranges": getBucketRanges(), "n_per_bucket": n_per_bucket}, names.BUCKETS_FNAME_TEMPLATE % datasetSplit)
        print("Dataset %s computed" % datasetSplit)
        return fnames_list
if __name__ == "__main__":
    # Echo the exact command line for reproducibility in logs.
    print( " ".join(sys.argv))
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--inputDir", type=str, default=config.DATASET_DIRNAME, help="Directory where smiles-price pairs are located")
    parser.add_argument("-o", "--encodedDir", type=str, default=config.ENCODED_DIR)
    parser.add_argument("-n", "--ncpus", type=int, default=config.N_CPUS)
    args = vars( parser.parse_args())
    # Propagate the CLI worker count to the global config before building.
    config.N_CPUS = args.get("ncpus", config.N_CPUS)
    dataBuilder = DataBuilder(n_cpus=config.N_CPUS)
    # Encode all three splits with the same input/output directories.
    dataBuilder.prepareDataset(datasetSplit="train", **args)
    dataBuilder.prepareDataset(datasetSplit="val", **args)
    dataBuilder.prepareDataset(datasetSplit="test", **args)

'''
python -m pricePrediction.preprocessData.prepareDataMol2Price
'''
| 42.006173
| 140
| 0.627039
| 778
| 6,805
| 5.272494
| 0.316195
| 0.018284
| 0.021941
| 0.008776
| 0.07411
| 0.042418
| 0
| 0
| 0
| 0
| 0
| 0.008842
| 0.268773
| 6,805
| 162
| 141
| 42.006173
| 0.815514
| 0.023806
| 0
| 0.016393
| 0
| 0
| 0.063052
| 0
| 0
| 0
| 0
| 0
| 0.016393
| 1
| 0.032787
| false
| 0
| 0.155738
| 0
| 0.221311
| 0.065574
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93e2b831da7ddd82cdee3f6c7c6866a56f385beb
| 2,894
|
py
|
Python
|
lanzou/gui/workers/more.py
|
WaterLemons2k/lanzou-gui
|
f5c57f980ee9a6d47164a39b90d82eb0391ede8b
|
[
"MIT"
] | 1,093
|
2019-12-25T10:42:34.000Z
|
2022-03-28T22:35:32.000Z
|
lanzou/gui/workers/more.py
|
Enrontime/lanzou-gui
|
8e89438d938ee4994a4118502c3f14d467b55acc
|
[
"MIT"
] | 116
|
2019-12-24T04:01:43.000Z
|
2022-03-26T16:12:41.000Z
|
lanzou/gui/workers/more.py
|
Enrontime/lanzou-gui
|
8e89438d938ee4994a4118502c3f14d467b55acc
|
[
"MIT"
] | 188
|
2020-01-11T14:17:13.000Z
|
2022-03-29T09:18:34.000Z
|
from PyQt5.QtCore import QThread, pyqtSignal, QMutex
from lanzou.api import LanZouCloud
from lanzou.gui.models import Infos
from lanzou.debug import logger
class GetMoreInfoWorker(QThread):
    """Fetches a file's direct download link, or a file/folder's extract
    code and description; used to show extra information after login."""

    # Emitted with the enriched Infos object (normal path).
    infos = pyqtSignal(object)
    # Emitted instead of `infos` when the caller asked for the share link.
    share_url = pyqtSignal(object)
    # Emitted with the direct download link, or an error string.
    dl_link = pyqtSignal(object)
    # Status message: (text, display duration in ms; 0 means sticky).
    msg = pyqtSignal(str, int)

    def __init__(self, parent=None):
        super(GetMoreInfoWorker, self).__init__(parent)
        self._disk = None        # LanZouCloud session, injected via set_disk()
        self._infos = None       # Infos object to enrich
        self._url = ''           # share url when fetching a direct link
        self._pwd = ''           # extract code for that share url
        self._emit_link = False  # emit on share_url instead of infos
        self._mutex = QMutex()
        self._is_work = False    # True while a request is in flight

    def set_disk(self, disk):
        """Injects the LanZouCloud disk session used for the API calls."""
        self._disk = disk

    def set_values(self, infos, emit_link=False):
        """Requests share info for *infos*; emits on share_url if *emit_link*."""
        self._infos = infos
        self._emit_link = emit_link
        self.start()

    def get_dl_link(self, url, pwd):
        """Requests the direct download link for share *url* with code *pwd*."""
        self._url = url
        self._pwd = pwd
        self.start()

    def __del__(self):
        # Block destruction until the worker thread has finished.
        self.wait()

    def stop(self):
        self._mutex.lock()
        self._is_work = False
        self._mutex.unlock()

    def run(self):
        # infos: ID/None, file name, size, date, download count (dl_count),
        # extract code (pwd), description (desc), | share link (share-url)
        if not self._is_work and self._infos:
            self._mutex.lock()
            self._is_work = True
            try:
                if not self._url:  # fetch ordinary share info
                    if isinstance(self._infos, Infos):
                        if self._infos.id:  # invoked from the disk view
                            self.msg.emit("网络请求中,请稍候……", 0)
                            _info = self._disk.get_share_info(self._infos.id, is_file=self._infos.is_file)
                            self._infos.desc = _info.desc
                            self._infos.pwd = _info.pwd
                            self._infos.url = _info.url
                        if self._emit_link:
                            self.share_url.emit(self._infos)
                        else:
                            self.infos.emit(self._infos)
                        self.msg.emit("", 0)  # clear the status message
                else:  # fetch the direct download link
                    res = self._disk.get_file_info_by_url(self._url, self._pwd)
                    if res.code == LanZouCloud.SUCCESS:
                        self.dl_link.emit("{}".format(res.durl or "无"))  # direct link
                    elif res.code == LanZouCloud.NETWORK_ERROR:
                        self.dl_link.emit("网络错误!获取失败")  # direct link
                    else:
                        self.dl_link.emit("其它错误!")  # direct link
            except TimeoutError:
                self.msg.emit("网络超时!稍后重试", 6000)
            except Exception as e:
                logger.error(f"GetMoreInfoWorker error: e={e}")
            self._is_work = False
            self._url = ''
            self._pwd = ''
            self._mutex.unlock()
        else:
            self.msg.emit("后台正在运行,请稍后重试!", 3100)
| 34.86747
| 106
| 0.516586
| 332
| 2,894
| 4.268072
| 0.325301
| 0.095272
| 0.035286
| 0.02964
| 0.074806
| 0.032463
| 0
| 0
| 0
| 0
| 0
| 0.006101
| 0.376987
| 2,894
| 82
| 107
| 35.292683
| 0.776484
| 0.051486
| 0
| 0.242857
| 0
| 0
| 0.029283
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.057143
| 0
| 0.228571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93e5d68b70881e1e29365acb06e52f0fb4bc0b36
| 2,331
|
py
|
Python
|
neuro_logging/__init__.py
|
neuro-inc/neuro-logging
|
e3173a40d0e2559f113f1420ed8a3fd4a0e76dde
|
[
"Apache-2.0"
] | null | null | null |
neuro_logging/__init__.py
|
neuro-inc/neuro-logging
|
e3173a40d0e2559f113f1420ed8a3fd4a0e76dde
|
[
"Apache-2.0"
] | 50
|
2021-08-20T00:10:05.000Z
|
2022-02-21T16:44:46.000Z
|
neuro_logging/__init__.py
|
neuro-inc/neuro-logging
|
e3173a40d0e2559f113f1420ed8a3fd4a0e76dde
|
[
"Apache-2.0"
] | null | null | null |
import logging
import logging.config
import os
from importlib.metadata import version
from typing import Any, Union
from .trace import (
make_request_logging_trace_config,
make_sentry_trace_config,
make_zipkin_trace_config,
new_sampled_trace,
new_trace,
new_trace_cm,
notrace,
setup_sentry,
setup_zipkin,
setup_zipkin_tracer,
trace,
trace_cm,
)
# Resolve the installed distribution's version once at import time.
__version__ = version(__package__)

# Public API of the package: local helpers plus re-exports from .trace.
__all__ = [
    "init_logging",
    "HideLessThanFilter",
    "make_request_logging_trace_config",
    "make_sentry_trace_config",
    "make_zipkin_trace_config",
    "notrace",
    "setup_sentry",
    "setup_zipkin",
    "setup_zipkin_tracer",
    "trace",
    "trace_cm",
    "new_sampled_trace",
    "new_trace",
    "new_trace_cm",
]
class HideLessThanFilter(logging.Filter):
    """Logging filter that passes only records strictly BELOW a given level.

    Attached to the stdout handler in DEFAULT_CONFIG so that ERROR-and-above
    records go exclusively to stderr.

    :param level: numeric level or a level name such as ``"ERROR"``
    :param name: standard ``logging.Filter`` logger-name restriction
    :raises ValueError: if *level* is a string that is not a known level name
    """

    def __init__(self, level: Union[int, str] = logging.ERROR, name: str = ""):
        super().__init__(name)
        if not isinstance(level, int):
            try:
                level = logging._nameToLevel[level]
            except KeyError as exc:
                # chain the KeyError so the original lookup failure is preserved
                raise ValueError(f"Unknown level name: {level}") from exc
        self.level = level

    def filter(self, record: logging.LogRecord) -> bool:
        # True (keep) only for records below the threshold.
        return record.levelno < self.level
# Root log level: the NP_LOG_LEVEL env var (a level *name*) wins; else WARNING.
if "NP_LOG_LEVEL" in os.environ:
    _default_log_level = logging.getLevelName(os.environ["NP_LOG_LEVEL"])
else:
    _default_log_level = logging.WARNING
# dictConfig schema used by init_logging() by default: records below ERROR go
# to stdout (enforced by HideLessThanFilter), ERROR and above go to stderr.
DEFAULT_CONFIG = {
    "version": 1,
    "disable_existing_loggers": False,
    "formatters": {
        "standard": {"format": "%(asctime)s - %(name)s - %(levelname)s - %(message)s"}
    },
    "filters": {
        # passes only records strictly below ERROR (see HideLessThanFilter)
        "hide_errors": {"()": f"{__name__}.HideLessThanFilter", "level": "ERROR"}
    },
    "handlers": {
        "stdout": {
            "class": "logging.StreamHandler",
            "level": "DEBUG",
            "formatter": "standard",
            "stream": "ext://sys.stdout",
            "filters": ["hide_errors"],
        },
        "stderr": {
            "class": "logging.StreamHandler",
            "level": "ERROR",
            "formatter": "standard",
            "stream": "ext://sys.stderr",
        },
    },
    "root": {"level": _default_log_level, "handlers": ["stderr", "stdout"]},
}
def init_logging(config: "dict[str, Any] | None" = None) -> None:
    """Configure the logging system via ``logging.config.dictConfig``.

    :param config: a dictConfig schema; when omitted, ``DEFAULT_CONFIG`` is used.
    """
    # Resolve the default at call time instead of binding the module-level dict
    # as a default argument value (mutable-default pitfall; also picks up any
    # later changes to DEFAULT_CONFIG). Passing DEFAULT_CONFIG explicitly still
    # behaves exactly as before.
    logging.config.dictConfig(DEFAULT_CONFIG if config is None else config)
| 25.336957
| 86
| 0.607465
| 243
| 2,331
| 5.473251
| 0.36214
| 0.049624
| 0.045113
| 0.034586
| 0.287218
| 0.243609
| 0.243609
| 0.243609
| 0.193985
| 0.193985
| 0
| 0.000575
| 0.253968
| 2,331
| 91
| 87
| 25.615385
| 0.764232
| 0
| 0
| 0.050633
| 0
| 0
| 0.277134
| 0.075504
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037975
| false
| 0
| 0.075949
| 0.012658
| 0.139241
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93e8008d69beb181243428fecbcab6a20eb6cce6
| 3,628
|
py
|
Python
|
quantrocket/houston.py
|
Jay-Jay-D/quantrocket-client
|
b70ac199382d22d56fad923ca2233ce027f3264a
|
[
"Apache-2.0"
] | null | null | null |
quantrocket/houston.py
|
Jay-Jay-D/quantrocket-client
|
b70ac199382d22d56fad923ca2233ce027f3264a
|
[
"Apache-2.0"
] | null | null | null |
quantrocket/houston.py
|
Jay-Jay-D/quantrocket-client
|
b70ac199382d22d56fad923ca2233ce027f3264a
|
[
"Apache-2.0"
] | 1
|
2019-06-12T11:34:27.000Z
|
2019-06-12T11:34:27.000Z
|
# Copyright 2017 QuantRocket - All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import requests
from .exceptions import ImproperlyConfigured
from quantrocket.cli.utils.output import json_to_cli
class Houston(requests.Session):
    """
    Subclass of `requests.Session` that provides an interface to the houston
    API gateway. Reads HOUSTON_URL (and Basic Auth credentials if applicable)
    from environment variables and applies them to each request. Simply provide
    the path, starting with /, for example:

    >>> response = houston.get("/countdown/crontab")

    Since each instance of Houston is a session, you can improve performance
    by using a single session for all requests. The module provides an instance
    of `Houston`, named `houston`.

    Use the same session as other requests:

    >>> from quantrocket.houston import houston

    Use a new session:

    >>> from quantrocket.houston import Houston
    >>> houston = Houston()
    """
    # default per-request timeout (seconds) applied to non-streaming requests
    DEFAULT_TIMEOUT = 30

    def __init__(self):
        super(Houston, self).__init__()
        # Basic Auth is only enabled when both credentials are present.
        if "HOUSTON_USERNAME" in os.environ and "HOUSTON_PASSWORD" in os.environ:
            self.auth = (os.environ["HOUSTON_USERNAME"], os.environ["HOUSTON_PASSWORD"])

    @property
    def base_url(self):
        """Gateway base URL, read from HOUSTON_URL on every access."""
        if "HOUSTON_URL" not in os.environ:
            raise ImproperlyConfigured("HOUSTON_URL is not set")
        return os.environ["HOUSTON_URL"]

    def request(self, method, url, *args, **kwargs):
        """Send a request, prefixing gateway paths with base_url and applying
        the default timeout to non-streaming calls."""
        if url.startswith('/'):
            url = self.base_url + url
        timeout = kwargs.get("timeout", None)
        stream = kwargs.get("stream", None)
        if timeout is None and not stream:
            kwargs["timeout"] = self.DEFAULT_TIMEOUT
        # Move conids from params to data if too long (avoids oversized URLs)
        conids = kwargs.get("params", {}).get("conids", None)
        if conids and isinstance(conids, list) and len(conids) > 1:
            data = kwargs.get("data", {}) or {}
            data["conids"] = conids
            kwargs["params"].pop("conids")
            kwargs["data"] = data
        return super(Houston, self).request(method, url, *args, **kwargs)

    @staticmethod
    def raise_for_status_with_json(response):
        """
        Raises 400/500 error codes, attaching a json response to the
        exception, if possible.
        """
        try:
            response.raise_for_status()
        except requests.exceptions.HTTPError as e:
            try:
                e.json_response = response.json()
                e.args = e.args + (e.json_response,)
            # BUG FIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt; only a JSON decode failure is expected.
            except Exception:
                e.json_response = {}
                e.args = e.args + ("please check the logs for more details",)
            raise e
# Instantiate houston so that all callers can share a TCP connection (for
# performance's sake)
houston = Houston()
def ping():
    """Ping the houston gateway and return its JSON reply.

    Returns
    -------
    json
        reply from houston
    """
    reply = houston.get("/ping")
    houston.raise_for_status_with_json(reply)
    return reply.json()
def _cli_ping():
    """CLI adapter: run ping() and format the result for the command line."""
    return json_to_cli(ping)
| 33.284404
| 88
| 0.651599
| 463
| 3,628
| 5.021598
| 0.386609
| 0.025806
| 0.014194
| 0.013763
| 0.055914
| 0.025806
| 0
| 0
| 0
| 0
| 0
| 0.006275
| 0.253308
| 3,628
| 109
| 89
| 33.284404
| 0.851975
| 0.419239
| 0
| 0.041667
| 0
| 0
| 0.106762
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0.041667
| 0.083333
| 0.020833
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93e81c0784cf18fea2ad26a23da9cc7f264a20a2
| 3,778
|
py
|
Python
|
src/Sudoku/SudokuGenerator.py
|
andrea-pollastro/Sudoku
|
84d82c9a181ad87f782efe7489fa28da70993590
|
[
"MIT"
] | 1
|
2020-01-09T10:48:47.000Z
|
2020-01-09T10:48:47.000Z
|
src/Sudoku/SudokuGenerator.py
|
andrea-pollastro/Sudoku
|
84d82c9a181ad87f782efe7489fa28da70993590
|
[
"MIT"
] | null | null | null |
src/Sudoku/SudokuGenerator.py
|
andrea-pollastro/Sudoku
|
84d82c9a181ad87f782efe7489fa28da70993590
|
[
"MIT"
] | null | null | null |
from random import shuffle
from math import sqrt
from enum import Enum
from src.Sudoku.SudokuSolver import SudokuSolver
from src.Sudoku.Sudoku import Sudoku
"""
**** SUDOKU GENERATOR ****
Author: Andrea Pollastro
Date: September 2018
"""
class SudokuGenerator:
    """Generates playable Sudoku grids with a unique solution."""
    __solver = SudokuSolver()

    def createSudoku(self, dimension, difficulty):
        """This method returns a Sudoku with a unique solution. Parameters are used to specify the dimension and the
        difficulty."""
        # BUG FIX: 'difficulty' was validated against SudokuDimension instead of
        # SudokuDifficulties, and the two checks were joined with 'and', so a
        # single invalid argument slipped through. Reject if EITHER is invalid.
        if not isinstance(dimension, SudokuDimension) or not isinstance(difficulty, SudokuDifficulties):
            return False
        sudoku = list()  # grid, filled cell by cell
        self.__fillSudoku(sudoku, 0, _SupportStructures(dimension.value))
        blanksIndexes = [i for i in range(0, len(sudoku))]
        shuffle(blanksIndexes)
        sudoku = self.__createBlanks(sudoku, difficulty.value, blanksIndexes, 0)
        return Sudoku(sudoku)

    def __fillSudoku(self, sudoku, cell, supportStructures):
        """This functions works recursively to create a complete Sudoku. It randomly assigns a value to cells. If
        a contradiction comes (specifically, when there aren't values to assign to a cell), it comes back to the last
        valid configuration."""
        values = supportStructures.getValidValues(cell)
        if len(values) == 0:
            return False
        shuffle(values)
        for n in values:
            sudoku.append(n)
            supportStructures.addValue(n, cell)
            if ((len(sudoku) == supportStructures.getSudokuDimension() ** 2)  # sudoku is complete
                    or self.__fillSudoku(sudoku, cell + 1, supportStructures)):
                return True
            # backtrack: undo the tentative placement
            sudoku.pop()
            supportStructures.removeValue(n, cell)
        return False

    def __createBlanks(self, sudoku, blanks, validValues, idx):
        """This functions creates blanks into 'sudoku' to make it playable. For any blanks, it checks if there's
        a unique solution. If it's not, it restores the last blank and chooses another cell."""
        if blanks == 0:
            return sudoku
        for i in range(idx, len(validValues)):
            index = validValues[i]
            oldValue = sudoku[index]
            sudoku[index] = 0
            if self.__solver.hasUniqueSolution(sudoku) and self.__createBlanks(sudoku, blanks - 1, validValues, i + 1):
                return sudoku
            # blanking this cell breaks uniqueness; restore and try the next one
            sudoku[index] = oldValue
        return False
class SudokuDifficulties(Enum):
    # Value = number of blank cells carved out of the completed grid
    # (passed to SudokuGenerator.__createBlanks as its 'blanks' budget).
    EASY = 43
    MEDIUM = 50
    HARD = 58
    EXPERT = 61
class SudokuDimension(Enum):
    # Value = side length of the grid (9 -> standard 9x9 Sudoku).
    CLASSIC = 9
class _SupportStructures:
    """Tracks which values are already used in each row, column and box so the
    generator can query legal placements in O(1) set operations."""

    def __init__(self, dimension):
        self.__DIM = dimension
        self.__BOXDIM = int(sqrt(dimension))
        self.__rows = [set() for _ in range(dimension)]
        self.__cols = [set() for _ in range(dimension)]
        self.__boxes = [set() for _ in range(dimension)]

    def getValidValues(self, cell):
        """Return the values that may legally be placed at *cell*."""
        row, col, box = self.__locate(cell)
        taken = self.__rows[row] | self.__cols[col] | self.__boxes[box]
        return [v for v in range(1, self.__DIM + 1) if v not in taken]

    def addValue(self, value, cell):
        """Record that *value* now occupies *cell*."""
        row, col, box = self.__locate(cell)
        for group in (self.__rows[row], self.__cols[col], self.__boxes[box]):
            group.add(value)

    def removeValue(self, value, cell):
        """Undo a previous addValue for *value* at *cell*."""
        row, col, box = self.__locate(cell)
        for group in (self.__rows[row], self.__cols[col], self.__boxes[box]):
            group.remove(value)

    def __locate(self, cell):
        # Map a flat cell index onto (row, column, box) coordinates.
        row, col = divmod(cell, self.__DIM)
        box = (row // self.__BOXDIM) * self.__BOXDIM + col // self.__BOXDIM
        return row, col, box

    def getSudokuDimension(self):
        """Side length of the grid this structure was built for."""
        return self.__DIM
| 35.641509
| 117
| 0.636051
| 456
| 3,778
| 5.111842
| 0.298246
| 0.018018
| 0.013728
| 0.018876
| 0.085371
| 0.085371
| 0.074646
| 0.06435
| 0.040326
| 0.040326
| 0
| 0.010083
| 0.264955
| 3,778
| 106
| 118
| 35.641509
| 0.829312
| 0.149021
| 0
| 0.118421
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.118421
| false
| 0
| 0.065789
| 0.013158
| 0.460526
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93e8893408bea136d9cbb5e2c4e9cac3b5b0c2f9
| 15,753
|
py
|
Python
|
stanza/coptic.py
|
CopticScriptorium/stanza
|
a16b152fce3d2cc325b7d67e03952bd00c878fe3
|
[
"Apache-2.0"
] | null | null | null |
stanza/coptic.py
|
CopticScriptorium/stanza
|
a16b152fce3d2cc325b7d67e03952bd00c878fe3
|
[
"Apache-2.0"
] | null | null | null |
stanza/coptic.py
|
CopticScriptorium/stanza
|
a16b152fce3d2cc325b7d67e03952bd00c878fe3
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import random, os
from os.path import join as j
from collections import OrderedDict
import conllu
import torch
import pathlib
import tempfile
import depedit
import stanza.models.parser as parser
from stanza.models.depparse.data import DataLoader
from stanza.models.depparse.trainer import Trainer
from stanza.models.common import utils
from stanza.models.common.pretrain import Pretrain
from stanza.models.common.doc import *
from stanza.utils.conll import CoNLL
# Absolute directory containing this module; all data paths are relative to it.
PACKAGE_BASE_DIR = pathlib.Path(__file__).parent.absolute()

# Parser arguments -----------------------------------------------------------------------------------------------------
# These args are used by stanza.models.parser. Keys should always exactly match those you'd get from the dictionary
# obtained from running stanza.models.parser.parse_args(). The values below were selected through hyperoptimization.
DEFAULT_PARSER_ARGS = {
    # general setup
    'lang': 'cop',
    'treebank': 'cop_scriptorium',
    'shorthand': 'cop_scriptorium',
    'data_dir': j(PACKAGE_BASE_DIR, 'data', 'depparse'),
    'output_file': j(PACKAGE_BASE_DIR, 'coptic_data', 'scriptorium', 'pred.conllu'),
    'seed': 1234,
    'cuda': torch.cuda.is_available(),
    'cpu': not torch.cuda.is_available(),
    'save_dir': j(PACKAGE_BASE_DIR, "..", 'stanza_models'),
    'save_name': None,
    # word embeddings
    'pretrain': True,
    'wordvec_dir': j(PACKAGE_BASE_DIR, 'coptic_data', 'wordvec'),
    'wordvec_file': j(PACKAGE_BASE_DIR, 'coptic_data', 'wordvec', 'word2vec', 'Coptic', 'coptic_50d.vec.xz'),
    'word_emb_dim': 50,
    'word_dropout': 0.3,
    # char embeddings
    'char': True,
    'char_hidden_dim': 200,
    'char_emb_dim': 50,
    'char_num_layers': 1,
    'char_rec_dropout': 0,  # very slow!
    # pos tags
    'tag_emb_dim': 5,
    'tag_type': 'gold',
    # network params
    'hidden_dim': 300,
    'deep_biaff_hidden_dim': 200,
    'composite_deep_biaff_hidden_dim': 100,
    'transformed_dim': 75,
    'num_layers': 3,
    'pretrain_max_vocab': 250000,
    'dropout': 0.5,
    'rec_dropout': 0,  # very slow!
    'linearization': True,
    'distance': True,
    # training
    'sample_train': 1.0,
    'optim': 'adam',
    'lr': 0.002,
    'beta2': 0.95,
    'max_steps': 20000,
    'eval_interval': 100,
    'max_steps_before_stop': 2000,
    'batch_size': 1500,
    'max_grad_norm': 1.0,
    'log_step': 20,
    # these need to be included or there will be an error when stanza tries to access them
    'train_file': None,
    'eval_file': None,
    'gold_file': None,
    'mode': None,
}
# Custom features ------------------------------------------------------------------------------------------------------
# Params for controlling the custom features we're feeding the network
FEATURE_CONFIG = {
    # which of FEATURE_FUNCTIONS to apply, in order
    'features': [
        'foreign_word',
        'morph_count',
        'left_morph',
        'entity',
    ],
    'foreign_word_binary': True,   # Yes/No instead of the language of origin
    'morph_count_binary': False,   # 'Many' instead of an exact morph count
    # BIOLU or BIO
    'entity_encoding_scheme': 'BIOLU',
    'entity_dropout': 0.30,        # chance a multi-token entity is hidden at train time
}
# DepEdit preprocessor which removes gold morph data and makes a few other tweaks
PREPROCESSOR = depedit.DepEdit(config_file=j(PACKAGE_BASE_DIR, "coptic_data", "depedit", "add_ud_and_flat_morph.ini"),
                               options=type('', (), {"quiet": True, "kill": "both"}))

# Load a lexicon of foreign words and initialize a lemma cache
with open(j(PACKAGE_BASE_DIR, 'coptic_data', 'lang_lexicon.tab'), 'r', encoding="utf8") as f:
    # tab-separated: word (possibly with '*' globs) -> language of origin
    FOREIGN_WORDS = {x.split('\t')[0]: x.split('\t')[1].rstrip()
                     for x in f.readlines() if '\t' in x}
# lemma -> language (or False), filled lazily by _add_foreign_word_feature
FW_CACHE = {}

# load known entities and sort in order of increasing token length
with open(j(PACKAGE_BASE_DIR, 'coptic_data', 'entities.tab'), 'r', encoding="utf8") as f:
    # ordering matters: later (longer) entities override shorter overlapping ones
    KNOWN_ENTITIES = OrderedDict(sorted(
        ((x.split('\t')[0], x.split('\t')[1]) for x in f.readlines()),
        key=lambda x: len(x[0].split(" "))
    ))
def _add_entity_feature(feature_config, sentences, predict=False):
    """Tag each token with an 'Entity' feature, BIOLU/BIO-encoded, by matching
    token spans against the KNOWN_ENTITIES gazetteer (in-place)."""
    # unless we're predicting, use dropout to pretend we don't know some entities
    dropout_entities = {
        estr: etype for estr, etype in KNOWN_ENTITIES.items()
        # three ways for an entity to not get dropped out:
        # 1. we're predicting (all tokens stay)
        # 2. it has only one token
        # 3. we roll above the dropout threshold
        if (predict
            or (' ' not in estr)
            or (random.random() >= feature_config['entity_dropout']))
    }

    def find_span_matches(tokens, pattern):
        # all (start, length) positions where `pattern` occurs as a sublist of `tokens`
        slen = len(pattern)
        matches = []
        for i in range(len(tokens) - (slen - 1)):
            if tokens[i:i + slen] == pattern:
                matches.append((i, slen))
        return matches

    def delete_conflicting(new_span, entities, entity_tags):
        # drop previously recorded entities whose spans overlap `new_span`
        overlap_exists = lambda range1, range2: set(range1).intersection(range2)
        span = lambda begin, length: list(range(begin, begin + length))
        new_span = span(*new_span)
        for i in range(len(entities) - 1, -1, -1):
            begin, length, _ = entities[i]
            # in case of overlap, remove the old entity and pop it off the list
            old_span = span(begin, length)
            if overlap_exists(new_span, old_span):
                for j in old_span:
                    entity_tags[j] = "O"
                entities.pop(i)

    def encode(new_span, entity_tags, entity_type):
        # write the tag prefix + entity type for every token of `new_span`
        assert feature_config['entity_encoding_scheme'] in ["BIOLU", "BIO"]
        if feature_config['entity_encoding_scheme'] == "BIOLU":
            unit_tag = "U-"
            begin_tag = "B-"
            inside_tag = "I-"
            last_tag = "L-"
        else:
            # plain BIO collapses U -> B and L -> I
            unit_tag = "B-"
            begin_tag = "B-"
            inside_tag = "I-"
            last_tag = "I-"
        begin, length = new_span
        if length == 1:
            entity_tags[begin] = unit_tag + entity_type
        else:
            for i in range(begin, begin + length):
                if i == begin:
                    entity_tags[i] = begin_tag + entity_type
                elif i == (begin + length - 1):
                    entity_tags[i] = last_tag + entity_type
                else:
                    entity_tags[i] = inside_tag + entity_type

    # use BIOLU encoding for entities https://github.com/taasmoe/BIO-to-BIOLU
    # in case of nesting, longer entity wins (KNOWN_ENTITIES is sorted short->long)
    for sentence in sentences:
        tokens = [t['form'] for t in sentence]
        entity_tags = (['O'] * len(tokens))
        entities = []
        for entity_string, entity_type in dropout_entities.items():
            new_spans = find_span_matches(tokens, entity_string.split(" "))
            for new_span in new_spans:
                delete_conflicting(new_span, entities, entity_tags)
                encode(new_span, entity_tags, entity_type)
                entities.append((new_span[0], new_span[1], entity_type))
        # assumes token['feats'] dicts exist -- _preprocess creates them
        for token, entity_tag in zip(sentence, entity_tags):
            token['feats']['Entity'] = entity_tag
def _add_morph_count_feature(feature_config, sentences, predict=False):
    """Attach a 'MorphCount' feature to every token: '1' when no Morphs are
    recorded in misc, otherwise the dash-separated morph count (or 'Many' in
    binary mode). Mutates the tokens and returns *sentences*."""
    binary = feature_config['morph_count_binary']
    for sent in sentences:
        for tok in sent:
            misc = tok['misc']
            if misc is None or 'Morphs' not in misc:
                count = '1'
            elif binary:
                count = 'Many'
            else:
                count = str(len(misc['Morphs'].split('-')))
            feats = tok['feats']
            feats['MorphCount'] = count
            tok['feats'] = feats
    return sentences
def _add_left_morph_feature(feature_config, sentences, predict=False):
    """Attach a 'LeftMorph' feature (the first dash-separated morph from misc)
    to every token that carries Morphs. Mutates tokens, returns *sentences*."""
    for sent in sentences:
        for tok in sent:
            misc = tok['misc']
            if misc is None or 'Morphs' not in misc:
                continue
            feats = tok['feats']
            feats['LeftMorph'] = misc['Morphs'].split('-')[0]
            tok['feats'] = feats
    return sentences
def _add_foreign_word_feature(feature_config, sentences, predict=False):
    """Attach a 'ForeignWord' feature from the FOREIGN_WORDS lexicon: 'No' for
    native lemmas, otherwise 'Yes' (binary mode) or the language of origin."""

    def _lookup(lemma):
        # Memoized lexicon lookup; a '*' at either end of an entry globs that
        # side of the word. The first matching entry wins.
        if lemma in FW_CACHE:
            return FW_CACHE[lemma]
        result = False
        for entry, lang in FOREIGN_WORDS.items():
            starts_glob = entry[0] == '*'
            ends_glob = entry[-1] == '*'
            stem = entry.replace('*', '')
            if ((starts_glob and ends_glob and stem in lemma)
                    or (starts_glob and lemma.endswith(stem))
                    or (ends_glob and lemma.startswith(stem))
                    or lemma == stem):
                result = lang
                break
        FW_CACHE[lemma] = result
        return result

    for sent in sentences:
        for tok in sent:
            feats = tok['feats']
            origin = _lookup(tok['lemma'])
            if not origin:
                feats['ForeignWord'] = 'No'
            elif feature_config['foreign_word_binary']:
                feats['ForeignWord'] = 'Yes'
            else:
                feats['ForeignWord'] = origin
            tok['feats'] = feats
    return sentences
# Dispatch table: feature name (as listed in FEATURE_CONFIG['features']) ->
# the function that adds it; all share the (feature_config, sentences, predict)
# signature and mutate sentences in place.
FEATURE_FUNCTIONS = {
    'foreign_word': _add_foreign_word_feature,
    'left_morph': _add_left_morph_feature,
    'morph_count': _add_morph_count_feature,
    'entity': _add_entity_feature,
}
def _preprocess(feature_config, conllu_string, predict):
    """Strip gold information from a CoNLL-U string, add the configured custom
    features, and return the re-serialized string."""
    # remove gold information
    stripped = PREPROCESSOR.run_depedit(conllu_string)
    # deserialize so we can add custom features
    sentences = conllu.parse(stripped)
    # make sure every token has a feats dict for the feature functions to fill
    for sentence in sentences:
        for token in sentence:
            if token['feats'] is None:
                token['feats'] = OrderedDict()
    for name in feature_config['features']:
        assert name in FEATURE_FUNCTIONS.keys()
        FEATURE_FUNCTIONS[name](feature_config, sentences, predict=predict)
    # serialize and return
    return "".join(sentence.serialize() for sentence in sentences)
def _read_conllu_arg(conllu_filepath_or_string, feature_config, gold=False, predict=False):
    """Accept either a conllu string or a path to a conllu file and return the
    name of a temp file holding the (optionally preprocessed) data.

    :param gold: skip preprocessing/feature injection when True
    :param predict: forwarded to _preprocess (disables entity dropout)
    :raises Exception: if the argument is neither valid conllu nor a path to it
    """
    try:
        conllu.parse(conllu_filepath_or_string)
        s = conllu_filepath_or_string
    # BUG FIX: was a bare `except:`; only parse/IO failures should be handled.
    except Exception:
        # not parseable as conllu -- treat it as a file path
        try:
            with open(conllu_filepath_or_string, 'r', encoding="utf8") as f:
                s = f.read()
            conllu.parse(s)
        except Exception as exc:
            raise Exception(f'"{conllu_filepath_or_string}" must either be a valid conllu string '
                            f'or a filepath to a valid conllu string') from exc
    if not gold:
        s = _preprocess(feature_config, s, predict)
    # delete=False: the caller consumes the file by name after we close it
    tempf = tempfile.NamedTemporaryFile(mode='w', encoding='utf-8', delete=False)
    tempf.write(s)
    tempf.close()
    return tempf.name
# public api -----------------------------------------------------------------------------------------------------------
def train(train, dev, save_name=None):
    """Train a new stanza model.

    :param train: either a conllu string or a path to a conllu file
    :param dev: either a conllu string or a path to a conllu file
    :param save_name: optional, a name for your model's save file, which will appear in 'stanza_models/'
    """
    feature_config = FEATURE_CONFIG.copy()
    args = dict(DEFAULT_PARSER_ARGS)
    args['mode'] = 'train'
    args['train_file'] = _read_conllu_arg(train, feature_config)
    args['eval_file'] = _read_conllu_arg(dev, feature_config)
    args['gold_file'] = _read_conllu_arg(dev, feature_config, gold=True)
    if save_name:
        args['save_name'] = save_name
    parser.train(args)
def test(test, save_name=None):
    """Evaluate using an existing stanza model.

    :param test: either a conllu string or a path to a conllu file
    :param save_name: optional, a name for your model's save file, which will appear in 'stanza_models/'
    """
    feature_config = FEATURE_CONFIG.copy()
    args = dict(DEFAULT_PARSER_ARGS)
    args['mode'] = "predict"
    args['eval_file'] = _read_conllu_arg(test, feature_config)
    args['gold_file'] = _read_conllu_arg(test, feature_config, gold=True)
    if save_name:
        args['save_name'] = save_name
    return parser.evaluate(args)
class Predictor:
    """Wrapper class so model can sit in memory for multiple predictions"""

    def __init__(self, args=None, feature_config=None):
        """Load the parser model (and pretrain embeddings) once.

        :param args: parser arg dict; defaults to a copy of DEFAULT_PARSER_ARGS
        :param feature_config: feature settings; defaults to FEATURE_CONFIG
        """
        if args is None:
            args = DEFAULT_PARSER_ARGS.copy()
        # BUG FIX: a caller-supplied feature_config used to be silently dropped
        # (self.feature_config was only assigned in the None branch), so
        # predict() crashed with AttributeError for non-default configs.
        self.feature_config = FEATURE_CONFIG.copy() if feature_config is None else feature_config
        model_file = args['save_dir'] + '/' + args['save_name'] if args['save_name'] is not None \
            else '{}/{}_parser.pt'.format(args['save_dir'], args['shorthand'])
        # load pretrain; note that we allow the pretrain_file to be non-existent
        pretrain_file = '{}/{}.pretrain.pt'.format(args['save_dir'], args['shorthand'])
        self.pretrain = Pretrain(pretrain_file)
        # load model
        print("Loading model from: {}".format(model_file))
        use_cuda = args['cuda'] and not args['cpu']
        self.trainer = Trainer(pretrain=self.pretrain, model_file=model_file, use_cuda=use_cuda)
        self.loaded_args, self.vocab = self.trainer.args, self.trainer.vocab
        self.batch_size = args['batch_size']
        # load config: path-like and mode settings override what the model saved
        for k in args:
            if k.endswith('_dir') or k.endswith('_file') or k in ['shorthand'] or k == 'mode':
                self.loaded_args[k] = args[k]

    def predict(self, eval_file_or_string):
        """Parse a conllu string (or file path) and return the predicted
        HEAD/DEPREL columns serialized back to a conllu string."""
        eval_file = _read_conllu_arg(eval_file_or_string, self.feature_config, predict=True)
        doc = Document(CoNLL.conll2dict(input_file=eval_file))
        batch = DataLoader(
            doc,
            self.batch_size,
            self.loaded_args,
            self.pretrain,
            vocab=self.vocab,
            evaluation=True,
            sort_during_eval=True
        )
        preds = []
        if len(batch) > 0:
            for i, b in enumerate(batch):
                preds += self.trainer.predict(b)
        # restore original sentence order before writing predictions back
        preds = utils.unsort(preds, batch.data_orig_idx)
        batch.doc.set([HEAD, DEPREL], [y for x in preds for y in x])
        doc_conll = CoNLL.convert_dict(batch.doc.to_dict())
        conll_string = CoNLL.conll_as_string(doc_conll)
        return conll_string
def _hyperparam_search():
    """Developer utility: TPE search (hyperopt) over parser hyperparameters,
    maximizing LAS (minimizing 1 - LAS)."""
    args = DEFAULT_PARSER_ARGS.copy()

    def trial(args):
        # NOTE(review): train()/test() in this module take conllu arguments
        # (train, dev)/(test), but are called here with a single args dict --
        # confirm this matches an older API before relying on this function.
        train(args)
        las = test(args)
        return las

    # most trials seem to converge by 6000
    args['max_steps'] = 6000
    # hyperopt is only needed for this dev utility, so import it lazily
    from hyperopt import hp, fmin, Trials, STATUS_OK, tpe
    from hyperopt.pyll import scope
    # params to search for
    space = {
        'optim': hp.choice('optim', ['sgd', 'adagrad', 'adam', 'adamax']),
        'hidden_dim': scope.int(hp.quniform('hidden_dim', 150, 400, 50)),
    }

    # f to minimize
    def f(opted_args):
        new_args = args.copy()
        new_args.update(opted_args)
        print("Trial with args:", opted_args)
        return {'loss': 1 - trial(new_args), 'status': STATUS_OK}

    trials = Trials()
    best = fmin(f, space, algo=tpe.suggest, max_evals=200, trials=trials)
    print("\nBest parameters:\n" + 30 * "=")
    print(best)
    trials = [t for t in trials]
    print("\n\nRaw trial output")
    for tt in trials:
        print(tt)
    print("\n\n")
    print("\nTrials:\n")
    # tab-separated summary: LAS followed by each searched parameter value
    for i, tt in enumerate(trials):
        if i == 0:
            print("LAS\t" + "\t".join(list(tt['misc']['vals'].keys())))
        vals = map(lambda x: str(x[0]), tt['misc']['vals'].values())
        las = str(1 - tt['result']['loss'])
        print('\t'.join([las, "\t".join(vals)]))
| 35.320628
| 120
| 0.599314
| 2,004
| 15,753
| 4.513972
| 0.221058
| 0.041676
| 0.013929
| 0.013266
| 0.234579
| 0.195888
| 0.180853
| 0.126907
| 0.086226
| 0.086226
| 0
| 0.011084
| 0.266933
| 15,753
| 445
| 121
| 35.4
| 0.772255
| 0.142386
| 0
| 0.159639
| 0
| 0
| 0.129719
| 0.014372
| 0
| 0
| 0
| 0
| 0.006024
| 1
| 0.051205
| false
| 0
| 0.054217
| 0
| 0.156627
| 0.03012
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93ebca1d1f1083aaf12c3d720e9e95e6ae2564e1
| 11,432
|
py
|
Python
|
diff.py
|
JohnAgapeyev/binary_diff
|
4e8a1eb9540af134375f171e4a5a8781d042043d
|
[
"MIT"
] | null | null | null |
diff.py
|
JohnAgapeyev/binary_diff
|
4e8a1eb9540af134375f171e4a5a8781d042043d
|
[
"MIT"
] | null | null | null |
diff.py
|
JohnAgapeyev/binary_diff
|
4e8a1eb9540af134375f171e4a5a8781d042043d
|
[
"MIT"
] | null | null | null |
#!/bin/python3
import sys
import os
import getopt
import csv
import json
import itertools
import zipfile
import tarfile
import binwalk
import collections
from heapq import nsmallest
from collections import defaultdict
import tlsh
import numpy as np
import matplotlib.pyplot as plt
from multiprocessing.dummy import Pool
from sklearn.cluster import *
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
from sklearn.externals import joblib
# Shared thread pool (multiprocessing.dummy) -- presumably for parallel hashing
# later in the script; not referenced in the code visible here (TODO confirm).
pool = Pool()
def usage():
    """Print the command-line usage string for this script."""
    print("python3 ./diff.py [file directory] [metadata file]")
def from_matrix_to_vector(i, j, N):
    """Map symmetric-matrix coordinates (i, j) of an N x N matrix onto a flat
    upper-triangle index; (i, j) and (j, i) yield the same value."""
    if i > j:
        i, j = j, i
    return i * N - (i - 1) * i / 2 + j - i
def partition_hashes(hash_list, file_list):
    """Bucket (filename, hash) pairs by the hash's quartile byte (chars 8:10).

    :param hash_list: TLSH digests, positionally aligned with file_list
    :param file_list: filenames, one per hash
    :return: dict mapping the quartile byte (int) to [(filename, hash), ...]
    """
    # BUG FIX: pair positionally instead of via hash_list.index(h) -- index()
    # is O(n) per hash (quadratic overall) and maps every duplicate hash back
    # to the FIRST file in the list.
    output = {}
    for filename, h in zip(file_list, hash_list):
        quartile_range = int(h[8:10], 16)
        output.setdefault(quartile_range, []).append((filename, h))
    return output
#Values are from the TLSH paper
def convert_dist_to_confidence(d):
    """Translate a TLSH distance into a match-confidence percentage using the
    stepwise table from the TLSH paper; distances >= 300 score 0."""
    thresholds = (
        (30, 99.99819), (40, 99.93), (50, 99.48), (60, 98.91),
        (70, 98.16), (80, 97.07), (90, 95.51), (100, 93.57),
        (150, 75.67), (200, 49.9), (250, 30.94), (300, 20.7),
    )
    for limit, confidence in thresholds:
        if d < limit:
            return confidence
    return 0
def lsh_json(data):
    """Compute the TLSH hash for one file, optionally folding in its JSON
    metadata record, archive-member hashes, and binwalk signature results.

    :param data: tuple of (filename, metadata-list-or-None)
    :return: a TLSH digest string
    :raises ValueError: if the file is smaller than 256 bytes (TLSH minimum)
    """
    filename = data[0]
    meta = []
    print(filename)
    if data[1]:  # equivalent to the old `not data[1] or data[1] == None: pass`
        # pull this file's metadata record and flatten it into key/value pairs
        stuff = [d for d in data[1] if d['filename'] == os.path.basename(filename)]
        if stuff:
            if len(stuff) >= 1:
                stuff = stuff[0]
            # NOTE(review): the index juggling below flattens three nested
            # dicts in place; preserved exactly as written -- the del offsets
            # depend on the metadata schema produced by parse_metadata_json.
            [meta.extend([k, v]) for k, v in stuff.items()]
            [meta.extend([k, v]) for k, v in meta[3].items()]
            del meta[3]
            [meta.extend([k, v]) for k, v in meta[-1].items()]
            del meta[-3]
            [meta.extend([k, v]) for k, v in meta[-4].items()]
            del meta[-6]
    if os.path.getsize(filename) < 256:
        raise ValueError("{} must be at least 256 bytes".format(filename))
    if tarfile.is_tarfile(filename):
        with tarfile.open(filename, 'r') as tar:
            for member in tar.getmembers():
                if not member or member.size < 256:
                    continue
                try:
                    meta.append(tlsh.hash(tar.extractfile(member).read()))
                    if use_binwalk:
                        for module in binwalk.scan(tar.extractfile(member).read(), signature=True, quiet=True):
                            for result in module.results:
                                meta.append(str(result.file.path))
                                meta.append(str(result.offset))
                                meta.append(str(result.description))
                except Exception:  # unreadable member: best effort, skip it
                    continue
    elif zipfile.is_zipfile(filename):
        try:
            with zipfile.ZipFile(filename) as z:
                for member in z.infolist():
                    if not member or member.file_size < 256:
                        continue
                    try:
                        # BUG FIX: ZipFile.read() returns bytes, which is not a
                        # context manager -- the old `with z.read(member) as ...`
                        # always raised and silently skipped every zip member.
                        zipdata = z.read(member)
                        meta.append(tlsh.hash(zipdata))
                        if use_binwalk:
                            for module in binwalk.scan(zipdata):
                                for result in module.results:
                                    meta.append(str(result.file.path))
                                    meta.append(str(result.offset))
                                    meta.append(str(result.description))
                    except Exception:  # unreadable member: best effort, skip it
                        continue
        except Exception:  # corrupt archive: fall through and hash the raw file
            pass
    if use_binwalk:
        for module in binwalk.scan(filename, signature=True, quiet=True):
            for result in module.results:
                meta.append(str(result.file.path))
                meta.append(str(result.offset))
                meta.append(str(result.description))
    # BUG FIX: close the file handle instead of leaking it
    with open(filename, 'rb') as fh:
        file_hash = tlsh.hash(fh.read())
    if not meta:
        return file_hash
    # fold the collected metadata into the digest
    return tlsh.hash(str.encode(file_hash + ''.join(map(str, meta))))
def diff_hash(one, two):
    """Return the TLSH distance between two digests (0 means identical)."""
    return tlsh.diff(one, two)
def list_files(directory):
    """Recursively collect the full paths of every file under *directory*."""
    return [
        os.path.join(dirpath, name)
        for dirpath, _, filenames in os.walk(directory)
        for name in filenames
    ]
def parse_metadata(filename):
    """Read a CSV metadata file, dropping the header row and the two trailing
    hash columns of every record."""
    with open(filename, 'r') as csvfile:
        # the last two columns are md5/sha1 hashes, useless for fuzzy matching
        rows = [row[:-2] for row in csv.reader(csvfile)]
    return rows[1:]
def parse_metadata_json(filename):
    """Load JSON metadata records, strip the cryptographic hash fields, and
    hoist Properties.FileName up to a top-level 'filename' key."""
    with open(filename, 'r') as jsonfile:
        metadata = json.load(jsonfile)
    for record in metadata:
        for unwanted in ('MD5', 'SHA1', 'SHA256', 'SHA512'):
            del record[unwanted]
        record['filename'] = record['Properties'].pop('FileName')
    return metadata
def flatten(d, parent_key='', sep='_'):
    """Flatten a nested mapping into a single level, joining key paths with *sep*.

    :param d: mapping, possibly containing nested mappings
    :param parent_key: prefix applied to every key (used by the recursion)
    :param sep: separator between nested key components
    :return: a new flat dict
    """
    # BUG FIX: collections.MutableMapping was a deprecated alias removed in
    # Python 3.10; the abstract class lives in collections.abc.
    from collections.abc import MutableMapping
    items = []
    for k, v in d.items():
        new_key = parent_key + sep + k if parent_key else k
        if isinstance(v, MutableMapping):
            items.extend(flatten(v, new_key, sep=sep).items())
        else:
            items.append((new_key, v))
    return dict(items)
def get_n_closest(n, filenames, adjacency):
    """For each file, return up to *n* (distance, filename) nearest neighbours
    drawn from its row of the distance matrix.

    :param n: number of neighbours to report per file
    :param filenames: file names, positionally aligned with adjacency rows
    :param adjacency: square distance matrix (list of rows or ndarray)
    :return: dict mapping filename -> [(distance, neighbour_filename), ...]
    """
    closest = {}
    for f in filenames:
        # BUG FIX: was `adj[...]` -- the global matrix -- instead of the
        # `adjacency` parameter. list() also makes ndarray rows indexable below.
        elem = list(adjacency[filenames.index(f)])
        smallest_dists = nsmallest(n + 1, elem)
        smallest_files = []
        old_dist = 0
        for d in smallest_dists:
            # Ignore the file listing itself (zero self-distance)
            if d == 0:
                continue
            elif d == old_dist:
                # this tied distance was already expanded below
                continue
            old_dist = d
            if smallest_dists.count(d) > 1:
                prev = 0
                for _ in range(smallest_dists.count(d)):
                    # BUG FIX: look the distance up in the adjacency row, whose
                    # indices line up with `filenames` -- indexing into the
                    # sorted nsmallest list returned the wrong filename.
                    dist_filename = elem.index(d, prev)
                    smallest_files.append((d, filenames[dist_filename]))
                    prev = dist_filename + 1
                continue
            # Filename indices are analogous to adjacency indices
            smallest_files.append((d, filenames[elem.index(d)]))
        closest[f] = smallest_files
    return closest
def get_partition_entry(partition_hashes, new_hash):
    """Select the partition bucket for *new_hash*, keyed on hex characters 8-10."""
    bucket = int(new_hash[8:10], 16)
    return partition_hashes[bucket]
def get_n_closest_partial(n, hash_partition, hash_list):
    """For each hash, find its n closest hashes within its partition bucket.

    :param n: maximum number of neighbours to keep per hash.
    :param hash_partition: partition table of (filename, hash) pairs,
        indexed by get_partition_entry().
    :param hash_list: the hashes to look up.
    :return: dict mapping a filename to its [(distance, filename), ...]
        neighbours, sorted by ascending distance.
    """
    closest = {}
    for h in hash_list:
        entry = get_partition_entry(hash_partition, h)
        elem = []
        filename = ""
        for k, v in entry:
            d = diff_hash(h, v)
            if d > 0:
                elem.append((d, k))
            else:
                # A non-positive distance marks the hash's own entry;
                # remember its filename so it can key the result.
                filename = k
        elem.sort(key=lambda pair: pair[0])
        # IDIOM: the original truncated with a manual index loop and break;
        # slicing the sorted list to the n nearest entries is equivalent.
        closest[filename] = elem[:n]
    return closest
# ---------------------------------------------------------------------------
# Command-line entry point.
# Options: -d/--directory (input file tree, required), -m/--metadata (JSON
# metadata file), -b/--binwalk (fold binwalk scan results into the hashes),
# -n/--number (neighbour count), -t/--test (classify against a saved model).
# ---------------------------------------------------------------------------
try:
    opts, args = getopt.getopt(sys.argv[1:], "hd:m:bn:t", ["help", "directory", "metadata", "binwalk", "number", "test"])
except getopt.GetoptError as err:
    print(err) # will print something like "option -a not recognized"
    usage()
    exit(2)
directory = ""
meta = ""
use_binwalk = False
n = 10
use_existing = False
for o, a in opts:
    if o in ("-d", "--directory"):
        directory = a
    elif o in ("-h", "--help"):
        usage()
        exit()
    elif o in ("-m", "--metadata"):
        meta = a
    elif o in ("-b", "--binwalk"):
        use_binwalk = True
    elif o in ("-n", "--number"):
        n = int(a)
    elif o in ("-t", "--test"):
        use_existing = True
if not directory:
    print("Program must be provided a file directory path")
    exit(1)
file_list = list_files(directory)
hash_list = []
if meta:
    meta_contents = parse_metadata_json(meta)
else:
    meta_contents = None
# Hash every file; each worker gets (path, shared-metadata) pairs.
hash_list = [lsh_json(x) for x in zip(file_list, itertools.repeat(meta_contents))]
if use_existing:
    # --test mode: classify the new files against the previously saved
    # model (.tmp.npz / .tmp2.pkl written by a prior training run).
    file_data = np.load(".tmp.npz")
    #See https://stackoverflow.com/questions/22315595/saving-dictionary-of-header-information-using-numpy-savez for why this syntax is needed
    clustered_files = file_data['clusters'][()]
    cluster_hashes = file_data['hash_list']
    ms = joblib.load('.tmp2.pkl')
    adj = np.zeros((len(hash_list), len(cluster_hashes)), int)
    #Compare new file hashes against saved data to get distances
    for i in range(len(hash_list)):
        for j in range(len(cluster_hashes)):
            adj[i][j] = diff_hash(hash_list[i], cluster_hashes[j]);
    cluster_labels = ms.predict(adj)
    for f in file_list:
        # Label of the predicted file cluster.
        lab = cluster_labels[file_list.index(f)]
        if lab not in clustered_files:
            print("{} does not belong to any existing cluster".format(f))
            continue
        clus = clustered_files[lab]
        print("Target file {} is in cluster {}".format(f, lab))
        for c in clus:
            print(c)
        #Empty line to separate cluster print outs
        print()
    exit()
else:
    # Training mode: build the full symmetric pairwise-distance matrix.
    adj = np.zeros((len(hash_list), len(hash_list)), int)
    for i in range(len(hash_list)):
        for j in range(len(hash_list)):
            d = diff_hash(hash_list[i], hash_list[j]);
            adj[i][j] = d
            adj[j][i] = d
best_cluster_count = 0
best_silhouette_score = -1.0
def cl(data):
    # Score one candidate cluster count with the silhouette metric.
    i, adj = data
    print("Trying cluster count {}".format(i))
    return metrics.silhouette_score(adj, MiniBatchKMeans(n_clusters=i).fit_predict(adj))
#Calculate the best cluster count in parallel
silhouette_list = Pool().map(cl, zip(range(2, 16), itertools.repeat(adj)))
# Candidate counts start at 2, so shift the argmax index back up by 2.
best_cluster_count = silhouette_list.index(max(silhouette_list)) + 2
ms = MiniBatchKMeans(n_clusters=best_cluster_count)
cluster_labels = ms.fit_predict(adj)
clustered_files = {}
for f in file_list:
    lab = cluster_labels[file_list.index(f)]
    if lab in clustered_files:
        clustered_files[lab].append(f)
    else:
        clustered_files[lab] = [f]
print(clustered_files)
# Persist the model and hashes so --test runs can reuse them.
np.savez(".tmp", clusters=clustered_files, hash_list=hash_list)
joblib.dump(ms, '.tmp2.pkl')
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print("number of estimated clusters : %d" % n_clusters_)
# Scatter-plot the first two distance-matrix columns per cluster.
# NOTE(review): this projects the n-dimensional rows onto columns 0 and 1
# only — presumably a rough visualisation, not a faithful embedding.
plt.figure(1)
plt.clf()
colors = itertools.cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
    my_members = labels == k
    cluster_center = cluster_centers[k]
    plt.plot(adj[my_members, 0], adj[my_members, 1], col + '.')
    plt.plot(cluster_center[0], cluster_center[1], '+', markerfacecolor=col,
             markeredgecolor='k', markersize=5)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| 30.485333
| 141
| 0.571641
| 1,478
| 11,432
| 4.307172
| 0.226658
| 0.023877
| 0.018379
| 0.026861
| 0.17028
| 0.125039
| 0.125039
| 0.117499
| 0.098492
| 0.083883
| 0
| 0.021536
| 0.317617
| 11,432
| 374
| 142
| 30.566845
| 0.794514
| 0.048198
| 0
| 0.190939
| 0
| 0
| 0.048947
| 0.002576
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045307
| false
| 0.006472
| 0.067961
| 0.006472
| 0.200647
| 0.035599
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93ecc85140eb083700cb75cd12da34a931dcc1e5
| 3,132
|
py
|
Python
|
proyecto_opti.py
|
rafaelfrieri1/Optimization-Project
|
20db5200cd361d358e213310c6eb2997c893ff27
|
[
"MIT"
] | null | null | null |
proyecto_opti.py
|
rafaelfrieri1/Optimization-Project
|
20db5200cd361d358e213310c6eb2997c893ff27
|
[
"MIT"
] | null | null | null |
proyecto_opti.py
|
rafaelfrieri1/Optimization-Project
|
20db5200cd361d358e213310c6eb2997c893ff27
|
[
"MIT"
] | null | null | null |
from pyomo.environ import *
import numpy as np
# cijr[r][a][b]: transport cost from node a at level r to node b at level r+1.
# fjr[r][j]: fixed opening cost of facility j at level r.
cijr = []
fjr = []
# Parse the instance file: first line is an instance count, second line
# lists the number of nodes per level; then one block of fixed costs per
# level (phase 0) followed by one cost matrix per level pair (phase 1),
# with blank lines separating the matrices.
with open("./Test_Instances/RAND2000_120-80.txt") as instanceFile:
    n = int(instanceFile.readline())
    clientsFacilitiesSizeStr = instanceFile.readline().strip().split(" ")
    instanceFile.readline()
    for phase in range(2):
        for i in range(1, len(clientsFacilitiesSizeStr)):
            if phase == 0:
                # Phase 0: read the fixed cost of each facility at level i.
                fjr.append([])
                for j in range(int(clientsFacilitiesSizeStr[i])):
                    fjr[i-1].append(float(instanceFile.readline().strip().split(" ")[0]))
            else:
                # Phase 1: read a cost matrix until a blank separator line.
                nextLine = instanceFile.readline().strip()
                cijr.append([])
                j = 0
                while(nextLine != ""):
                    nextLineNumbers = nextLine.split(" ")
                    cijr[i-1].append([])
                    for nextLinenumber in nextLineNumbers:
                        cijr[i-1][j].append(float(nextLinenumber))
                    j+=1
                    nextLine = instanceFile.readline().strip()
        # Skip the separator line between the two phases' sections.
        instanceFile.readline()
    # NOTE(review): redundant inside the with-block, which already closes
    # the file; kept for byte-identical behavior.
    instanceFile.close()
# Hand-written example instance kept for reference/debugging:
#cijr = np.array(
#    [
#        np.array([
#            [1,1000,1000],
#            [15000,2,15000],
#            [20000,20000,3],
#            [1,40000,40000]
#        ]),
#        np.array([
#            [10000,5,10000,10000,20000],
#            [30000,23000,3,24000,18000],
#            [28000,35000,21000,4,33000]
#        ]),
#        np.array([
#            [16, 14],
#            [2, 18000],
#            [20000, 3],
#            [20000, 2],
#            [24, 25]
#        ])
#    ]
#)
#fjr = np.array(
#    [
#        [10,15,20],
#        [17,20,25,30,18],
#        [48,50]
#    ]
#)
# Index sets: I clients, J first-level facilities, R facility levels
# (1-based), RM1 all levels but the last.
I = range(len(cijr[0]))
J = range(len(cijr[0][0]))
R = range(1, len(fjr)+1)
RM1 = range(1, len(fjr))
# Sparse index tuples for the decision variables.
yjrIndexes = []
zirabIndexes = []
for r in R:
    for j in range(len(fjr[r-1])):
        yjrIndexes.append((r,j))
for i in I:
    for r in RM1:
        for a in range(len(cijr[r])):
            for b in range(len(cijr[r][0])):
                zirabIndexes.append((i,r,a,b))
model = ConcreteModel()
# vij1: client i assigned to first-level facility j.
# yjr: facility j open at level r.
# zirab: client i's flow routed from node a at level r to node b at r+1.
model.vij1 = Var(I, J, domain=Binary)
model.yjr = Var(yjrIndexes, domain=Binary)
model.zirab = Var(zirabIndexes, domain=Binary)
model.constraints = ConstraintList()
for i in I:
    # Each client is assigned to exactly one first-level facility.
    model.constraints.add(sum(model.vij1[i, j] for j in J) == 1)
    for j1 in J:
        # Flow conservation at level 1 and linkage to open facilities.
        model.constraints.add(sum(model.zirab[i,1,j1,b] for b in range(len(cijr[1][0]))) == model.vij1[i,j1])
        model.constraints.add(model.vij1[i,j1] <= model.yjr[1,j1])
    for r in range(2, len(fjr) + 1):
        if(r <= len(fjr) - 1):
            # Flow conservation at intermediate levels.
            for a in range(len(cijr[r])):
                model.constraints.add(sum(model.zirab[i,r,a,b] for b in range(len(cijr[r][0]))) == sum(model.zirab[i,r-1,bp,a] for bp in range(len(cijr[r-1]))))
        # Flow may only enter facilities that are open at level r.
        for b in range(len(cijr[r-1][0])):
            model.constraints.add(sum(model.zirab[i,r-1,a,b,] for a in range(len(cijr[r-1]))) <= model.yjr[r, b])
# Minimise assignment costs + routing costs + facility opening costs.
model.objective = Objective(
    expr = sum(sum(cijr[0][i][j1]*model.vij1[i,j1] for j1 in J) for i in I) + sum(sum(sum(sum(cijr[r][a][b]*model.zirab[i,r,a,b] for b in range(len(cijr[r][0])))for a in range(len(cijr[r]))) for r in RM1) for i in I) + sum(sum(fjr[r-1][j]*model.yjr[r,j] for j in range(len(fjr[r-1]))) for r in R),
    sense=minimize
)
results = SolverFactory('cplex').solve(model)
results.write()
#if results.solver.status:
#    model.pprint()
#model.constraints.display()
| 28.472727
| 295
| 0.590358
| 495
| 3,132
| 3.731313
| 0.212121
| 0.060639
| 0.077964
| 0.075799
| 0.258798
| 0.212777
| 0.179751
| 0.120736
| 0.036816
| 0.036816
| 0
| 0.085415
| 0.207535
| 3,132
| 110
| 296
| 28.472727
| 0.658743
| 0.155811
| 0
| 0.126984
| 0
| 0
| 0.016839
| 0.013777
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.031746
| 0
| 0.031746
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93f0a5f958369eb4430a00c36a168b1783fda002
| 735
|
py
|
Python
|
portal/middleware.py
|
cds-snc/covid-alert-portal
|
e7d56fa9fa4a2ad2d60f056eae063713661bd260
|
[
"MIT"
] | 43
|
2020-07-31T14:38:06.000Z
|
2022-03-07T11:28:28.000Z
|
portal/middleware.py
|
cds-snc/covid-alert-portal
|
e7d56fa9fa4a2ad2d60f056eae063713661bd260
|
[
"MIT"
] | 322
|
2020-07-23T19:38:26.000Z
|
2022-03-31T19:15:45.000Z
|
portal/middleware.py
|
cds-snc/covid-alert-portal
|
e7d56fa9fa4a2ad2d60f056eae063713661bd260
|
[
"MIT"
] | 6
|
2020-11-28T19:30:20.000Z
|
2021-07-29T18:06:55.000Z
|
import pytz
from django.conf import settings
class TZMiddleware:
    """Django middleware that attaches a UTC-to-local-time converter to each request.

    The target timezone is taken from the ``browserTimezone`` cookie when it
    names a valid zone, otherwise from ``settings.PORTAL_LOCAL_TZ``.
    """

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        cookie_zone = request.COOKIES.get("browserTimezone")
        tz = None
        if cookie_zone:
            try:
                tz = pytz.timezone(cookie_zone)
            except pytz.UnknownTimeZoneError:
                # Unrecognised cookie value: fall through to the default.
                tz = None
        if not tz:
            tz = pytz.timezone(settings.PORTAL_LOCAL_TZ)

        def convert_to_local_tz_from_utc(utc_dttm):
            # Closes over the timezone resolved for this request.
            return utc_dttm.astimezone(tz=tz)

        request.convert_to_local_tz_from_utc = convert_to_local_tz_from_utc
        return self.get_response(request)
| 27.222222
| 75
| 0.642177
| 89
| 735
| 4.921348
| 0.393258
| 0.100457
| 0.10274
| 0.109589
| 0.157534
| 0.157534
| 0
| 0
| 0
| 0
| 0
| 0
| 0.293878
| 735
| 26
| 76
| 28.269231
| 0.843931
| 0
| 0
| 0
| 0
| 0
| 0.020408
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.15
| false
| 0.05
| 0.1
| 0.05
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93f3149ab4cf735ff8855c62f4a02835c7b351e6
| 483
|
py
|
Python
|
app/main.py
|
cesko-digital/newschatbot
|
4f47d7902433bff09b48fcebcf9ee8422eb0ec7e
|
[
"MIT"
] | 1
|
2021-04-06T16:52:36.000Z
|
2021-04-06T16:52:36.000Z
|
app/main.py
|
cesko-digital/newschatbot
|
4f47d7902433bff09b48fcebcf9ee8422eb0ec7e
|
[
"MIT"
] | 17
|
2021-05-30T17:06:48.000Z
|
2021-09-26T08:20:02.000Z
|
app/main.py
|
cesko-digital/newschatbot
|
4f47d7902433bff09b48fcebcf9ee8422eb0ec7e
|
[
"MIT"
] | null | null | null |
from flask import Flask
from flask_migrate import Migrate
from app.model import db
from app.controller import api
# Flask application setup: register the API blueprint, configure the
# database, and wire up Flask-Migrate.
app = Flask(__name__)
app.register_blueprint(api)
# SECURITY NOTE(review): this connection string embeds live database
# credentials in source control — it should be read from an environment
# variable or secret store instead.
app.config[
    "SQLALCHEMY_DATABASE_URI"
] = "postgresql://newschatbotdevelopment:Wlk8skrHKvZEbM6Gw@database.internal.newschatbot.ceskodigital.net:5432/newschatbotdevelopment"
# Disable the SQLAlchemy event system, which is unused and adds overhead.
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
migrate = Migrate(app, db)
db.init_app(app)
if __name__ == "__main__":
    app.run()
| 25.421053
| 134
| 0.797101
| 60
| 483
| 6.1
| 0.516667
| 0.04918
| 0.103825
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013857
| 0.10352
| 483
| 18
| 135
| 26.833333
| 0.831409
| 0
| 0
| 0
| 0
| 0
| 0.391304
| 0.374741
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.285714
| 0
| 0.285714
| 0.071429
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93f51b485662f69b94bcc6b67cecfbb6633cdc40
| 2,887
|
py
|
Python
|
examples/siha/sleep_intraday_dataset.py
|
qcri/tasrif
|
327bc1eccb8f8e11d8869ba65a7c72ad038aa094
|
[
"BSD-3-Clause"
] | 20
|
2021-12-06T10:41:54.000Z
|
2022-03-13T16:25:43.000Z
|
examples/siha/sleep_intraday_dataset.py
|
qcri/tasrif
|
327bc1eccb8f8e11d8869ba65a7c72ad038aa094
|
[
"BSD-3-Clause"
] | 33
|
2021-12-06T08:27:18.000Z
|
2022-03-14T05:07:53.000Z
|
examples/siha/sleep_intraday_dataset.py
|
qcri/tasrif
|
327bc1eccb8f8e11d8869ba65a7c72ad038aa094
|
[
"BSD-3-Clause"
] | 2
|
2022-02-07T08:06:48.000Z
|
2022-02-14T07:13:42.000Z
|
"""Example on how to read sleep data from SIHA
"""
import os
from tasrif.data_readers.siha_dataset import SihaDataset
from tasrif.processing_pipeline import SequenceOperator
from tasrif.processing_pipeline.custom import JqOperator
from tasrif.processing_pipeline.pandas import (
ConvertToDatetimeOperator,
JsonNormalizeOperator,
SetIndexOperator,
)
siha_folder_path = os.environ.get("SIHA_PATH")
pipeline = SequenceOperator(
[
SihaDataset(siha_folder_path, table_name="Data"),
JqOperator(
"map({patientID} + (.data.sleep[].data as $data | "
+ "($data.sleep | map(.) | .[]) | . * {levels: {overview : ($data.summary//{})}})) | "
+ "map (if .levels.data != null then . else .levels += {data: []} end) | "
+ "map(. + {type, dateOfSleep, minutesAsleep, logId, startTime, endTime, duration, isMainSleep,"
+ " minutesToFallAsleep, minutesAwake, minutesAfterWakeup, timeInBed, efficiency, infoCode})"
),
JsonNormalizeOperator(
record_path=["levels", "data"],
meta=[
"patientID",
"logId",
"dateOfSleep",
"startTime",
"endTime",
"duration",
"isMainSleep",
"minutesToFallAsleep",
"minutesAsleep",
"minutesAwake",
"minutesAfterWakeup",
"timeInBed",
"efficiency",
"type",
"infoCode",
["levels", "summary", "deep", "count"],
["levels", "summary", "deep", "minutes"],
["levels", "summary", "deep", "thirtyDayAvgMinutes"],
["levels", "summary", "wake", "count"],
["levels", "summary", "wake", "minutes"],
["levels", "summary", "wake", "thirtyDayAvgMinutes"],
["levels", "summary", "light", "count"],
["levels", "summary", "light", "minutes"],
["levels", "summary", "light", "thirtyDayAvgMinutes"],
["levels", "summary", "rem", "count"],
["levels", "summary", "rem", "minutes"],
["levels", "summary", "rem", "thirtyDayAvgMinutes"],
["levels", "overview", "totalTimeInBed"],
["levels", "overview", "totalMinutesAsleep"],
["levels", "overview", "stages", "rem"],
["levels", "overview", "stages", "deep"],
["levels", "overview", "stages", "light"],
["levels", "overview", "stages", "wake"],
],
errors="ignore",
),
ConvertToDatetimeOperator(
feature_names=["dateTime"], infer_datetime_format=True
),
SetIndexOperator("dateTime"),
]
)
df = pipeline.process()
print(df)
| 38.493333
| 108
| 0.512643
| 206
| 2,887
| 7.11165
| 0.378641
| 0.106485
| 0.049147
| 0.057338
| 0.07372
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.32629
| 2,887
| 74
| 109
| 39.013514
| 0.753213
| 0.014894
| 0
| 0.044776
| 0
| 0.029851
| 0.375749
| 0.008107
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.074627
| 0
| 0.074627
| 0.014925
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93f8e49e11b7653fd863536bebeb07d2b758a06e
| 12,788
|
py
|
Python
|
tests/data/long_statement_strings.py
|
aalto-speech/fi-parliament-tools
|
c40ab81a23c661765c380238cbf10acf733d94d4
|
[
"MIT"
] | 5
|
2021-05-19T22:56:40.000Z
|
2022-03-29T15:25:03.000Z
|
tests/data/long_statement_strings.py
|
aalto-speech/fi-parliament-tools
|
c40ab81a23c661765c380238cbf10acf733d94d4
|
[
"MIT"
] | 32
|
2021-05-10T07:58:57.000Z
|
2022-03-01T08:02:11.000Z
|
tests/data/long_statement_strings.py
|
aalto-speech/fi-parliament-tools
|
c40ab81a23c661765c380238cbf10acf733d94d4
|
[
"MIT"
] | null | null | null |
"""Long statement strings and other space consuming data definitions for testing are declared here.
This is done to avoid clutter in main test files.
"""
from typing import Dict
from typing import List
from typing import Tuple
import pytest
from _pytest.fixtures import SubRequest
from fi_parliament_tools.parsing.data_structures import MP
chairman_texts = [
"Ilmoitetaan, että valiokuntien ja kansliatoimikunnan vaalit toimitetaan ensi tiistaina 5. "
"päivänä toukokuuta kello 14 pidettävässä täysistunnossa. Ehdokaslistat näitä vaaleja varten "
"on jätettävä keskuskansliaan viimeistään ensi maanantaina 4. päivänä toukokuuta kello 12.",
"Toimi Kankaanniemen ehdotus 5 ja Krista Kiurun ehdotus 6 koskevat samaa asiaa, joten ensin "
"äänestetään Krista Kiurun ehdotuksesta 6 Toimi Kankaanniemen ehdotusta 5 vastaan ja sen "
"jälkeen voittaneesta mietintöä vastaan.",
"Kuhmosta oleva agrologi Tuomas Kettunen, joka varamiehenä Oulun vaalipiiristä on "
"tullut Antti Rantakankaan sijaan, on tänään 28.11.2019 esittänyt puhemiehelle "
"edustajavaltakirjansa ja ryhtynyt hoitamaan edustajantointaan.",
]
speaker_texts = [
"Arvoisa puhemies! Hallituksen esityksen mukaisesti on varmasti hyvä jatkaa määräaikaisesti "
"matkapuhelinliittymien telemarkkinointikieltoa. Kukaan kansalainen ei ole kyllä ainakaan "
"itselleni valittanut siitä, että enää eivät puhelinkauppiaat soittele kotiliittymiin ja "
"‑puhelimiin, ja myös operaattorit ovat olleet kohtuullisen tyytyväisiä tähän kieltoon. "
"Ongelmia on kuitenkin muussa puhelinmyynnissä ja telemarkkinoinnissa. Erityisesti "
"nettiliittymien puhelinmyynnissä on ongelmia. On aggressiivista myyntiä, ja ihmisillä on "
"epätietoisuutta siitä, mitä he ovat lopulta ostaneet. Lisäksi mielestäni on ongelmallista "
"rajata vain puhelinliittymät telemarkkinointikiellon piiriin, kun viestintä- ja "
"mobiilipalveluiden puhelinkauppa on laajempi aihe ja se on laajempi ongelma ja ongelmia on "
"tosiaan tässä muidenkin tyyppisten sopimusten myynnissä. Tämä laki tämänsisältöisenä on "
"varmasti ihan hyvä, ja on hyvä määräaikaisesti jatkaa tätä, mutta näkisin, että sitten kun "
"tämä laki on kulumassa umpeen, meidän on palattava asiaan ja on tehtävä joku lopullisempi "
"ratkaisu tästä telemarkkinoinnista. Ei voida mennä tällaisen yhden sopimusalan "
"määräaikaisuudella eteenpäin. Meidän täytyy tehdä ratkaisut, jotka ovat laajempia ja jotka "
"koskevat viestintä-, tele- ja mobiilisopimusten puhelinmyyntiä laajemmin ja muutenkin "
"puhelinmyynnin pelisääntöjä laajemmin. Varmaankin paras ratkaisu olisi se, että jatkossa "
"puhelimessa tehty ostos pitäisi varmentaa kirjallisesti esimerkiksi sähköpostilla, "
"tekstiviestillä tai kirjeellä. Meidän on ratkaistava jossain vaiheessa nämä puhelinmyynnissä "
"olevat ongelmat ja käsiteltävä asia kokonaisvaltaisesti. — Kiitos. (Hälinää)",
"Arvoisa puhemies! Pienen, vastasyntyneen lapsen ensimmäinen ote on samaan aikaan luja ja "
"hento. Siihen otteeseen kiteytyy paljon luottamusta ja vastuuta. Luottamusta siihen, että "
"molemmat vanhemmat ovat läsnä lapsen elämässä. Vastuuta siitä, että huominen on aina "
"valoisampi. Luottamus ja vastuu velvoittavat myös meitä päättäjiä. Tämän hallituksen "
"päätökset eivät perheiden kannalta ole olleet kovin hääppöisiä. Paljon on leikattu perheiden "
"arjesta, mutta toivon kipinä heräsi viime vuonna, kun hallitus ilmoitti, että se toteuttaa "
"perhevapaauudistuksen. Viime perjantaina hallituksen perheministeri kuitenkin yllättäen "
"ilmoitti, että hän keskeyttää tämän uudistuksen. Vielä suurempi hämmästys oli se syy, jonka "
"takia tämä keskeytettiin. Ministeri ilmoitti, että valmistellut mallit olisivat olleet "
"huonoja suomalaisille perheille. Perheministeri Saarikko, kun te olette vastuussa tämän "
"uudistuksen valmistelusta, niin varmasti suomalaisia perheitä kiinnostaisi tietää, miksi te "
"valmistelitte huonoja malleja.",
"Arvoisa puhemies! Lämpimät osanotot omasta ja perussuomalaisten eduskuntaryhmän "
"puolesta pitkäaikaisen kansanedustajan Maarit Feldt-Rannan omaisille ja läheisille. "
"Nuorten mielenterveysongelmat ovat vakava yhteiskunnallinen ongelma. "
"Mielenterveysongelmat ovat kasvaneet viime vuosina räjähdysmäisesti, mutta "
"terveydenhuoltoon ei ole lisätty vastaavasti resursseja, vaan hoitoonpääsy on "
"ruuhkautunut. Masennuksesta kärsii jopa 15 prosenttia nuorista, ahdistuneisuudesta 10 "
"prosenttia, ja 10—15 prosentilla on toistuvia itsetuhoisia ajatuksia. Monet näistä "
"ongelmista olisivat hoidettavissa, jos yhteiskunta ottaisi asian vakavasti. Turhan "
"usein hoitoon ei kuitenkaan pääse, vaan nuoret jätetään heitteille. Kysyn: mihin "
"toimiin hallitus ryhtyy varmistaakseen, että mielenterveysongelmista kärsiville "
"nuorille on tarjolla heidän tarvitsemansa hoito silloin kun he sitä tarvitsevat?",
]
speaker_lists = [
[
(1301, "Jani", "Mäkelä", "ps", ""),
(1108, "Juha", "Sipilä", "", "Pääministeri"),
(1301, "Jani", "Mäkelä", "ps", ""),
(1108, "Juha", "Sipilä", "", "Pääministeri"),
(1141, "Peter", "Östman", "kd", ""),
(947, "Petteri", "Orpo", "", "Valtiovarainministeri"),
(1126, "Tytti", "Tuppurainen", "sd", ""),
(1108, "Juha", "Sipilä", "", "Pääministeri"),
(1317, "Simon", "Elo", "sin", ""),
(1108, "Juha", "Sipilä", "", "Pääministeri"),
],
[
(1093, "Juho", "Eerola", "ps", ""),
(1339, "Kari", "Kulmala", "sin", ""),
(887, "Sirpa", "Paatero", "sd", ""),
(967, "Timo", "Heinonen", "kok", ""),
],
[
(971, "Johanna", "Ojala-Niemelä", "sd", ""),
(1129, "Arja", "Juvonen", "ps", ""),
(1388, "Mari", "Rantanen", "ps", ""),
(1391, "Ari", "Koponen", "ps", ""),
(1325, "Sari", "Tanus", "kd", ""),
(971, "Johanna", "Ojala-Niemelä", "sd", ""),
],
]
chairman_statements = [
{
"type": "C",
"mp_id": 0,
"firstname": "Mauri",
"lastname": "Pekkarinen",
"party": "",
"title": "Ensimmäinen varapuhemies",
"start_time": "",
"end_time": "",
"language": "",
"text": "Ainoaan käsittelyyn esitellään päiväjärjestyksen 4. asia. Käsittelyn pohjana on "
"talousvaliokunnan mietintö TaVM 18/2016 vp.",
"offset": -1.0,
"duration": -1.0,
"embedded_statement": {
"mp_id": 0,
"title": "",
"firstname": "",
"lastname": "",
"language": "",
"text": "",
"offset": -1.0,
"duration": -1.0,
},
},
{
"type": "C",
"mp_id": 0,
"firstname": "Mauri",
"lastname": "Pekkarinen",
"party": "",
"title": "Ensimmäinen varapuhemies",
"start_time": "",
"end_time": "",
"language": "",
"text": "Toiseen käsittelyyn esitellään päiväjärjestyksen 3. asia. Keskustelu asiasta "
"päättyi 6.6.2017 pidetyssä täysistunnossa. Keskustelussa on Anna Kontula Matti Semin "
"kannattamana tehnyt vastalauseen 2 mukaisen lausumaehdotuksen.",
"offset": -1.0,
"duration": -1.0,
"embedded_statement": {
"mp_id": 0,
"title": "",
"firstname": "",
"lastname": "",
"language": "",
"text": "",
"offset": -1.0,
"duration": -1.0,
},
},
{
"type": "C",
"mp_id": 0,
"firstname": "Tuula",
"lastname": "Haatainen",
"party": "",
"title": "Toinen varapuhemies",
"start_time": "",
"end_time": "",
"language": "",
"text": "Toiseen käsittelyyn esitellään päiväjärjestyksen 6. asia. Nyt voidaan hyväksyä "
"tai hylätä lakiehdotukset, joiden sisällöstä päätettiin ensimmäisessä käsittelyssä.",
"offset": -1.0,
"duration": -1.0,
"embedded_statement": {
"mp_id": 0,
"title": "",
"firstname": "",
"lastname": "",
"language": "",
"text": "",
"offset": -1.0,
"duration": -1.0,
},
},
]
embedded_statements = [
{
"mp_id": 0,
"title": "Puhemies",
"firstname": "Maria",
"lastname": "Lohela",
"language": "",
"text": "Edustaja Laukkanen, ja sitten puhujalistaan.",
"offset": -1.0,
"duration": -1.0,
},
{
"mp_id": 0,
"title": "",
"firstname": "",
"lastname": "",
"language": "",
"text": "",
"offset": -1.0,
"duration": -1.0,
},
{
"mp_id": 0,
"title": "Ensimmäinen varapuhemies",
"firstname": "Mauri",
"lastname": "Pekkarinen",
"language": "",
"text": "Tämä valtiovarainministerin puheenvuoro saattaa antaa aihetta muutamaan "
"debattipuheenvuoroon. Pyydän niitä edustajia, jotka haluavat käyttää vastauspuheenvuoron, "
"nousemaan ylös ja painamaan V-painiketta.",
"offset": -1.0,
"duration": -1.0,
},
{
"mp_id": 0,
"title": "Ensimmäinen varapuhemies",
"firstname": "Antti",
"lastname": "Rinne",
"language": "",
"text": "Meillä on puoleenyöhön vähän reilu kolme tuntia aikaa, ja valtioneuvoston pitää "
"sitä ennen soveltamisasetus saattaa voimaan. Pyydän ottamaan tämän huomioon "
"keskusteltaessa.",
"offset": -1.0,
"duration": -1.0,
},
]
mps = [
MP(
103,
"Matti",
"Ahde",
"o",
"fi",
1945,
"Sosialidemokraattinen eduskuntaryhmä",
"",
"",
"Oulu",
"Oulun läänin vaalipiiri (03/1970-06/1990), Oulun vaalipiiri (03/2003-04/2011)",
"kansakoulu, ammattikoulu, kansankorkeakoulu",
),
MP(
1432,
"Marko",
"Kilpi",
"m",
"fi",
1969,
"Parliamentary Group of the National Coalition Party",
"police officer, writer",
"Kuopio",
"Rovaniemi",
"Electoral District of Savo-Karelia (04/2019-)",
"Degree in policing",
),
MP(
1374,
"Veronica",
"Rehn-Kivi",
"f",
"sv",
1956,
"Swedish Parliamentary Group",
"architect, building supervision manager",
"Kauniainen",
"Helsinki",
"Electoral District of Uusimaa (08/2016-)",
"architect",
),
MP(
1423,
"Iiris",
"Suomela",
"f",
"fi",
1994,
"Green Parliamentary Group",
"student of social sciences",
"Tampere",
"",
"Electoral District of Pirkanmaa (04/2019-)",
"",
),
]
@pytest.fixture
def true_chairman_text(request: SubRequest) -> str:
    """Yield the parametrised long chairman statement from the module-level list."""
    return chairman_texts[request.param]
@pytest.fixture
def true_speaker_text(request: SubRequest) -> str:
    """Yield the parametrised long speaker statement from the module-level list."""
    return speaker_texts[request.param]
@pytest.fixture
def true_speaker_list(request: SubRequest) -> List[Tuple[int, str, str, str, str]]:
    """Yield the parametrised speaker list from the module-level data."""
    return speaker_lists[request.param]
@pytest.fixture
def true_chairman_statement(request: SubRequest) -> Dict[str, object]:
    """Yield the parametrised chairman statement dict from the module-level data."""
    return chairman_statements[request.param]
@pytest.fixture
def true_embedded_statement(request: SubRequest) -> Dict[str, object]:
    """Yield the parametrised embedded statement dict from the module-level data."""
    return embedded_statements[request.param]
@pytest.fixture
def true_mp(request: SubRequest) -> MP:
    """Yield the parametrised MP record from the module-level data."""
    return mps[request.param]
@pytest.fixture
def interpellation_4_2017_text() -> str:
    """Read interpellation 4/2017 text transcript from a file.

    Returns:
        str: full interpellation statement as one very long string
    """
    transcript_path = "tests/data/interpellation_4_2017_text.txt"
    with open(transcript_path, "r", encoding="utf-8") as infile:
        raw_text = infile.read()
    # Collapse the file's line breaks into spaces and trim the edges.
    return raw_text.replace("\n", " ").strip()
| 37.722714
| 100
| 0.626759
| 1,297
| 12,788
| 6.141866
| 0.508096
| 0.005021
| 0.006277
| 0.020085
| 0.224077
| 0.210394
| 0.199347
| 0.168968
| 0.158423
| 0.158423
| 0
| 0.026911
| 0.256099
| 12,788
| 338
| 101
| 37.83432
| 0.810155
| 0.057632
| 0
| 0.406667
| 0
| 0.003333
| 0.586862
| 0.025258
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023333
| false
| 0
| 0.02
| 0
| 0.066667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93fe622a14e935745be6617c3d7a3da20bbb3012
| 578
|
py
|
Python
|
venv/Lib/site-packages/fbs/_state.py
|
Acuf5928/check-
|
4b993e0bcee33434506565dab11ece3dfa9c5cab
|
[
"MIT"
] | 1
|
2020-03-30T00:08:41.000Z
|
2020-03-30T00:08:41.000Z
|
venv/Lib/site-packages/fbs/_state.py
|
Acuf5928/check-
|
4b993e0bcee33434506565dab11ece3dfa9c5cab
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/fbs/_state.py
|
Acuf5928/check-
|
4b993e0bcee33434506565dab11ece3dfa9c5cab
|
[
"MIT"
] | 2
|
2018-12-29T07:49:59.000Z
|
2020-03-18T02:44:31.000Z
|
"""
This INTERNAL module is used to manage fbs's global state. Having it here, in
one central place, allows fbs's test suite to manipulate the state to test
various scenarios.
"""
from collections import OrderedDict
# Module-level containers holding fbs's global state. They are mutated in
# place (never rebound) so references held elsewhere stay valid.
SETTINGS = {}
LOADED_PROFILES = []
COMMANDS = OrderedDict()


def get():
    """Return a snapshot of the current state as (settings, profiles, commands) copies."""
    return dict(SETTINGS), list(LOADED_PROFILES), dict(COMMANDS)


def restore(settings, loaded_profiles, commands):
    """Overwrite the global state in place from a snapshot."""
    for container, snapshot in ((SETTINGS, settings), (COMMANDS, commands)):
        container.clear()
        container.update(snapshot)
    LOADED_PROFILES[:] = list(loaded_profiles)
| 27.52381
| 77
| 0.749135
| 74
| 578
| 5.77027
| 0.567568
| 0.196721
| 0.154567
| 0.140515
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.155709
| 578
| 21
| 78
| 27.52381
| 0.875
| 0.295848
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0.076923
| 0.076923
| 0.307692
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93feb2b5aaee509b3ca59bd657fd9239d3cc9aa4
| 5,234
|
py
|
Python
|
rtk/dao/RTKMatrix.py
|
rakhimov/rtk
|
adc35e218ccfdcf3a6e3082f6a1a1d308ed4ff63
|
[
"BSD-3-Clause"
] | null | null | null |
rtk/dao/RTKMatrix.py
|
rakhimov/rtk
|
adc35e218ccfdcf3a6e3082f6a1a1d308ed4ff63
|
[
"BSD-3-Clause"
] | null | null | null |
rtk/dao/RTKMatrix.py
|
rakhimov/rtk
|
adc35e218ccfdcf3a6e3082f6a1a1d308ed4ff63
|
[
"BSD-3-Clause"
] | 2
|
2020-04-03T04:14:42.000Z
|
2021-02-22T05:30:35.000Z
|
# -*- coding: utf-8 -*-
#
# rtk.dao.RTKMatrix.py is part of The RTK Project
#
# All rights reserved.
# Copyright 2007 - 2017 Andrew Rowland andrew.rowland <AT> reliaqual <DOT> com
"""
===============================================================================
The RTKMatrix Table
===============================================================================
"""
# pylint: disable=E0401
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.orm import relationship # pylint: disable=E0401
# Import other RTK modules.
from Utilities import none_to_default # pylint: disable=E0401
from dao.RTKCommonDB import RTK_BASE # pylint: disable=E0401
class RTKMatrix(RTK_BASE):
    """
    Class to represent the rtk_matrix table in the RTK Program database.
    Matrix types are one of the following:
    +-------------+--------------+--------------+
    |  Row Table  | Column Table |  Matrix Type |
    +-------------+--------------+--------------+
    | Function    | Hardware     | fnctn_hrdwr  |
    +-------------+--------------+--------------+
    | Function    | Software     | fnctn_sftwr  |
    +-------------+--------------+--------------+
    | Function    | Validation   | fnctn_vldtn  |
    +-------------+--------------+--------------+
    | Requirement | Hardware     | rqrmnt_hrdwr |
    +-------------+--------------+--------------+
    | Requirement | Software     | rqrmnt_sftwr |
    +-------------+--------------+--------------+
    | Requirement | Validation   | rqrmnt_vldtn |
    +-------------+--------------+--------------+
    | Hardware    | Testing      | hrdwr_tstng  |
    +-------------+--------------+--------------+
    | Hardware    | Validation   | hrdwr_vldtn  |
    +-------------+--------------+--------------+
    | Software    | Risk         | sftwr_rsk    |
    +-------------+--------------+--------------+
    | Software    | Validation   | sftwr_vldtn  |
    +-------------+--------------+--------------+
    The primary key for this table consists of the revision_id, matrix_id,
    column_item_id, and row_item_id.
    This table shares a Many-to-One relationship with rtk_revision.
    """

    __tablename__ = 'rtk_matrix'
    __table_args__ = {'extend_existing': True}
    # Composite primary key: revision_id + matrix_id + column_item_id +
    # row_item_id (all marked primary_key=True below).
    revision_id = Column(
        'fld_revision_id',
        Integer,
        ForeignKey('rtk_revision.fld_revision_id'),
        primary_key=True,
        nullable=False)
    matrix_id = Column('fld_matrix_id', Integer, primary_key=True, default=0)
    column_id = Column('fld_column_id', Integer, default=0)
    column_item_id = Column(
        'fld_column_item_id', Integer, primary_key=True, default=0)
    matrix_type = Column('fld_matrix_type', String(128), default='')
    parent_id = Column('fld_parent_id', Integer, default=0)
    row_id = Column('fld_row_id', Integer, default=0)
    row_item_id = Column(
        'fld_row_item_id', Integer, primary_key=True, default=0)
    # NOTE(review): declared Integer, but set_attributes() coerces it with
    # float() — confirm the intended column type.
    value = Column('fld_value', Integer, default=0)
    # Define the relationships to other tables in the RTK Program database.
    revision = relationship('RTKRevision', back_populates='matrix')

    def get_attributes(self):
        """
        Retrieve the current values of the RTKMatrix data model attributes.
        :return: dict of {attribute name: value} pairs for revision_id,
            matrix_id, column_id, column_item_id, matrix_type, parent_id,
            row_id, row_item_id, and value.
        :rtype: dict
        """
        _attributes = {
            'revision_id': self.revision_id,
            'matrix_id': self.matrix_id,
            'column_id': self.column_id,
            'column_item_id': self.column_item_id,
            'matrix_type': self.matrix_type,
            'parent_id': self.parent_id,
            'row_id': self.row_id,
            'row_item_id': self.row_item_id,
            'value': self.value
        }
        return _attributes

    def set_attributes(self, values):
        """
        Set the RTKMatrix data model attributes.
        :param dict values: dict of attribute values keyed by attribute
            name (column_id, column_item_id, matrix_type, parent_id,
            row_id, row_item_id, value); missing keys raise KeyError,
            which is caught and reported via the return code.
        :return: (_code, _msg); the error code and error message.
        :rtype: tuple
        """
        _error_code = 0
        _msg = "RTK SUCCESS: Updating RTKMatrix {0:d} attributes.". \
            format(self.matrix_id)
        try:
            # None values are normalised to the column defaults.
            self.column_id = int(none_to_default(values['column_id'], 0))
            self.column_item_id = int(
                none_to_default(values['column_item_id'], 0))
            self.matrix_type = str(none_to_default(values['matrix_type'], ''))
            self.parent_id = int(none_to_default(values['parent_id'], 0))
            self.row_id = int(none_to_default(values['row_id'], 0))
            self.row_item_id = int(none_to_default(values['row_item_id'], 0))
            self.value = float(none_to_default(values['value'], 0.0))
        except KeyError as _err:
            _error_code = 40
            _msg = "RTK ERROR: Missing attribute {0:s} in attribute " \
                   "dictionary passed to " \
                   "RTKMatrix.set_attributes().".format(_err)
        return _error_code, _msg
| 40.261538
| 79
| 0.526175
| 540
| 5,234
| 4.824074
| 0.261111
| 0.036852
| 0.039923
| 0.051056
| 0.193858
| 0.094818
| 0.085605
| 0.026871
| 0
| 0
| 0
| 0.012095
| 0.257547
| 5,234
| 129
| 80
| 40.573643
| 0.65826
| 0.455865
| 0
| 0
| 0
| 0
| 0.186636
| 0.021121
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0.017857
| 0.071429
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93ff19152094c70f894a1b56b790e173ed1c2638
| 614
|
py
|
Python
|
tool/gitautopull.py
|
chaosannals/trial-python
|
740b91fa4b1b1b9839b7524515995a6d417612ca
|
[
"MIT"
] | null | null | null |
tool/gitautopull.py
|
chaosannals/trial-python
|
740b91fa4b1b1b9839b7524515995a6d417612ca
|
[
"MIT"
] | 8
|
2020-12-26T07:48:15.000Z
|
2022-03-12T00:25:14.000Z
|
tool/gitautopull.py
|
chaosannals/trial-python
|
740b91fa4b1b1b9839b7524515995a6d417612ca
|
[
"MIT"
] | null | null | null |
import os
import shutil
def pull_default(folder=None):
    """Run ``git pull`` in every git repository directly under *folder*.

    A sub-directory counts as a repository when it contains a ``.git``
    directory. Non-repository entries are skipped silently.

    :param folder: directory to scan; defaults to the current working
        directory when None.
    """
    cwd = os.getcwd()
    if folder is None:
        folder = cwd
    try:
        for entry in os.listdir(folder):
            project_path = os.path.join(folder, entry)
            dot_git_folder = os.path.join(project_path, '.git')
            if os.path.isdir(project_path) and os.path.isdir(dot_git_folder):
                print('[git pull start] {}'.format(project_path))
                os.chdir(project_path)
                os.system('git pull')
                print('[git pull end] {}'.format(project_path))
    finally:
        # Always restore the original working directory, even when listdir
        # or chdir raises mid-scan (the original version left the process
        # stranded in the last repository on error).
        os.chdir(cwd)
if __name__ == '__main__':
    # Guard the script entry point so importing this module does not start
    # pulling repositories; the final input() keeps a double-clicked console
    # window open until the user presses Enter.
    pull_default()
    input('按回车结束')
| 29.238095
| 65
| 0.583062
| 81
| 614
| 4.271605
| 0.345679
| 0.190751
| 0.150289
| 0.075145
| 0.138728
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.286645
| 614
| 20
| 66
| 30.7
| 0.789954
| 0
| 0
| 0
| 0
| 0
| 0.086319
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.111111
| 0
| 0.166667
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
93ffdac053f4b224bf9ac1f85bcc5aea184dd502
| 9,300
|
py
|
Python
|
emit.py
|
richardbenson91477/simile
|
aa1faa8902d24e57133cd2c9982e5d4eef6f913f
|
[
"Unlicense"
] | null | null | null |
emit.py
|
richardbenson91477/simile
|
aa1faa8902d24e57133cd2c9982e5d4eef6f913f
|
[
"Unlicense"
] | null | null | null |
emit.py
|
richardbenson91477/simile
|
aa1faa8902d24e57133cd2c9982e5d4eef6f913f
|
[
"Unlicense"
] | null | null | null |
''' code emitters '''
import out, enums as e
class s:
    ''' state '''
    # long_len: operand size in bytes (8 = 64-bit, 4 = 32-bit); set by init()
    # arg_regs / arg_regs_n: registers carrying the leading call arguments
    # regs: scratch registers; regs[0] is the accumulator
    # stack_regs: [stack pointer, frame pointer]
    pass

def init (long_len):
    """Select the register set for the requested word size.

    :param long_len: word size in bytes; 8 for x86-64, 4 for i386.
    :return: True on success, False for an unsupported word size.
    """
    s.long_len = long_len
    if long_len == 8:
        # BUGFIX: r8/r9 were missing the '%' prefix that every other
        # AT&T-syntax register name in this module carries; the emitted
        # assembly would have treated them as symbols.
        s.arg_regs = ['%rdi', '%rsi', '%rdx', '%rcx', '%r8', '%r9']
        s.arg_regs_n = len(s.arg_regs)
        s.regs = ['%rax', '%rbx', '%r10']
        s.stack_regs = ['%rsp', '%rbp']
    elif long_len == 4:
        # 32-bit mode: all call arguments go on the stack.
        s.arg_regs = []
        s.arg_regs_n = 0
        s.regs = ['%eax', '%ebx', '%ecx']
        s.stack_regs = ['%esp', '%ebp']
    else:
        out.error ('what year is this???')
        return False
    return True
def emit (fn_cur, et, val, val2 = None):
    """Emit assembly for one e.EMIT_* opcode of function *fn_cur*.

    :param fn_cur: current function context (name, flow stack, data items).
    :param et: the e.EMIT_* opcode to generate code for.
    :param val: primary operand (symbol name, literal, or condition value).
    :param val2: secondary operand (assignment source, or the argument list
        for EMIT_CALL).
    :return: True on success, False on any error.
    """
    if et == e.EMIT_DEF:
        # Function prologue: export the symbol, save and set the frame
        # pointer, zero the accumulator.
        out.put ('.section .text', i_n = 0)
        out.put ('.globl ' + val, i_n = 0)
        out.put (val + ':', i_n = 0)
        out.put ('push ' + s.stack_regs [1])
        out.put ('mov ' + s.stack_regs [0] + ', ' + s.stack_regs [1])
        out.put ('xor ' + s.regs [0] + ', ' + s.regs [0])
    elif et == e.EMIT_RET:
        if val:
            if not get_val (fn_cur, val, s.regs [0]):
                return False
        # BUGFIX: restore the frame pointer (stack_regs[1]) to match the
        # push in EMIT_DEF and the implicit epilogue in EMIT_END; popping
        # into the stack pointer (stack_regs[0]) corrupted the stack on an
        # explicit return.
        out.put ('pop ' + s.stack_regs [1])
        out.put ('ret')
    elif et == e.EMIT_END:
        # Emit the fall-through epilogue only when no explicit return
        # terminated the flow, then flush the function's data section.
        if not fn_cur.flow_ret_t:
            out.put ('pop ' + s.stack_regs [1])
            out.put ('ret')
        if fn_cur.data_n:
            out.put ('.section .data', i_n = 0)
            for datum in fn_cur.data:
                if datum._type == e.DATA_LONG:
                    out.put (datum.name_s + ': .zero ' + str(datum._len), i_n = 0)
                elif datum._type == e.DATA_LARRAY:
                    out.put (datum.name_s + ': .zero ' + str(datum._len), i_n = 0)
                elif datum._type == e.DATA_STR:
                    out.put (datum.name_s + ': .string ' + datum.val, i_n = 0)
    elif et == e.EMIT_CALL:
        # First arg_regs_n arguments go in registers, the rest are pushed;
        # pushed arguments are cleaned off the stack after the call.
        arg_n = len (val2)
        for arg_i, arg in enumerate (val2):
            if arg_i < s.arg_regs_n:
                if not get_val (fn_cur, arg, s.arg_regs [arg_i]):
                    return False
            else:
                if not get_val (fn_cur, arg, s.regs [0]):
                    return False
                out.put ('push ' + s.regs [0])
        out.put ('call ' + val)
        if arg_n > s.arg_regs_n:
            out.put ('add $' + str((arg_n - s.arg_regs_n) * s.long_len) +\
                ', ' + s.stack_regs [0])
    elif et == e.EMIT_PUSH:
        if not get_val (fn_cur, val, s.regs [0]):
            return False
        out.put ('push ' + s.regs [0])
    elif et == e.EMIT_IF:
        # Jump to the matching .else label when the condition is zero.
        if not get_val (fn_cur, val, s.regs [0]):
            return False
        out.put ('test ' + s.regs [0] + ', ' + s.regs [0])
        out.put ('jz ' + fn_cur.name_s + '.else.' +\
            str(fn_cur.flow_cur [fn_cur.flow_n - 1][1]))
    elif et == e.EMIT_ELSE:
        out.put ('jmp ' + fn_cur.name_s + '.endif.' +\
            str(fn_cur.flow_cur [fn_cur.flow_n - 1][1]))
        out.put (fn_cur.name_s + '.else.' +\
            str(fn_cur.flow_cur [fn_cur.flow_n - 1][1]) + ':', i_n = 0)
    elif et == e.EMIT_ENDIF:
        out.put (fn_cur.name_s + '.endif.' +\
            str(fn_cur.flow_cur [fn_cur.flow_n - 1][1]) + ':', i_n = 0)
    elif et == e.EMIT_WHILE:
        # Loop head label, condition test, exit jump to the .wend label.
        out.put (fn_cur.name_s + '.while.' +\
            str(fn_cur.flow_cur [fn_cur.flow_n - 1][1]) + ':', i_n = 0)
        if not get_val (fn_cur, val, s.regs [0]):
            return False
        out.put ('test ' + s.regs [0] + ', ' + s.regs [0])
        out.put ('jz ' + fn_cur.name_s + '.wend.' +\
            str(fn_cur.flow_cur [fn_cur.flow_n - 1][1]))
    elif et == e.EMIT_WEND:
        out.put ('jmp ' + fn_cur.name_s + '.while.' +\
            str(fn_cur.flow_cur [fn_cur.flow_n - 1][1]))
        out.put (fn_cur.name_s + '.wend.' +\
            str(fn_cur.flow_cur [fn_cur.flow_n - 1][1]) + ':', i_n = 0)
    elif et == e.EMIT_ADD:
        # Binary arithmetic: operand into regs[1], result accumulates in
        # regs[0].
        if not get_val (fn_cur, val, s.regs [1]):
            return False
        out.put ('add ' + s.regs [1] + ', ' + s.regs [0])
    elif et == e.EMIT_SUB:
        if not get_val (fn_cur, val, s.regs [1]):
            return False
        out.put ('sub ' + s.regs [1] + ', ' + s.regs [0])
    elif et == e.EMIT_MUL:
        if not get_val (fn_cur, val, s.regs [1]):
            return False
        out.put ('imul ' + s.regs [1] + ', ' + s.regs [0])
    elif et == e.EMIT_DIV:
        if not get_val (fn_cur, val, s.regs [1]):
            return False
        out.put ('cltd')
        out.put ('idiv ' + s.regs [1])
    elif et == e.EMIT_RES:
        # Store the accumulator into the destination named by val.
        if not set_val (fn_cur, val):
            return False
    elif et == e.EMIT_SET:
        if not get_val (fn_cur, val2, s.regs [0]):
            return False
        if not set_val (fn_cur, val):
            return False
    elif et == e.EMIT_ADDTO:
        # Compound assignment: val = val OP val2, result written back.
        if not get_val (fn_cur, val2, s.regs [1]):
            return False
        if not get_val (fn_cur, val, s.regs [0]):
            return False
        out.put ('add ' + s.regs [1] + ', ' + s.regs [0])
        if not set_val (fn_cur, val):
            return False
    elif et == e.EMIT_SUBFROM:
        if not get_val (fn_cur, val2, s.regs [1]):
            return False
        if not get_val (fn_cur, val, s.regs [0]):
            return False
        out.put ('sub ' + s.regs [1] + ', ' + s.regs [0])
        if not set_val (fn_cur, val):
            return False
    elif et == e.EMIT_MULTO:
        if not get_val (fn_cur, val2, s.regs [1]):
            return False
        if not get_val (fn_cur, val, s.regs [0]):
            return False
        out.put ('imul ' + s.regs [1] + ', ' + s.regs [0])
        if not set_val (fn_cur, val):
            return False
    elif et == e.EMIT_DIVFROM:
        if not get_val (fn_cur, val2, s.regs [1]):
            return False
        if not get_val (fn_cur, val, s.regs [0]):
            return False
        out.put ('cltd')
        out.put ('idiv ' + s.regs [1])
        if not set_val (fn_cur, val):
            return False
    else:
        # BUGFIX: typo 'uknown' -> 'unknown'.
        # NOTE(review): get_val/set_val report errors via out.error; consider
        # the same channel here -- kept as out.put to preserve behavior.
        out.put ('unknown emit type')
        return False
    return True
def get_val (fn_cur, val, reg):
    """Emit code that loads value token *val* into register *reg*.

    Long-array and string literals define a fresh data item on *fn_cur*;
    variables resolve to either an argument slot or a data label.

    :return: True on success, False on an unknown value type, an
        undefined variable, or an unsupported argument dereference.
    """
    val_type = get_val_type (val)
    if not val_type:
        out.error ('unknown val type "' + val + '"')
        return False
    elif val_type == e.VAL_LARRAY:
        # Long-array literal: allocate a '.l<N>' data item, load its address.
        datum = fn_cur.def_data ('.l' + str(fn_cur.data_larray_n),\
            e.DATA_LARRAY, val)
        out.put ('mov ' + '$' + datum.name_s + ', ' + reg)
    elif val_type == e.VAL_STR:
        # String literal: same pattern with a '.s<N>' label.
        datum = fn_cur.def_data ('.s' + str(fn_cur.data_str_n),\
            e.DATA_STR, val)
        out.put ('mov ' + '$' + datum.name_s + ', ' + reg)
    elif val_type == e.VAL_LONG:
        # Numeric literal becomes an immediate operand.
        out.put ('mov $' + val + ', ' + reg)
    elif val_type == e.VAL_VAR:
        arg_i = fn_cur.get_arg (val)
        if arg_i:
            # get_arg() indices are 1-based here (0/falsy means "not an
            # argument"); convert to 0-based for register selection.
            arg_i -= 1
            if arg_i < s.arg_regs_n:
                _s = s.arg_regs [arg_i]
            else:
                # Argument lives on the stack, addressed off the frame pointer.
                _s = str((arg_i + 1) * s.long_len) + '(' + s.stack_regs [1] +\
                    ')'
            out.put ('mov ' + _s + ', ' + reg)
        else:
            var = fn_cur.get_or_def_var (val)
            if not var:
                return False
            out.put ('mov ' + var.datum.name_s + ', ' + reg)
    elif val_type == e.VAL_VAR_DEREF:
        # '@name': load the variable's value, then dereference it into reg.
        _n = val [1:]
        arg_i = fn_cur.get_arg (_n)
        if arg_i:
            # TODO support this
            out.error ('dereferencing arg')
            return False
        else:
            var = fn_cur.get_or_def_var (_n)
            if not var:
                return False
            out.put ('mov ' + var.datum.name_s + ', ' + reg)
            out.put ('mov (' + reg + '), ' + reg)
    return True
def set_val (fn_cur, val):
    """Emit code that stores the accumulator (regs[0]) into *val*.

    Only variables and variable dereferences are assignable; literals are
    rejected. regs[2] is used as a scratch register for dereferences.

    :return: True on success, False when *val* is not assignable or the
        variable cannot be resolved.
    """
    reg0 = s.regs [0]
    reg2 = s.regs [2]
    val_type = get_val_type (val)
    if \
        val_type == e.VAL_STR or\
        val_type == e.VAL_LARRAY or\
        val_type == e.VAL_LONG:
        out.error ('can\'t assign to this type')
        return False
    elif val_type == e.VAL_VAR:
        arg_i = fn_cur.get_arg (val)
        if arg_i:
            # Argument destination: register slot or frame-pointer-relative
            # stack slot, mirroring the lookup logic in get_val().
            arg_i -= 1
            if arg_i < s.arg_regs_n:
                _s = s.arg_regs [arg_i]
            else:
                _s = str((arg_i + 1) * s.long_len) + '(' + s.stack_regs [1] +\
                    ')'
            out.put ('mov ' + reg0 + ', ' + _s)
        else:
            var = fn_cur.get_or_def_var (val)
            if not var:
                return False
            out.put ('mov ' + reg0 + ', ' + var.datum.name_s)
    elif val_type == e.VAL_VAR_DEREF:
        # '@name': store through the pointer held by the variable.
        _n = val [1:]
        arg_i = fn_cur.get_arg (_n)
        if arg_i:
            out.error ('can\'t modify function arg')
            return False
        else:
            var = fn_cur.get_or_def_var (_n)
            if not var:
                return False
            out.put ('mov ' + var.datum.name_s + ', ' + reg2)
            out.put ('mov ' + reg0 + ', (' + reg2 + ')')
    return True
def get_val_type (_s):
    """Classify token *_s* into one of the e.VAL_* kinds by its first
    character: digits/'-'/char-literal -> LONG, '[' -> LARRAY, '"' -> STR,
    '@' -> VAR_DEREF; anything else is a plain variable."""
    if not _s:
        return e.VAL_NONE
    head = _s [0]
    if head == '-' or head == "'" or _s.isdigit ():
        return e.VAL_LONG
    if head == '[':
        return e.VAL_LARRAY
    if head == '"':
        return e.VAL_STR
    if head == '@':
        return e.VAL_VAR_DEREF
    return e.VAL_VAR
| 30.693069
| 78
| 0.475484
| 1,406
| 9,300
| 2.920341
| 0.086771
| 0.081588
| 0.052606
| 0.05358
| 0.749391
| 0.688505
| 0.63322
| 0.594496
| 0.57964
| 0.566245
| 0
| 0.018797
| 0.370753
| 9,300
| 302
| 79
| 30.794702
| 0.682843
| 0.009247
| 0
| 0.550607
| 0
| 0
| 0.052305
| 0
| 0
| 0
| 0
| 0.003311
| 0
| 1
| 0.020243
| false
| 0.004049
| 0.004049
| 0
| 0.210526
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e0bbeb93835b36e23fb310038a044e9818c4553
| 13,451
|
py
|
Python
|
kecpkg/commands/sign.py
|
jberends/kecpkg-tools
|
3c288c5b91b619fe76cd3622615f3ffe43509725
|
[
"Apache-2.0"
] | null | null | null |
kecpkg/commands/sign.py
|
jberends/kecpkg-tools
|
3c288c5b91b619fe76cd3622615f3ffe43509725
|
[
"Apache-2.0"
] | 7
|
2017-12-07T11:16:07.000Z
|
2019-12-11T15:25:07.000Z
|
kecpkg/commands/sign.py
|
KE-works/kecpkg-tools
|
3c288c5b91b619fe76cd3622615f3ffe43509725
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
from pprint import pprint
import click
from pykechain.utils import temp_chdir
from kecpkg.commands.utils import CONTEXT_SETTINGS
from kecpkg.gpg import get_gpg, list_keys, hash_of_file
from kecpkg.settings import SETTINGS_FILENAME, GNUPG_KECPKG_HOME, load_settings, DEFAULT_SETTINGS, ARTIFACTS_FILENAME, \
ARTIFACTS_SIG_FILENAME
from kecpkg.utils import remove_path, echo_info, echo_success, echo_failure, get_package_dir, unzip_package
@click.command(context_settings=CONTEXT_SETTINGS,
               short_help="Perform package signing and key management.")
@click.argument('package', required=False)
@click.option('--settings', '--config', '-s', 'settings_filename',
              help="path to the setting file (default `{}`".format(SETTINGS_FILENAME),
              type=click.Path(), default=SETTINGS_FILENAME)
@click.option('--keyid', '--key-id', '-k', 'keyid',
              help="ID (name, email, KeyID) of the cryptographic key to do the operation with. ")
# @click.option('--passphrase', '-p', 'sign_passphrase', hide_input=True,
#               help="Passphrase of the cryptographic key to sign the contents of the package. "
#                    "Use in combination with `--sign` and `--keyid`")
@click.option('--import-key', '--import', '-i', 'do_import', type=click.Path(exists=True),
              help="Import secret keyfile (in .asc) to the KECPKG keyring which will be used for signing. "
                   "You can export a created key in gpg with `gpg -a --export-secret-key [keyID] > secret_key.asc`.")
@click.option('--delete-key', '-d', 'do_delete_key',
              help="Delete key by its fingerprint permanently from the KECPKG keyring. To retrieve the full "
                   "fingerprint of the key, use the `--list` option and look at the 'fingerprint' section.")
@click.option('--create-key', '-c', 'do_create_key', is_flag=True,
              help="Create secret key and add it to the KECPKG keyring.")
@click.option('--export-key', '--export', '-e', 'do_export_key', type=click.Path(),
              help="Export public key to filename with `--keyid KeyID` in .ASC format for public distribution.")
@click.option('--clear-keyring', 'do_clear', is_flag=True, default=False,
              help="Clear all keys from the KECPKG keyring")
@click.option('--list', '-l', 'do_list', is_flag=True,
              help="List all available keys in the KECPKG keyring")
@click.option('--verify-kecpkg', 'do_verify_kecpkg', type=click.Path(exists=True),
              help="Verify contents and signature of an existing kecpkg.")
@click.option('--yes', '-y', 'do_yes', is_flag=True,
              help="Don't ask questions, just do it.")
@click.option('-v', '--verbose', help="Be more verbose", is_flag=True)
def sign(package=None, **options):
    """Sign the package.

    Dispatches to exactly one key-management or verification action based
    on which option was provided (--clear-keyring, --list, --import-key,
    --delete-key, --create-key, --export-key or --verify-kecpkg) and then
    exits the process; exits with status 500 when no action was requested.
    """
    # noinspection PyShadowingNames
    def _do_clear(options):
        """Wipe the whole KECPKG keyring from disk (asks unless --yes)."""
        echo_info("Clearing all keys from the KECPKG keyring")
        if not options.get('do_yes'):
            options['do_yes'] = click.confirm("Are you sure you want to clear the KECPKG keyring?", default=False)
        if options.get('do_yes'):
            remove_path(GNUPG_KECPKG_HOME)
            echo_success("Completed")
            sys.exit(0)
        else:
            echo_failure("Not removing the KECPKG keyring")
            sys.exit(1)

    def _do_list(gpg, explain=False):
        """Print a table of the secret keys in the KECPKG keyring; exits
        with status 1 when the keyring is empty."""
        if explain:
            echo_info("Listing all keys from the KECPKG keyring")
        result = gpg.list_keys(secret=True)
        if len(result):
            from tabulate import tabulate
            print(tabulate(list_keys(gpg=gpg), headers=("Name", "Comment", "E-mail", "Expires", "Fingerprint")))
        else:
            if explain:
                echo_info("No keys found in KECPKG keyring. Use `--import-key` or `--create-key` to add a "
                          "secret key to the KECPKG keyring in order to sign KECPKG's.")
                sys.exit(1)

    # noinspection PyShadowingNames
    def _do_import(gpg, options):
        """Import a secret key from an .asc file into the KECPKG keyring."""
        echo_info("Importing secret key into KECPKG keyring from '{}'".format(options.get('do_import')))
        result = gpg.import_keys(open(os.path.abspath(options.get('do_import')), 'rb').read())
        # pprint(result.__dict__)
        if result and result.sec_imported:
            echo_success("Succesfully imported secret key into the KECPKG keystore")
            _do_list(gpg=gpg)
            sys.exit(0)
        elif result and result.unchanged:
            echo_failure("Did not import the secret key into the KECPKG keystore. The key was already "
                         "in place and was unchanged")
            _do_list(gpg=gpg)
            sys.exit(1)
        echo_failure("Did not import a secret key into the KECPKG keystore. Is something wrong "
                     "with the file: '{}'? Are you sure it is a ASCII file containing a "
                     "private key block?".format(options.get('do_import')))
        sys.exit(1)

    # noinspection PyShadowingNames
    def _do_delete_key(gpg, options):
        """Delete both the secret and public key for a fingerprint from the
        KECPKG keyring."""
        echo_info("Deleting private key with ID '{}' from the KECPKG keyring".format(options.get('do_delete_key')))
        # custom call to gpg using --delete-secret-and-public-key
        result = gpg.result_map['delete'](gpg)
        # noinspection PyProtectedMember
        p = gpg._open_subprocess(['--yes', '--delete-secret-and-public-key', options.get('do_delete_key')])
        # noinspection PyProtectedMember
        gpg._collect_output(p, result, stdin=p.stdin)
        # result = gpg.delete_keys(fingerprints=options.get('do_delete_key'),
        #                          secret=True,
        #                          passphrase=options.get('sign_passphrase'))
        # pprint(result.__dict__)
        if result and result.stderr.find("failed") < 0:
            echo_success("Succesfully deleted key")
            _do_list(gpg=gpg)
            sys.exit(0)
        echo_failure("Could not delete key.")
        sys.exit(1)

    # noinspection PyShadowingNames
    def _do_create_key(gpg, options):
        """Interactively create a new RSA signing key in the KECPKG keyring,
        prefilling prompts from the package settings when available."""
        echo_info("Will create a secret key and store it into the KECPKG keyring.")
        package_dir = get_package_dir(package_name=package, fail=False)
        settings = DEFAULT_SETTINGS
        if package_dir is not None:
            package_name = os.path.basename(package_dir)
            echo_info('Package `{}` has been selected'.format(package_name))
            settings = load_settings(package_dir=package_dir, settings_filename=options.get('settings_filename'))
        key_info = {'name_real': click.prompt("Name", default=settings.get('name')),
                    'name_comment': click.prompt("Comment", default="KECPKG SIGNING KEY"),
                    'name_email': click.prompt("Email", default=settings.get('email')),
                    'expire_date': click.prompt("Expiration in months", default=12,
                                                value_proc=lambda i: "{}m".format(i)), 'key_type': 'RSA',
                    'key_length': 4096,
                    'key_usage': '',
                    'subkey_type': 'RSA',
                    'subkey_length': 4096,
                    'subkey_usage': 'encrypt,sign,auth',
                    'passphrase': ''}
        passphrase = click.prompt("Passphrase", hide_input=True)
        passphrase_confirmed = click.prompt("Confirm passphrase", hide_input=True)
        if passphrase == passphrase_confirmed:
            key_info['passphrase'] = passphrase
        else:
            raise ValueError("The passphrases did not match.")
        echo_info("Creating the secret key '{name_real} ({name_comment}) <{name_email}>'".format(**key_info))
        echo_info("Please move around mouse or generate other activity to introduce sufficient entropy. "
                  "This might take a minute...")
        result = gpg.gen_key(gpg.gen_key_input(**key_info))
        pprint(result.__dict__)
        # NOTE(review): str.find returns -1 (truthy) when 'KEY_CREATED' is
        # absent and 0 (falsy) when it is at the start of stderr, so this
        # truthiness check looks inverted for those edge cases -- confirm.
        if result and result.stderr.find('KEY_CREATED'):
            echo_success("The key is succesfully created")
            _do_list(gpg=gpg)
            sys.exit(0)
        echo_failure("Could not generate the key due to an error: '{}'".format(result.stderr))
        sys.exit(1)

    # noinspection PyShadowingNames
    def _do_export_key(gpg, options):
        """Export public key."""
        echo_info("Exporting public key")
        if options.get('keyid') is None:
            _do_list(gpg=gpg)
            options['keyid'] = click.prompt("Provide KeyId (name, comment, email, fingerprint) of the key to export")
        result = gpg.export_keys(keyids=[options.get('keyid')], secret=False, armor=True)
        if result is not None:
            with open(options.get('do_export_key'), 'w') as fd:
                fd.write(result)
            echo_success("Sucessfully written public key to '{}'".format(options.get('do_export_key')))
            sys.exit(0)
        echo_failure("Could not export key")
        sys.exit(1)

    # noinspection PyShadowingNames
    def _do_verify_kecpkg(gpg, options):
        """Verify the kecpkg."""
        echo_info("Verify the contents of the KECPKG and if the KECPKG is signed with a valid signature.")
        current_working_directory = os.getcwd()
        # Unzip into a temp dir so the signature and hashes can be checked
        # against the extracted artifacts.
        with temp_chdir() as d:
            unzip_package(package_path=os.path.join(current_working_directory, options.get('do_verify_kecpkg')),
                          target_path=d)
            verify_signature(d, artifacts_filename=ARTIFACTS_FILENAME, artifacts_sig_filename=ARTIFACTS_SIG_FILENAME)
            verify_artifacts_hashes(d, artifacts_filename=ARTIFACTS_FILENAME)
        sys.exit(0)

    #
    # Dispatcher to subfunctions
    #
    if options.get('do_clear'):
        _do_clear(options=options)
    elif options.get('do_list'):
        _do_list(gpg=get_gpg(), explain=True)
    elif options.get('do_import'):
        _do_import(gpg=get_gpg(), options=options)
    elif options.get('do_delete_key'):
        _do_delete_key(gpg=get_gpg(), options=options)
    elif options.get('do_create_key'):
        _do_create_key(gpg=get_gpg(), options=options)
    elif options.get('do_export_key'):
        _do_export_key(gpg=get_gpg(), options=options)
    elif options.get('do_verify_kecpkg'):
        _do_verify_kecpkg(gpg=get_gpg(), options=options)
    else:
        sys.exit(500)
    sys.exit(0)
def verify_signature(package_dir, artifacts_filename, artifacts_sig_filename):
    """
    Check signature of the package.

    Exits the process with status 1 when the artifacts file or its
    signature file is missing, or when the signature does not verify.

    :param package_dir: directory fullpath of the package
    :param artifacts_filename: path of the artifacts file
    :param artifacts_sig_filename: path of the artifacts signature file
    :return: None
    """
    gpg = get_gpg()
    artifacts_fp = os.path.join(package_dir, artifacts_filename)
    artifacts_sig_fp = os.path.join(package_dir, artifacts_sig_filename)
    if not os.path.exists(artifacts_fp):
        echo_failure("Artifacts file does not exist: '{}'".format(artifacts_filename))
        sys.exit(1)
    if not os.path.exists(artifacts_sig_fp):
        # BUGFIX: report the signature filename here, not the artifacts
        # filename.
        echo_failure("Artifacts signature file does not exist: '{}'. Is the package signed?".
                     format(artifacts_sig_filename))
        sys.exit(1)
    with open(artifacts_sig_fp, 'rb') as sig_fd:
        results = gpg.verify_file(sig_fd, data_filename=artifacts_fp)
    if results.valid:
        echo_info("Verified the signature and the signature is valid")
        echo_info("Signed with: '{}'".format(results.username))
    else:
        echo_failure("Signature of the package is invalid")
        # BUGFIX: pprint() returns None, so echo_failure(pprint(...)) echoed
        # "None"; print the verification details directly instead.
        pprint(results.__dict__)
        sys.exit(1)
def verify_artifacts_hashes(package_dir, artifacts_filename):
    """
    Check the hashes of the artifacts in the package.

    :param package_dir: directory fullpath of the package
    :param artifacts_filename: filename of the artifacts file
    :return: None; exits the process with status 1 on any mismatch.
    """
    artifacts_fp = os.path.join(package_dir, artifacts_filename)
    if not os.path.exists(artifacts_fp):
        echo_failure("Artifacts file does not exist: '{}'".format(artifacts_filename))
        sys.exit(1)
    with open(artifacts_fp, 'r') as fd:
        artifacts = fd.readlines()

    # process the file contents
    # A line is "README.md,sha256=d831....ccf79a,336"
    #            ^filename ^algo  ^hash          ^size in bytes
    fails = []
    for af in artifacts:
        # `hash_entry` renamed from `hash` so the builtin is not shadowed.
        filename, hash_entry, orig_size = af.split(',')
        algorithm, orig_hash = hash_entry.split('=')
        fp = os.path.join(package_dir, filename)
        if os.path.exists(fp):
            found_hash = hash_of_file(fp, algorithm)
            found_size = os.stat(fp).st_size
            if found_hash != orig_hash.strip() or found_size != int(orig_size.strip()):
                fails.append("File '{}' is changed in the package.".format(filename))
                fails.append("File '{}' original checksum: '{}', found: '{}'".format(filename, orig_hash, found_hash))
                fails.append("File '{}' original size: {}, found: {}".format(filename, orig_size, found_size))
        else:
            fails.append("File '{}' does not exist".format(filename))

    if fails:
        echo_failure('The package has been changed after building the package.')
        for fail in fails:
            print(fail)
        sys.exit(1)
    else:
        # BUGFIX: fixed 'succesfully' typo in the user-facing message.
        echo_info("Package contents successfully verified.")
| 46.867596
| 120
| 0.63564
| 1,703
| 13,451
| 4.840282
| 0.182032
| 0.026689
| 0.026204
| 0.02402
| 0.273323
| 0.186219
| 0.140968
| 0.11258
| 0.101905
| 0.068179
| 0
| 0.004342
| 0.246673
| 13,451
| 286
| 121
| 47.031469
| 0.809138
| 0.105494
| 0
| 0.180095
| 0
| 0.009479
| 0.297193
| 0.002514
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047393
| false
| 0.028436
| 0.109005
| 0
| 0.156398
| 0.037915
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e0dd95d1aaf80cae2655fcee6b6427ac437b94c
| 10,563
|
py
|
Python
|
doctor/lib/utils.py
|
freelawproject/doctor
|
3858b6f5de7903353f4376303329a986db5b7983
|
[
"BSD-2-Clause"
] | null | null | null |
doctor/lib/utils.py
|
freelawproject/doctor
|
3858b6f5de7903353f4376303329a986db5b7983
|
[
"BSD-2-Clause"
] | null | null | null |
doctor/lib/utils.py
|
freelawproject/doctor
|
3858b6f5de7903353f4376303329a986db5b7983
|
[
"BSD-2-Clause"
] | null | null | null |
import datetime
import functools
import io
import os
import re
import subprocess
import warnings
from collections import namedtuple
from decimal import Decimal
from pathlib import Path

import six
from PyPDF2 import PdfFileMerger
from reportlab.pdfgen import canvas
class DoctorUnicodeDecodeError(UnicodeDecodeError):
    """UnicodeDecodeError that also reports the offending object and its
    type in the error message."""

    def __init__(self, obj, *args):
        # *args are the standard UnicodeDecodeError arguments
        # (encoding, object, start, end, reason).
        super().__init__(*args)
        self.obj = obj

    def __str__(self):
        base_message = UnicodeDecodeError.__str__(self)
        return f"{base_message}. You passed in {self.obj!r} ({type(self.obj)})"
def force_bytes(s, encoding="utf-8", strings_only=False, errors="strict"):
    """
    Similar to smart_bytes, except that lazy instances are resolved to
    strings, rather than kept as lazy objects.

    If strings_only is True, don't convert (some) non-string-like objects.

    :param s: the object to coerce to bytes.
    :param encoding: target codec for the returned bytes.
    :param strings_only: when True, pass protected types (None, numbers,
        datetimes -- see _PROTECTED_TYPES) through unchanged.
    :param errors: codec error handling mode.
    """
    # Handle the common case first for performance reasons.
    if isinstance(s, bytes):
        if encoding == "utf-8":
            return s
        else:
            # Transcode byte input that is not already in the target codec.
            return s.decode("utf-8", errors).encode(encoding, errors)
    if strings_only and is_protected_type(s):
        return s
    if isinstance(s, six.memoryview):
        return bytes(s)
    if isinstance(s, Promise):
        # Lazy translation objects: resolve to text, then encode.
        return six.text_type(s).encode(encoding, errors)
    if not isinstance(s, six.string_types):
        try:
            if six.PY3:
                return six.text_type(s).encode(encoding)
            else:
                return bytes(s)
        except UnicodeEncodeError:
            if isinstance(s, Exception):
                # An Exception subclass containing non-ASCII data that doesn't
                # know how to print itself properly. We shouldn't raise a
                # further exception.
                return b" ".join(
                    force_bytes(arg, encoding, strings_only, errors) for arg in s
                )
            return six.text_type(s).encode(encoding, errors)
    else:
        return s.encode(encoding, errors)
def force_text(s, encoding="utf-8", strings_only=False, errors="strict"):
    """
    Similar to smart_text, except that lazy instances are resolved to
    strings, rather than kept as lazy objects.

    If strings_only is True, don't convert (some) non-string-like objects.

    :param s: the object to coerce to text.
    :param encoding: codec used to decode byte input.
    :param strings_only: when True, pass protected types through unchanged.
    :param errors: codec error handling mode.
    :raises DoctorUnicodeDecodeError: when *s* cannot be decoded.
    """
    # Handle the common case first for performance reasons.
    if issubclass(type(s), six.text_type):
        return s
    if strings_only and is_protected_type(s):
        return s
    try:
        if not issubclass(type(s), six.string_types):
            if six.PY3:
                if isinstance(s, bytes):
                    s = six.text_type(s, encoding, errors)
                else:
                    s = six.text_type(s)
            elif hasattr(s, "__unicode__"):
                s = six.text_type(s)
            else:
                s = six.text_type(bytes(s), encoding, errors)
        else:
            # Note: We use .decode() here, instead of six.text_type(s, encoding,
            # errors), so that if s is a SafeBytes, it ends up being a
            # SafeText at the end.
            s = s.decode(encoding, errors)
    except UnicodeDecodeError as e:
        if not isinstance(s, Exception):
            # Re-raise with the offending object attached for debugging.
            raise DoctorUnicodeDecodeError(s, *e.args)
        else:
            # If we get to here, the caller has passed in an Exception
            # subclass populated with non-ASCII bytestring data without a
            # working unicode method. Try to handle this without raising a
            # further exception by individually forcing the exception args
            # to unicode.
            s = " ".join(force_text(arg, encoding, strings_only, errors) for arg in s)
    return s
def smart_text(s, encoding="utf-8", strings_only=False, errors="strict"):
    """
    Returns a text object representing 's' -- unicode on Python 2 and str on
    Python 3. Treats bytestrings using the 'encoding' codec.

    If strings_only is True, don't convert (some) non-string-like objects.
    """
    if isinstance(s, Promise):
        # The input is the result of a gettext_lazy() call.
        return s
    return force_text(s, encoding, strings_only, errors)
class Promise(object):
    """
    This is just a base class for the proxy class created in
    the closure of the lazy function. It can be used to recognize
    promises in code.

    Instances are detected (via isinstance) by force_bytes and smart_text
    above and handled specially instead of being converted eagerly.
    """

    pass
# Types passed through unchanged by force_text()/force_bytes() when
# strings_only=True; see is_protected_type() below.
_PROTECTED_TYPES = six.integer_types + (
    type(None),
    float,
    Decimal,
    datetime.datetime,
    datetime.date,
    datetime.time,
)
def is_protected_type(obj):
    """Determine if the object instance is of a protected type.

    Objects of protected types are preserved as-is when passed to
    force_text(strings_only=True).
    """
    return isinstance(obj, _PROTECTED_TYPES)
def audio_encoder(data):
    """Wrap mapping *data* in an immutable ``AudioFile`` namedtuple whose
    fields are the mapping's keys."""
    AudioFile = namedtuple("AudioFile", data.keys())
    return AudioFile(*data.values())
def ignore_warnings(test_func):
    """Decorator that silences ResourceWarning and DeprecationWarning while
    the wrapped test method runs.

    :param test_func: the test method to wrap (bound-style: first positional
        argument is ``self``).
    :return: the wrapped callable.
    """
    # functools.wraps preserves the test's __name__/__doc__ so test runners
    # report the real test instead of "do_test".
    @functools.wraps(test_func)
    def do_test(self, *args, **kwargs):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", ResourceWarning)
            warnings.simplefilter("ignore", DeprecationWarning)
            # Propagate the wrapped function's return value (the original
            # wrapper silently discarded it).
            return test_func(self, *args, **kwargs)

    return do_test
def make_png_thumbnail_for_instance(filepath, max_dimension):
    """Abstract function for making a thumbnail for a PDF

    See helper functions below for how to use this in a simple way.

    :param filepath: The attr where the PDF is located on the item
    :param max_dimension: The longest you want any edge to be
    :return: (stdout bytes, decoded stderr text, return code as str) from
        the pdftoppm subprocess; stdout holds the PNG data.
    """
    # pdftoppm renders only the first page (-f 1 -singlefile), scaled so the
    # longest edge is max_dimension, and writes PNG to stdout.
    command = [
        "pdftoppm",
        "-singlefile",
        "-f",
        "1",
        "-scale-to",
        str(max_dimension),
        filepath,
        "-png",
    ]
    p = subprocess.Popen(
        command, close_fds=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    stdout, stderr = p.communicate()
    return stdout, stderr.decode("utf-8"), str(p.returncode)
def make_png_thumbnails(filepath, max_dimension, pages, directory):
    """Abstract function for making a thumbnail for a PDF

    See helper functions below for how to use this in a simple way.

    :param filepath: The attr where the PDF is located on the item
    :param max_dimension: The longest you want any edge to be
    :param pages: iterable of page numbers to render
    :param directory: object with a ``name`` attribute (e.g. a
        TemporaryDirectory) into which thumb-<page> PNG files are written
    """
    # One pdftoppm invocation per page; output goes to files named
    # "thumb-<page>" inside `directory` rather than to stdout.
    for page in pages:
        command = [
            "pdftoppm",
            "-singlefile",
            "-f",
            str(page),
            "-scale-to",
            str(max_dimension),
            filepath,
            "-png",
            f"{directory.name}/thumb-{page}",
        ]
        p = subprocess.Popen(
            command, close_fds=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
        p.communicate()
def pdf_bytes_from_image_array(image_list, output_path) -> None:
    """Make a pdf given an array of Image files

    :param image_list: List of images
    :type image_list: list
    :param output_path: destination the multi-page PDF is saved to
    :return: None
    """
    # Save the first image as a PDF and append the remaining images as
    # additional pages (save_all + append_images).
    image_list[0].save(
        output_path,
        "PDF",
        resolution=100.0,
        save_all=True,
        append_images=image_list[1:],
    )
    # Drop references to the (potentially large) page images promptly.
    del image_list
def strip_metadata_from_path(file_path):
    """Convert PDF file into PDF and remove metadata from it

    Stripping the metadata allows us to hash the PDFs

    :param file_path: path of the PDF file on disk
    :return: PDF bytes with metadata removed.
    """
    with open(file_path, "rb") as f:
        pdf_merger = PdfFileMerger()
        pdf_merger.append(io.BytesIO(f.read()))
        # Blank out the date fields so two otherwise-identical PDFs hash
        # the same.
        pdf_merger.addMetadata({"/CreationDate": "", "/ModDate": ""})
        byte_writer = io.BytesIO()
        pdf_merger.write(byte_writer)
    return force_bytes(byte_writer.getvalue())
def strip_metadata_from_bytes(pdf_bytes):
    """Convert PDF bytes into PDF and remove metadata from it

    Stripping the metadata allows us to hash the PDFs

    :param pdf_bytes: PDF as binary content
    :return: PDF bytes with metadata removed.
    """
    pdf_merger = PdfFileMerger()
    pdf_merger.append(io.BytesIO(pdf_bytes))
    # Blank out the date fields so two otherwise-identical PDFs hash the same.
    pdf_merger.addMetadata({"/CreationDate": "", "/ModDate": ""})
    byte_writer = io.BytesIO()
    pdf_merger.write(byte_writer)
    return force_bytes(byte_writer.getvalue())
def cleanup_form(form):
    """Clean up a form object by deleting the file referenced by its
    cleaned_data["fp"] entry from disk."""
    os.remove(form.cleaned_data["fp"])
def make_file(filename, dir=None):
    """Read a test-asset file and return it as a multipart 'file' payload.

    :param filename: name of the file under doctor/test_assets/.
    :param dir: accepted for interface compatibility but unused --
        TODO confirm whether it was meant to override the asset directory.
    :return: dict {"file": (filename, file bytes)}.
    """
    # BUGFIX: the path previously contained a literal placeholder instead of
    # interpolating `filename`, so the parameter was ignored.
    filepath = f"{Path.cwd()}/doctor/test_assets/{filename}"
    with open(filepath, "rb") as f:
        return {"file": (filename, f.read())}
def make_buffer(filename, dir=None):
    """Read a test-asset file and return it as an anonymous 'file' payload.

    Unlike make_file(), the payload deliberately(?) carries the literal name
    "filename" rather than the real one -- kept as-is to preserve behavior.

    :param filename: name of the file under doctor/test_assets/.
    :param dir: accepted for interface compatibility but unused --
        TODO confirm whether it was meant to override the asset directory.
    :return: dict {"file": ("filename", file bytes)}.
    """
    # BUGFIX: the path previously contained a literal placeholder instead of
    # interpolating `filename`, so the parameter was ignored.
    filepath = f"{Path.cwd()}/doctor/test_assets/{filename}"
    with open(filepath, "rb") as f:
        return {"file": ("filename", f.read())}
def pdf_has_images(path: str) -> bool:
    """Check raw PDF for embedded images.

    We need to check if a PDF contains any images. If a PDF contains images it
    likely has content that needs to be scanned.

    :param path: Location of PDF to process.
    :return: Does the PDF contain images?
    :type: bool
    """
    with open(path, "rb") as pdf_file:
        pdf_bytes = pdf_file.read()
    # Scan for an "/Image" name object; bool(...) replaces the redundant
    # `True if ... else False` ternary.
    return bool(re.search(rb"/Image ?/", pdf_bytes))
def ocr_needed(path: str, content: str) -> bool:
    """Check if OCR is needed on a PDF

    Check if images are in PDF or content is empty.

    :param path: The path to the PDF
    :param content: The content extracted from the PDF.
    :return: Whether OCR should be run on the document.
    """
    # Empty extracted text short-circuits before the (slower) image scan.
    has_no_text = content.strip() == ""
    return has_no_text or pdf_has_images(path)
def make_page_with_text(page, data, h, w):
    """Build a one-page PDF overlay of invisible OCR text.

    Fix: the bare ``except:`` is narrowed to the exceptions the lookup can
    actually raise (missing key / short list), so unrelated errors are no
    longer silently swallowed.

    :param page: Page number to emit text for (compared against per-row
        ``page_num``).
    :param data: Tesseract-style dict of parallel lists ("level", "text",
        "left", "top", "width", "height", "page_num") — assumed shape,
        TODO confirm against the OCR caller.
    :param h: Source image height (pixels).
    :param w: Source image width (pixels).
    :return: BytesIO positioned at 0 containing the rendered PDF page.
    """
    packet = io.BytesIO()
    can = canvas.Canvas(packet, pagesize=(w, h))
    # Set to a standard size and font for now.
    can.setFont("Helvetica", 9)
    # Make the text transparent
    can.setFillAlpha(0)
    for i in range(len(data["level"])):
        try:
            letter = data["text"][i]
            x, y, ww, hh = (
                data["left"][i],
                data["top"][i],
                data["width"][i],
                data["height"][i],
            )
            pg = data["page_num"][i]
        except (KeyError, IndexError):
            # Skip rows with missing fields instead of aborting the page.
            continue
        # Rescale pixel coordinates onto a letter-size page (8.5x11in @ 72dpi).
        sub = ((11 * 72) / h) * int(hh)
        x = ((8.5 * 72) / w) * int(x)
        y = ((11 * 72) / h) * int(y)
        # PDF origin is bottom-left; flip the y axis.
        yy = (11 * 72) - y
        if int(page) == int(pg):
            can.drawString(x, yy - sub, letter)
    can.showPage()
    can.save()
    packet.seek(0)
    return packet
| 30.528902
| 87
| 0.619142
| 1,408
| 10,563
| 4.539773
| 0.253551
| 0.020651
| 0.015488
| 0.013141
| 0.371715
| 0.364362
| 0.356227
| 0.340895
| 0.315551
| 0.315551
| 0
| 0.005374
| 0.277667
| 10,563
| 345
| 88
| 30.617391
| 0.832372
| 0.312222
| 0
| 0.331606
| 0
| 0
| 0.06426
| 0.016318
| 0
| 0
| 0
| 0
| 0
| 1
| 0.103627
| false
| 0.010363
| 0.062176
| 0.005181
| 0.321244
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e0e1c62ee116428b55cffa380260139fb9ea5d8
| 906
|
py
|
Python
|
src/xrl/env_tester.py
|
k4ntz/XmodRL
|
dffb416bcd91010d8075ee1ac00cc4b9a3021967
|
[
"MIT"
] | null | null | null |
src/xrl/env_tester.py
|
k4ntz/XmodRL
|
dffb416bcd91010d8075ee1ac00cc4b9a3021967
|
[
"MIT"
] | null | null | null |
src/xrl/env_tester.py
|
k4ntz/XmodRL
|
dffb416bcd91010d8075ee1ac00cc4b9a3021967
|
[
"MIT"
] | 1
|
2021-11-10T18:09:27.000Z
|
2021-11-10T18:09:27.000Z
|
import gym
import numpy as np
import os
import random
import matplotlib.pyplot as plt
from atariari.benchmark.wrapper import AtariARIWrapper
# YarsRevenge
#
env_name = "DemonAttackDeterministic-v4"
def print_labels(env_info):
    """Print the RAM-derived feature labels from an AtariARI info dict."""
    print(env_info["labels"])
# Fix: removed a stray non-ASCII token ("üsad") that was a guaranteed
# NameError when the script ran.
env = AtariARIWrapper(gym.make(env_name))
name = env.unwrapped.spec.id
#ballgame = any(game in name for game in ["Pong", "Tennis"])
print(np.int16(3))

n_actions = env.action_space.n
_ = env.reset()
obs, _, done, info = env.step(0)

# Run a random policy, rendering each frame and printing reward + labels.
r = 0
for t in range(50000):
    plt.imshow(env.render(mode='rgb_array'), interpolation='none')
    plt.plot()
    plt.pause(0.0001)  # pause a bit so that plots are updated
    action = random.randint(0, n_actions - 1)
    obs, reward, done, info = env.step(action)
    r += reward
    print(reward)
    print_labels(info)
    if done:
        break
print(r)
| 22.65
| 66
| 0.695364
| 136
| 906
| 4.544118
| 0.558824
| 0.053398
| 0.045307
| 0.048544
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024357
| 0.184327
| 906
| 40
| 67
| 22.65
| 0.811908
| 0.143488
| 0
| 0
| 0
| 0
| 0.059663
| 0.035019
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033333
| false
| 0
| 0.2
| 0
| 0.233333
| 0.2
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e0ef102e2826e6b9febd80bed5d0193a3687555
| 2,711
|
py
|
Python
|
packages/lrn/model/question.py
|
genropy/learn
|
019286c1fa1548482f64ccbd91082e069ec62a56
|
[
"MIT"
] | 3
|
2019-11-16T12:38:20.000Z
|
2019-11-17T08:44:41.000Z
|
packages/lrn/model/question.py
|
genropy/learn
|
019286c1fa1548482f64ccbd91082e069ec62a56
|
[
"MIT"
] | null | null | null |
packages/lrn/model/question.py
|
genropy/learn
|
019286c1fa1548482f64ccbd91082e069ec62a56
|
[
"MIT"
] | 5
|
2019-11-16T16:22:10.000Z
|
2019-11-18T21:46:50.000Z
|
# encoding: utf-8
from datetime import datetime
class Table(object):
    """genropy table definition for the lrn ``question`` table."""

    def config_db(self,pkg):
        """Declare the question table: columns, draft support and relations."""
        tbl=pkg.table('question', pkey='id', name_long='!![en]Question', name_plural='!![en]Questions',caption_field='question')
        # draftField=True adds the __is_draft system column used below.
        self.sysFields(tbl, draftField=True)
        tbl.column('question',name_long='!![en]Question', validate_notnull=True)
        tbl.column('description', name_long='!![en]Description')
        tbl.column('details', name_long='!![en]Details')
        # Author of the question; deleting the user is blocked (onDelete='raise').
        tbl.column('user_id',size='22', group='_', name_long='!![en]Inserted by'
                    ).relation('adm.user.id', relation_name='myquestions',
                               mode='foreignkey', onDelete='raise')
        tbl.column('approval_ts', dtype='DH', name_long='!![en]Approval TS')
        tbl.column('approved_by_user_id', size='22', group='_', name_long='!![en]Approved by'
                    ).relation('adm.user.id',
                               relation_name='approved_questions',
                               mode='foreignkey', onDelete='raise')
        # Topic/answer links are nulled out when the target row is deleted.
        tbl.column('main_topic_id',size='22', group='_', name_long='!![en]Main topic'
                    ).relation('topic.id',
                               relation_name='questions',
                               mode='foreignkey',
                               onDelete='setnull')
        tbl.column('main_answer_id',size='22', group='_', name_long='!![en]Main answer'
                    ).relation('answer.id',
                               relation_name='questions',
                               mode='foreignkey',
                               onDelete='setnull')
        #tbl.formulaColumn('__protected_by_approval_ts',"""($approval_ts IS NOT NULL AND $approved_by_user_id!=:env_user_id)""",dtype='B')

    def defaultValues(self):
        """Return default column values for a new record."""
        user_id = self.db.currentEnv.get('user_id')
        # If the user has the right privileges, their questions and answers
        # are not created as drafts.
        if 'admin' in self.db.currentEnv['userTags']: # TODO: a better, more refined condition could be used
            return dict( __is_draft = False,
                         approval_ts = datetime.now(),
                         approved_by_user_id = user_id,
                         user_id=user_id)
        return dict(__is_draft=True, user_id = user_id)

    def trigger_onUpdating(self, record, old_record):
        """genropy pre-update trigger."""
        # When a record goes from draft to approved, stamp the approving
        # user and the approval timestamp.
        if old_record['__is_draft'] and not record['__is_draft']:
            record['approval_ts'] = datetime.now()
            record['approved_by_user_id'] = self.db.currentEnv.get('user_id')
| 54.22
| 138
| 0.570638
| 309
| 2,711
| 4.770227
| 0.368932
| 0.065129
| 0.061058
| 0.035278
| 0.297151
| 0.297151
| 0.232022
| 0.189959
| 0.074627
| 0
| 0
| 0.004719
| 0.29657
| 2,711
| 50
| 139
| 54.22
| 0.768222
| 0.143121
| 0
| 0.210526
| 0
| 0
| 0.218292
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.078947
| false
| 0
| 0.026316
| 0
| 0.184211
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e11d3f12bcf35bac083ace9a1b7490250555694
| 3,087
|
py
|
Python
|
core/api/OxfordAPI.py
|
vimarind/Complete-GRE-Vocab
|
6dc8bb8ed0506ed572edd1a01a456d9a27238c94
|
[
"MIT"
] | null | null | null |
core/api/OxfordAPI.py
|
vimarind/Complete-GRE-Vocab
|
6dc8bb8ed0506ed572edd1a01a456d9a27238c94
|
[
"MIT"
] | null | null | null |
core/api/OxfordAPI.py
|
vimarind/Complete-GRE-Vocab
|
6dc8bb8ed0506ed572edd1a01a456d9a27238c94
|
[
"MIT"
] | null | null | null |
import json
import requests
from os import path
class OxfordAPI:
    """Thin client for the Oxford Dictionaries API with a file-based cache."""

    def __init__(self, app_id, app_key, cache_path):
        """Store API credentials and the directory used for cached responses."""
        self.app_id = app_id
        self.app_key = app_key
        self.cache_path = cache_path

    def __parse_sense(self, word, sense):
        """Collect definitions/examples/synonyms from a sense, recursing into subsenses."""
        for definition in sense.get('definitions', list()):
            word.definitions.append(definition)
        for example in sense.get('examples', list()):
            word.examples.append(example.get('text', None))
        for synonym in sense.get('synonyms', list()):
            word.synonyms.append(synonym.get('text', None))
        for subsense in sense.get('subsenses', list()):
            self.__parse_sense(word, subsense)

    def __parse_pronunciation(self, word, pronunciation):
        """Record the pronunciation audio file URL when present."""
        audioFile = pronunciation.get('audioFile', None)
        if audioFile is not None:
            word.audio_file = audioFile

    def __parse_entry(self, word, entry):
        """Walk one dictionary entry's pronunciations and senses."""
        for pronunciation in entry.get('pronunciations', list()):
            self.__parse_pronunciation(word, pronunciation)
        for sense in entry.get('senses', list()):
            self.__parse_sense(word, sense)

    def __parse_lexical_entry(self, word, lexical_entry):
        """Walk all entries under one lexical entry."""
        for entry in lexical_entry.get('entries', list()):
            self.__parse_entry(word, entry)

    def __parse_result(self, word, result):
        """Walk all lexical entries of one API result."""
        for lexical_entry in result.get('lexicalEntries', list()):
            self.__parse_lexical_entry(word, lexical_entry)

    def __parse_word(self, word, data):
        """Populate *word* from a response dict; return True unless it's an error payload."""
        success = False
        if data.get('error') is None:
            for result in data.get('results', list()):
                self.__parse_result(word, result)
            success = True
        return success

    def __get_word_data(self, word):
        """Fetch word data from the API, then cache the raw response.

        Fix: the HTTP request now happens *before* the cache file is opened.
        Previously the file was opened with 'w' (truncating/creating it)
        first, so a failed request left behind an empty cache entry that
        would later crash ``json.load`` in get_word.
        """
        url = "https://od-api.oxforddictionaries.com/api/v2/words/en-us?q=" + word.text
        r = requests.get(url, headers={"app_id": self.app_id, "app_key": self.app_key})
        filepath = self.cache_path + word.text + '.json'
        with open(filepath, 'w') as file:
            file.write(r.text)
        return r.json()

    def get_word(self, word):
        """
        Populates the given word object with the relevant information from the Oxford Dictionary API. First, the word
        is looked for in the cache folder, if it exists, load that data. Otherwise, the information is requested from
        the OxfordAPI and stored in the cache folder.
        :param word: The word object to be populated.
        :return: A boolean indicating if the operation has been successful or not.
        """
        success = False
        if path.exists(self.cache_path):
            filepath = self.cache_path + word.text + '.json'
            if path.exists(filepath):
                with open(filepath, 'r') as file:
                    data = json.load(file)
            else:
                data = self.__get_word_data(word)
            success = self.__parse_word(word, data)
        else:
            print('OxfordAPI: Please provide a valid cache path.')
        return success
| 38.111111
| 117
| 0.618724
| 390
| 3,087
| 4.705128
| 0.279487
| 0.034877
| 0.042507
| 0.019619
| 0.076294
| 0.035967
| 0.035967
| 0
| 0
| 0
| 0
| 0.000449
| 0.278264
| 3,087
| 80
| 118
| 38.5875
| 0.82316
| 0.12504
| 0
| 0.135593
| 0
| 0.016949
| 0.088948
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.152542
| false
| 0
| 0.050847
| 0
| 0.271186
| 0.016949
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e1202ada111dfedf3e1239998ddc9e7e0c2bac2
| 2,568
|
py
|
Python
|
linked_list.py
|
bentsi/data-structures
|
ce4a3a49ec131550ec0b77875b8f0367addcca05
|
[
"Apache-2.0"
] | null | null | null |
linked_list.py
|
bentsi/data-structures
|
ce4a3a49ec131550ec0b77875b8f0367addcca05
|
[
"Apache-2.0"
] | null | null | null |
linked_list.py
|
bentsi/data-structures
|
ce4a3a49ec131550ec0b77875b8f0367addcca05
|
[
"Apache-2.0"
] | 1
|
2021-01-10T15:41:50.000Z
|
2021-01-10T15:41:50.000Z
|
class Node:
    """A single linked-list cell: a payload plus a pointer to the next cell."""

    def __init__(self, data=None):
        self.next = None
        self.data = data
class LinkedListIndexError(IndexError):
    """Raised when a LinkedList index is out of range."""
    pass
class LinkedList:
    """Singly linked list built on a sentinel head node."""

    def __init__(self):
        # Sentinel: never holds user data; real elements hang off .next.
        self.head = Node()

    def _get_last_node(self):
        """Return the final node (the sentinel itself when the list is empty)."""
        node = self.head
        while node.next is not None:
            node = node.next
        return node

    def get_last_node(self):
        """Return the data stored in the final node."""
        return self._get_last_node().data

    def append(self, data):
        """Attach a new node holding *data* at the tail."""
        self._get_last_node().next = Node(data=data)

    def print(self):
        """Print the list's string representation."""
        print(self.__str__())

    def __str__(self):
        parts = []
        position = 0
        node = self.head.next
        while node is not None:
            parts.append(f"{position}: {node.data}\n")
            position += 1
            node = node.next
        return "".join(parts)

    def length(self):
        """Count the elements by walking the chain."""
        count = 0
        node = self.head.next
        while node is not None:
            count += 1
            node = node.next
        return count

    def _get(self, index):
        """Return the node at *index*; raise LinkedListIndexError when out of range."""
        if not(0 <= index < self.length()):
            raise LinkedListIndexError(f"Index '{index}' does not exist")
        node = self.head.next
        for _ in range(index):
            node = node.next
        return node

    def get(self, index):
        """Return the data at *index*."""
        return self._get(index=index).data

    def __getitem__(self, item):
        return self.get(index=item)

    def erase(self, index):
        """Unlink the node at *index* and return its data."""
        predecessor = self.head if index == 0 else self._get(index=index - 1)
        victim = predecessor.next
        predecessor.next = victim.next
        removed = victim.data
        del victim
        return removed

    def set(self, index, new_data):
        """Overwrite the data stored at *index*."""
        self._get(index=index).data = new_data

    def __del__(self):
        remaining = self.length()
        while remaining != 0:
            self.erase(index=remaining - 1)
            remaining -= 1
        del self.head
if __name__ == '__main__':
    # Demo: build a small list and exercise the public API.
    ll = LinkedList()
    ll.append(data="Fedor")
    ll.append(data="Julia")
    ll.append(data="Bentsi")
    ll.print()
    print("Length of the Linked list is: ", ll.length())
    idx = 1
    # Both the method form and the subscript form of element access.
    print(ll.get(index=idx))
    print(f"Data at index {idx} is {ll[idx]}")
    print("Deleted: ", ll.erase(index=0))
    ll.append(data="Fedor")
    ll.append(data="Bentsi")
    ll.set(index=3, new_data="Tim Peters")
    print(ll)
| 24.457143
| 73
| 0.550234
| 326
| 2,568
| 4.153374
| 0.180982
| 0.064993
| 0.044313
| 0.053176
| 0.251108
| 0.169867
| 0.169867
| 0.127031
| 0.127031
| 0
| 0
| 0.008824
| 0.338006
| 2,568
| 104
| 74
| 24.692308
| 0.787647
| 0
| 0
| 0.255814
| 0
| 0
| 0.06581
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.162791
| false
| 0.011628
| 0
| 0.034884
| 0.290698
| 0.093023
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e14d6737904e50f196708249c8435de6151b062
| 2,768
|
py
|
Python
|
custom_html_validator/custom_html_validator.py
|
koan-u/custom_html_validator
|
1a6735146e64d3c346201d10eddfd9ebfe1377c2
|
[
"MIT"
] | null | null | null |
custom_html_validator/custom_html_validator.py
|
koan-u/custom_html_validator
|
1a6735146e64d3c346201d10eddfd9ebfe1377c2
|
[
"MIT"
] | null | null | null |
custom_html_validator/custom_html_validator.py
|
koan-u/custom_html_validator
|
1a6735146e64d3c346201d10eddfd9ebfe1377c2
|
[
"MIT"
] | null | null | null |
from html.parser import HTMLParser
class CustomHTMLValidater(HTMLParser):
    """HTML validator that enforces a tag/attribute whitelist and proper nesting.

    Result dict (returned by close()):
      status: 0 = in progress, -1 = invalid, 1 = valid
      detail: reason string ('not_allowed_tag', 'not_allowed_attr',
              'Construction Error', 'ok')
      detected_list: start tags seen, each marked complete once closed.
    """

    # Void elements that are valid without a closing tag.
    __SINGLE_TAGS = [
        'area','base','br','col','embed',
        'hr','img','input','keygen','link',
        'meta','param','source','track','wbr'
    ]

    def __init__(self):
        # NOTE(review): HTMLParser.__init__ itself calls self.reset() (with
        # the default tag_reset=False) before the explicit reset(True) below.
        HTMLParser.__init__(self)
        self.reset(True)

    def reset(self, tag_reset = False):
        """Reset parser and validation state; also clear the whitelist when tag_reset."""
        HTMLParser.reset(self)
        self.__core = {
            'status': 0,
            'detail':'',
            'detected_list':[]
        }
        if tag_reset:
            self.__allowed_tags = []
        return

    def set_allowed_tags(self, __allowed_tags):
        """Install the whitelist: mapping of tag name -> iterable of allowed attributes."""
        self.__allowed_tags = __allowed_tags
        return

    def handle_starttag(self,tag,attrs):
        """Validate a start tag and its attributes; record allowed tags as open."""
        if self.__core['status'] == 0:
            if not tag in self.__allowed_tags:
                self.__core['status'] = -1
                self.__core['detail'] = 'not_allowed_tag'
            else:
                for attr in attrs:
                    if not attr[0] in self.__allowed_tags[tag]:
                        self.__core['status'] = -1
                        self.__core['detail'] = 'not_allowed_attr'
                        # Abort without recording the tag.
                        return
                detected = {
                    'tag': tag,
                    'attr': attrs,
                    'complete': False
                }
                self.__core['detected_list'].append(detected)
        return

    def handle_endtag(self,tag):
        """Match an end tag against the most recent unclosed start tags."""
        if self.__core['status'] == 0:
            last_index = len(self.__core['detected_list']) - 1
            # Scan backwards for the matching open tag; void elements in
            # between are auto-completed, anything else is a nesting error.
            for index in range(last_index, -1, -1):
                data = self.__core['detected_list'][index]
                if not data['complete']:
                    if data['tag'] == tag:
                        data['complete'] = True
                        return
                    elif data['tag'] in self.__SINGLE_TAGS:
                        data['complete'] = True
                    else:
                        break
            self.__core['status'] = -1
            self.__core['detail'] = 'Construction Error'
        return

    def close(self):
        """Finish parsing and return the result dict (see class docstring)."""
        HTMLParser.close(self)
        if self.__core['status'] == 0:
            errored = False
            for data in self.__core['detected_list']:
                if not data['complete']:
                    # Unclosed void elements are fine; anything else fails.
                    if data['tag'] in self.__SINGLE_TAGS:
                        data['complete'] = True
                        continue
                    self.__core['status'] = -1
                    self.__core['detail'] = 'Construction Error'
                    errored = True
                    break
            if not errored:
                self.__core['status'] = 1
                self.__core['detail'] = 'ok'
        return self.__core
| 33.756098
| 66
| 0.462789
| 264
| 2,768
| 4.511364
| 0.25
| 0.127624
| 0.105793
| 0.062972
| 0.347607
| 0.270361
| 0.270361
| 0.208228
| 0.208228
| 0
| 0
| 0.00811
| 0.420882
| 2,768
| 81
| 67
| 34.17284
| 0.734872
| 0
| 0
| 0.324324
| 0
| 0
| 0.126084
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081081
| false
| 0
| 0.013514
| 0
| 0.216216
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e14d840b0d68fa20db94e8f512ad11ba709e64f
| 1,841
|
py
|
Python
|
boltfile.py
|
arahmanhamdy/bolt
|
8f5d9b8149db833b54a7b353162b2c28a53c8aff
|
[
"MIT"
] | 15
|
2016-10-21T14:30:38.000Z
|
2021-10-12T04:50:48.000Z
|
boltfile.py
|
arahmanhamdy/bolt
|
8f5d9b8149db833b54a7b353162b2c28a53c8aff
|
[
"MIT"
] | 51
|
2016-02-05T01:24:32.000Z
|
2019-12-09T16:52:20.000Z
|
boltfile.py
|
arahmanhamdy/bolt
|
8f5d9b8149db833b54a7b353162b2c28a53c8aff
|
[
"MIT"
] | 6
|
2016-10-17T13:48:16.000Z
|
2021-03-28T20:40:14.000Z
|
import logging
import os.path
import bolt
import bolt.about
# Directory layout: everything is derived from the location of this boltfile.
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
_src_dir = os.path.join(PROJECT_ROOT, 'bolt')
_test_dir = os.path.join(PROJECT_ROOT, 'test')
_output_dir = os.path.join(PROJECT_ROOT, 'output')
_coverage_dir = os.path.join(_output_dir, 'coverage')

# Per-task configuration consumed by bolt's built-in tasks.
config = {
    'pip': {
        'command': 'install',
        'options': {
            'r': './requirements.txt'
        }
    },
    'delete-pyc': {
        'sourcedir': _src_dir,
        'recursive': True,
        # Nested variant: same task configured for the test tree
        # (referenced below as 'delete-pyc.test-pyc').
        'test-pyc': {
            'sourcedir': _test_dir,
        }
    },
    'conttest' : {
        'task': 'ut'
    },
    'mkdir': {
        'directory': _output_dir,
    },
    'nose': {
        'directory': _test_dir,
        # 'nose.ci' variant: adds xunit + coverage reporting for CI runs.
        'ci': {
            'options': {
                'with-xunit': True,
                'xunit-file': os.path.join(_output_dir, 'unit_tests_log.xml'),
                'with-coverage': True,
                'cover-erase': True,
                'cover-package': 'bolt',
                'cover-html': True,
                'cover-html-dir': _coverage_dir,
                'cover-branches': True,
            }
        }
    },
    'setup': {
        'command': 'bdist_wheel',
        'egg-info': {
            'command': 'egg_info'
        }
    },
    'coverage': {
        'task': 'nose',
        'include': ['bolt'],
        'output': os.path.join(_output_dir, 'ut_coverage')
    }
}

# Development tasks
bolt.register_task('clear-pyc', ['delete-pyc', 'delete-pyc.test-pyc'])
bolt.register_task('ut', ['clear-pyc', 'nose'])
bolt.register_task('ct', ['conttest'])
bolt.register_task('pack', ['setup', 'setup.egg-info'])
# CI/CD tasks
bolt.register_task('run-unit-tests', ['clear-pyc', 'mkdir', 'nose.ci'])
# Default task (not final).
bolt.register_task('default', ['pip', 'ut'])
| 25.569444
| 78
| 0.523628
| 197
| 1,841
| 4.675127
| 0.329949
| 0.058632
| 0.065147
| 0.05646
| 0.140065
| 0.078176
| 0
| 0
| 0
| 0
| 0
| 0
| 0.293319
| 1,841
| 71
| 79
| 25.929577
| 0.707917
| 0.029875
| 0
| 0.032258
| 0
| 0
| 0.280584
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.064516
| 0
| 0.064516
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e158c914469c96413a23f9b7926f662ec188191
| 1,309
|
py
|
Python
|
assignments/04_head/head.py
|
emma-huffman/biosystems-analytics-2020
|
eaf9c084407fa6d25b815b7d63077ed9aec53447
|
[
"MIT"
] | null | null | null |
assignments/04_head/head.py
|
emma-huffman/biosystems-analytics-2020
|
eaf9c084407fa6d25b815b7d63077ed9aec53447
|
[
"MIT"
] | null | null | null |
assignments/04_head/head.py
|
emma-huffman/biosystems-analytics-2020
|
eaf9c084407fa6d25b815b7d63077ed9aec53447
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
Author : Me <me@foo.com>
Date : today
Purpose: Rock the Casbah
"""
import argparse
import io
import os
import sys
# --------------------------------------------------
def get_args():
    """Parse and validate command-line arguments (line count + input file)."""
    parser = argparse.ArgumentParser(
        description='Rock the Casbah',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-n',
                        '--num',
                        metavar='int',
                        type=int,
                        default=10,
                        help='Number of lines')
    parser.add_argument('file',
                        type=argparse.FileType('r'),
                        help='Input File')
    args = parser.parse_args()
    # Reject zero/negative counts with the standard argparse error exit.
    if args.num <= 0:
        parser.error(f'--num "{args.num}" must be greater than 0')
    return args
# --------------------------------------------------
def main():
    """Print the first --num lines of the input file, preceded by its name.

    Fix: ``args.file`` is a single open file handle (argparse.FileType),
    not a list.  The original ``for fh in args.file`` iterated the file's
    *lines*, so ``fh.name`` raised AttributeError on the first line.
    """
    args = get_args()
    fh = args.file
    print(fh.name)
    num_line = 0
    for line in fh:
        num_line += 1
        print(line, end='')
        if num_line == args.num:
            break


# --------------------------------------------------
if __name__ == '__main__':
    main()
| 23.375
| 66
| 0.450726
| 130
| 1,309
| 4.407692
| 0.561538
| 0.036649
| 0.045375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007982
| 0.330023
| 1,309
| 55
| 67
| 23.8
| 0.645382
| 0.220779
| 0
| 0
| 0
| 0
| 0.104104
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.060606
| false
| 0
| 0.121212
| 0
| 0.212121
| 0.060606
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e1798f13a1e5958c9273e51efaff12141f4e76c
| 9,497
|
py
|
Python
|
js_components/cms_plugins.py
|
compoundpartners/js-components
|
a58a944254354078a0a7b53a4c9a7df50790267a
|
[
"BSD-3-Clause"
] | null | null | null |
js_components/cms_plugins.py
|
compoundpartners/js-components
|
a58a944254354078a0a7b53a4c9a7df50790267a
|
[
"BSD-3-Clause"
] | null | null | null |
js_components/cms_plugins.py
|
compoundpartners/js-components
|
a58a944254354078a0a7b53a4c9a7df50790267a
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import functools
import six
from django.utils.translation import ugettext_lazy as _
from django.template import TemplateDoesNotExist
from django.template.loader import select_template
from cms.plugin_base import CMSPluginBase, CMSPluginBaseMetaclass
from cms.plugin_pool import plugin_pool
from . import models, forms
from .utils.urlmatch import urlmatch
from .constants import (
HIDE_PROMO,
HIDE_PROMO_ROLLOVER,
HIDE_PROMO_VIDEO,
HIDE_TWITTER,
HIDE_COUNTERS,
HIDE_RAWHTML,
HIDE_GATED_CONTENT,
HIDE_FLOAT,
HIDE_LIGHTBOX,
CUSTOM_PLUGINS,
PROMO_CHILD_CLASSES,
)
class LayoutMixin():
    """Mixin resolving a per-instance layout name to an optional template override."""

    def get_layout(self, context, instance, placeholder):
        """Return the layout identifier stored on the plugin instance."""
        return instance.layout

    def get_render_template(self, context, instance, placeholder):
        """Use ``TEMPLATE_NAME % layout`` when that template exists, else the default."""
        layout = self.get_layout(context, instance, placeholder)
        if layout:
            candidate = self.TEMPLATE_NAME % layout
            try:
                select_template([candidate])
            except TemplateDoesNotExist:
                pass
            else:
                return candidate
        return self.render_template

    def render(self, context, instance, placeholder):
        """Expose the plugin instance and placeholder to the template context."""
        context.update({
            'instance': instance,
            'placeholder': placeholder,
        })
        return context
class PromoUnitPlugin(LayoutMixin, CMSPluginBase):
    """CMS plugin rendering a "Promo Unit" teaser block.

    Layout-specific templates resolve through LayoutMixin via TEMPLATE_NAME;
    feature-flag constants trim optional admin fields at import time.
    """
    # NOTE(review): 'Componens' looks like a typo for 'Components' — left
    # untouched because it is a runtime string shown in the admin.
    module = 'JumpSuite Componens'
    TEMPLATE_NAME = 'js_components/promo_%s.html'
    name = _('Promo Unit')
    model = models.PromoUnit
    form = forms.PromoUnitForm
    render_template = 'js_components/promo.html'
    change_form_template = 'admin/js_components/float.html'
    # Children only allowed when child classes are configured at all.
    allow_children = True if PROMO_CHILD_CLASSES else False
    child_classes = PROMO_CHILD_CLASSES
    main_fields = [
        'layout',
        'alignment',
        'title',
        'subtitle',
        'color',
        'image',
        'svg',
        'icon',
        'content',
        'rollover_content',
        'background_video',
        'link_text',
        'link_url',
        ('file_src', 'show_filesize'),
        'open_in_new_window',
        'full_height',
    ]
    # Feature flags strip optional fields at class-definition time.
    if HIDE_PROMO_ROLLOVER:
        main_fields.remove('rollover_content')
    if HIDE_PROMO_VIDEO:
        main_fields.remove('background_video')
    fieldsets = [
        (None, {
            'fields': main_fields
        }),
        (_('Advanced settings'), {
            'classes': ('collapse',),
            'fields': (
                'modal_id',
                'attributes',
            )
        }),
    ]


if not HIDE_PROMO:
    plugin_pool.register_plugin(PromoUnitPlugin)
class TwitterFeedPlugin(LayoutMixin, CMSPluginBase):
    """CMS plugin embedding a Twitter feed."""
    module = 'JumpSuite Componens'
    TEMPLATE_NAME = 'js_components/twitter_%s.html'
    name = _('Twitter Feed')
    model = models.TwitterFeed
    form = forms.TwitterFeedForm
    render_template = 'js_components/twitter.html'


if not HIDE_TWITTER:
    plugin_pool.register_plugin(TwitterFeedPlugin)
class CountersContainerPlugin(LayoutMixin, CMSPluginBase):
    """Deprecated container for CounterPlugin children (name says do not use)."""
    module = 'JumpSuite Componens'
    TEMPLATE_NAME = 'js_components/counters_%s.html'
    name = _('Counters Container (DO NOT USE, NEED REMOVE)')
    model = models.CountersContainer
    form = forms.CountersContainerForm
    render_template = 'js_components/counters.html'
    allow_children = True
    child_classes = ['CounterPlugin']
    parent_classes = ['Bootstrap4GridRowPlugin']
class CounterPlugin(LayoutMixin, CMSPluginBase):
    """CMS plugin rendering a single animated counter."""
    module = 'JumpSuite Componens'
    TEMPLATE_NAME = 'js_components/counter_%s.html'
    name = _('Counter')
    model = models.Counter
    form = forms.CounterForm
    render_template = 'js_components/counter.html'


if not HIDE_COUNTERS:
    plugin_pool.register_plugin(CountersContainerPlugin)
    plugin_pool.register_plugin(CounterPlugin)
# Left-over experiment: dynamically extending the grid row's children.
#if 'Bootstrap4GridRowPlugin' in plugin_pool.plugins:
#plugin_pool.plugins['Bootstrap4GridRowPlugin'].child_classes.append('CountersContainerPlugin')
class RawHTMLPlugin(CMSPluginBase):
    """CMS plugin that outputs the stored HTML body verbatim."""
    module = 'JumpSuite Componens'
    name = _('Raw HTML')
    model = models.RawHTML
    render_template = 'js_components/html.html'

    def render(self, context, instance, placeholder):
        """Pass the raw body to the template as 'html'."""
        context.update({
            'instance': instance,
            'placeholder': placeholder,
            'html': instance.body,
        })
        return context
class RawHTMLWithIDPlugin(CMSPluginBase):
    """Raw HTML plugin with [PARAM] placeholders filled from the request."""
    module = 'JumpSuite Componens'
    name = _('Raw HTML with ID')
    model = models.RawHTMLWithID
    render_template = 'js_components/html.html'

    def render(self, context, instance, placeholder):
        """Substitute each configured parameter's [UPPERCASE] token in the body."""
        request = context['request']
        html = instance.body
        # instance.parameters is a comma-separated list of request params;
        # GET takes precedence, POST is the fallback (empty string if absent).
        for param in instance.parameters.split(','):
            param = param.strip()
            key = '[%s]' % param.upper()
            html = html.replace(key, request.GET.get(param) or request.POST.get(param, ''))
        context.update({
            'instance': instance,
            'placeholder': placeholder,
            'html': html,
        })
        return context


if not HIDE_RAWHTML:
    plugin_pool.register_plugin(RawHTMLPlugin)
    plugin_pool.register_plugin(RawHTMLWithIDPlugin)
@plugin_pool.register_plugin
class CustomPlugin(LayoutMixin, CMSPluginBase):
    """Base plugin for site-configured custom components (see CUSTOM_PLUGINS)."""
    module = 'JumpSuite Componens'
    TEMPLATE_NAME = 'js_components/custom_%s.html'
    name = _('Custom')
    model = models.Custom
    form = forms.CustomForm
    render_template = 'js_components/custom.html'

    def get_form(self, request, obj=None, **kwargs):
        """Return the admin form class, tagging it with this plugin's name.

        Fix: forward the actual ``obj`` to ``super().get_form`` instead of
        hard-coding ``obj=None``, which discarded the object being edited.
        """
        Form = super().get_form(request, obj=obj, **kwargs)
        if self.name in CUSTOM_PLUGINS:
            Form.plugin_name = self.name
        return Form
# Register one concrete CustomPlugin subclass per configured custom plugin,
# named e.g. "My WidgetPlugin" -> MyWidgetPlugin.
# NOTE(review): `parameters` is currently unused here.
for name, parameters in CUSTOM_PLUGINS.items():
    p = type(
        str(name.replace(' ', '') + 'Plugin'),
        (CustomPlugin,),
        {'name': name},
    )
    plugin_pool.register_plugin(p)
class GatedContentPlugin(LayoutMixin, CMSPluginBase):
    """CMS plugin wrapping child content behind a gate (e.g. a form)."""
    module = 'JumpSuite Componens'
    TEMPLATE_NAME = 'js_components/gated_content_%s.html'
    name = _('Gated Content')
    model = models.GatedContent
    form = forms.GatedContentForm
    render_template = 'js_components/gated_content.html'
    allow_children = True


if not HIDE_GATED_CONTENT:
    plugin_pool.register_plugin(GatedContentPlugin)
@plugin_pool.register_plugin
class AnimatePlugin(LayoutMixin, CMSPluginBase):
    """CMS plugin applying an animation wrapper to its children."""
    module = 'JumpSuite Componens'
    TEMPLATE_NAME = 'js_components/animate_%s.html'
    name = _('Animate')
    model = models.Animate
    form = forms.AnimateForm
    render_template = 'js_components/animate.html'
    allow_children = True
@plugin_pool.register_plugin
class JSFolderPlugin(LayoutMixin, CMSPluginBase):
    """CMS plugin listing the files of a django-filer folder."""
    module = 'JumpSuite Componens'
    TEMPLATE_NAME = 'js_components/folder_%s.html'
    name = _('Filer listing')
    model = models.Folder
    form = forms.FolderForm
    render_template = 'js_components/folder.html'

    def render(self, context, instance, placeholder):
        """Expose the folder's files (optionally ordered) to the template."""
        request = context['request']
        files = []
        if instance.folder:
            files = instance.folder.files.all()
            if instance.order_by:
                files = files.order_by(instance.order_by)
        context.update({
            'instance': instance,
            'placeholder': placeholder,
            'files': files,
        })
        return context
@plugin_pool.register_plugin
class IncludeExcludeContainer(CMSPluginBase):
    """Container shown only on URLs matching include patterns and not exclude ones."""
    module = 'JumpSuite Componens'
    name = _('Include/Exclude Container')
    model = models.IncludeExcludeContainer
    render_template = 'js_components/container.html'
    change_form_template = 'admin/js_components/change_form_container.html'
    allow_children = True
    # Visibility depends on the current URL, so the plugin must not be cached.
    cache = False

    def render(self, context, instance, placeholder):
        """Compute is_shown by matching the full request URL against both pattern lists."""
        request = context['request']
        url = '%s://%s%s' % (request.scheme, request.META['HTTP_HOST'], request.path)
        # Patterns are stored one per line; urlmatch takes comma-separated lists.
        is_shown = urlmatch(','.join(instance.include.split('\n')), url) and not urlmatch(','.join(instance.exclude.split('\n')), url)
        context.update({
            'instance': instance,
            'placeholder': placeholder,
            'is_shown': is_shown,
        })
        return context
class FloatPlugin(LayoutMixin, CMSPluginBase):
    """Container floating its children left/right/center or via a custom layout."""
    module = 'JumpSuite Componens'
    name = _('Float Container')
    model = models.Float
    form = forms.FloatForm
    render_template = 'js_components/float.html'
    TEMPLATE_NAME = 'js_components/float_%s.html'
    #change_form_template = 'admin/js_components/float.html'
    allow_children = True

    def get_layout(self, context, instance, placeholder):
        """Simple alignments use the default template; anything else selects a layout template."""
        return '' if instance.alignment in ['left', 'right', 'center'] else instance.alignment

    def render(self, context, instance, placeholder):
        """Expose instance, placeholder and alignment to the template."""
        context.update({
            'instance': instance,
            'placeholder': placeholder,
            'alignment': instance.alignment,
        })
        return context


if not HIDE_FLOAT:
    plugin_pool.register_plugin(FloatPlugin)
class LightboxPlugin(LayoutMixin, CMSPluginBase):
    """CMS plugin rendering child pictures inside a lightbox."""
    module = 'JumpSuite Componens'
    TEMPLATE_NAME = 'js_components/lightbox_%s.html'
    name = _('Lightbox')
    model = models.Lightbox
    form = forms.LightboxForm
    render_template = 'js_components/lightbox.html'
    allow_children = True
    child_classes = ['Bootstrap4PicturePlugin']


if not HIDE_LIGHTBOX:
    plugin_pool.register_plugin(LightboxPlugin)
| 29.958991
| 134
| 0.666737
| 968
| 9,497
| 6.330579
| 0.202479
| 0.050914
| 0.041123
| 0.05483
| 0.332245
| 0.28688
| 0.259465
| 0.227807
| 0.212141
| 0.096606
| 0
| 0.000687
| 0.233126
| 9,497
| 316
| 135
| 30.053797
| 0.840725
| 0.023376
| 0
| 0.262548
| 0
| 0
| 0.178964
| 0.080906
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03861
| false
| 0.003861
| 0.042471
| 0.007722
| 0.517375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e18ddf285ec21f8d58dafd4142a06363020741a
| 1,232
|
py
|
Python
|
src/julia/tests/test_juliaoptions.py
|
dpinol/pyjulia
|
cec4bf0b0eac7e39cecd8f3e7882563062903d0f
|
[
"MIT"
] | 649
|
2016-09-09T07:38:19.000Z
|
2022-03-28T04:30:55.000Z
|
src/julia/tests/test_juliaoptions.py
|
dpinol/pyjulia
|
cec4bf0b0eac7e39cecd8f3e7882563062903d0f
|
[
"MIT"
] | 362
|
2016-09-08T16:25:30.000Z
|
2022-03-05T23:15:05.000Z
|
src/julia/tests/test_juliaoptions.py
|
dpinol/pyjulia
|
cec4bf0b0eac7e39cecd8f3e7882563062903d0f
|
[
"MIT"
] | 85
|
2016-11-08T09:32:44.000Z
|
2022-03-03T13:10:37.000Z
|
import pytest
from julia.core import JuliaOptions
# fmt: off
@pytest.mark.parametrize("kwargs, args", [
    ({}, []),
    (dict(compiled_modules=None), []),
    (dict(compiled_modules=False), ["--compiled-modules", "no"]),
    (dict(compiled_modules="no"), ["--compiled-modules", "no"]),
    (dict(depwarn="error"), ["--depwarn", "error"]),
    (dict(sysimage="PATH"), ["--sysimage", "PATH"]),
    (dict(bindir="PATH"), ["--home", "PATH"]),
])
# fmt: on
def test_as_args(kwargs, args):
    """JuliaOptions maps keyword options to the expected CLI argument list."""
    assert JuliaOptions(**kwargs).as_args() == args
@pytest.mark.parametrize("kwargs", [
    dict(compiled_modules="invalid value"),
    dict(bindir=123456789),
])
def test_valueerror(kwargs):
    """Invalid option values raise ValueError naming the option and accepted values."""
    with pytest.raises(ValueError) as excinfo:
        JuliaOptions(**kwargs)
    assert "Option" in str(excinfo.value)
    assert "accept" in str(excinfo.value)
# fmt: off
@pytest.mark.parametrize("kwargs", [
    dict(invalid_option=None),
    dict(invalid_option_1=None, invalid_option_2=None),
])
# fmt: on
def test_unsupported(kwargs):
    """Unknown option names raise TypeError listing every offending option."""
    with pytest.raises(TypeError) as excinfo:
        JuliaOptions(**kwargs)
    assert "Unsupported Julia option(s): " in str(excinfo.value)
    for key in kwargs:
        assert key in str(excinfo.value)
| 28
| 65
| 0.655844
| 149
| 1,232
| 5.328859
| 0.315436
| 0.11335
| 0.095718
| 0.085642
| 0.210327
| 0.083123
| 0
| 0
| 0
| 0
| 0
| 0.010659
| 0.162338
| 1,232
| 43
| 66
| 28.651163
| 0.758721
| 0.026786
| 0
| 0.21875
| 0
| 0
| 0.143216
| 0
| 0
| 0
| 0
| 0
| 0.15625
| 1
| 0.09375
| false
| 0
| 0.0625
| 0
| 0.15625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e1b5f4b3183d1482047160b015715a1f35d97f0
| 389
|
py
|
Python
|
lambda/exercices/PhotoCollector/photo_uploader_from_csv.py
|
Mythridor/aws-scripting
|
5f978ae7f2b05a40862cbe35d766534fcc40fef0
|
[
"MIT"
] | null | null | null |
lambda/exercices/PhotoCollector/photo_uploader_from_csv.py
|
Mythridor/aws-scripting
|
5f978ae7f2b05a40862cbe35d766534fcc40fef0
|
[
"MIT"
] | null | null | null |
lambda/exercices/PhotoCollector/photo_uploader_from_csv.py
|
Mythridor/aws-scripting
|
5f978ae7f2b05a40862cbe35d766534fcc40fef0
|
[
"MIT"
] | null | null | null |
#! /usr/local/bin/Python3.5
"""Download every image URL listed in the second column of images.csv."""
import urllib.request

with open("images.csv", 'r') as csv_file:
    i = 0
    for line in csv_file:
        line = line.split(',')
        if line[1] != '' and line[1] != "\n":
            # Fixes: urlretrieve takes str arguments — the spurious
            # .encode('utf-8') calls passed bytes; and printing an encoded
            # literal produced "b'Image saved'" instead of the message.
            urllib.request.urlretrieve(line[1], "img_" + str(i) + ".jpg")
            print("Image saved")
            i += 1
        else:
            # NOTE(review): the original's indentation for this line was lost;
            # reporting per empty row is the most plausible intent — confirm.
            print("No result")
| 25.933333
| 107
| 0.524422
| 57
| 389
| 3.561404
| 0.631579
| 0.073892
| 0.147783
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034483
| 0.254499
| 389
| 14
| 108
| 27.785714
| 0.665517
| 0.066838
| 0
| 0
| 0
| 0
| 0.157459
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.1
| 0
| 0.1
| 0.2
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e1b7c970fedf5252f5d2635e9703e31344e54e5
| 1,031
|
py
|
Python
|
src/main/tools/api.py
|
NGnius/streamq
|
aa31085befc7da2e3f7461698b2638a246a73eef
|
[
"MIT"
] | null | null | null |
src/main/tools/api.py
|
NGnius/streamq
|
aa31085befc7da2e3f7461698b2638a246a73eef
|
[
"MIT"
] | null | null | null |
src/main/tools/api.py
|
NGnius/streamq
|
aa31085befc7da2e3f7461698b2638a246a73eef
|
[
"MIT"
] | null | null | null |
'''
API-related functions in one spot for convenience
Created by NGnius 2019-06-15
'''
from flask import jsonify, request
from threading import Semaphore, RLock
def get_param(param, silent=False):
    """Fetch a request parameter: query string for GET, JSON body otherwise.

    Returns None when the parameter is absent or (for non-GET requests,
    with silent=True) when the body is missing or unparseable.
    """
    if request.method == 'GET':
        return request.args.get(param)
    else:
        try:
            return request.get_json(force=True, silent=silent)[param]
        except (KeyError, TypeError):
            # TypeError: get_json(silent=True) returns None for an invalid
            # body, and the original code crashed subscripting it.
            return None
def error(status=500, reason=None):
    """Build a JSON error response tuple: (payload, HTTP status code)."""
    payload = {'status': status}
    if reason is not None:
        payload['reason'] = reason
    return jsonify(payload), status
# Per-identifier semaphores for exclusive sections (see start_single/end_single).
single_semaphores = dict()
# Lock guarding creation of entries in single_semaphores.
resource_lock = RLock()
def start_single(identifier):
    """Acquire exclusive access for `identifier`, creating its semaphore on first use.

    Bug fix: the original released `resource_lock` *before* inserting the
    new Semaphore, so two threads could both observe the key missing and
    race to create it. The lock is now held across the check-and-insert.
    """
    with resource_lock:
        if identifier not in single_semaphores:
            single_semaphores[identifier] = Semaphore(1)
    single_semaphores[identifier].acquire()
def end_single(identifier):
    """Release exclusive access previously taken with start_single().

    Bug fix: the original acquired `resource_lock` and never released it,
    which would deadlock every later start_single/end_single call made
    from another thread.
    """
    with resource_lock:
        single_semaphores[identifier].release()
| 26.435897
| 69
| 0.693501
| 124
| 1,031
| 5.629032
| 0.459677
| 0.114613
| 0.111748
| 0.080229
| 0.229226
| 0.12894
| 0
| 0
| 0
| 0
| 0
| 0.014742
| 0.210475
| 1,031
| 38
| 70
| 27.131579
| 0.842752
| 0.076625
| 0
| 0.214286
| 0
| 0
| 0.01589
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.071429
| 0
| 0.357143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e1bee51dd0ea1878f4a4736c40b34f0977aa174
| 3,968
|
py
|
Python
|
built-in/PyTorch/Official/cv/image_classification/MobileNetV1_ID0094_for_PyTorch/benchmark.py
|
Ascend/modelzoo
|
f018cfed33dbb1cc2110b9ea2e233333f71cc509
|
[
"Apache-2.0"
] | 12
|
2020-12-13T08:34:24.000Z
|
2022-03-20T15:17:17.000Z
|
built-in/PyTorch/Official/cv/image_classification/MobileNetV1_ID0094_for_PyTorch/benchmark.py
|
Ascend/modelzoo
|
f018cfed33dbb1cc2110b9ea2e233333f71cc509
|
[
"Apache-2.0"
] | 1
|
2022-01-20T03:11:05.000Z
|
2022-01-20T06:53:39.000Z
|
built-in/PyTorch/Official/cv/image_classification/MobileNetV1_ID0094_for_PyTorch/benchmark.py
|
Ascend/modelzoo
|
f018cfed33dbb1cc2110b9ea2e233333f71cc509
|
[
"Apache-2.0"
] | 2
|
2021-07-10T12:40:46.000Z
|
2021-12-17T07:55:15.000Z
|
# BSD 3-Clause License
#
# Copyright (c) 2017 xxxx
# All rights reserved.
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ============================================================================
import time
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision.models as models
from torch.autograd import Variable
class MobileNet(nn.Module):
    """MobileNetV1 (width 1.0) feature extractor with a 1000-way linear head."""

    def __init__(self):
        super(MobileNet, self).__init__()

        def conv_bn(inp, oup, stride):
            # Standard 3x3 convolution -> batch norm -> ReLU stem block.
            return nn.Sequential(
                nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
                nn.BatchNorm2d(oup),
                nn.ReLU(inplace=True)
            )

        def conv_dw(inp, oup, stride):
            # Depthwise-separable block: depthwise 3x3 then pointwise 1x1.
            return nn.Sequential(
                nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),
                nn.BatchNorm2d(inp),
                nn.ReLU(inplace=True),
                nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
                nn.BatchNorm2d(oup),
                nn.ReLU(inplace=True),
            )

        # (in_channels, out_channels, stride) for each depthwise stage.
        stage_cfg = [
            (32, 64, 1),
            (64, 128, 2),
            (128, 128, 1),
            (128, 256, 2),
            (256, 256, 1),
            (256, 512, 2),
            (512, 512, 1),
            (512, 512, 1),
            (512, 512, 1),
            (512, 512, 1),
            (512, 512, 1),
            (512, 1024, 2),
            (1024, 1024, 1),
        ]
        layers = [conv_bn(3, 32, 2)]
        layers.extend(conv_dw(cin, cout, s) for cin, cout, s in stage_cfg)
        layers.append(nn.AvgPool2d(7))  # expects 224x224 input -> 7x7 feature map
        self.model = nn.Sequential(*layers)
        self.fc = nn.Linear(1024, 1000)

    def forward(self, x):
        features = self.model(x)
        flat = features.view(-1, 1024)
        return self.fc(flat)
def speed(model, name):
    """Time two forward passes on the NPU and print the second (warmed-up) one."""
    t_start = time.time()
    x = torch.rand(1, 3, 224, 224).npu()
    x = Variable(x, volatile=True)  # legacy no-grad flag kept for parity
    t_ready = time.time()
    model(x)   # warm-up pass
    t_warm = time.time()
    model(x)   # measured pass
    t_done = time.time()
    print('%10s : %f' % (name, t_done - t_warm))
if __name__ == '__main__':
    # cudnn.benchmark = True  # This will make network slow ??
    # Build each network on the NPU, then benchmark them in order.
    resnet18 = models.resnet18().npu()
    alexnet = models.alexnet().npu()
    vgg16 = models.vgg16().npu()
    squeezenet = models.squeezenet1_0().npu()
    mobilenet = MobileNet().npu()
    benchmarks = [
        (resnet18, 'resnet18'),
        (alexnet, 'alexnet'),
        (vgg16, 'vgg16'),
        (squeezenet, 'squeezenet'),
        (mobilenet, 'mobilenet'),
    ]
    for net, net_name in benchmarks:
        speed(net, net_name)
| 35.115044
| 80
| 0.618952
| 522
| 3,968
| 4.641762
| 0.394636
| 0.034668
| 0.023112
| 0.024763
| 0.17499
| 0.15518
| 0.15518
| 0.15518
| 0.15518
| 0.086669
| 0
| 0.056791
| 0.263357
| 3,968
| 112
| 81
| 35.428571
| 0.772152
| 0.421119
| 0
| 0.191176
| 0
| 0
| 0.024746
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.073529
| false
| 0
| 0.088235
| 0.029412
| 0.220588
| 0.014706
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e1da62e19fe4f3008c5d21f24d0decbe6f6039d
| 1,012
|
py
|
Python
|
client/setup.py
|
nnabeyang/tepra-lite-esp32
|
69cbbafce6a3f8b0214178cc80d2fea024ab8c07
|
[
"MIT"
] | 33
|
2021-09-04T08:46:48.000Z
|
2022-02-04T08:12:55.000Z
|
client/setup.py
|
nnabeyang/tepra-lite-esp32
|
69cbbafce6a3f8b0214178cc80d2fea024ab8c07
|
[
"MIT"
] | 2
|
2021-09-28T12:05:21.000Z
|
2021-12-11T04:08:04.000Z
|
client/setup.py
|
nnabeyang/tepra-lite-esp32
|
69cbbafce6a3f8b0214178cc80d2fea024ab8c07
|
[
"MIT"
] | 2
|
2021-09-28T10:51:27.000Z
|
2021-12-10T09:56:22.000Z
|
from setuptools import setup, find_packages

# Package metadata, referenced below in the setup() call.
__version__ = '1.0.0'
__author__ = 'Takumi Sueda'
__author_email__ = 'puhitaku@gmail.com'
__license__ = 'MIT License'
# PyPI trove classifiers for this release.
__classifiers__ = (
    'Development Status :: 4 - Beta',
    'License :: OSI Approved :: MIT License',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    'Programming Language :: Python :: 3',
)
# The README doubles as the PyPI long description.
with open('README.md', 'r') as f:
    readme = f.read()
setup(
    name='tepracli',
    version=__version__,
    license=__license__,
    author=__author__,
    author_email=__author_email__,
    url='https://github.com/puhitaku/tepra-lite-esp32/tree/master/client',
    description='An example of tepra-lite-esp32 client / CLI',
    long_description=readme,
    long_description_content_type='text/markdown',
    classifiers=__classifiers__,
    packages=find_packages(),
    # Bundle the TrueType font used for label rendering.
    package_data={'': ['assets/ss3.ttf']},
    include_package_data=True,
    install_requires=['click', 'pillow', 'qrcode[pil]', 'requests'],
)
| 29.764706
| 74
| 0.6917
| 115
| 1,012
| 5.626087
| 0.66087
| 0.051005
| 0.07728
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011848
| 0.166008
| 1,012
| 33
| 75
| 30.666667
| 0.754739
| 0
| 0
| 0
| 0
| 0
| 0.389328
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.033333
| 0
| 0.033333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e210cf9cae77591487ca0d70ca7341aca8bd44a
| 16,303
|
py
|
Python
|
src/colorpredicate.py
|
petrusmassabki/color-predicate
|
828f62b50985cb795aa5b5743e4f7e5c305d2175
|
[
"MIT"
] | null | null | null |
src/colorpredicate.py
|
petrusmassabki/color-predicate
|
828f62b50985cb795aa5b5743e4f7e5c305d2175
|
[
"MIT"
] | null | null | null |
src/colorpredicate.py
|
petrusmassabki/color-predicate
|
828f62b50985cb795aa5b5743e4f7e5c305d2175
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import os
import colorsys
import cv2
import numpy as np
from scipy.stats import multivariate_normal
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
class ColorPredicate:
    """Build a color predicate (binary color-membership lookup) from
    example images and masks, via a gaussian-smoothed color histogram.

    Bug fix: `create_color_predicate` now caches its result on
    `self._color_predicate`, so the `color_predicate` property no longer
    always returns None.
    """

    def __init__(self, name, images_path, n_max=10):
        self.name = name
        self._total_pixel_count = 0
        self.images = self.load_images(images_path, n_max)
        # Default masks select every pixel as target until load_masks() runs.
        self.masks = [255 * np.ones(image.shape[:2], np.uint8)
                      for image in self.images]
        self._histogram_channels = None
        self._histogram_color_space = None
        self._bins = None
        self._grid = None
        self._ch_indexes = None
        self._target_histogram = None
        self._background_histogram = None
        self._gaussian_smoothed_histogram = None
        self._color_predicate = None
        self.color_spaces = {
            'hsv': cv2.COLOR_BGR2HSV
        }
        # Per-channel value ranges (OpenCV convention: hue is 0-179).
        self.ch_ranges = {
            'b': (0, 256), 'g': (0, 256), 'r': (0, 256),
            'h': (0, 180), 's': (0, 256), 'v': (0, 256)
        }

    def load_images(self, path, n_max):
        """Load and return a list of up to `n_max` images from `path`."""
        images_list = []
        n_max = min(n_max, len(os.listdir(path)))
        for filename in sorted(os.listdir(path))[:n_max]:
            image = cv2.imread(os.path.join(path, filename))
            if image is not None:
                images_list.append(image)
                self._total_pixel_count += image.shape[0] * image.shape[1]
        return images_list

    def load_masks(self, path):
        """Load and return a list of image masks from path."""
        masks_list = []
        n_images = len(self.images)
        n_masks = len(os.listdir(path))
        if n_masks >= len(self.images):
            for filename in sorted(os.listdir(path))[:n_images]:
                mask_gray = cv2.imread(os.path.join(path, filename), 0)
                ret, mask = cv2.threshold(mask_gray, 127, 255,
                                          cv2.THRESH_BINARY)
                if mask is not None:
                    masks_list.append(mask)
            self.masks = masks_list
        else:
            print(f'Directory must contain at least {n_images} image files, '
                  f'but only {n_masks} were provided. Masks will be ignored.')

    @staticmethod
    def sample_pixels(target_pixels, bg_pixels, target_sr, bg_rate):
        """Take a random sample of target and background pixels.

        Parameters
        ----------
        target_pixels : numpy.ndarray
            Array of pixels from target region.
        bg_pixels : numpy.ndarray
            Array of pixels from background region.
        target_sr : int or float
            Target pixels sample rate (percentage of total target pixels).
        bg_rate : int or float
            Ratio of background to target pixels.
            A value of 1.0 means equivalent distribution.

        Returns
        -------
        target_pixels_sample : numpy.ndarray
            Array of random samples from target region.
        bg_pixels_sample : numpy.ndarray
            Array of random samples from background region.
        """
        n_target_pixels, n_bg_pixels = len(target_pixels), len(bg_pixels)
        target_samples = n_target_pixels * target_sr
        if n_bg_pixels > 0:
            n_bg_samples = target_samples * bg_rate
            target_bg_ratio = n_target_pixels / n_bg_pixels
            # If more background samples were requested than exist, shrink
            # the target sample rate so the requested ratio is preserved.
            if n_bg_samples > n_bg_pixels:
                target_sr = n_bg_pixels / (n_target_pixels * bg_rate)
            bg_sr = target_bg_ratio * target_sr * bg_rate
            indexes_bg_samples = np.random.choice([0, 1],
                                                  size=n_bg_pixels,
                                                  p=[(1 - bg_sr), bg_sr])
            bg_pixels_sample = bg_pixels[indexes_bg_samples == 1]
        else:
            bg_pixels_sample = bg_pixels
        indexes_target_samples = np.random.choice([0, 1],
                                                  size=n_target_pixels,
                                                  p=[1 - target_sr, target_sr])
        target_pixels_sample = target_pixels[indexes_target_samples == 1]
        return target_pixels_sample, bg_pixels_sample

    def create_multidimensional_histogram(self, color_space='bgr',
                                          ch_indexes=(0, 1, 2),
                                          bins=(8, 8, 8),
                                          target_sr=1.0,
                                          bg_rate=1.0):
        """Create a multidimensional histogram of instance's images.

        Color space can be either RGB or HSV. Dimension is set according
        to `ch_indexes` length. Sampling can be specified.

        Parameters
        ----------
        color_space : str, optional
            Histogram color space. Accepts `bgr` (default) or `hsv`.
        ch_indexes : tuple, optional
            Sequence of histogram channel indexes. Values refer to
            `color_space` string order. E.g, use (0, 2) to create a
            2D histogram of channels b and r.
        bins : tuple, optional
            Sequence of histogram bins. Must be of same length of `ch_indexes`.
        target_sr : int or float
            Target pixels sample rate (percentage of total target pixels).
        bg_rate : int or float
            Ratio of background to target pixels. A value of 1.0 means
            equivalent distribution.

        Returns
        -------
        self._target_histogram : numpy.ndarray
            2D or 3D histogram of sampled target pixels
        self._bg_histogram : numpy.ndarray
            2D or 3D histogram of samples background pixels
        """
        print('Computing histogram...', end=' ')
        target_pixels_per_image, bg_pixels_per_image = [], []
        if sorted(ch_indexes) in ([0, 1], [0, 2], [1, 2], [0, 1, 2]):
            self._histogram_channels = [color_space[i] for i in ch_indexes]
            hist_range = [self.ch_ranges[ch] for ch in self._histogram_channels]
        else:
            raise ValueError('Parameter "ch_indexes" must be a sequence '
                             'of unique integers between 0 and 2')
        for image, mask in zip(self.images, self.masks):
            if color_space != 'bgr':
                image = cv2.cvtColor(image, self.color_spaces[color_space])
            # mask > 0 selects target pixels; ~mask > 0 the background ones.
            target_pixels_per_image.append(image[mask > 0])
            bg_pixels_per_image.append(image[~mask > 0])
        target_pixels = np.concatenate(target_pixels_per_image)
        bg_pixels = np.concatenate(bg_pixels_per_image)
        target_samples, bg_samples = self.sample_pixels(target_pixels,
                                                        bg_pixels,
                                                        target_sr,
                                                        bg_rate)
        self._target_histogram, _ = np.histogramdd(target_samples[:, ch_indexes],
                                                   bins=bins,
                                                   range=hist_range)
        self._background_histogram, _ = np.histogramdd(bg_samples[:, ch_indexes],
                                                       bins=bins,
                                                       range=hist_range)
        self._bins = bins
        self._histogram_color_space = color_space
        self._ch_indexes = ch_indexes
        print('Done!')
        return self._target_histogram, self._background_histogram

    def pdf(self, mean, cov, domain):
        """Multidimensional probability density function."""
        pdf = multivariate_normal.pdf(domain, mean=mean, cov=cov)
        pdf = pdf.reshape(self._bins)
        return pdf

    def create_gaussian_smoothed_histogram(self,
                                           t_amp=1.0,
                                           t_cov=0.05,
                                           bg_amp=1.0,
                                           bg_cov=0.025,
                                           threshold=0.01,
                                           norm=True):
        """Create a 2D or 3D gaussian-smoothed histogram.

        A gaussian-smoothed histogram is built from target and background
        pixels according to [1]: for each pixel in target region, a normal
        distribution centered at its position is added to the histogram;
        similarly, for each pixel at background, a normal distribution is
        subtracted. Finally, thresholding is applied: color frequencies below
        threshold times maximum frequency are set to zero.

        [1] `Finding skin in color images`, R. Kjeldsen and J. Kender.
        Proceedings of the Second International Conference on Automatic Face
        and Gesture Recognition, 1996. DOI:10.1109/AFGR.1996.557283

        Parameters
        ----------
        t_amp : float, optional
            Amplitude of target's normal distribution. Default is 1.0.
        t_cov : float, optional
            Covariance of target's normal distribution. Default is 0.05.
        bg_amp : float, optional
            Amplitude of background's normal distribution. Default is 1.0.
        bg_cov : float, optional
            Covariance of background's normal distribution. Default is 0.025.
        threshold : float, optional
            Color frequencies below threshold times maximum frequency are
            set to zero. Default is 0.01.
        norm : bool, optional
            When True, histogram is normalized by maximum frequency. Default
            is True.

        Returns
        -------
        self._gaussian_smoothed_histogram : numpy.ndarray
            2D or 3D gaussian-smoothed histogram.
        """
        print('Generating gaussian-smoothed histogram...', end=' ')
        self._grid = np.mgrid[tuple([slice(0, b) for b in self._bins])]
        domain = np.column_stack([axis.flat for axis in self._grid])
        gauss_sum = np.zeros(self._bins, dtype=np.float32)
        # Covariances are expressed as a fraction of the smallest bin count.
        t_cov = t_cov * min(self._bins)
        bg_cov = bg_cov * min(self._bins)
        t_hist = self._target_histogram
        bg_hist = self._background_histogram
        for pos in np.argwhere(t_hist):
            pdf = self.pdf(pos, t_cov, domain) * t_amp
            gauss_sum += pdf * t_hist[tuple(pos)]
        for pos in np.argwhere(bg_hist):
            pdf = - self.pdf(pos, bg_cov, domain) * bg_amp
            gauss_sum += pdf * bg_hist[tuple(pos)]
        gauss_sum[gauss_sum < threshold * np.max(gauss_sum)] = 0
        if norm:
            gauss_sum = gauss_sum / np.max(gauss_sum)
        self._gaussian_smoothed_histogram = gauss_sum
        print('Done!')
        return self._gaussian_smoothed_histogram

    def create_color_predicate(self, threshold=0, save=False, filename='color_predicate'):
        """Create a color predicate from gaussian-smoothed histogram.

        Parameters
        ----------
        threshold : int or float, optional
            Histogram frequencies above threshold are set to one; frequencies
            below threshold are set to zero. Default is 0.
        save : bool, optional
            If true, color predicate is saved as a numpy array. Default is
            False.
        filename : str, optional
            Color predicate file name. Default is `color_predicate`

        Returns
        -------
        color_predicate : numpy.ndarray
            Color predicate with the same dimension as the histogram.
        """
        color_predicate = self._gaussian_smoothed_histogram.copy()
        color_predicate[color_predicate > threshold] = 1
        color_predicate[color_predicate <= threshold] = 0
        # Bug fix: cache the result so the `color_predicate` property
        # returns the most recent predicate instead of always None.
        self._color_predicate = color_predicate
        if save:
            np.save(filename, color_predicate)
        return color_predicate

    def plot_gaussian_smoothed_histogram(self, figsize=(8, 8), dpi=75, save=False):
        """Plot a 2D or 3D gaussian-smoothed histogram.

        When 2D, creates a pseudocolor histogram; when 3D, each bin is
        represented by a circle with size proportional to its frequency.

        Parameters
        ----------
        figsize : tuple, optional
            Matplotlib's `figsize` parameter. Default is (8, 8).
        dpi : int, optional
            Matplotlib's `dpi` parameter. Default is 75.
        save : bool, optional
            When true, saves the plot as a png file.
        """
        print('Plotting gaussian smoothed histogram...', end=' ')
        grid = self._grid
        ranges = self.ch_ranges
        bins = self._bins
        channels = self._histogram_channels
        histogram = self._gaussian_smoothed_histogram
        color_space = self._histogram_color_space
        # Bin-center coordinates along each plotted channel axis.
        axis = [(ranges[ch][1] / bins[i]) * grid[i] + (ranges[ch][1] / bins[i]) / 2
                for i, ch in enumerate(channels)]
        if histogram.ndim == 3:
            colors = np.vstack((axis[0].flatten() / ranges[channels[0]][1],
                                axis[1].flatten() / ranges[channels[1]][1],
                                axis[2].flatten() / ranges[channels[2]][1])).T
            colors = colors[:, tuple([channels.index(ch) for ch in color_space])]
            if color_space == 'hsv':
                colors = np.array([colorsys.hsv_to_rgb(color[0], color[1], color[2])
                                   for color in colors])
            elif color_space == 'bgr':
                colors = colors[:, ::-1]
            fig = plt.figure(figsize=figsize, dpi=dpi)
            ax = fig.add_subplot(111, projection='3d')
            ax.title.set_position([0.5, 1.1])
            ax.set_title(f'3D Color Histogram - '
                         f'{channels[0].title()} x '
                         f'{channels[1].title()} x '
                         f'{channels[2].title()}', fontsize=16)
            ax.xaxis.set_tick_params(labelsize=8)
            ax.yaxis.set_tick_params(labelsize=8)
            ax.zaxis.set_tick_params(labelsize=8)
            ax.set_xlim(ranges[channels[0]][0], ranges[channels[0]][1])
            ax.set_ylim(ranges[channels[1]][0], ranges[channels[1]][1])
            ax.set_zlim(ranges[channels[2]][0], ranges[channels[2]][1])
            ax.set_xlabel(channels[0].title(), fontsize=12)
            ax.set_ylabel(channels[1].title(), fontsize=12)
            ax.set_zlabel(channels[2].title(), fontsize=12)
            ax.view_init(azim=45)
            ax.scatter(axis[0], axis[1], axis[2],
                       s=histogram * 1000,
                       c=colors)
            if save:
                ch_str = channels[0] + channels[1] + channels[2]
                plt.savefig(f'{self.name}_3d_{ch_str}_histogram.png')
            plt.show()
        if self._gaussian_smoothed_histogram.ndim == 2:
            fig = plt.figure(figsize=figsize, dpi=dpi)
            ax = fig.add_subplot(111)
            ax.set_aspect('equal')
            ax.set_title(f'2D Color Histogram - '
                         f'{channels[0].title()} x '
                         f'{channels[1].title()}')
            ax.set_xlabel(channels[0].title(), fontsize=12)
            ax.set_ylabel(channels[1].title(), fontsize=12, rotation=0)
            h = ax.pcolormesh(axis[0], axis[1], histogram)
            fig.colorbar(h, ax=ax)
            if save:
                ch_str = channels[0] + channels[1]
                plt.savefig(f'{self.name}_2d_{ch_str}_histogram.png')
            plt.show()
        print('Done!')

    @property
    def total_pixel_count(self):
        return self._total_pixel_count

    @property
    def gaussian_smoothed_histogram(self):
        return self._gaussian_smoothed_histogram

    @property
    def true_pixels_histogram(self):
        return self._target_histogram

    @property
    def false_pixels_histogram(self):
        return self._background_histogram

    @property
    def color_predicate(self):
        return self._color_predicate

    def __str__(self):
        description = f'''
        {self.name.title()} color predicate.
        Images: {len(self.images)}
        Bins: {self._bins}
        Color Space: {self._histogram_color_space}
        Channels: {self._ch_indexes}
        '''
        return description
| 37.825986
| 90
| 0.56235
| 1,937
| 16,303
| 4.538462
| 0.170883
| 0.032761
| 0.051189
| 0.026391
| 0.31009
| 0.249005
| 0.19884
| 0.148334
| 0.119213
| 0.098737
| 0
| 0.02397
| 0.344906
| 16,303
| 430
| 91
| 37.913953
| 0.799157
| 0.249831
| 0
| 0.12069
| 0
| 0
| 0.068978
| 0.018254
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064655
| false
| 0
| 0.030172
| 0.021552
| 0.150862
| 0.030172
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e23d085a14f192cef141c0732be27df361cf10b
| 4,456
|
py
|
Python
|
tests/test_basic_train.py
|
maxwellmckinnon/fastai
|
b67bf7184ac2be1825697709051c5bcba058a40d
|
[
"Apache-2.0"
] | 1
|
2019-04-08T09:52:28.000Z
|
2019-04-08T09:52:28.000Z
|
tests/test_basic_train.py
|
maxwellmckinnon/fastai
|
b67bf7184ac2be1825697709051c5bcba058a40d
|
[
"Apache-2.0"
] | null | null | null |
tests/test_basic_train.py
|
maxwellmckinnon/fastai
|
b67bf7184ac2be1825697709051c5bcba058a40d
|
[
"Apache-2.0"
] | 1
|
2020-05-19T12:56:20.000Z
|
2020-05-19T12:56:20.000Z
|
"""
module: basic_train.py - Model fitting methods
docs : https://docs.fast.ai/train.html
"""
import pytest, fastai
from fastai.vision import *
from utils.fakes import *
from utils.text import *
from utils.mem import *
from fastai.utils.mem import *
from math import isclose
# Presumably pre-loads torch/CUDA memory so later GPU-memory measurements
# are stable (defined in utils.mem) — TODO confirm.
torch_preload_mem()
@pytest.fixture(scope="module")
def data():
    """Module-wide fixture: tiny MNIST DataBunch with no transforms, bs=2."""
    mnist_path = untar_data(URLs.MNIST_TINY)
    return ImageDataBunch.from_folder(mnist_path, ds_tfms=([], []), bs=2)
# this is not a fixture on purpose - the memory measurement tests are very sensitive, so
# they need to be able to get a fresh learn object and not one modified by other tests.
def learn_large_unfit(data):
    """Build a fresh, unfitted resnet18 learner (deliberately not a fixture)."""
    return create_cnn(data, models.resnet18, metrics=accuracy)
@pytest.fixture(scope="module")
def learn(data):
    """Shared learner fixture for tests that tolerate a reused object."""
    return learn_large_unfit(data)
def test_get_preds():
    """get_preds must return as many targets as the batch size."""
    learner = fake_learner()
    with CaptureStdout() as cs:
        preds = learner.get_preds()
    assert len(preds[1]) == learner.data.batch_size
def test_save_load(learn):
    """Interleave save/load/purge in several orders to ensure none breaks the others."""
    name = 'mnist-tiny-test-save-load'
    # testing that all these various sequences don't break each other
    model_path = learn.save(name, return_path=True)
    learn.load(name, purge=True)
    learn.data.sanity_check()
    # 709 is the known train-set size of MNIST_TINY; purge must not lose data.
    assert 709 == len(learn.data.train_ds)
    learn.purge()
    learn.load(name)
    learn.load(name)
    model_path = learn.save(name, return_path=True)
    learn.load(name, purge=True)
    # basic checks
    #assert learn.recorder
    assert learn.opt
    assert 709 == len(learn.data.train_ds)
    # XXX: could use more sanity checks
    if os.path.exists(model_path): os.remove(model_path)
def check_mem_expected(used_exp, peaked_exp, mtrace, abs_tol=2, ctx=None):
    """Assert measured (used, peaked) memory from `mtrace` matches expectations within abs_tol."""
    used_received, peaked_received = mtrace.data()
    suffix = f" ({ctx})" if ctx is not None else ""
    assert isclose(used_exp, used_received, abs_tol=abs_tol), f"used mem: expected={used_exp} received={used_received}{suffix}"
    assert isclose(peaked_exp, peaked_received, abs_tol=abs_tol), f"peaked mem: expected={peaked_exp} received={peaked_received}{suffix}"
def report_mem_real(used_exp, peaked_exp, mtrace, abs_tol=2, ctx=None):
    """Print the real memory trace; drop-in replacement for check_mem_expected."""
    suffix = f" ({ctx})" if ctx is not None else ""
    print(f"{mtrace}{suffix}")
#check_mem_expected = report_mem_real
#@pytest.mark.skip(reason="WIP")
@pytest.mark.cuda
def test_save_load_mem_leak(data):
    """Measure GPU memory used/peaked across save/load/purge sequences (GPU-specific)."""
    learn = learn_large_unfit(data)
    name = 'mnist-tiny-test-save-load'
    #learn.fit_one_cycle(1)
    # A big difficulty with measuring memory consumption is that it varies quite
    # wildly from one GPU model to another.
    #
    # Perhaps we need sets of different expected numbers per developer's GPUs?
    # override check_mem_expected above with report_mem_real to acquire a new set
    #
    # So for now just testing the specific card I have until a better way is found.
    dev_name = torch.cuda.get_device_name(None)
    if dev_name != 'GeForce GTX 1070 Ti':
        pytest.skip(f"currently only matched for mem usage on specific GPU models, {dev_name} is not one of them")
    # save should consume no extra used or peaked memory
    with GPUMemTrace() as mtrace:
        model_path = learn.save(name, return_path=True)
    check_mem_expected(used_exp=0, peaked_exp=0, mtrace=mtrace, abs_tol=10, ctx="save")
    # load w/ purge still leaks some the first time it's run
    with GPUMemTrace() as mtrace:
        learn.load(name, purge=True)
    # XXX: very different numbers if done w/o fit first 42 8, w/ fit 24 16
    check_mem_expected(used_exp=42, peaked_exp=8, mtrace=mtrace, abs_tol=10, ctx="load")
    # subsequent multiple load w/o purge should consume no extra used memory
    with GPUMemTrace() as mtrace:
        learn.load(name, purge=False)
        learn.load(name, purge=False)
    check_mem_expected(used_exp=0, peaked_exp=20, mtrace=mtrace, abs_tol=10, ctx="load x 2")
    # subsequent multiple load w/ purge should consume no extra used memory
    with GPUMemTrace() as mtrace:
        learn.load(name, purge=True)
        learn.load(name, purge=True)
    check_mem_expected(used_exp=0, peaked_exp=20, mtrace=mtrace, abs_tol=10, ctx="load x 2 2nd time")
    # purge + load w/ default purge should consume no extra used memory
    with GPUMemTrace() as mtrace:
        learn.purge()
        learn.load(name)
    check_mem_expected(used_exp=0, peaked_exp=20, mtrace=mtrace, abs_tol=10, ctx="purge+load")
    if os.path.exists(model_path): os.remove(model_path)
| 38.08547
| 134
| 0.710727
| 704
| 4,456
| 4.353693
| 0.308239
| 0.021533
| 0.042414
| 0.041109
| 0.416639
| 0.346493
| 0.301468
| 0.274388
| 0.263948
| 0.218271
| 0
| 0.014038
| 0.184695
| 4,456
| 116
| 135
| 38.413793
| 0.829617
| 0.269973
| 0
| 0.422535
| 0
| 0
| 0.113834
| 0.040633
| 0
| 0
| 0
| 0
| 0.084507
| 1
| 0.112676
| false
| 0
| 0.098592
| 0.014085
| 0.239437
| 0.014085
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f508079561b7a2a57df3ea9bb24da6c3cf24ed29
| 13,454
|
py
|
Python
|
examples/cartpole_example/test/cartpole_PID_MPC_sim.py
|
marcosfelt/sysid-neural-structures-fitting
|
80eda427251e8cce1d2a565b5cbca533252315e4
|
[
"MIT"
] | 17
|
2019-11-15T06:27:05.000Z
|
2021-10-02T14:24:25.000Z
|
examples/cartpole_example/test/cartpole_PID_MPC_sim.py
|
marcosfelt/sysid-neural-structures-fitting
|
80eda427251e8cce1d2a565b5cbca533252315e4
|
[
"MIT"
] | null | null | null |
examples/cartpole_example/test/cartpole_PID_MPC_sim.py
|
marcosfelt/sysid-neural-structures-fitting
|
80eda427251e8cce1d2a565b5cbca533252315e4
|
[
"MIT"
] | 4
|
2020-09-03T17:01:34.000Z
|
2021-11-05T04:09:24.000Z
|
import numpy as np
import scipy.sparse as sparse
from scipy.integrate import ode
from scipy.interpolate import interp1d
import time
import control
import control.matlab
import numpy.random
import pandas as pd
from ltisim import LinearStateSpaceSystem
from pendulum_model import *
from pyMPC.mpc import MPCController
# Reference model default parameters
# Closed-loop reference dynamics (continuous-time state matrices); gain k_def
# and time constant tau_def shape the tracking response.
k_def = 5.0
tau_def = 120e-3
Acl_c_def = np.array([[0,1,0], [0, 0, k_def], [0, 0, -1/tau_def]])
Bcl_c_def = np.array([[0],
                      [k_def],
                      [1/tau_def]
                      ])
# PID default parameters
Ts_PID = 1e-3
# Reference trajectory
# Piecewise-linear position reference over time (seconds -> meters).
t_ref_vec = np.array([0.0, 5.0, 10.0, 20.0, 25.0, 30.0, 40.0, 100.0])
p_ref_vec = np.array([0.0, 0.0, 0.8, 0.8, 0.0, 0.0, 0.8, 0.8])
rp_fun = interp1d(t_ref_vec, p_ref_vec, kind='linear')
def xref_cl_fun_def(t):
    """Default closed-loop reference state at time t: [position ref, 0, 0]."""
    position_ref = rp_fun(t)
    return np.array([position_ref, 0.0, 0.0])
# MPC parameters
Ts_MPC_def = 10e-3
Qx_def = 1.0 * sparse.diags([1.0, 0, 10.0])  # Quadratic cost for states x0, x1, ..., x_N-1
QxN_def = Qx_def
Qr_def = 0.0 * sparse.eye(1)  # Quadratic cost for u0, u1, ...., u_N-1
QDr_def = 1e-1 / (Ts_MPC_def ** 2) * sparse.eye(1)  # Quadratic cost for Du0, Du1, ...., Du_N-1
# Defaults
# Simulation options consumed by get_parameter(); any key missing from the
# caller-supplied sim_options dict falls back to the value here.
DEFAULTS_PENDULUM_MPC = {
    'xref_cl_fun': xref_cl_fun_def,
    'uref': np.array([0.0]),  # N
    'std_npos': 0*0.001,  # m
    'std_nphi': 0*0.00005,  # rad
    'std_dF': 0.05,  # N
    'w_F': 20,  # rad
    'len_sim': 40,  # s
    'Acl_c': Acl_c_def,
    'Bcl_c': Bcl_c_def,
    'Ts_MPC': Ts_MPC_def,
    'Np': 100,
    'Nc': 50,
    'Qx': Qx_def,
    'QxN': QxN_def,
    'Qr': Qr_def,
    'QDr': QDr_def,
    'Q_kal': np.diag([0.1, 10, 0.1, 10]),
    'R_kal': 1*np.eye(2),
    'QP_eps_abs': 1e-3,
    'QP_eps_rel': 1e-3,
    'seed_val': None
}
def get_parameter(sim_options, par_name):
    """Return the option value from sim_options, falling back to the module defaults."""
    try:
        return sim_options[par_name]
    except KeyError:
        return DEFAULTS_PENDULUM_MPC[par_name]
def get_default_parameters(sim_options):
    """Which parameters are left to default ??"""
    return [option for option in DEFAULTS_PENDULUM_MPC
            if option not in sim_options]
def simulate_pendulum_MPC(sim_options):
    """Closed-loop simulation of a pendulum-on-cart system.

    Control structure: an MPC running at rate Ts_MPC produces an angle
    reference that an inner PID loop (rate Ts_PID) tracks; the true plant is
    integrated with a scipy ODE solver on the fast (Ts_PID) grid.

    sim_options -- dict of option overrides; any missing key falls back to
        DEFAULTS_PENDULUM_MPC via get_parameter().
    Returns a dict of logged trajectories on both the MPC and fast grids.

    NOTE(review): indentation of this listing was reconstructed from a
    whitespace-mangled dump; confirm branch nesting against the original file.
    """
    seed_val = get_parameter(sim_options, 'seed_val')
    if seed_val is not None:
        np.random.seed(seed_val)

    # In[Sample times]
    Ts_MPC = get_parameter(sim_options, 'Ts_MPC')
    ratio_Ts = int(Ts_MPC // Ts_PID)  # number of fast (PID) steps per MPC step

    # In[Real System]
    # Measured outputs: cart position (row 0) and pendulum angle (row 1).
    Cc = np.array([[1., 0., 0., 0.],
                   [0., 0., 1., 0.]])
    Cd = np.copy(Cc)
    nx, nu = 4,1
    ny = 2

    # In[initialize simulation system]
    t0 = 0
    phi0 = -0.0 * 2 * np.pi / 360  # initial angle
    x0 = np.array([0, 0, phi0, 0])  # initial state
    #system_dyn = ode(f_ODE_wrapped).set_integrator('vode', method='bdf') # dopri5
    system_dyn = ode(f_ODE_wrapped).set_integrator('dopri5')  # dopri5
    # system_dyn = ode(f_ODE_wrapped).set_integrator('dopri5')
    system_dyn.set_initial_value(x0, t0)
    system_dyn.set_f_params(0.0)

    # In[MPC params -- model]
    Acl_c = get_parameter(sim_options, 'Acl_c')
    Bcl_c = get_parameter(sim_options, 'Bcl_c')
    Ccl_c = np.array([[1., 0., 0],
                      [0., 0., 1]])
    Dcl_c = np.zeros((2, 1))
    ncl_x, ncl_u = Bcl_c.shape  # number of states and number or inputs
    #ncl_y = np.shape(Ccl_c)[0]

    # In[MPC matrices discretization]
    Acl_d = np.eye(ncl_x) + Acl_c*Ts_MPC  # forward-Euler discretization
    Bcl_d = Bcl_c*Ts_MPC
    Ccl_d = Ccl_c
    Dcl_d = Dcl_c
    x0_cl = np.array([0,0,phi0])  # initial state of the MPC prediction model
    M_cl = LinearStateSpaceSystem(A=Acl_d, B=Bcl_d, C=Ccl_d, D=Dcl_d, x0=x0_cl)

    # MPC parameters
    Np = get_parameter(sim_options, 'Np')
    Nc = get_parameter(sim_options, 'Nc')
    Qx = get_parameter(sim_options, 'Qx')
    QxN = get_parameter(sim_options, 'QxN')
    Qr = get_parameter(sim_options, 'Qr')
    QDr = get_parameter(sim_options, 'QDr')

    # Constraints (currently disabled)
    #xmin = np.array([-1.5, -100, -100])
    #xmax = np.array([1.5, 100.0, 100])
    #umin = np.array([-10])
    #umax = np.array([10])
    #Dumin = np.array([-100 * Ts_MPC_def])
    #Dumax = np.array([100 * Ts_MPC_def])
    QP_eps_rel = get_parameter(sim_options, 'QP_eps_rel')
    QP_eps_abs = get_parameter(sim_options, 'QP_eps_abs')

    # Emergency exit conditions
    # NOTE(review): EMERGENCY_POS / EMERGENCY_ANGLE thresholds appear unused
    # in this function -- confirm whether the check was removed intentionally.
    EMERGENCY_STOP = False
    EMERGENCY_POS = 2.0
    EMERGENCY_ANGLE = 30 * DEG_TO_RAD

    # Reference input and states
    xref_cl_fun = get_parameter(sim_options, 'xref_cl_fun')  # reference state
    xref_cl_fun_v = np.vectorize(xref_cl_fun, signature='()->(n)')
    t0 = 0
    xref_MPC = xref_cl_fun(t0)
    uref = get_parameter(sim_options, 'uref')
    # Input at time step -1, used to penalize the first delta-u at instant 0.
    # Could be the same as uref.
    uminus1 = np.array([0.0])
    kMPC = MPCController(Acl_d, Bcl_d, Np=Np, Nc=Nc, x0=x0_cl, xref=xref_MPC, uminus1=uminus1,
                         Qx=Qx, QxN=QxN, Qu=Qr, QDu=QDr,
                         eps_feas=1e3, eps_rel=QP_eps_rel, eps_abs=QP_eps_abs)
    try:
        kMPC.setup(solve=True)  # setup initial problem and also solve it
    except:
        # NOTE(review): bare except maps any setup failure to EMERGENCY_STOP.
        EMERGENCY_STOP = True
    if not EMERGENCY_STOP:
        if kMPC.res.info.status != 'solved':
            EMERGENCY_STOP = True

    # In[initialize PID]
    # Default controller parameters -
    P = -100.0
    I = -1
    D = -20
    N = 100.0  # derivative filter coefficient
    kP = control.tf(P,1, Ts_PID)
    kI = I*Ts_PID*control.tf([0, 1], [1,-1], Ts_PID)
    kD = D*control.tf([N, -N], [1.0, Ts_PID*N - 1], Ts_PID)
    PID_tf = kP + kD + kI
    PID_ss = control.ss(PID_tf)
    k_PID = LinearStateSpaceSystem(A=PID_ss.A, B=PID_ss.B, C=PID_ss.C, D=PID_ss.D)

    # In[initialize noise]
    # Standard deviation of the measurement noise on position and angle
    std_npos = get_parameter(sim_options, 'std_npos')
    std_nphi = get_parameter(sim_options, 'std_nphi')

    # Force disturbance
    std_dF = get_parameter(sim_options, 'std_dF')

    # Disturbance power spectrum
    w_F = get_parameter(sim_options, 'w_F')  # bandwidth of the force disturbance
    tau_F = 1 / w_F
    Hu = control.TransferFunction([1], [1 / w_F, 1])
    Hu = Hu * Hu
    Hud = control.matlab.c2d(Hu, Ts_PID)
    N_sim_imp = tau_F / Ts_PID * 20
    t_imp = np.arange(N_sim_imp) * Ts_PID
    t, y = control.impulse_response(Hud, t_imp)
    y = y[0]
    std_tmp = np.sqrt(np.sum(y ** 2))  # np.sqrt(trapz(y**2,t))
    Hu = Hu / (std_tmp) * std_dF  # scale so the filtered noise has std std_dF
    N_skip = int(20 * tau_F // Ts_PID)  # skip initial samples to get a regime sample of d
    t_sim_d = get_parameter(sim_options, 'len_sim')  # simulation length (s)
    N_sim_d = int(t_sim_d // Ts_PID)
    N_sim_d = N_sim_d + N_skip + 1
    e = np.random.randn(N_sim_d)
    te = np.arange(N_sim_d) * Ts_PID
    _, d, _ = control.forced_response(Hu, te, e)
    d = d.ravel()
    # NOTE(review): the disturbance d is generated but never applied below
    # (Fd_vec_fast stays 0 and u_TOT = u_PID) -- confirm this is intended.

    # Simulate in closed loop
    len_sim = get_parameter(sim_options, 'len_sim')  # simulation length (s)
    nsim = int(len_sim // Ts_MPC)  #int(np.ceil(len_sim / Ts_MPC)) # simulation length(timesteps) # watch out! +1 added, is it correct?
    t_vec = np.zeros((nsim, 1))
    status_vec = np.zeros((nsim,1))
    x_vec = np.zeros((nsim, nx))
    x_ref_vec = np.zeros((nsim, ncl_x))
    y_vec = np.zeros((nsim, ny))
    y_meas_vec = np.zeros((nsim, ny))
    u_vec = np.zeros((nsim, nu))
    x_model_vec = np.zeros((nsim,3))

    nsim_fast = int(len_sim // Ts_PID)
    t_vec_fast = np.zeros((nsim_fast, 1))
    x_vec_fast = np.zeros((nsim_fast, nx))  # finer integration grid for performance evaluation
    ref_phi_vec_fast = np.zeros((nsim_fast, 1))
    y_meas_vec_fast = np.zeros((nsim_fast, ny))
    x_ref_vec_fast = np.zeros((nsim_fast, nx))  # finer integration grid for performance evaluation
    u_vec_fast = np.zeros((nsim_fast, nu))  # finer integration grid for performance evaluation
    Fd_vec_fast = np.zeros((nsim_fast, nu))  #
    t_int_vec_fast = np.zeros((nsim_fast, 1))
    emergency_vec_fast = np.zeros((nsim_fast, 1))  #

    t_step = t0
    x_step = x0
    # NOTE(review): u_PID is still None when the first MPC sample is logged
    # into u_vec -- verify this does not raise on the float array assignment.
    u_PID = None
    # Precompute the state reference over the whole horizon (nsim + Np + 1 samples).
    t_pred_all = t0 + np.arange(nsim + Np + 1) * Ts_MPC
    Xref_MPC_all = xref_cl_fun_v(t_pred_all)

    for idx_fast in range(nsim_fast):
        ## Determine step type: fast simulation only or MPC step
        idx_MPC = idx_fast // ratio_Ts
        run_MPC_controller = (idx_fast % ratio_Ts) == 0

        y_step = Cd.dot(x_step)  # y[i] from the system
        ymeas_step = np.copy(y_step)
        ymeas_step[0] += std_npos * np.random.randn()  # noisy position
        ymeas_step[1] += std_nphi * np.random.randn()  # noisy angle
        y_meas_vec_fast[idx_fast,:] = ymeas_step

        # Output for step i
        # Ts_MPC outputs
        if run_MPC_controller:  # it is also a step of the simulation at rate Ts_MPC
            if idx_MPC < nsim:
                t_vec[idx_MPC, :] = t_step
                y_vec[idx_MPC,:] = y_step
                y_meas_vec[idx_MPC,:] = ymeas_step
                u_vec[idx_MPC, :] = u_PID
                x_model_vec[idx_MPC, :] = M_cl.x.ravel()
                xref_MPC = xref_cl_fun(t_step)
                x_ref_vec[idx_MPC,:] = xref_MPC.ravel()
            if not EMERGENCY_STOP:
                phi_ref_MPC, info_MPC = kMPC.output(return_status=True)  # u[i] = k(\hat x[i]) possibly computed at time instant -1
            else:
                phi_ref_MPC = np.zeros(nu)

        # PID angle CONTROLLER (runs every fast step, tracking the MPC angle ref)
        ref_phi = phi_ref_MPC.ravel()
        error_phi = ref_phi - ymeas_step[1]
        u_PID = k_PID.output(error_phi)
        u_PID[u_PID > 10.0] = 10.0  # saturate the force command at +/-10 N
        u_PID[u_PID < -10.0] = -10.0
        u_TOT = u_PID

        # Ts_fast outputs
        t_vec_fast[idx_fast,:] = t_step
        x_vec_fast[idx_fast, :] = x_step  #system_dyn.y
        u_vec_fast[idx_fast,:] = u_TOT
        Fd_vec_fast[idx_fast,:] = 0.0
        ref_phi_vec_fast[idx_fast,:] = ref_phi

        ## Update to step i+1
        k_PID.update(error_phi)

        # Controller simulation step at rate Ts_MPC
        if run_MPC_controller:
            M_cl.update(ref_phi)
            if not EMERGENCY_STOP:
                x_cl = np.array([x_step[0], x_step[1], x_step[2]])
                Xref_MPC = Xref_MPC_all[idx_MPC:idx_MPC + Np + 1]
                xref_MPC = Xref_MPC_all[idx_MPC]
                kMPC.update(x_cl, phi_ref_MPC, xref=xref_MPC)  # update with measurement and reference

        # System simulation step at rate Ts_fast
        time_integrate_start = time.perf_counter()
        system_dyn.set_f_params(u_TOT)
        system_dyn.integrate(t_step + Ts_PID)
        x_step = system_dyn.y
        t_int_vec_fast[idx_fast,:] = time.perf_counter() - time_integrate_start

        # Time update
        t_step += Ts_PID

    simout = {'t': t_vec, 'x': x_vec, 'u': u_vec, 'y': y_vec, 'y_meas': y_meas_vec, 'x_ref': x_ref_vec, 'status': status_vec, 'Fd_fast': Fd_vec_fast,
              't_fast': t_vec_fast, 'x_fast': x_vec_fast, 'x_ref_fast': x_ref_vec_fast, 'u_fast': u_vec_fast, 'y_meas_fast': y_meas_vec_fast, 'emergency_fast': emergency_vec_fast,
              'PID_tf': PID_tf, 'Ts_MPC': Ts_MPC, 'ref_phi_fast': ref_phi_vec_fast, 'x_model': x_model_vec,
              't_int_fast': t_int_vec_fast
              }

    return simout
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    import matplotlib

    plt.close('all')

    # Run one simulation with all-default options and time it.
    simopt = DEFAULTS_PENDULUM_MPC
    time_sim_start = time.perf_counter()
    simout = simulate_pendulum_MPC(simopt)
    time_sim = time.perf_counter() - time_sim_start

    # Unpack logged trajectories (MPC grid and fast grid).
    t = simout['t']
    x = simout['x']
    u = simout['u']
    y = simout['y']
    y_meas = simout['y_meas']
    x_ref = simout['x_ref']
    x_fast = simout['x_fast']
    y_meas_fast = simout['y_meas_fast']
    u_fast = simout['u_fast']
    x_model = simout['x_model']
    t_fast = simout['t_fast']
    x_ref_fast = simout['x_ref_fast']
    F_input = simout['Fd_fast']
    status = simout['status']
    ref_phi_fast = simout['ref_phi_fast']
    uref = get_parameter(simopt, 'uref')
    nsim = len(t)
    nx = x.shape[1]
    ny = y.shape[1]
    y_ref = x_ref[:, [0, 2]]  # position and angle columns of the state reference

    # Plot position, speed, angle and force on a shared time axis.
    fig,axes = plt.subplots(4,1, figsize=(10,10), sharex=True)
    axes[0].plot(t, y_meas[:, 0], "b", label='p_meas')
    axes[0].plot(t_fast, x_fast[:, 0], "k", label='p')
    axes[0].plot(t, x_model[:, 0], "r", label='p model')
    axes[0].plot(t, x_ref[:, 0], "k--", label='p reference')
    axes[0].set_ylim(-2.0,2.0)
    axes[0].set_title("Position (m)")

    axes[1].plot(t_fast, x_fast[:, 1], "k", label='v')
    axes[1].plot(t, x_model[:, 1], "r", label='v model')
    axes[1].set_ylim(-3,3.0)
    axes[1].set_title("Speed (m/s)")

    axes[2].plot(t, y_meas[:, 1]*RAD_TO_DEG, "b", label='phi_meas')
    axes[2].plot(t_fast, x_fast[:, 2]*RAD_TO_DEG, 'k', label="phi")
    axes[2].plot(t, x_model[:, 2]*RAD_TO_DEG, "r", label='phi model')
    axes[2].plot(t_fast, ref_phi_fast[:,0]*RAD_TO_DEG, "k--", label="phi_ref")
    axes[2].set_ylim(-20,20)
    axes[2].set_title("Angle (deg)")

    axes[3].plot(t, u[:,0], label="F")
    axes[3].plot(t_fast, F_input, "k", label="Fd")
    axes[3].plot(t, uref*np.ones(np.shape(t)), "r--", label="F_ref")
    axes[3].set_ylim(-20,20)
    axes[3].set_title("Force (N)")

    for ax in axes:
        ax.grid(True)
        ax.legend()

    # Export fast-grid trajectories to CSV; column names must match the
    # hstack order (time, states, input, measurements, disturbance).
    X = np.hstack((t_fast, x_fast, u_fast, y_meas_fast, F_input))
    COL_T = ['time']
    COL_X = ['p', 'v', 'theta', 'omega']
    COL_U = ['u']
    COL_D = ['d']
    COL_Y = ['p_meas', 'theta_meas']
    COL = COL_T + COL_X + COL_U + COL_Y + COL_D
    df_X = pd.DataFrame(X, columns=COL)
    df_X.to_csv("pendulum_data_PID.csv", index=False)
| 33.219753
| 179
| 0.612829
| 2,278
| 13,454
| 3.330553
| 0.147498
| 0.008699
| 0.041518
| 0.060894
| 0.185317
| 0.114802
| 0.07816
| 0.050481
| 0.043627
| 0.027943
| 0
| 0.03406
| 0.242753
| 13,454
| 404
| 180
| 33.30198
| 0.71064
| 0.156162
| 0
| 0.031579
| 0
| 0
| 0.058469
| 0.001863
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014035
| false
| 0
| 0.049123
| 0.007018
| 0.077193
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f50910b14f5b09655a9e1eaecc696a5cfe950b0f
| 4,923
|
py
|
Python
|
settings.py
|
msetzu/data-mining
|
9e01d00964004dea4a2aea88dfe855f785302ef1
|
[
"MIT"
] | 1
|
2018-10-09T14:41:59.000Z
|
2018-10-09T14:41:59.000Z
|
settings.py
|
msetzu/data-mining
|
9e01d00964004dea4a2aea88dfe855f785302ef1
|
[
"MIT"
] | null | null | null |
settings.py
|
msetzu/data-mining
|
9e01d00964004dea4a2aea88dfe855f785302ef1
|
[
"MIT"
] | null | null | null |
import pandas as pd
from matplotlib.colors import LinearSegmentedColormap
# Dataset
# Loaded at import time: the working directory must contain hr.csv.
data = pd.read_csv("./hr.csv")
entries = len(data)
bins = 10

# Data analysis
analysis = {
    "bins": 10,
    "balance_threshold": 0.1
}

# Plot labels: the dataset's column names, in canonical order.
labels = ["satisfaction_level",
          "average_montly_hours",
          "last_evaluation",
          "time_spend_company",
          "number_project",
          "Work_accident",
          "left",
          "promotion_last_5years",
          "sales",
          "salary"]
# Human-readable names, positionally paired with `labels` via zip() below.
pretty_prints = ["Self-reported satisfaction",
                 "AVG Monthly hours",
                 "Time since last valuation, in years",
                 "Time in company, in years",
                 "Projects",
                 "Accidents",
                 "Left",
                 "Promoted (last 5 years)",
                 "Department",
                 "Salary"]
# NOTE(review): this ordering does not appear to match `labels`
# (e.g. "Injuries" pairs with satisfaction_level) -- verify before relying
# on short_labels_pretty_print.
short_pretty_prints = ["Injuries",
                       "Work hours",
                       "Last evaluation",
                       "Left",
                       "Projects",
                       "Promotion",
                       "Wage",
                       "Satisfaction",
                       "Years in company",
                       "Dpt."]
departments_pretty_prints = ["Information Technology",
                             "R&D",
                             "Accounting",
                             "Human Resources",
                             "Management",
                             "Marketing",
                             "Product Management",
                             "Sales",
                             "Support",
                             "Technical"]
labels_pretty_print = {k: v for k, v in zip(labels, pretty_prints)}
short_labels_pretty_print = {k: v for k, v in zip(labels, short_pretty_prints)}
# "salary_int" is presumably a derived numeric salary column created
# elsewhere -- TODO confirm against the notebook/scripts using this module.
labels_pretty_print["salary_int"] = "Salary"

# Column groups by measurement scale (slices of `labels`).
continuous_labels = labels[0:2]
discrete_labels = labels[2:5]
categorical_labels = labels[5:-1]
ordinal_labels = labels[-1:]
correlated_labels = continuous_labels + discrete_labels + ["salary_int"]
categorical_labels_pretty_prints = {
    "Work_accident": ("Not Injured", "Injured"),
    "left": ("Stayed", "Left"),
    "promotion_last_5years": ("Not promoted", "Promoted"),
    "sales": tuple(departments_pretty_prints)
}
ordinal_labels_pretty_prints = {
    "salary": ("Low", "Medium", "High"),
}
ordered_ordinal_vars = {
    "salary": ["low", "medium", "high"]
}
departments = set(data["sales"])

# Scatter plot
scatter = {
    "sampling_size": 100,  # size of each sample
    "samples": 5,  # number of samples to extract
    "edge_bins": 1,  # edge bins possibly containing outliers
    "bins": 10,
    "replace": True
}
# NOTE(review): "clusetering" is a typo of "clustering"; the name is kept
# because other modules may import it as-is.
clusetering_types = ["normal", "discrete", "raw"]

# Graphs
palette = {
    "main": "#FE4365",
    "complementary": "#FC9D9A",
    "pr_complementary": "#F9CDAD",
    "sc_complementary": "#C8C8A9",
    "secondary": "#83AF9B"
}
round_palette = {
    "main": palette["secondary"],
    "secondary": palette["complementary"],
    "pr_complementary": palette["sc_complementary"],
    "sc_complementary": palette["secondary"]
}
large_palette = {
    "navy": "#001f3f",
    "blue": "#0074D9",
    "green": "#2ECC40",
    "olive": "#3D9970",
    "orange": "#FF851B",
    "yellow": "#FFDC00",
    "red": "#FF4136",
    "maroon": "#85144b",
    "black": "#111111",
    "grey": "#AAAAAA"
}
large_palette_full = {
    "navy": "#001f3f",
    "blue": "#0074D9",
    "aqua": "#7FDBFF",
    "teal": "#39CCCC",
    "olive": "#3D9970",
    "green": "#2ECC40",
    "lime": "#01FF70",
    "yellow": "#FFDC00",
    "orange": "#FF851B",
    "red": "#FF4136",
    "maroon": "#85144b",
    "fuchsia": "#F012BE",
    "purple": "#B10DC9",
    "black": "#111111",
    "grey": "#AAAAAA",
    "silver": "#DDDDDD"
}
large_palette_stacked = {
    "navy": "#001f3f",
    "blue": "#0074D9",
    "olive": "#3D9970",
    "orange": "#FF851B",
    "green": "#2ECC40",
    "yellow": "#FFDC00",
    "red": "#FF4136",
    "maroon": "#85144b",
    "black": "#111111",
    "grey": "#AAAAAA",
    "stack": large_palette["orange"]
}
# Continuous colormaps interpolating across the project palette.
cmap_pale_pink = LinearSegmentedColormap.from_list("Pale pink",
                                                   [palette["pr_complementary"], palette["main"]],
                                                   N=1000000)
cmap_pale_pink_and_green = LinearSegmentedColormap.from_list("Pale pink&green",
                                                             [palette["main"],
                                                              palette["complementary"],
                                                              palette["pr_complementary"],
                                                              palette["sc_complementary"],
                                                              palette["secondary"]],
                                                             N=1000000)
| 30.018293
| 98
| 0.487508
| 407
| 4,923
| 5.712531
| 0.425061
| 0.04129
| 0.021935
| 0.025806
| 0.141935
| 0.076559
| 0.076559
| 0.076559
| 0.076559
| 0.076559
| 0
| 0.055716
| 0.365631
| 4,923
| 163
| 99
| 30.202454
| 0.688761
| 0.028641
| 0
| 0.256944
| 0
| 0
| 0.307144
| 0.0088
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.013889
| 0
| 0.013889
| 0.0625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f509ca15e0e12b426c5e187595364f7eea92a920
| 397
|
py
|
Python
|
GCD - Euclidean (Basic)/Python3/gcdEuclid.py
|
i-vishi/ds-and-algo
|
90a8635db9570eb17539201be29ec1cfd4b5ae18
|
[
"MIT"
] | 1
|
2021-03-01T04:15:08.000Z
|
2021-03-01T04:15:08.000Z
|
GCD - Euclidean (Basic)/Python3/gcdEuclid.py
|
i-vishi/ds-and-algo
|
90a8635db9570eb17539201be29ec1cfd4b5ae18
|
[
"MIT"
] | null | null | null |
GCD - Euclidean (Basic)/Python3/gcdEuclid.py
|
i-vishi/ds-and-algo
|
90a8635db9570eb17539201be29ec1cfd4b5ae18
|
[
"MIT"
] | null | null | null |
# Author: Vishal Gaur
# Created: 17-01-2021 20:31:34
# function to find GCD using Basic Euclidean Algorithm
def gcdEuclid(a, b):
    """Compute the greatest common divisor of a and b.

    Iterative form of the basic Euclidean algorithm: repeatedly replace
    (a, b) by (b mod a, a) until a reaches 0, then b holds the GCD.
    """
    while a != 0:
        a, b = b % a, a
    return b
# Driver code exercising gcdEuclid on two sample pairs.
for a, b in ((14, 35), (56, 125)):
    g = gcdEuclid(a, b)
    print("GCD of", a, "&", b, "is: ", g)
| 17.26087
| 54
| 0.566751
| 69
| 397
| 3.26087
| 0.550725
| 0.044444
| 0.146667
| 0.106667
| 0.24
| 0.24
| 0.24
| 0.24
| 0.24
| 0.24
| 0
| 0.083045
| 0.27204
| 397
| 22
| 55
| 18.045455
| 0.695502
| 0.352645
| 0
| 0.307692
| 0
| 0
| 0.087302
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0
| 0
| 0.230769
| 0.153846
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f50f1a90c240661a8974cc7923b38f46dce70bae
| 29,856
|
py
|
Python
|
views.py
|
milos-korenciak/2018.ossconf.sk
|
f121dde4f313a207e39c2f2e187bdad046b86592
|
[
"MIT"
] | 7
|
2017-07-16T05:59:07.000Z
|
2018-01-22T09:35:21.000Z
|
views.py
|
milos-korenciak/2018.ossconf.sk
|
f121dde4f313a207e39c2f2e187bdad046b86592
|
[
"MIT"
] | 17
|
2017-07-31T20:35:24.000Z
|
2018-02-26T22:00:12.000Z
|
views.py
|
milos-korenciak/2018.ossconf.sk
|
f121dde4f313a207e39c2f2e187bdad046b86592
|
[
"MIT"
] | 13
|
2017-08-01T17:03:40.000Z
|
2021-11-02T13:24:30.000Z
|
#!/usr/bin/python
# -*- coding: utf8 -*-
import os
import re
import textwrap
import requests
import unicodedata
from datetime import datetime, timedelta
from flask import Flask, g, request, render_template, abort, make_response
from flask_babel import Babel, gettext
from jinja2 import evalcontextfilter, Markup
# Flask application with Babel i18n; Slovak is the fallback locale.
app = Flask(__name__, static_url_path='/static')
app.config['BABEL_DEFAULT_LOCALE'] = 'sk'
app.jinja_options = {'extensions': ['jinja2.ext.with_', 'jinja2.ext.i18n']}
babel = Babel(app)

# Site-wide constants.
EVENT = gettext('PyCon SK 2018')
DOMAIN = 'https://2018.pycon.sk'
API_DOMAIN = 'https://api.pycon.sk'
LANGS = ('en', 'sk')  # languages accepted in the URL's lang_code segment
TIME_FORMAT = '%Y-%m-%dT%H:%M:%S+00:00'
NOW = datetime.utcnow().strftime(TIME_FORMAT)  # frozen at import time
SRC_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__)))
LOGO_PYCON = 'logo/pycon_logo_square.svg'
# schema.org JSON-LD descriptor of the organizing association (SPy o. z.).
LDJSON_SPY = {
    "@type": "Organization",
    "name": "SPy o. z.",
    "url": "https://spy.pycon.sk",
    "logo": "https://spy.pycon.sk/img/logo/spy-logo.png",
    "sameAs": [
        "https://facebook.com/pyconsk",
        "https://twitter.com/pyconsk",
        "https://www.linkedin.com/company/spy-o--z-",
        "https://github.com/pyconsk",
    ]
}
# schema.org JSON-LD Event descriptor for the conference itself.
# NOTE: _get_template_variables() mutates this dict's 'url' key per request.
LDJSON_PYCON = {
    "@context": "http://schema.org",
    "@type": "Event",
    "name": EVENT,
    "description": gettext("PyCon will be back at Slovakia in 2018 again. PyCon SK is a community-organized conference "
                           "for the Python programming language."),
    "startDate": "2018-03-09T9:00:00+01:00",
    "endDate": "2018-03-11T18:00:00+01:00",
    "image": DOMAIN + "/static/img/logo/pycon_long_2018.png",
    "location": {
        "@type": "Place",
        "name": "FIIT STU",
        "address": {
            "@type": "PostalAddress",
            "streetAddress": "Ilkovičova 2",
            "addressLocality": "Bratislava 4",
            "postalCode": "842 16",
            "addressCountry": gettext("Slovak Republic")
        },
    },
    "url": DOMAIN,
    "workPerformed": {
        "@type": "CreativeWork",
        "name": EVENT,
        "creator": LDJSON_SPY
    }
}
# calendar settings
ICAL_LEN = 70  # length of a calendar (ical) line
ICAL_NL = '\\n\n'  # calendar newline
IGNORE_TALKS = ['Break', 'Coffee Break']  # schedule slots excluded from the .ics feed

# Slot types and topic tags (translated for display).
TYPE = {
    'talk': gettext('Talk'),
    'workshop': gettext('Workshop'),
}
TAGS = {
    'ai': gettext('Machine Learning / AI'),
    'community': gettext('Community / Diversity / Social'),
    'data': gettext('Data Science'),
    'devops': 'DevOps',
    'docs': gettext('Documentation'),
    'edu': gettext('Education'),
    'generic': gettext('Python General'),
    'security': gettext('Security'),
    'softskills': gettext('Soft Skills'),
    'hardware': gettext('Hardware'),
    'web': gettext('Web Development'),
    'other': gettext('Other'),
}

# Opening times of each conference day.
FRIDAY_START = datetime(2018, 3, 9, hour=9)
SATURDAY_START = datetime(2018, 3, 10, hour=9)
SUNDAY_START = datetime(2018, 3, 11, hour=10, minute=15)
# Friday schedule, in running order. Each entry holds the break length that
# FOLLOWS the slot ('pause', minutes) and the talk title used to match the
# API data in generate_track(); extra keys override/augment missing API data.
FRIDAY_TRACK1 = (
    {"pause": 5, 'title': gettext("Conference Opening"), 'duration': 25, 'flag': 'other', 'type': 'talk'},
    {"pause": 15, 'title': gettext("FaaS and Furious - Zero to Serverless in 60 seconds - Anywhere")},
    {"pause": 15, 'title': gettext("Docs or it didn't happen")},
    {"pause": 5, 'title': gettext("GraphQL is the new black")},
    {"pause": 60, 'title': gettext("To the Google in 80 Days")},
    {"pause": 5, 'title': gettext("Unsafe at Any Speed")},
    {"pause": 15, 'title': gettext("Protecting Privacy and Security — For Yourself and Your Community")},
    {"pause": 5, 'title': gettext("ZODB: The Graph database for Python Developers.")},
    {"pause": 15, 'title': gettext("Differentiable programming in Python and Gluon for (not only medical) image analysis")},
    {"pause": 5, 'title': gettext("Vim your Python, Python your Vim")},
)
FRIDAY_TRACK2 = (
    {"pause": 5, 'title': gettext("Conference Opening in Kiwi.com Hall"), 'duration': 25},
    {"pause": 5, 'title': gettext("Python Days in Martin and follow-up activities")},
    {"pause": 15, 'title': gettext("Python programming till graduation")},
    {"pause": 5, 'title': gettext("Open educational resources for learning Python")},
    {"pause": 60, 'title': gettext("About Ninjas and Mentors: CoderDojo in Slovakia")},
    {"pause": 5, 'title': gettext("Community based courses")},
    {"pause": 15, 'title': gettext("How do we struggle with Python in Martin?")},
    {"pause": 5, 'title': gettext("Why hardware attracts kids and adults to IT")},
    {"pause": 5, 'title': gettext("Panel discussion: Teaching IT in Slovakia - where is it heading?")},
    {"pause": 5, 'title': gettext("EDU Talks"), 'duration': 30, 'language': 'SK', 'flag': 'edu', 'type': 'talk'},
)
FRIDAY_WORKSHOPS1 = (
    {"pause": 10, 'title': gettext("How to create interactive maps in Python / R")},
    {"pause": 60, 'title': gettext("Working with XML")},
    {"pause": 5, 'title': gettext("Managing high-available applications in production")},
)
FRIDAY_WORKSHOPS2 = (
    {"pause": 40, 'title': gettext("Workshop: An Introduction to Ansible")},
    {"pause": 5, 'title': gettext("Introduction to Machine Learning with Python")},
)
FRIDAY_HALLWAY = (
    {"pause": 0, 'title': gettext("OpenPGP key-signing party"), 'duration': 30, 'link': 'https://github.com/pyconsk/2018.pycon.sk/tree/master/openpgp-key-signing-party', 'flag': 'security'},
)
# Saturday schedule; same slot format as the Friday tuples above.
SATURDAY_TRACK1 = (
    {"pause": 5, 'title': gettext("Conference Opening"), 'duration': 25, 'flag': 'other', 'type': 'talk'},
    {"pause": 5, 'title': gettext("Solutions Reviews")},
    {"pause": 15, 'title': gettext("Campaign Automation & Abusing Celery Properly")},
    {"pause": 5, 'title': gettext("The Truth about Mastering Big Data")},
    {"pause": 5, 'title': gettext("Industrial Machine Learning: Building scalable distributed machine learning pipelines with Python")},
    {"pause": 25, 'title': gettext("Programming contest Semi finale"), 'duration': 30, 'flag': 'other', 'link': 'https://app.pycon.sk'},
    {"pause": 5, 'title': gettext("Pythonic code, by example")},
    {"pause": 15, 'title': gettext("Our DevOps journey, is SRE the next stop?")},
    {"pause": 5, 'title': gettext("Implementing distributed systems with Consul")},
    {"pause": 15, 'title': gettext("Designing fast and scalable Python MicroServices with django")},
    {"pause": 5, 'title': gettext("When your wetware has too many threads - Tips from an ADHDer on how to improve your focus")},
    {"pause": 5, 'title': gettext("Programming Python as performance: live coding with FoxDot")},
    {"pause": 5, 'title': gettext("Programming Contest Grand Finale"), 'duration': 30, 'flag': 'other', 'type': 'talk', 'language': 'EN'},
    {"pause": 5, 'title': gettext("Lightning Talks"), 'duration': 45, 'flag': 'other', 'type': 'talk'},
)
SATURDAY_TRACK2 = (
    {"pause": 5, 'title': gettext("Conference Opening in Kiwi.com Hall"), 'duration': 25},
    {"pause": 5, 'title': gettext("Meteo data in Python. Effectively.")},
    {"pause": 15, 'title': gettext("Around the World in 30 minutes")},
    {"pause": 5, 'title': gettext("LOCKED SHIELDS: What a good cyber testing looks like")},
    {"pause": 60, 'title': gettext("Kiwi.com in ZOO")},
    {"pause": 5, 'title': gettext("Keynote in Kiwi.com Hall"), 'duration': 30, 'flag': 'generic', 'type': 'talk'},
    {"pause": 15, 'title': gettext("Skynet your Infrastructure with QUADS")},
    {"pause": 5, 'title': gettext("Automated network OS testing")},
    {"pause": 15, 'title': gettext("Tools to interact with Bitcoin and Ethereum")},
    {"pause": 5, 'title': gettext("7 Steps to a Clean Issue Tracker")},
    {"pause": 5, 'title': gettext("The Concierge Paradigm")},
)
SATURDAY_WORKSHOPS1 = (
    {"pause": 55, 'title': gettext("Effectively running python applications in Kubernetes/OpenShift")},
    {"pause": 5, 'title': gettext("Roboworkshop")},
)
SATURDAY_WORKSHOPS2 = (
    {"pause": 55, 'title': gettext("Microbit:Slovakia")},
    {"pause": 5, 'title': gettext("Coding in Python: A high-school programming lesson")},
)
SATURDAY_HALLWAY1 = (
    {"pause": 0, 'title': gettext("Pandas documentation sprint"), 'duration': 360, 'link': 'https://python-sprints.github.io/pandas/', 'flag': 'docs'},
)
SATURDAY_HALLWAY2 = (
    {"pause": 145, 'title': gettext("Programming contest"), 'duration': 95, 'flag': 'other', 'link': 'https://app.pycon.sk'},
    {"pause": 5, 'title': gettext("Conference organizers meetup"), 'duration': 30, 'flag': 'community'},
)
# Sunday schedule; same slot format as the Friday tuples above.
SUNDAY_TRACK1 = (
    {"pause": 5, 'title': gettext("Charon and the way out from a pickle hell")},
    {"pause": 15, 'title': gettext("Making Python Behave")},
    {"pause": 5, 'title': gettext("“Secret” information about the code we write")},
    {"pause": 60, 'title': gettext("How to connect objects with each other in different situations with Pythonic ways - association, aggregation, composition and etc.")},
    {"pause": 5, 'title': gettext("APIs: Gateway to world's data")},
    {"pause": 15, 'title': gettext("Getting started with HDF5 and PyTables")},
    {"pause": 5, 'title': gettext("Real-time personalized recommendations using embeddings")},
    {"pause": 5, 'title': gettext("Quiz"), 'duration': 30, 'flag': 'other', 'type': 'talk'},
)
SUNDAY_WORKSHOPS1 = (
    {"pause": 40, 'title': gettext("Real-time transcription and sentiment analysis of audio streams; on the phone and in the browser")},
    {"pause": 5, 'title': gettext("Learn MongoDB by modeling PyPI in a document database")},
)
SUNDAY_WORKSHOPS2 = (
    {"pause": 15, 'title': gettext("Testing Essentials for Scientists and Engineers")},
    {"pause": 5, 'title': gettext("Cython: Speed up your code without going insane")},
)
SUNDAY_WORKSHOPS3 = (
    {"pause": 15, 'title': gettext("Meet the pandas")},
    {"pause": 5, 'title': gettext("Serverless with OpenFaaS and Python")},
)
SUNDAY_WORKSHOPS4 = (
    {"pause": 5, 'title': gettext("Django Girls"), 'duration': 540, 'flag': 'web', 'type': 'workshop'},
)
SUNDAY_HALLWAY = (
    {"pause": 5, 'title': gettext("Documentation clinic/helpdesk")},
)
# Venue rooms: display name plus the FIIT room number shown in the schedule.
AULA1 = {
    'name': gettext('Kiwi.com Hall'),
    'number': '-1.61',
}
AULA2 = {
    'name': gettext('Python Software Foundation Hall'),
    'number': '-1.65',
}
AULA3 = {
    'name': gettext('SPy - Hall A'),
    'number': '-1.57',
}
AULA4 = {
    'name': gettext('SPy - Hall B'),
    'number': '-1.57',
}
AULA5 = {
    'name': gettext('Django Girls Auditorium'),
    'number': '+1.31',
}
HALLWAY = {
    'name': gettext('Hallway'),
    'number': '',
}
def get_conference_data(url='', filters='', timeout=10):
    """Connect to API and get public talks and speakers data.

    url -- path appended to API_DOMAIN (e.g. '/event/2018/talks/').
    filters -- extra query-string fragment. NOTE(review): it is joined with
        '&', which assumes *url* already carries a '?' query part -- confirm.
    timeout -- seconds before the HTTP request is aborted (new, defaulted,
        so existing callers are unaffected).
    Returns the decoded JSON payload.
    Raises requests exceptions on network errors or invalid JSON.
    """
    full_url = API_DOMAIN + url
    if filters:
        full_url = full_url + '&' + filters
    # requests has NO default timeout: without one a stuck server would hang
    # this call (and module import, see the calls below) forever.
    r = requests.get(full_url, timeout=timeout)
    return r.json()
# Fetched once at import time; a network failure here aborts app start-up.
API_DATA_SPEAKERS = get_conference_data(url='/event/2018/speakers/')
API_DATA_TALKS = get_conference_data(url='/event/2018/talks/')
@app.before_request
def before():
    """Pull 'lang_code' out of the route args into flask.g before each request.

    Unknown languages get a 404; on success the arg is removed so view
    functions do not receive it.
    """
    view_args = request.view_args
    if not view_args or 'lang_code' not in view_args:
        return
    lang = view_args['lang_code']
    g.current_lang = lang
    if lang not in LANGS:
        return abort(404)
    view_args.pop('lang_code')
@babel.localeselector
def get_locale():
    """Return the active locale: g.current_lang when set by before(), else the
    configured default ('sk')."""
    default_locale = app.config['BABEL_DEFAULT_LOCALE']
    return g.get('current_lang', default_locale)
@app.template_filter()
@evalcontextfilter
def linebreaks(eval_ctx, value):
    """Template filter: wrap blank-line-separated paragraphs in <p>, turning
    single newlines into <br />."""
    normalized = re.sub(r'\r\n|\r|\n', '\n', value)  # normalize newlines
    paragraphs = [
        u'<p>%s</p>' % para.replace('\n', '<br />')
        for para in re.split('\n{2,}', normalized)
    ]
    return Markup(u'\n\n'.join(paragraphs))
@app.template_filter()
@evalcontextfilter
def linebreaksbr(eval_ctx, value):
    """Template filter: like linebreaks, but without the <p> wrappers --
    single newlines become <br />, paragraph gaps stay as blank lines."""
    normalized = re.sub(r'\r\n|\r|\n', '\n', value)  # normalize newlines
    paragraphs = [
        u'%s' % para.replace('\n', '<br />')
        for para in re.split('\n{2,}', normalized)
    ]
    return Markup(u'\n\n'.join(paragraphs))
@app.template_filter()
@evalcontextfilter
def strip_accents(eval_ctx, value):
    """Template filter: transliterate to plain ASCII by decomposing accented
    characters (NFKD) and dropping the combining marks."""
    decomposed = unicodedata.normalize('NFKD', value)
    return decomposed.encode('ascii', 'ignore').decode("utf-8")
def _get_template_variables(**kwargs):
    """Collect variables for template that repeats, e.g. are in body.html template.

    kwargs override/extend the base variables; 'lang_code' is filled from
    flask.g when a language was extracted by before().
    """
    lang = get_locale()
    variables = {
        'title': EVENT,
        'logo': LOGO_PYCON,  # TODO: Do we need this?
        'ld_json': LDJSON_PYCON
    }
    # NOTE(review): 'ld_json' is the module-level LDJSON_PYCON dict itself,
    # so the next line mutates the shared global on every call -- confirm
    # this cross-request mutation is intended (a copy may be safer).
    variables['ld_json']['url'] = DOMAIN + '/' + lang + '/'
    variables.update(kwargs)
    if 'current_lang' in g:
        variables['lang_code'] = g.current_lang
    else:
        variables['lang_code'] = app.config['BABEL_DEFAULT_LOCALE']
    return variables
def generate_track(api_data, track_data, start, flag=None):
    """Helper function to mix'n'match API data, with schedule order defined here, to generate schedule dict.

    api_data -- talk dicts from the conference API, matched by exact 'title'.
    track_data -- ordered slot tuples (FRIDAY_TRACK1 etc.) with 'pause' info.
    start -- datetime of the first slot; advanced by each slot's duration+pause.
    flag -- if given, keep only talks with that 'flag' and emit no breaks.

    NOTE(review): indentation was reconstructed from a whitespace-mangled
    dump; confirm which statements sit inside the flag-filter branch.
    """
    template_track_data = []
    for talk in track_data:
        # Check if talk is in API
        talk_api_data = next((item for item in api_data if item['title'] == talk['title']), None)
        # If talk is not in API data we'll use text from track_data dict == same structure for template generation
        if not talk_api_data:
            talk_api_data = talk
        if not flag or ('flag' in talk_api_data and flag == talk_api_data['flag']):
            # Store data to be displayed in template
            template_track_data.append({
                "start": start,
                "talk": talk_api_data
            })
        start = start + timedelta(minutes=talk_api_data.get('duration', 0))
        # start = start + timedelta(minutes=talk_api_data['duration'])
        if not flag:
            # Generate break; the label depends on the pause length.
            break_name = gettext('Break')
            if talk['pause'] in (40, 60):
                break_name = gettext('Lunch 🍱')
            if talk['pause'] in (15, 20):
                break_name = gettext('Coffee Break ☕')
            template_track_data.append({
                'start': start,
                'talk': {'title': break_name},
                'css': 'break'
            })
        start = start + timedelta(minutes=talk['pause'])  # break time does not comes from API always defined in track
    return template_track_data
def generate_schedule(api_data, flag=None):
    """Build the full three-day schedule: one dict per room per day.

    Workshop tracks start offset from the hall opening; 'block_start' /
    'block_end' mark day boundaries for the template. *flag* is forwarded to
    generate_track() to filter by topic.
    """
    return [
        {
            'room': AULA1,
            'start': FRIDAY_START,
            'schedule': generate_track(api_data, FRIDAY_TRACK1, FRIDAY_START, flag=flag),
            'day': 'friday',
            'block_start': True,
        },
        {
            'room': AULA2,
            'start': FRIDAY_START,
            'schedule': generate_track(api_data, FRIDAY_TRACK2, FRIDAY_START, flag=flag),
            'day': 'friday'
        },
        {
            'room': AULA3,
            'start': FRIDAY_START,
            'schedule': generate_track(api_data, FRIDAY_WORKSHOPS1, FRIDAY_START+timedelta(minutes=30), flag=flag),
            'day': 'friday'
        },
        {
            'room': AULA4,
            'start': FRIDAY_START,
            'schedule': generate_track(api_data, FRIDAY_WORKSHOPS2, FRIDAY_START+timedelta(minutes=30), flag=flag),
            'day': 'friday',
        },
        {
            'room': HALLWAY,
            'start': FRIDAY_START+timedelta(minutes=395),
            'schedule': generate_track(api_data, FRIDAY_HALLWAY, FRIDAY_START+timedelta(minutes=395), flag=flag),
            'day': 'saturday',
            'block_end': True,
        },
        {
            'room': AULA1,
            'start': SATURDAY_START,
            'schedule': generate_track(api_data, SATURDAY_TRACK1, SATURDAY_START, flag=flag),
            'day': 'saturday',
            'block_start': True,
        },
        {
            'room': AULA2,
            'start': SATURDAY_START,
            'schedule': generate_track(api_data, SATURDAY_TRACK2, SATURDAY_START, flag=flag),
            'day': 'saturday'
        },
        {
            'room': AULA3,
            'start': SATURDAY_START,
            'schedule': generate_track(api_data, SATURDAY_WORKSHOPS1, SATURDAY_START+timedelta(minutes=30), flag=flag),
            'day': 'saturday'
        },
        {
            'room': AULA4,
            'start': SATURDAY_START,
            'schedule': generate_track(api_data, SATURDAY_WORKSHOPS2, SATURDAY_START+timedelta(minutes=30), flag=flag),
            'day': 'saturday'
        },
        {
            'room': HALLWAY,
            'start': SATURDAY_START+timedelta(minutes=60),
            'schedule': generate_track(api_data, SATURDAY_HALLWAY1, SATURDAY_START+timedelta(minutes=60), flag=flag),
            'day': 'saturday',
        },
        {
            'room': HALLWAY,
            'start': SATURDAY_START+timedelta(minutes=30),
            'schedule': generate_track(api_data, SATURDAY_HALLWAY2, SATURDAY_START+timedelta(minutes=30), flag=flag),
            'day': 'saturday',
            'block_end': True,
        },
        {
            'room': AULA1,
            'start': SUNDAY_START,
            'schedule': generate_track(api_data, SUNDAY_TRACK1, SUNDAY_START, flag=flag),
            'day': 'sunday',
            'block_start': True,
        },
        {
            'room': AULA2,
            'start': SUNDAY_START,
            'schedule': generate_track(api_data, SUNDAY_WORKSHOPS1, SUNDAY_START, flag=flag),
            'day': 'sunday'
        },
        {
            'room': AULA3,
            'start': SUNDAY_START,
            'schedule': generate_track(api_data, SUNDAY_WORKSHOPS2, SUNDAY_START, flag=flag),
            'day': 'sunday'
        },
        {
            'room': AULA4,
            'start': SUNDAY_START,
            'schedule': generate_track(api_data, SUNDAY_WORKSHOPS3, SUNDAY_START, flag=flag),
            'day': 'sunday'
        },
        {
            'room': AULA5,
            'start': SUNDAY_START,
            # Django Girls starts 135 min BEFORE the Sunday opening.
            'schedule': generate_track(api_data, SUNDAY_WORKSHOPS4, SUNDAY_START-timedelta(minutes=135), flag=flag),
            'day': 'sunday',
        },
        {
            'room': HALLWAY,
            'start': SUNDAY_START,
            'schedule': generate_track(api_data, SUNDAY_HALLWAY, SUNDAY_START+timedelta(minutes=45), flag=flag),
            'day': 'sunday',
            'block_end': True,
        },
    ]
def _timestamp(dt=None):
    """Format *dt* (default: current local time) as a compact iCalendar
    timestamp, e.g. '20180309T090000'."""
    moment = datetime.now() if dt is None else dt
    return moment.strftime('%Y%m%dT%H%M%S')
def _ignore_talk(title, names=IGNORE_TALKS):
    """Return True if *title* is one of the ignored slot names.

    Titles may carry up to two appended characters (e.g. a space plus an
    emoji), so the comparison also tries the title with 1 or 2 trailing
    characters stripped — this keeps working even if the symbol changes.
    """
    max_appended_symbols = 2
    for name in names:
        if title == name:
            return True
        for stripped in range(max_appended_symbols):
            if title[:-(stripped + 1)] == name:
                return True
    return False
def _hash_event(track, slot):
    """Build a UID for a calendar event from the room name and slot start.

    The decimal hash of 'name:timestamp' (minus sign replaced by '*') is
    split into four 5-character groups joined with dashes.
    """
    room_name = track.get('room').get('name')
    stamp = _timestamp(slot.get('start'))
    digest = str(hash('{name}:{ts}'.format(name=room_name, ts=stamp)))
    digest = digest.replace('-', '*')
    groups = [digest[i * 5:(i + 1) * 5] for i in range(4)]
    return '-'.join(groups)
def _normalize(text, tag=None, subsequent_indent=' ', **kwargs):
    """Fold *text* to the iCalendar line limit.

    tag must always be passed for the first line of a property so the width
    accounts for the 'TAG:' prefix; literal newlines become the escaped
    iCalendar newline sequence before wrapping.
    """
    if tag:
        max_width = ICAL_LEN - len(tag) - 1
    else:
        max_width = ICAL_LEN
    prepared = text.strip().replace('\n', ICAL_NL)
    wrapped = textwrap.wrap(prepared, width=max_width,
                            subsequent_indent=subsequent_indent, **kwargs)
    return '\n'.join(wrapped)
# CALENDAR FUNCTIONS
def generate_event(track, slot):
    """Assemble the iCalendar field dict for one schedule slot.

    Slots whose title is on the ignore list (breaks and similar) yield an
    empty dict, which the caller drops.
    """
    room = track.get('room')
    location = '{name} ({number})'.format(**room)
    talk = slot.get('talk')
    raw_title = talk.get('title', 'N/A')
    if _ignore_talk(raw_title):
        # skip breaks
        # alternatively we can include breaks into talks (duration=duration+pause)
        return {}

    summary = _normalize(raw_title, 'SUMMARY')
    start = slot.get('start')
    # TODO add missing duration handling (nonzero default duration? title based dictionary?
    minutes = talk.get('duration', 0)
    dtstart = _timestamp(start)
    dtend = _timestamp(start + timedelta(minutes=minutes))
    dtstamp = created = modified = _timestamp()

    # event_uuid caused the event not to be imported to calendar
    # this creates hash of name:start and split with dashes by 5
    uid = _hash_event(track, slot)

    author = ''
    speaker = talk.get('primary_speaker')
    if speaker:
        full_name = ' '.join(speaker.get(part, '') for part in ['first_name', 'last_name'])
        author = '{name}{nl} {nl}'.format(name=full_name, nl=ICAL_NL)

    # when an author line is present the description body starts at column 1,
    # otherwise the first line carries the 'DESCRIPTION:' tag prefix
    desc_tag = '' if author else 'DESCRIPTION'
    main_description = ''
    abstract = talk.get('abstract', '')
    if abstract:
        main_description = _normalize(abstract, desc_tag, initial_indent=' ') + ICAL_NL

    tags = ''
    if 'flag' in talk:
        tags = ' {nl} TAGS: {flag}'.format(nl=ICAL_NL, **talk)

    description = author + main_description + tags
    return {'dtstart': dtstart, 'dtend': dtend, 'dtstamp': dtstamp, 'created': created,
            'last-modified': modified, 'uid': uid, 'location': location,
            'sequence': 0,  # number of revisions; kept at zero even if the event changed
            'description': description, 'status': 'CONFIRMED', 'summary': summary,
            'transp': 'OPAQUE'}
@app.route('/<lang_code>/calendar.ics')
def generate_ics():
    """Render the whole conference schedule as an RFC 5545 iCalendar response.

    See https://tools.ietf.org/html/rfc5545#section-2.1 and
    https://en.wikipedia.org/wiki/ICalendar#Technical_specifications
    """
    events = []
    seen_uids = set()
    for track in generate_schedule(API_DATA_TALKS):
        for slot in track.get('schedule'):
            event = generate_event(track, slot)
            uid = event.get('uid')
            # drop ignored slots (empty dicts) and duplicate UIDs
            if event and uid not in seen_uids:
                seen_uids.add(uid)
                events.append(event)
    ical_body = render_template('calendar.ics', events=events)
    # the iCalendar format requires CRLF line endings
    response = make_response(ical_body.replace('\n', '\r\n'))
    response.headers["Content-Type"] = "text/calendar"
    return response
@app.route('/<lang_code>/index.html')
def index():
    """Landing page."""
    context = _get_template_variables(li_index='active')
    return render_template('index.html', **context)
@app.route('/<lang_code>/tickets.html')
def tickets():
    """Ticket sales page."""
    context = _get_template_variables(li_tickets='active')
    return render_template('tickets.html', **context)
@app.route('/<lang_code>/<flag>/<day>/schedule.html')
def schedule_day_filter(flag, day):
    """Schedule page narrowed to one day AND one category flag."""
    variables = _get_template_variables(li_schedule_nav='active', li_schedule='active')
    api_data = API_DATA_TALKS
    variables.update({
        'flag': flag,
        'day': day,
        'tags': TAGS,
        'all': {**TYPE, **TAGS},
        'data': api_data,
        'schedule': generate_schedule(api_data, flag=flag),
    })
    return render_template('schedule.html', **variables)
@app.route('/<lang_code>/<filter>/schedule.html')
def schedule_filter(filter):
    """Schedule filtered either by a weekday name or by a category flag.

    NOTE: the parameter shadows the builtin ``filter``; its name is fixed by
    the URL rule ``<filter>``, so it cannot be renamed without changing routes.
    """
    variables = _get_template_variables(li_schedule_nav='active', li_schedule='active')
    if filter in ('friday', 'saturday', 'sunday'):
        variables['day'] = filter
        variables['flag'] = None
    else:
        variables['flag'] = filter
    api_data = API_DATA_TALKS
    variables.update({
        'tags': TAGS,
        'all': {**TYPE, **TAGS},
        'data': api_data,
        'schedule': generate_schedule(api_data, flag=variables['flag']),
    })
    return render_template('schedule.html', **variables)
@app.route('/<lang_code>/schedule.html')
def schedule():
    """Full, unfiltered schedule page."""
    variables = _get_template_variables(li_schedule_nav='active', li_schedule='active')
    api_data = API_DATA_TALKS
    variables.update({
        'tags': TAGS,
        'all': {**TYPE, **TAGS},
        'data': api_data,
        'schedule': generate_schedule(api_data),
        'disable_last': True,
    })
    return render_template('schedule.html', **variables)
@app.route('/<lang_code>/<flag>/talks.html')
def talks_filter(flag):
    """Talk list restricted to one category flag (queried live from the API)."""
    context = _get_template_variables(li_schedule_nav='active', li_talks='active')
    context.update(
        tags=TAGS,
        all={**TYPE, **TAGS},
        data=get_conference_data(url='/event/2018/talks/?flag=' + flag),
    )
    return render_template('talks.html', **context)
@app.route('/<lang_code>/talks.html')
def talks():
    """Complete talk list page."""
    context = _get_template_variables(li_schedule_nav='active', li_talks='active')
    context.update(tags=TAGS, all={**TYPE, **TAGS}, data=API_DATA_TALKS)
    return render_template('talks.html', **context)
@app.route('/<lang_code>/speakers.html')
def speakers():
    """Speaker overview page."""
    context = _get_template_variables(li_schedule_nav='active', li_speakers='active')
    context.update(data=API_DATA_SPEAKERS, tags=TAGS, all={**TYPE, **TAGS})
    return render_template('speakers.html', **context)
@app.route('/<lang_code>/speakers/<last_name>.html')
def profile(last_name):
    """Speaker profile page: the speaker's data plus the talks they give.

    Bug fix: the original talk-matching condition dereferenced
    ``talk['talk']['primary_speaker']`` whenever *either* speaker key was
    present, so a talk with only a secondary speaker raised KeyError.  It
    also compared against ``variables['speaker']`` which is unset (and
    crashes) when no speaker matches *last_name*; we compare against the
    URL parameter directly, which is identical when a match exists.
    """
    variables = _get_template_variables(li_schedule_nav='active')
    variables['tags'] = TAGS
    variables['all'] = {**TYPE, **TAGS}
    for speaker in API_DATA_SPEAKERS:
        if speaker['last_name'] == last_name:
            variables['speaker'] = speaker
            break
    variables['talks'] = []
    for track in generate_schedule(API_DATA_TALKS):
        for talk in track['schedule']:
            talk_data = talk['talk']
            # collect whichever speaker last names this talk actually has
            speaker_names = {
                talk_data.get('primary_speaker', {}).get('last_name'),
                talk_data.get('secondary_speaker', {}).get('last_name'),
            }
            if last_name in speaker_names:
                variables['talks'].append((track, talk))
                break  # list at most one talk per track (original behavior)
    return render_template('profile.html', **variables)
@app.route('/<lang_code>/cfp.html')
def cfp():
    """Call-for-proposals page."""
    context = _get_template_variables(li_cfp='active')
    return render_template('cfp.html', **context)
@app.route('/<lang_code>/coc.html')
def coc():
    """Code-of-conduct page."""
    context = _get_template_variables(li_coc='active')
    return render_template('coc.html', **context)
@app.route('/<lang_code>/hall-of-fame.html')
def hall_of_fame():
    """Hall-of-fame page."""
    context = _get_template_variables(li_hall_of_fame='active')
    return render_template('hall-of-fame.html', **context)
@app.route('/<lang_code>/venue.html')
def venue():
    """Venue information page."""
    context = _get_template_variables(li_venue='active')
    return render_template('venue.html', **context)
@app.route('/<lang_code>/sponsoring.html')
def sponsoring():
    """Sponsoring information page."""
    context = _get_template_variables(li_sponsoring='active')
    return render_template('sponsoring.html', **context)
def get_mtime(filename):
    """Get last modification time from file, formatted with TIME_FORMAT."""
    modified_at = os.path.getmtime(filename)
    return datetime.fromtimestamp(modified_at).strftime(TIME_FORMAT)
# Fallback priority/frequency for routes that have no entry in SITEMAP below.
SITEMAP_DEFAULT = {'prio': '0.1', 'freq': 'weekly'}
# Per-page sitemap hints, keyed by template name.  An explicit 'lastmod'
# overrides the template-file mtime lookup in get_lastmod().
SITEMAP = {
    'sitemap.xml': {'prio': '0.9', 'freq': 'daily', 'lastmod': get_mtime(__file__)},
    'index.html': {'prio': '1', 'freq': 'daily'},
    'schedule.html': {'prio': '0.9', 'freq': 'daily'},
    'speakers.html': {'prio': '0.9', 'freq': 'daily'},
    'hall_of_fame.html': {'prio': '0.5', 'freq': 'weekly'},
    'tickets.html': {'prio': '0.5', 'freq': 'weekly'},
}
def get_lastmod(route, sitemap_entry):
    """Used by sitemap() below.

    Resolution order: an explicit 'lastmod' in the sitemap entry, then the
    mtime of the matching template file, then the NOW fallback.
    """
    if 'lastmod' in sitemap_entry:
        return sitemap_entry['lastmod']
    template_name = route.rule.split('/')[-1]
    template_path = os.path.join(SRC_DIR, 'templates', template_name)
    return get_mtime(template_path) if os.path.exists(template_path) else NOW
@app.route('/sitemap.xml', methods=['GET'])
def sitemap():
    """Generate sitemap.xml: a list of URLs with modification dates.

    Static (argument-free) routes get one entry; localized routes get one
    entry per language plus alternate-language links.

    Bug fix: routes with URL parameters other than ``lang_code`` (e.g.
    ``/<lang_code>/<flag>/schedule.html``) previously matched the
    ``'lang_code' in rule.arguments`` branch and were emitted with the raw
    ``<flag>`` placeholder in the URL — not a valid sitemap location.  Such
    parameterized routes are now skipped.
    """
    pages = []
    for rule in app.url_map.iter_rules():
        if "GET" not in rule.methods:
            continue
        if len(rule.arguments) == 0:
            # language-independent static page
            indx = rule.rule.replace('/', '')
            sitemap_data = SITEMAP.get(indx, SITEMAP_DEFAULT)
            pages.append({
                'loc': DOMAIN + rule.rule,
                'lastmod': get_lastmod(rule, sitemap_data),
                'freq': sitemap_data['freq'],
                'prio': sitemap_data['prio'],
            })
        elif rule.arguments == {'lang_code'}:
            # localized page: one entry per language with alternates
            indx = rule.rule.replace('/<lang_code>/', '')
            sitemap_data = SITEMAP.get(indx, SITEMAP_DEFAULT)
            for lang in LANGS:
                alternate = [{
                    'lang': alt_lang,
                    'url': DOMAIN + rule.rule.replace('<lang_code>', alt_lang),
                } for alt_lang in LANGS if alt_lang != lang]
                pages.append({
                    'loc': DOMAIN + rule.rule.replace('<lang_code>', lang),
                    'alternate': alternate,
                    'lastmod': get_lastmod(rule, sitemap_data),
                    'freq': sitemap_data['freq'],
                    'prio': sitemap_data['prio'],
                })
    sitemap_xml = render_template('sitemap_template.xml', pages=pages)
    response = make_response(sitemap_xml)
    response.headers["Content-Type"] = "text/xml"
    return response
if __name__ == "__main__":
    # Development entry point; host/port come from FLASK_HOST / FLASK_PORT
    # env vars (defaults: localhost:5000).  Reloader is for local dev only.
    app.run(debug=True, host=os.environ.get('FLASK_HOST', '127.0.0.1'), port=int(os.environ.get('FLASK_PORT', 5000)),
            use_reloader=True)
| 36.81381
| 190
| 0.612775
| 3,584
| 29,856
| 4.967913
| 0.202288
| 0.049874
| 0.027183
| 0.044482
| 0.322101
| 0.259197
| 0.224824
| 0.194945
| 0.190171
| 0.141589
| 0
| 0.017354
| 0.224109
| 29,856
| 810
| 191
| 36.859259
| 0.751133
| 0.064811
| 0
| 0.203498
| 0
| 0.00318
| 0.293575
| 0.021262
| 0
| 0
| 0
| 0.001235
| 0
| 1
| 0.050874
| false
| 0
| 0.014308
| 0.014308
| 0.120827
| 0.00159
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f510f358811538f9c09860ccdb42030579e71a1a
| 928
|
py
|
Python
|
scripts/fishvalidate.py
|
justinbois/fishactivity
|
6c6ac06c391b75b2725e2e2a61dd80afc34daf31
|
[
"MIT"
] | null | null | null |
scripts/fishvalidate.py
|
justinbois/fishactivity
|
6c6ac06c391b75b2725e2e2a61dd80afc34daf31
|
[
"MIT"
] | null | null | null |
scripts/fishvalidate.py
|
justinbois/fishactivity
|
6c6ac06c391b75b2725e2e2a61dd80afc34daf31
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import argparse
import fishact
if __name__ == '__main__':
    # CLI: validate a genotype file and an activity file with fishact.
    parser = argparse.ArgumentParser(description='Validate data files.')
    parser.add_argument('activity_fname', metavar='activity_file', type=str,
                        help='Name of activity file.')
    parser.add_argument('gtype_fname', metavar='genotype_file', type=str,
                        help='Name of genotype file.')
    args = parser.parse_args()

    separator = '------------------------------------------------'
    print(separator)
    print('Checking genotype file...')
    fishact.validate.test_genotype_file(args.gtype_fname)
    print(separator + '\n\n\n')
    print(separator)
    print('Checking activity file...')
    fishact.validate.test_activity_file(args.activity_fname, args.gtype_fname)
    print(separator)
| 37.12
| 78
| 0.519397
| 86
| 928
| 5.348837
| 0.395349
| 0.104348
| 0.073913
| 0.065217
| 0.091304
| 0.091304
| 0
| 0
| 0
| 0
| 0
| 0
| 0.167026
| 928
| 24
| 79
| 38.666667
| 0.595084
| 0.021552
| 0
| 0.166667
| 0
| 0
| 0.409041
| 0.218302
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0.333333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f513b5c28a4eaca8eb08a50fccfcd5204171dfdc
| 1,682
|
py
|
Python
|
scripts/rescale.py
|
danydoerr/spp_dcj
|
1ab9dacb1f0dc34a3ebbeed9e74226a9a53c297a
|
[
"MIT"
] | 2
|
2021-08-24T16:03:30.000Z
|
2022-03-18T14:52:43.000Z
|
scripts/rescale.py
|
danydoerr/spp_dcj
|
1ab9dacb1f0dc34a3ebbeed9e74226a9a53c297a
|
[
"MIT"
] | null | null | null |
scripts/rescale.py
|
danydoerr/spp_dcj
|
1ab9dacb1f0dc34a3ebbeed9e74226a9a53c297a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from sys import stdout,stderr,exit
from optparse import OptionParser
from newick_parser import parse_tree_iterator, Branch
from tree_span import calculateSpan
from copy import deepcopy
def rescale_absolute(tree, max_length):
    """Rescale *tree* so its maximal span equals *max_length* (returns a copy)."""
    scale = max_length / float(calculateSpan(tree))
    return rescale(tree, scale)
def rescale(tree, scale_factor):
    """Return a deep copy of *tree* with every branch length multiplied by
    *scale_factor*; the input tree is left unmodified.

    Iterative depth-first traversal over the copied node stack.
    """
    res = deepcopy(tree)
    stack = [res.subtree]
    while stack:
        node = stack.pop()
        # zero/None lengths are left untouched (scaling would be a no-op)
        if node.length:
            node.length *= scale_factor
        # isinstance instead of `type(x) == Branch`: also traverses Branch
        # subclasses and is the idiomatic type check
        if isinstance(node, Branch):
            stack.extend(node.subtrees)
    return res
if __name__ == '__main__':
    # CLI: rescale branch lengths of newick trees, either by a fixed factor
    # (-s) or to a given absolute maximal span (-a); exactly one must be set.
    parser = OptionParser(usage='usage: %prog [options] <NEWICK FILE>')
    parser.add_option('-s', '--scale_factor', dest='scale_factor',
                      help='Scale factor of distances in tree',
                      type=float, default=0, metavar='FLOAT')
    parser.add_option('-a', '--absolute_length', dest='absolute',
                      help='Absolute length of maximal distance in tree',
                      type=float, default=0, metavar='FLOAT')
    options, args = parser.parse_args()

    if len(args) != 1:
        parser.print_help()
        exit(1)

    # exactly one of the two modes must be selected (xor of the flags)
    if not ((options.absolute > 0) ^ (options.scale_factor > 0)):
        print('!! Specify either scale factor or absolute length with ' +
              'strictly positive number', file=stderr)
        exit(1)

    for tree in parse_tree_iterator(open(args[0])):
        if options.absolute > 0:
            print(rescale_absolute(tree, options.absolute), file=stdout)
        else:
            print(rescale(tree, options.scale_factor), file=stdout)
| 29
| 75
| 0.633175
| 211
| 1,682
| 4.909953
| 0.369668
| 0.084942
| 0.032819
| 0.028958
| 0.067568
| 0.067568
| 0.067568
| 0.067568
| 0
| 0
| 0
| 0.007924
| 0.249703
| 1,682
| 57
| 76
| 29.508772
| 0.812995
| 0.012485
| 0
| 0.095238
| 0
| 0
| 0.159228
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.119048
| 0
| 0.214286
| 0.095238
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f51915e704bb43425413f02d24086079a01a04be
| 743
|
py
|
Python
|
mytest/playsnake.py
|
mrzhuzhe/stable-baselines3
|
6c3bc5fa4c3faba951099e3ccb5c74b763134b38
|
[
"MIT"
] | null | null | null |
mytest/playsnake.py
|
mrzhuzhe/stable-baselines3
|
6c3bc5fa4c3faba951099e3ccb5c74b763134b38
|
[
"MIT"
] | null | null | null |
mytest/playsnake.py
|
mrzhuzhe/stable-baselines3
|
6c3bc5fa4c3faba951099e3ccb5c74b763134b38
|
[
"MIT"
] | null | null | null |
from stable_baselines3 import PPO
import os
from setup_gym_env import SnakeEnv
import time

# Load a previously trained PPO snake model and play some episodes,
# rendering the final state.  Older checkpoints kept for reference:
#models_dir = "./models/1644408901/" + "40000"
#models_dir = "./models/1644462865/" + "120000"
#models_dir = "./models/1644466638/" + "100000"
models_dir = "./models/1644485414/" + "100000"

env = SnakeEnv()
env.reset()

model = PPO.load(models_dir)

episodes = 10

# snake doesn't known where itself
for episode in range(episodes):
    done = False
    obs = env.reset()
    while not done:
        action, _states = model.predict(obs)
        obs, reward, done, info = env.step(action)
        # idiomatic truthiness check instead of `done == True`
        if done:
            print(done)

env.render()
| 22.515152
| 50
| 0.641992
| 94
| 743
| 4.978723
| 0.531915
| 0.096154
| 0.128205
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114187
| 0.222073
| 743
| 32
| 51
| 23.21875
| 0.695502
| 0.314939
| 0
| 0
| 0
| 0
| 0.051896
| 0
| 0.055556
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.222222
| 0
| 0.222222
| 0.055556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f519a4dd8609848cb4fec6b2221b463e32b9ae3b
| 13,105
|
py
|
Python
|
yoda2h5.py
|
iamholger/yodf5
|
79ad8d77fd2b48e1b71403339e2502b42a5435c8
|
[
"MIT"
] | 4
|
2020-04-22T11:00:13.000Z
|
2020-12-16T17:49:47.000Z
|
yoda2h5.py
|
iamholger/yodf5
|
79ad8d77fd2b48e1b71403339e2502b42a5435c8
|
[
"MIT"
] | 4
|
2020-12-17T16:26:16.000Z
|
2020-12-17T16:30:34.000Z
|
yoda2h5.py
|
iamholger/yodf5
|
79ad8d77fd2b48e1b71403339e2502b42a5435c8
|
[
"MIT"
] | 2
|
2020-05-06T17:30:05.000Z
|
2020-12-16T17:58:23.000Z
|
#!/usr/bin/env python3
import yoda, sys
import h5py
import numpy as np
def chunkIt(seq, num):
avg = len(seq) / float(num)
out = []
last = 0.0
while last < len(seq):
out.append(seq[int(last):int(last + avg)])
last += avg
# Fix size, sometimes there is spillover
# TODO: replace with while if problem persists
if len(out) > num:
out[-2].extend(out[-1])
out = out[0:-1]
if len(out) != num:
raise Exception("something went wrong in chunkIt, the target size differs from the actual size")
return out
def createDatasets(f, binids, variations, depth=1, compression=4):
    """
    Create data sets in the HDF5 file.

    Each float dataset has 3 axes: bin id, weight variation, and point in
    parameter space (*depth*).  Per-type groups and the binid/variation
    lookup datasets are added so YODA objects can be reconstructed on read.

    Fix: dropped the redundant function-local ``import numpy as np`` — numpy
    is already imported at module level.
    """
    nbins = len(binids)
    nvars = len(variations)
    # The fundamental moments/elements of yoda objects
    floats = [
        "sumw", "sumw2",
        "sumwx", "sumwx2",
        "sumwy", "sumwy2",
        "sumwxy",
        "numEntries",
        "xval", "xerr-", "xerr+",
        "yval", "yerr-", "yerr+",
        "xmin", "xmax",
        "ymin", "ymax",
    ]
    # The datasets have 3 axes: binid, weight variation, point in parameter space
    for df in floats:
        f.create_dataset(df, (nbins, nvars, depth), maxshape=(None, None, None),
                         dtype='f', chunks=True, compression=compression)
    # Lookups --- helps when reading data and reconstructing YODA objects
    for group in ("Histo1D", "Histo2D", "Profile1D", "Counter", "Scatter1D", "Scatter2D"):
        f.create_group(group)
    # This is the one that works well with hdf5 when reading std::string in C++
    dt = h5py.special_dtype(vlen=str)
    # Simple lists used as lookup tables to associate the dataset rows with
    # the actual YODA analysis objects
    f.create_dataset("binids", data=np.array(binids, dtype=dt))
    f.create_dataset("variations", data=np.array(variations, dtype=dt))
def dbn0ToArray(dbn):
    """Flatten a 0th-order distribution into [sumW, sumW2, numEntries]."""
    fields = (dbn.sumW(), dbn.sumW2(), dbn.numEntries())
    return np.array(fields)
def dbn1ToArray(dbn):
    """
    Flatten a 1D distribution into
    [sumW, sumW2, sumWX, sumWX2, numEntries, xMin, xMax].

    Total/overflow/underflow distributions have no xmin/xmax; for those the
    last two entries fall back to zero.  The bare ``except:`` was narrowed
    to ``except Exception`` so KeyboardInterrupt/SystemExit are no longer
    swallowed, and only the bounds lookup is inside the try block.
    """
    common = [dbn.sumW(), dbn.sumW2(), dbn.sumWX(), dbn.sumWX2(), dbn.numEntries()]
    try:
        bounds = [dbn.xMin(), dbn.xMax()]
    except Exception:
        # underflow-like objects do not provide axis limits
        bounds = [0, 0]
    return np.array(common + bounds)
def H2dbn2ToArray(dbn):
    """
    Flatten a 2D histogram distribution into
    [sumW, sumW2, sumWX, sumWX2, sumWY, sumWY2, sumWXY, numEntries,
     xMin, xMax, yMin, yMax].

    Total/flow distributions lack axis limits; those entries fall back to
    zero.  Bare ``except:`` narrowed to ``except Exception``.
    """
    common = [dbn.sumW(), dbn.sumW2(), dbn.sumWX(), dbn.sumWX2(),
              dbn.sumWY(), dbn.sumWY2(), dbn.sumWXY(), dbn.numEntries()]
    try:
        bounds = [dbn.xMin(), dbn.xMax(), dbn.yMin(), dbn.yMax()]
    except Exception:
        bounds = [0, 0, 0, 0]
    return np.array(common + bounds)
def dbn2ToArray(dbn):
    """
    Flatten a profile distribution into
    [sumW, sumW2, sumWX, sumWX2, sumWY, sumWY2, numEntries, xMin, xMax];
    the axis limits default to zero when the distribution (total/flow)
    does not provide them.  Bare ``except:`` narrowed to ``except Exception``.
    """
    common = [dbn.sumW(), dbn.sumW2(), dbn.sumWX(), dbn.sumWX2(),
              dbn.sumWY(), dbn.sumWY2(), dbn.numEntries()]
    try:
        bounds = [dbn.xMin(), dbn.xMax()]
    except Exception:
        bounds = [0, 0]
    return np.array(common + bounds)
def point2DToArray(pnt):
    """Flatten a 2D scatter point into [xval, xerr-, xerr+, yval, yerr-, yerr+]."""
    values = []
    for axis in (1, 2):
        values.extend((pnt.val(axis), pnt.errMinus(axis), pnt.errPlus(axis)))
    return np.array(values)
def point1DToArray(pnt):
    """Flatten a 1D scatter point into [xval, xerr-, xerr+]."""
    return np.array((pnt.val(1), pnt.errMinus(1), pnt.errPlus(1)))
def mkSafeHname(hname):
    """Make a histogram path safe for use as an HDF5 dataset name."""
    # '/' is the HDF5 group separator, so swap it for '|'
    return hname.translate(str.maketrans("/", "|"))
def mkBinids(hdict):
    """
    Build the flat, ordered list of bin identifiers for all analysis objects.

    Scatters get one id per point, Counters a single '#0', everything else
    gets total/overflow/underflow ('#T', '#O', '#U') plus one id per bin.
    Variation objects ('name[weight]') are skipped: they share the nominal
    object's binids.

    Fix: removed the inner ``if ao.type() == "Counter"`` branch — it was
    unreachable dead code because Counters are already handled by the elif
    above it.
    """
    binids = []
    for hname in sorted(hdict.keys()):
        if hname.endswith("]"):
            continue
        ao = hdict[hname]
        # strip any '[weight]' suffix and make the path HDF5-safe
        base = ao.path().split("[")[0].replace("/", "|")
        if ao.type() in ("Scatter1D", "Scatter2D"):
            suffixes = list(range(len(ao)))
        elif ao.type() == "Counter":
            suffixes = [0]
        else:
            suffixes = ["T", "O", "U"] + [i for i in range(len(ao))]
        binids.extend("{}#{}".format(base, s) for s in suffixes)
    return binids
def mkIndexDict(datadict, allbinids):
    """Map each analysis-object type to {safe name: [indices into allbinids]}."""
    ret = {t: {} for t in ('Histo1D', 'Histo2D', 'Profile1D',
                           'Scatter1D', 'Scatter2D', 'Counter')}
    for hname in datadict:
        safe = mkSafeHname(hname)
        prefix = "{}#".format(safe)
        try:
            indices = [i for i, binid in enumerate(allbinids)
                       if binid.startswith(prefix)]
            ret[datadict[hname].type()][safe] = indices
        except Exception as e:
            # unknown object type (not one of the keys above)
            print("oops: ", e)
    return ret
def createIndexDS(f, d_idx):
    """Write one index dataset per analysis object: '<type>/<name>' -> bin indices."""
    for dtype, objects in d_idx.items():
        for safe_name, bin_indices in objects.items():
            f.create_dataset("{}/{}".format(dtype, safe_name), data=bin_indices, chunks=True)
def fillDatasets(f, binIdx, variations, ddict, hname, depth=0):
    """Fill the HDF5 datasets for one analysis object at parameter point *depth*.

    f          -- open h5py file with datasets created by createDatasets
    binIdx     -- contiguous dataset row indices reserved for this object
    variations -- weight-variation names; variations[0] is the nominal weight
    ddict      -- mapping from object name (and 'name[variation]') to YODA object
    hname      -- nominal object name
    depth      -- index along the third (parameter-point) axis

    NOTE(review): only the Histo1D write-out at the bottom is active; the
    commented elif chains for the other types appear to be unfinished work.
    """
    if len(binIdx) ==0:
        print("Warning, no matching binid for {} --- is this one of the raw ratios maybe???".format(hname))
        return
    # pick the flattening function and field count for this object type
    if ddict[hname].type()=='Histo1D':
        nFields=7
        fdbn = dbn1ToArray
    elif ddict[hname].type()=='Histo2D':
        nFields=12
        fdbn = H2dbn2ToArray
    elif ddict[hname].type()=='Profile1D':
        fdbn = dbn2ToArray
        nFields=9
    elif ddict[hname].type()=='Scatter2D':
        fdbn = point2DToArray
        nFields=6
    elif ddict[hname].type()=='Scatter1D':
        fdbn = point1DToArray
        nFields=3
    elif ddict[hname].type()=='Counter':
        nFields=3
    else:
        raise Exception("type {} Not implemented".format(ddict[hname].type()))
    # Empty array to be filled and written to datasets
    temp = np.zeros((len(binIdx), len(variations), nFields))
    # object names per variation: nominal first, then 'hname[variation]'
    hids = [hname]
    for v in variations[1:]:
        hids.append("{}[{}]".format(hname, v))
    # Iterate over variations
    for col, hn in enumerate(hids):
        # Iterate over bins
        H=ddict[hn]
        if H.type() == "Counter":
            temp[0][col] = np.array([H.sumW(), H.sumW2(), H.numEntries()])
        # Things with under/overflow first
        elif H.type() not in ["Scatter1D", "Scatter2D", "Histo2D"]:
            # rows 0..2 are total/overflow/underflow, bins follow from row 3
            temp[0][col] = fdbn(H.totalDbn())
            temp[1][col] = fdbn(H.overflow())
            temp[2][col] = fdbn(H.underflow())
            for i in range(len(binIdx)-3):
                temp[3+i][col] = fdbn(H.bin(i))
        elif H.type() =="Histo2D":
            # 2D histograms have no scalar over/underflow rows; keep zeros
            temp[0][col] = fdbn(H.totalDbn())
            temp[1][col] = 0.0 # Future proofing
            temp[2][col] = 0.0 #
            for i in range(len(binIdx)-3):
                temp[3+i][col] = fdbn(H.bin(i))
        else:
            # scatters: one row per point, no total/flow rows
            for i in range(len(binIdx)):
                temp[i][col] = fdbn(H.point(i))
    # write the filled slab into the per-field datasets at this depth
    if ddict[hname].type()=='Histo1D':
        f["sumw"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,0]
        f["sumw2"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,1]
        f["sumwx"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,2]
        f["sumwx2"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,3]
        f["numEntries"][binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,4]
        f["xmin"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,5]
        f["xmax"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,6]
    # elif ddict[hname].type()=='Histo2D':
        # f["sumw"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,0]
        # f["sumw2"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,1]
        # f["sumwx"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,2]
        # f["sumwx2"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,3]
        # f["sumwy"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,4]
        # f["sumwy2"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,5]
        # f["sumwxy"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,6]
        # f["numEntries"][binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,7]
        # f["xmin"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,8]
        # f["xmax"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,9]
        # f["ymin"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,10]
        # f["ymax"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,11]
    # elif ddict[hname].type()=='Profile1D':
        # f["sumw"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,0]
        # f["sumw2"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,1]
        # f["sumwx"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,2]
        # f["sumwx2"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,3]
        # f["sumwy"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,4]
        # f["sumwy2"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,5]
        # f["numEntries"][binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,6]
        # f["xmin"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,7]
        # f["xmax"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,8]
    # elif ddict[hname].type()=='Scatter1D':
        # f["xval"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,0]
        # f["xerr-"][binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,1]
        # f["xerr+"][binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,2]
    # elif ddict[hname].type()=='Scatter2D':
        # f["xval"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,0]
        # f["xerr-"][binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,1]
        # f["xerr+"][binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,2]
        # f["yval"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,3]
        # f["yerr-"][binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,4]
        # f["yerr+"][binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,5]
    # elif ddict[hname].type()=='Counter':
        # f["sumw"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,0]
        # f["sumw2"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,1]
        # f["numEntries"][binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,2]
    # else:
        # raise Exception("yikes")
if __name__=="__main__":
import sys
import optparse, os, sys
op = optparse.OptionParser(usage=__doc__)
op.add_option("-v", "--debug", dest="DEBUG", action="store_true", default=False, help="Turn on some debug messages")
op.add_option("-o", dest="OUTPUT", default="analysisobjects.h5", help="Output HDF5 file (default: %default)")
opts, args = op.parse_args()
YODAFILES = args
from mpi4py import MPI
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
binids, VVV, aix, aix_flat, central = None, None, None, None, None
if rank==0:
# TODO if len(args)==1 and os.path.isdir(args[0]) --- hierarchical reading with pnames finding etc
# Let's assume they are all consistent TODO add robustness
DATA0 = yoda.readYODA(args[0])
L = sorted(list(DATA0.keys()))
names = [x for x in L ]# if not "/RAW" in x]
central = [x for x in names if not x.endswith("]")]
variations = [x for x in names if x.endswith("]")]
# TODO In principle one probably should check that all variations are always the
# same, we assume this is the case here
var = []
for c in central:
var.append([x for x in variations if x.startswith(c+"[")])
## Thats the weight and weight variation order we store the data in
VVV = ["CentralWeight"]
import re
p=re.compile("\[(.*?)\]")
for x in var[0]:
try:
VVV.append(p.findall(x)[0])
except Exception as e:
print(x, e)
binids = mkBinids(DATA0)
# Hierarchical, i.e. top layer is the AnalysisObject type
aix = mkIndexDict(DATA0, binids)
# Object name as keys and lists of indices as values
aix_flat = {}
for k, v in aix.items(): aix_flat.update(v)
binids = comm.bcast(binids, root=0)
VVV = comm.bcast(VVV, root=0)
aix = comm.bcast(aix, root=0)
aix_flat = comm.bcast(aix_flat, root=0)
central = comm.bcast(central, root=0)
# NOTE dataset operations are collective
# This require h5py to use and H5 that is build with MPI
try:
f = h5py.File(opts.OUTPUT, "w", driver='mpio', comm=MPI.COMM_WORLD)
except:
f = h5py.File(opts.OUTPUT, "w")
createDatasets(f, binids, VVV, depth=len(YODAFILES))
createIndexDS(f, aix)
rankwork = chunkIt([i for i in range(len(YODAFILES))], size) if rank==0 else None
rankwork = comm.scatter(rankwork, root=0)
# This part is MPI trivial
for num, findex in enumerate(rankwork):
DATA = yoda.readYODA(YODAFILES[findex])
for hname in central:
_hname=mkSafeHname(hname)
fillDatasets(f, aix_flat[_hname], VVV, DATA, hname, depth=findex)
if rank==0:
print("[{}] --- {}/{} complete".format(rank, num, len(rankwork)))
sys.stdout.flush()
f.close()
| 37.766571
| 184
| 0.546814
| 1,743
| 13,105
| 4.084911
| 0.193919
| 0.040309
| 0.073034
| 0.078652
| 0.369101
| 0.317837
| 0.301404
| 0.289888
| 0.284831
| 0.225
| 0
| 0.032338
| 0.256696
| 13,105
| 346
| 185
| 37.875723
| 0.698594
| 0.27707
| 0
| 0.148837
| 0
| 0
| 0.0815
| 0
| 0
| 0
| 0
| 0.00578
| 0
| 1
| 0.060465
| false
| 0
| 0.037209
| 0.018605
| 0.162791
| 0.018605
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f51c993aef58b3c9c160f8b68cd78fc8daf5ff42
| 1,703
|
py
|
Python
|
main.py
|
RichezA/UnRecurZipper
|
dffe16811e3d79fdc20e0aada0f2ffe9c77da9a1
|
[
"MIT"
] | null | null | null |
main.py
|
RichezA/UnRecurZipper
|
dffe16811e3d79fdc20e0aada0f2ffe9c77da9a1
|
[
"MIT"
] | null | null | null |
main.py
|
RichezA/UnRecurZipper
|
dffe16811e3d79fdc20e0aada0f2ffe9c77da9a1
|
[
"MIT"
] | null | null | null |
import zipfile
import os
import glob
import sys
# Actual directory that we could find somewhere
class Folder:
    """Walks a directory tree, extracting and deleting every .zip it finds.

    Construction is the driver: each Folder unzips the archives directly
    inside its path (recursing into the extracted content) and then recurses
    into every subdirectory except .git.
    """

    def __init__(self, path):
        self.path = path
        print("Current working folder is: " + self.path)
        self.checkForZippedFile()
        self.checkForDirectories()

    def checkForZippedFile(self):
        """Extract (and delete) every .zip directly inside this folder."""
        self.filesToUnzip = glob.glob(os.path.join(self.path, '*.zip'), recursive=True)
        for archive in self.filesToUnzip:
            print("new ZipFile found at: " + archive)
            zip_ref = zipfile.ZipFile(archive, 'r')
            target_dir = archive.split('.zip')[0]  # archive path minus extension
            print("Current zip is at: " + target_dir)
            zip_ref.extractall(target_dir)
            zip_ref.close()
            os.remove(target_dir + '.zip')  # drop the consumed archive
            Folder(target_dir)  # recurse into the freshly extracted content

    def checkForDirectories(self):
        """Recurse into every subdirectory except the .git folder."""
        with os.scandir(self.path) as entries:
            for entry in entries:
                if entry.is_file() or entry.name == '.git':
                    continue
                Folder(os.path.join(self.path, entry.name))
# Reading the first arg written in the console (program name not included):
# the root folder whose zip archives should be recursively extracted.
fileTest = Folder(sys.argv[1])
| 47.305556
| 116
| 0.593658
| 197
| 1,703
| 5.091371
| 0.436548
| 0.047856
| 0.023928
| 0.027916
| 0.035892
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001736
| 0.323547
| 1,703
| 36
| 117
| 47.305556
| 0.868924
| 0.235467
| 0
| 0
| 0
| 0
| 0.066563
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.107143
| false
| 0
| 0.142857
| 0
| 0.285714
| 0.107143
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f51fe70db140c3154b176531ad8f28b9ef267b5a
| 1,974
|
py
|
Python
|
predict_CNN.py
|
slimtomatillo/toxic_waste_dump
|
4bc820f0b31f4420e789af11a9338c475c068889
|
[
"MIT"
] | 2
|
2018-07-13T16:44:24.000Z
|
2019-10-14T21:31:02.000Z
|
predict_CNN.py
|
slimtomatillo/toxic_waste_dump
|
4bc820f0b31f4420e789af11a9338c475c068889
|
[
"MIT"
] | null | null | null |
predict_CNN.py
|
slimtomatillo/toxic_waste_dump
|
4bc820f0b31f4420e789af11a9338c475c068889
|
[
"MIT"
] | null | null | null |
# Imports
import pandas as pd
import pickle
from keras.models import load_model
from preprocess import preprocess
from preprocess import prep_text
# Logging: root logger at INFO so the progress messages below are visible
import logging
logging.getLogger().setLevel(logging.INFO)
logging.info('Loading comments to classify...')
# Enter comment to be classified below
comment_to_classify = ''
def return_label(predicted_probs):
    """
    Map a sequence of 7 class probabilities onto the class labels whose
    probability exceeds the threshold (0.4).  May return several labels,
    or an empty list when nothing clears the threshold.

    The manual ``while i < len(classes)`` index loop was replaced with the
    idiomatic zip comprehension; this also avoids an IndexError when fewer
    probabilities than classes are supplied (zip truncates).
    """
    threshold = 0.4
    classes = ['clean', 'toxic', 'severe toxic', 'obscene',
               'threat', 'insult', 'identity hate']
    return [label for label, prob in zip(classes, predicted_probs)
            if prob > threshold]
def predict_label(comment_str):
    """
    Classify a comment string with the saved CNN and print the predicted
    class labels (clean, toxic, severe toxic, obscene, threat, insult,
    identity hate).  A comment may receive multiple labels.
    """
    data = pd.DataFrame(data=[comment_str], columns=['comment_text'])
    logging.info('Comments loaded.')

    # preprocess -> pick the model text column -> pad/encode for the network
    X_to_predict = prep_text(preprocess(data)['model_text'])

    logging.info('Loading model...')
    cnn = load_model('model/CNN/binarycrossentropy_adam/model-04-0.9781.hdf5')
    logging.info('Model loaded.')

    logging.info('Making prediction(s)...')
    predictions = cnn.predict(X_to_predict)
    for comment, probabilities in zip(data['comment_text'], predictions):
        print('COMMENT:')
        print(comment)
        print()
        print('PREDICTION:')
        print(return_label(probabilities))
        print()
    logging.info('Finished.')
predict_label(comment_to_classify)
| 24.675
| 78
| 0.670719
| 250
| 1,974
| 5.16
| 0.412
| 0.05969
| 0.046512
| 0.029457
| 0.075969
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008486
| 0.223911
| 1,974
| 79
| 79
| 24.987342
| 0.833551
| 0.241135
| 0
| 0.05
| 0
| 0
| 0.187326
| 0.037604
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.15
| 0
| 0.225
| 0.15
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f5209853412f11170538a00e749d5b0ede34e2eb
| 797
|
py
|
Python
|
299. Bulls and Cows.py
|
fossabot/leetcode-2
|
335f1aa3ee785320515c3d3f03c2cb2df3bc13ba
|
[
"MIT"
] | 2
|
2018-02-26T09:12:19.000Z
|
2019-06-07T13:38:10.000Z
|
299. Bulls and Cows.py
|
fossabot/leetcode-2
|
335f1aa3ee785320515c3d3f03c2cb2df3bc13ba
|
[
"MIT"
] | 1
|
2018-12-24T07:03:34.000Z
|
2018-12-24T07:03:34.000Z
|
299. Bulls and Cows.py
|
fossabot/leetcode-2
|
335f1aa3ee785320515c3d3f03c2cb2df3bc13ba
|
[
"MIT"
] | 2
|
2018-12-24T07:01:03.000Z
|
2019-06-07T13:38:07.000Z
|
class Solution(object):
    def getHint(self, secret, guess):
        """
        :type secret: str
        :type guess: str
        :rtype: str

        Bulls ("A") are digits guessed in the correct position; cows
        ("B") are correct digits in the wrong position. Each secret
        digit can be matched at most once.
        """
        bull_positions = set()
        bulls = 0
        # digit -> count of its non-bull occurrences in secret
        unmatched = {}
        for idx in range(len(secret)):
            if secret[idx] == guess[idx]:
                bulls += 1
                bull_positions.add(idx)
            else:
                unmatched[secret[idx]] = unmatched.get(secret[idx], 0) + 1
        cows = 0
        for idx in range(len(guess)):
            if idx in bull_positions:
                continue
            digit = guess[idx]
            if digit in unmatched:
                cows += 1
                unmatched[digit] -= 1
                if unmatched[digit] == 0:
                    del unmatched[digit]
        return str(bulls) + "A" + str(cows) + "B"
| 28.464286
| 46
| 0.385194
| 92
| 797
| 3.336957
| 0.347826
| 0.09772
| 0.087948
| 0.071661
| 0.091205
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019704
| 0.49059
| 797
| 27
| 47
| 29.518519
| 0.736453
| 0.057716
| 0
| 0
| 0
| 0
| 0.002813
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0
| 0
| 0.136364
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f528bf891d405b1631574286911aea9a15dea4b2
| 1,566
|
py
|
Python
|
codesmith/CloudFormation/CogCondPreAuthSettings/cog_cond_pre_auth_settings.py
|
codesmith-gmbh/forge
|
43c334d829a727b48f8e21e273017c51394010f9
|
[
"Apache-2.0"
] | null | null | null |
codesmith/CloudFormation/CogCondPreAuthSettings/cog_cond_pre_auth_settings.py
|
codesmith-gmbh/forge
|
43c334d829a727b48f8e21e273017c51394010f9
|
[
"Apache-2.0"
] | null | null | null |
codesmith/CloudFormation/CogCondPreAuthSettings/cog_cond_pre_auth_settings.py
|
codesmith-gmbh/forge
|
43c334d829a727b48f8e21e273017c51394010f9
|
[
"Apache-2.0"
] | null | null | null |
import json
import logging
import boto3
from box import Box
from crhelper import CfnResource
from schema import Optional
import codesmith.common.naming as naming
from codesmith.common.cfn import resource_properties
from codesmith.common.schema import encoded_bool, non_empty_string, tolerant_schema
from codesmith.common.ssm import put_string_parameter, silent_delete_parameter_from_event
# CloudFormation custom-resource helper; create/update/delete handlers below
# are registered on it via decorators.
helper = CfnResource()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# Expected resource properties; `tolerant_schema` accepts unknown extra keys.
properties_schema = tolerant_schema({
    'UserPoolId': non_empty_string,
    'UserPoolClientId': non_empty_string,
    Optional('All', default=False): encoded_bool,
    Optional('Domains', default=[]): [str],
    Optional('Emails', default=[]): [str]
})
# Shared SSM client used by the create and delete handlers.
ssm = boto3.client('ssm')
def validate_properties(props):
    """Validate *props* against the schema; return them as a snake_case Box."""
    validated = properties_schema.validate(props)
    return Box(validated, camel_killer_box=True)
@helper.create
@helper.update
def create(event, _):
    """Create/update handler: persist the pre-auth settings to SSM.

    Serializes the All/Domains/Emails settings as JSON into a string
    parameter named after the user pool and client. Returns the parameter
    name (used by crhelper as the physical resource id).
    """
    props = validate_properties(resource_properties(event))
    name = naming.cog_cond_pre_auth_parameter_name(props.user_pool_id,
                                                   props.user_pool_client_id)
    payload = json.dumps({'All': props.all,
                          'Domains': props.domains,
                          'Emails': props.emails})
    put_string_parameter(ssm, name,
                         value=payload,
                         description='Forge Cognito Pre Auth Settings Parameter')
    return name
@helper.delete
def delete(event, _):
    # Delete handler: remove the SSM parameter; "silent" delete tolerates
    # a parameter that no longer exists.
    return silent_delete_parameter_from_event(ssm, event)
def handler(event, context):
    """Lambda entry point: log the raw event and dispatch to crhelper."""
    logger.info('event: %s', event)
    helper(event, context)
| 29.54717
| 99
| 0.751596
| 198
| 1,566
| 5.686869
| 0.353535
| 0.053286
| 0.050622
| 0.044405
| 0.053286
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001502
| 0.149425
| 1,566
| 52
| 100
| 30.115385
| 0.843844
| 0
| 0
| 0
| 0
| 0
| 0.070881
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.102564
| false
| 0
| 0.25641
| 0.051282
| 0.435897
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f52d16005d54fc06009e6a33b0d9fa26ef35fd47
| 2,093
|
py
|
Python
|
dl_tutorials/torch_neural_networks.py
|
learnerzhang/AnalyticsVidhya
|
697689a24a9d73785164512cab8ac4ee5494afe8
|
[
"Apache-2.0"
] | 1
|
2018-07-04T09:14:26.000Z
|
2018-07-04T09:14:26.000Z
|
dl_tutorials/torch_neural_networks.py
|
learnerzhang/AnalyticsVidhya
|
697689a24a9d73785164512cab8ac4ee5494afe8
|
[
"Apache-2.0"
] | null | null | null |
dl_tutorials/torch_neural_networks.py
|
learnerzhang/AnalyticsVidhya
|
697689a24a9d73785164512cab8ac4ee5494afe8
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019-01-02 16:44
# @Author : zhangzhen
# @Site :
# @File : torch_neural_networks.py
# @Software: PyCharm
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
    """LeNet-style CNN: two conv/pool stages followed by three linear layers.

    Expects a (batch, 1, 32, 32) input and produces (batch, 10) logits.
    """

    def __init__(self):
        super(Net, self).__init__()
        # Convolutional stage: 1 input channel -> 6 -> 16 feature maps,
        # each with a 5 * 5 square convolution kernel.
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=6, kernel_size=5)
        self.conv2 = nn.Conv2d(in_channels=6, out_channels=16, kernel_size=5)
        # Classifier head: affine operations y = Wx + b.
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, *input):
        # conv -> relu -> 2x2 max-pool, twice. A single int is a valid
        # window spec when the pooling window is square.
        out = F.max_pool2d(F.relu(self.conv1(input[0])), (2, 2))
        out = F.max_pool2d(F.relu(self.conv2(out)), (2, 2))
        # Flatten everything but the batch dimension, then classify.
        out = out.view(-1, self.num_flat_features(out))
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        return self.fc3(out)

    def num_flat_features(self, x):
        """Return the product of all dimensions of *x* except the batch dim."""
        count = 1
        for dim in x.size()[1:]:
            count *= dim
        return count
if __name__ == '__main__':
    # Demo: build the network, run one random input through it, and
    # inspect the autograd graph and gradients around a backward pass.
    net = Net()
    criterion = nn.MSELoss()
    print(net)
    params = list(net.parameters())
    # "参数个数" means "number of parameters".
    print("参数个数:", len(params))
    for param in params:
        print(param.size())
    input = torch.randn(1, 1, 32, 32)
    # NOTE(review): out is (1, 10) while target is (10,); MSELoss will
    # broadcast here (newer PyTorch versions warn about this).
    target = torch.randn(10)
    out = net(input)
    loss = criterion(out, target)
    print(100 * "=")
    print(out, target)
    print("Loss:", loss)
    # Walk back through the autograd graph from the loss.
    print(loss.grad_fn)  # MSELoss
    print(loss.grad_fn.next_functions[0][0])  # Linear
    print(loss.grad_fn.next_functions[0][0].next_functions[0][0])  # ReLU
    # Gradients are None until backward() populates them.
    net.zero_grad()
    print('conv1.bias.grad before backward')
    print(net.conv1.bias.grad)
    loss.backward()
    print('conv1.bias.grad after backward')
    print(net.conv1.bias.grad)
| 26.493671
| 77
| 0.592929
| 313
| 2,093
| 3.837061
| 0.373802
| 0.006661
| 0.029975
| 0.037469
| 0.149875
| 0.131557
| 0.083264
| 0.049958
| 0
| 0
| 0
| 0.055087
| 0.262781
| 2,093
| 78
| 78
| 26.833333
| 0.723266
| 0.180602
| 0
| 0.040816
| 0
| 0
| 0.047114
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061224
| false
| 0
| 0.061224
| 0
| 0.183673
| 0.265306
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f52ec88be52c378180af93ce81749dca618e2061
| 2,577
|
py
|
Python
|
shldn/leonard.py
|
arrieta/shldn
|
8335aaeb1bfe91698bd9dfb83487393ede9225e6
|
[
"MIT"
] | null | null | null |
shldn/leonard.py
|
arrieta/shldn
|
8335aaeb1bfe91698bd9dfb83487393ede9225e6
|
[
"MIT"
] | null | null | null |
shldn/leonard.py
|
arrieta/shldn
|
8335aaeb1bfe91698bd9dfb83487393ede9225e6
|
[
"MIT"
] | null | null | null |
"""
Leonard always DRIVES Sheldon (this module is the __main__ driver for Sheldon)
"""
import argparse
import sys
import os
# Import Sheldon whether this file runs as a top-level script (absolute
# import) or as a package module (relative import).
try:
    from cooper import Sheldon
except ImportError:
    # Was a bare `except:`, which would also swallow KeyboardInterrupt,
    # SystemExit, and unrelated errors raised while importing cooper.
    from .cooper import Sheldon
# File extensions treated as Python source when scanning directories.
EXTENSIONS = [".py", ".mpy"]
def parse_commandline():
    """Build the CLI parser and return the parsed command-line arguments.

    Flags: -u/--human_readable for friendlier output, -r/--recursive to
    scan subdirectories; `path` is the target file or directory.
    """
    parser = argparse.ArgumentParser(
        description="Find divisions in Python code")
    parser.add_argument("-u", "--human_readable",
                        action="store_true",
                        help="Display friendlier output")
    parser.add_argument("-r", "--recursive",
                        action="store_true",
                        help="Scan subdirectories recursively")
    parser.add_argument("path",
                        type=str,
                        help="Path to the target file or directory")
    return parser.parse_args()
def process_files(files, divs_found, readable, path=""):
    """Analyze each file with Sheldon and print the divisions it finds.

    Files that fail to parse are reported as "<file> <line> SyntaxError"
    and skipped. Returns *divs_found* plus the divisions found here.
    """
    for filename in files:
        fname = os.path.join(path, filename)
        with open(fname) as handle:
            pysource = handle.read()
        analyzer = Sheldon(pysource)
        try:
            analyzer.analyze()
        except SyntaxError:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            print(f"{fname} {exc_tb.tb_lineno} SyntaxError")
            continue
        divs_found += len(analyzer.divisions)
        analyzer.printdivs(fname, analyzer.divisions, readable)
    return divs_found
def main():
    """Driver: parse args, then scan a file or directory tree for divisions."""
    args = parse_commandline()
    # readableprint only emits output when -u/--human_readable was given.
    if args.human_readable:
        def readableprint(*args, **kwargs):
            print(*args, **kwargs)
    else:
        readableprint = lambda *a, **k: None  # do - nothing function
    files_checked = 0
    divs_found = 0
    # Directory path
    if os.path.isdir(args.path):
        for path, dirs, files in os.walk(args.path):
            # NOTE(review): os.walk already yields the filenames; re-listing
            # with os.listdir and filtering by extension replaces that list
            # (os.listdir also includes directory names).
            files = [f for f in os.listdir(path) if f.endswith(tuple(EXTENSIONS))]
            files_checked += len(files)
            divs_found = process_files(files, divs_found, args.human_readable, path=path)
            if not args.recursive:
                # NOTE(review): non-recursive runs exit here, before the
                # summary lines below ever print.
                exit(0)
        readableprint(f"{files_checked} files checked")
        readableprint(f"{divs_found} divisions found")
    # File path
    elif os.path.isfile(args.path):
        files =[f for f in [args.path] if args.path.endswith(tuple(EXTENSIONS))]
        divs_found = process_files(files, divs_found, args.human_readable)
        readableprint(f"{divs_found} divisions found")
    # Error
    else:
        sys.exit(f"{args.path} doesn't exist!")
if __name__ == "__main__":
    # Run the CLI driver only when executed as a script, not on import.
    main()
| 28.633333
| 89
| 0.592549
| 303
| 2,577
| 4.887789
| 0.376238
| 0.06077
| 0.037812
| 0.042539
| 0.207968
| 0.190412
| 0.097232
| 0.070223
| 0.070223
| 0.070223
| 0
| 0.001664
| 0.300349
| 2,577
| 89
| 90
| 28.955056
| 0.819745
| 0.064804
| 0
| 0.129032
| 0
| 0
| 0.141785
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064516
| false
| 0
| 0.080645
| 0
| 0.177419
| 0.129032
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f52efbe88e2653ae5d1fd37a74f972d83828b114
| 40,749
|
py
|
Python
|
Lib/site-packages/cherrypy/test/test_core.py
|
raychorn/svn_Python-2.5.1
|
425005b1b489ba44ec0bb989e077297e8953d9be
|
[
"PSF-2.0"
] | null | null | null |
Lib/site-packages/cherrypy/test/test_core.py
|
raychorn/svn_Python-2.5.1
|
425005b1b489ba44ec0bb989e077297e8953d9be
|
[
"PSF-2.0"
] | null | null | null |
Lib/site-packages/cherrypy/test/test_core.py
|
raychorn/svn_Python-2.5.1
|
425005b1b489ba44ec0bb989e077297e8953d9be
|
[
"PSF-2.0"
] | null | null | null |
"""Basic tests for the CherryPy core: request handling."""
from cherrypy.test import test
test.prefer_parent_path()
import cherrypy
from cherrypy import _cptools, tools
from cherrypy.lib import http, static
import types
import os
# Fixture paths (log files, favicon) resolved relative to this test module.
localDir = os.path.dirname(__file__)
log_file = os.path.join(localDir, "test.log")
log_access_file = os.path.join(localDir, "access.log")
favicon_path = os.path.join(os.getcwd(), localDir, "../favicon.ico")
# HTTP methods the Method handler (in setup_server) echoes back directly.
defined_http_methods = ("OPTIONS", "GET", "HEAD", "POST", "PUT", "DELETE",
                        "TRACE", "CONNECT", "PROPFIND")
def setup_server():
    """Mount the server-side handler tree exercised by the test cases below.

    NOTE: this is Python 2 code (``__metaclass__``, ``itervalues``,
    ``iteritems``, ``unicode``). Each nested ``Test`` subclass is
    auto-exposed and attached to ``root`` by the ``TestType`` metaclass.
    """
    class Root:
        def index(self):
            return "hello"
        index.exposed = True

        favicon_ico = tools.staticfile.handler(filename=favicon_path)

        def andnow(self):
            return "the larch"
        andnow.exposed = True

        def global_(self):
            pass
        global_.exposed = True

        def delglobal(self):
            # Removes the global_ handler from the class at runtime.
            del self.__class__.__dict__['global_']
        delglobal.exposed = True

        def defct(self, newct):
            # Switch the default Content-Type via the response_headers tool.
            newct = "text/%s" % newct
            cherrypy.config.update({'tools.response_headers.on': True,
                                    'tools.response_headers.headers':
                                    [('Content-Type', newct)]})
        defct.exposed = True

        def upload(self, file):
            return "Size: %s" % len(file.file.read())
        upload.exposed = True
    root = Root()

    class TestType(type):
        """Metaclass which automatically exposes all functions in each subclass,
        and adds an instance of the subclass as an attribute of root.
        """
        def __init__(cls, name, bases, dct):
            type.__init__(name, bases, dct)
            for value in dct.itervalues():
                if isinstance(value, types.FunctionType):
                    value.exposed = True
            # Mount an instance at /<lowercased class name>.
            setattr(root, name.lower(), cls())

    class Test(object):
        # Python 2 metaclass hook; subclasses are auto-exposed and mounted.
        __metaclass__ = TestType

    class URL(Test):
        _cp_config = {'tools.trailing_slash.on': False}

        def index(self, path_info, relative=None):
            return cherrypy.url(path_info, relative=bool(relative))

        def leaf(self, path_info, relative=None):
            return cherrypy.url(path_info, relative=bool(relative))

    class Params(Test):
        def index(self, thing):
            return repr(thing)

        def ismap(self, x, y):
            return "Coordinates: %s, %s" % (x, y)

        def default(self, *args, **kwargs):
            return "args: %s kwargs: %s" % (args, kwargs)

    class Status(Test):
        def index(self):
            return "normal"

        def blank(self):
            cherrypy.response.status = ""

        # According to RFC 2616, new status codes are OK as long as they
        # are between 100 and 599.

        # Here is an illegal code...
        def illegal(self):
            cherrypy.response.status = 781
            return "oops"

        # ...and here is an unknown but legal code.
        def unknown(self):
            cherrypy.response.status = "431 My custom error"
            return "funky"

        # Non-numeric code
        def bad(self):
            cherrypy.response.status = "error"
            return "bad news"

    class Redirect(Test):
        class Error:
            _cp_config = {"tools.err_redirect.on": True,
                          "tools.err_redirect.url": "/errpage",
                          "tools.err_redirect.internal": False,
                          }

            def index(self):
                raise NameError("redirect_test")
            index.exposed = True
        error = Error()

        def index(self):
            return "child"

        def by_code(self, code):
            raise cherrypy.HTTPRedirect("somewhere else", code)
        by_code._cp_config = {'tools.trailing_slash.extra': True}

        def nomodify(self):
            raise cherrypy.HTTPRedirect("", 304)

        def proxy(self):
            raise cherrypy.HTTPRedirect("proxy", 305)

        def stringify(self):
            return str(cherrypy.HTTPRedirect("/"))

        def fragment(self, frag):
            raise cherrypy.HTTPRedirect("/some/url#%s" % frag)

    def login_redir():
        # Tool hook: bounce unauthenticated requests to the login page.
        if not getattr(cherrypy.request, "login", None):
            raise cherrypy.InternalRedirect("/internalredirect/login")
    tools.login_redir = _cptools.Tool('before_handler', login_redir)

    def redir_custom():
        raise cherrypy.InternalRedirect("/internalredirect/custom_err")

    class InternalRedirect(Test):
        def index(self):
            raise cherrypy.InternalRedirect("/")

        def relative(self, a, b):
            raise cherrypy.InternalRedirect("cousin?t=6")

        def cousin(self, t):
            # request.prev is the request that issued the InternalRedirect.
            assert cherrypy.request.prev.closed
            return cherrypy.request.prev.query_string

        def petshop(self, user_id):
            if user_id == "parrot":
                # Trade it for a slug when redirecting
                raise cherrypy.InternalRedirect('/image/getImagesByUser?user_id=slug')
            elif user_id == "terrier":
                # Trade it for a fish when redirecting
                raise cherrypy.InternalRedirect('/image/getImagesByUser?user_id=fish')
            else:
                # This should pass the user_id through to getImagesByUser
                raise cherrypy.InternalRedirect('/image/getImagesByUser?user_id=%s' % user_id)

        # We support Python 2.3, but the @-deco syntax would look like this:
        # @tools.login_redir()
        def secure(self):
            return "Welcome!"
        secure = tools.login_redir()(secure)
        # Since calling the tool returns the same function you pass in,
        # you could skip binding the return value, and just write:
        # tools.login_redir()(secure)

        def login(self):
            return "Please log in"
        login._cp_config = {'hooks.before_error_response': redir_custom}

        def custom_err(self):
            return "Something went horribly wrong."

        def early_ir(self, arg):
            return "whatever"
        early_ir._cp_config = {'hooks.before_request_body': redir_custom}

    class Image(Test):
        def getImagesByUser(self, user_id):
            return "0 images for %s" % user_id

    class Flatten(Test):
        def as_string(self):
            return "content"

        def as_list(self):
            return ["con", "tent"]

        def as_yield(self):
            yield "content"

        def as_dblyield(self):
            # Yields a generator; the flatten tool unrolls it.
            yield self.as_yield()
        as_dblyield._cp_config = {'tools.flatten.on': True}

        def as_refyield(self):
            for chunk in self.as_yield():
                yield chunk

    class Error(Test):
        _cp_config = {'tools.log_tracebacks.on': True,
                      }

        def custom(self):
            raise cherrypy.HTTPError(404, "No, <b>really</b>, not found!")
        custom._cp_config = {'error_page.404': os.path.join(localDir, "static/index.html")}

        def noexist(self):
            raise cherrypy.HTTPError(404, "No, <b>really</b>, not found!")
        noexist._cp_config = {'error_page.404': "nonexistent.html"}

        def page_method(self):
            raise ValueError()

        def page_yield(self):
            yield "howdy"
            raise ValueError()

        def page_streamed(self):
            yield "word up"
            raise ValueError()
            yield "very oops"
        page_streamed._cp_config = {"response.stream": True}

        def cause_err_in_finalize(self):
            # Since status must start with an int, this should error.
            cherrypy.response.status = "ZOO OK"
        cause_err_in_finalize._cp_config = {'request.show_tracebacks': False}

        def rethrow(self):
            """Test that an error raised here will be thrown out to the server."""
            raise ValueError()
        rethrow._cp_config = {'request.throw_errors': True}

    class Ranges(Test):
        def get_ranges(self, bytes):
            return repr(http.get_ranges('bytes=%s' % bytes, 8))

        def slice_file(self):
            path = os.path.join(os.getcwd(), os.path.dirname(__file__))
            return static.serve_file(os.path.join(path, "static/index.html"))

    class Expect(Test):
        def expectation_failed(self):
            expect = cherrypy.request.headers.elements("Expect")
            if expect and expect[0].value != '100-continue':
                raise cherrypy.HTTPError(400)
            raise cherrypy.HTTPError(417, 'Expectation Failed')

    class Headers(Test):
        def default(self, headername):
            """Spit back out the value for the requested header."""
            return cherrypy.request.headers[headername]

        def doubledheaders(self):
            # From http://www.cherrypy.org/ticket/165:
            # "header field names should not be case sensitive sayes the rfc.
            # if i set a headerfield in complete lowercase i end up with two
            # header fields, one in lowercase, the other in mixed-case."

            # Set the most common headers
            hMap = cherrypy.response.headers
            hMap['content-type'] = "text/html"
            hMap['content-length'] = 18
            hMap['server'] = 'CherryPy headertest'
            hMap['location'] = ('%s://%s:%s/headers/'
                                % (cherrypy.request.local.ip,
                                   cherrypy.request.local.port,
                                   cherrypy.request.scheme))

            # Set a rare header for fun
            hMap['Expires'] = 'Thu, 01 Dec 2194 16:00:00 GMT'

            return "double header test"

        def ifmatch(self):
            val = cherrypy.request.headers['If-Match']
            cherrypy.response.headers['ETag'] = val
            return repr(val)

    class HeaderElements(Test):
        def get_elements(self, headername):
            e = cherrypy.request.headers.elements(headername)
            return "\n".join([unicode(x) for x in e])

    class Method(Test):
        def index(self):
            m = cherrypy.request.method
            if m in defined_http_methods:
                return m

            if m == "LINK":
                raise cherrypy.HTTPError(405)
            else:
                raise cherrypy.HTTPError(501)

        def parameterized(self, data):
            return data

        def request_body(self):
            # This should be a file object (temp file),
            # which CP will just pipe back out if we tell it to.
            return cherrypy.request.body

        def reachable(self):
            return "success"

    class Divorce:
        """HTTP Method handlers shouldn't collide with normal method names.
        For example, a GET-handler shouldn't collide with a method named 'get'.

        If you build HTTP method dispatching into CherryPy, rewrite this class
        to use your new dispatch mechanism and make sure that:
            "GET /divorce HTTP/1.1" maps to divorce.index() and
            "GET /divorce/get?ID=13 HTTP/1.1" maps to divorce.get()
        """

        documents = {}

        def index(self):
            yield "<h1>Choose your document</h1>\n"
            yield "<ul>\n"
            for id, contents in self.documents.iteritems():
                yield (" <li><a href='/divorce/get?ID=%s'>%s</a>: %s</li>\n"
                       % (id, id, contents))
            yield "</ul>"
        index.exposed = True

        def get(self, ID):
            return ("Divorce document %s: %s" %
                    (ID, self.documents.get(ID, "empty")))
        get.exposed = True
    # Divorce is not a Test subclass, so mount it manually.
    root.divorce = Divorce()

    class Cookies(Test):
        def single(self, name):
            cookie = cherrypy.request.cookie[name]
            cherrypy.response.cookie[name] = cookie.value

        def multiple(self, names):
            for name in names:
                cookie = cherrypy.request.cookie[name]
                cherrypy.response.cookie[name] = cookie.value

    class ThreadLocal(Test):
        def index(self):
            existing = repr(getattr(cherrypy.request, "asdf", None))
            cherrypy.request.asdf = "rassfrassin"
            return existing

    # Global server config: small request limits so the limit tests can
    # trigger 413/500 responses cheaply.
    cherrypy.config.update({
        'log.error_file': log_file,
        'environment': 'test_suite',
        'server.max_request_body_size': 200,
        'server.max_request_header_size': 500,
    })
    appconf = {
        '/': {'log.access_file': log_access_file},
        '/method': {'request.methods_with_bodies': ("POST", "PUT", "PROPFIND")},
    }
    cherrypy.tree.mount(root, config=appconf)
# Client-side code #
from cherrypy.test import helper
class CoreRequestHandlingTest(helper.CPWebCase):
def testParams(self):
self.getPage("/params/?thing=a")
self.assertBody("'a'")
self.getPage("/params/?thing=a&thing=b&thing=c")
self.assertBody("['a', 'b', 'c']")
# Test friendly error message when given params are not accepted.
ignore = helper.webtest.ignored_exceptions
ignore.append(TypeError)
try:
self.getPage("/params/?notathing=meeting")
self.assertInBody("index() got an unexpected keyword argument 'notathing'")
finally:
ignore.pop()
# Test "% HEX HEX"-encoded URL, param keys, and values
self.getPage("/params/%d4%20%e3/cheese?Gruy%E8re=Bulgn%e9ville")
self.assertBody(r"args: ('\xd4 \xe3', 'cheese') "
r"kwargs: {'Gruy\xe8re': 'Bulgn\xe9ville'}")
# Make sure that encoded = and & get parsed correctly
self.getPage("/params/code?url=http%3A//cherrypy.org/index%3Fa%3D1%26b%3D2")
self.assertBody(r"args: ('code',) "
r"kwargs: {'url': 'http://cherrypy.org/index?a=1&b=2'}")
# Test coordinates sent by <img ismap>
self.getPage("/params/ismap?223,114")
self.assertBody("Coordinates: 223, 114")
def testStatus(self):
self.getPage("/status/")
self.assertBody('normal')
self.assertStatus(200)
self.getPage("/status/blank")
self.assertBody('')
self.assertStatus(200)
self.getPage("/status/illegal")
self.assertStatus(500)
msg = "Illegal response status from server (781 is out of range)."
self.assertErrorPage(500, msg)
self.getPage("/status/unknown")
self.assertBody('funky')
self.assertStatus(431)
self.getPage("/status/bad")
self.assertStatus(500)
msg = "Illegal response status from server ('error' is non-numeric)."
self.assertErrorPage(500, msg)
def testLogging(self):
f = open(log_access_file, "wb")
f.write("")
f.close()
f = open(log_file, "wb")
f.write("")
f.close()
self.getPage("/flatten/as_string")
self.assertBody('content')
self.assertStatus(200)
self.getPage("/flatten/as_yield")
self.assertBody('content')
self.assertStatus(200)
data = open(log_access_file, "rb").readlines()
host = self.HOST
if not host:
# The empty string signifies INADDR_ANY,
# which should respond on localhost.
host = "127.0.0.1"
intro = '%s - - [' % host
if not data[0].startswith(intro):
self.fail("%r doesn't start with %r" % (data[0], intro))
haslength = False
for k, v in self.headers:
if k.lower() == 'content-length':
haslength = True
line = data[-2].strip()
if haslength:
if not line.endswith('] "GET %s/flatten/as_string HTTP/1.1" 200 7 "" ""'
% self.prefix()):
self.fail(line)
else:
if not line.endswith('] "GET %s/flatten/as_string HTTP/1.1" 200 - "" ""'
% self.prefix()):
self.fail(line)
if not data[-1].startswith(intro):
self.fail("%r doesn't start with %r" % (data[-1], intro))
haslength = False
for k, v in self.headers:
if k.lower() == 'content-length':
haslength = True
line = data[-1].strip()
if haslength:
self.assert_(line.endswith('] "GET %s/flatten/as_yield HTTP/1.1" 200 7 "" ""'
% self.prefix()))
else:
self.assert_(line.endswith('] "GET %s/flatten/as_yield HTTP/1.1" 200 - "" ""'
% self.prefix()))
ignore = helper.webtest.ignored_exceptions
ignore.append(ValueError)
try:
# Test that tracebacks get written to the error log.
self.getPage("/error/page_method")
self.assertInBody("raise ValueError()")
data = open(log_file, "rb").readlines()
self.assertEqual(data[0].strip().endswith('HTTP Traceback (most recent call last):'), True)
self.assertEqual(data[-3].strip().endswith('raise ValueError()'), True)
finally:
ignore.pop()
def testSlashes(self):
# Test that requests for index methods without a trailing slash
# get redirected to the same URI path with a trailing slash.
# Make sure GET params are preserved.
self.getPage("/redirect?id=3")
self.assertStatus(('302 Found', '303 See Other'))
self.assertInBody("<a href='%s/redirect/?id=3'>"
"%s/redirect/?id=3</a>" % (self.base(), self.base()))
if self.prefix():
# Corner case: the "trailing slash" redirect could be tricky if
# we're using a virtual root and the URI is "/vroot" (no slash).
self.getPage("")
self.assertStatus(('302 Found', '303 See Other'))
self.assertInBody("<a href='%s/'>%s/</a>" %
(self.base(), self.base()))
# Test that requests for NON-index methods WITH a trailing slash
# get redirected to the same URI path WITHOUT a trailing slash.
# Make sure GET params are preserved.
self.getPage("/redirect/by_code/?code=307")
self.assertStatus(('302 Found', '303 See Other'))
self.assertInBody("<a href='%s/redirect/by_code?code=307'>"
"%s/redirect/by_code?code=307</a>"
% (self.base(), self.base()))
# If the trailing_slash tool is off, CP should just continue
# as if the slashes were correct. But it needs some help
# inside cherrypy.url to form correct output.
self.getPage('/url?path_info=page1')
self.assertBody('%s/url/page1' % self.base())
self.getPage('/url/leaf/?path_info=page1')
self.assertBody('%s/url/page1' % self.base())
def testRedirect(self):
self.getPage("/redirect/")
self.assertBody('child')
self.assertStatus(200)
self.getPage("/redirect/by_code?code=300")
self.assertMatchesBody(r"<a href='(.*)somewhere else'>\1somewhere else</a>")
self.assertStatus(300)
self.getPage("/redirect/by_code?code=301")
self.assertMatchesBody(r"<a href='(.*)somewhere else'>\1somewhere else</a>")
self.assertStatus(301)
self.getPage("/redirect/by_code?code=302")
self.assertMatchesBody(r"<a href='(.*)somewhere else'>\1somewhere else</a>")
self.assertStatus(302)
self.getPage("/redirect/by_code?code=303")
self.assertMatchesBody(r"<a href='(.*)somewhere else'>\1somewhere else</a>")
self.assertStatus(303)
self.getPage("/redirect/by_code?code=307")
self.assertMatchesBody(r"<a href='(.*)somewhere else'>\1somewhere else</a>")
self.assertStatus(307)
self.getPage("/redirect/nomodify")
self.assertBody('')
self.assertStatus(304)
self.getPage("/redirect/proxy")
self.assertBody('')
self.assertStatus(305)
# HTTPRedirect on error
self.getPage("/redirect/error/")
self.assertStatus(('302 Found', '303 See Other'))
self.assertInBody('/errpage')
# Make sure str(HTTPRedirect()) works.
self.getPage("/redirect/stringify", protocol="HTTP/1.0")
self.assertStatus(200)
self.assertBody("(['%s/'], 302)" % self.base())
if cherrypy.server.protocol_version == "HTTP/1.1":
self.getPage("/redirect/stringify", protocol="HTTP/1.1")
self.assertStatus(200)
self.assertBody("(['%s/'], 303)" % self.base())
# check that #fragments are handled properly
# http://skrb.org/ietf/http_errata.html#location-fragments
frag = "foo"
self.getPage("/redirect/fragment/%s" % frag)
self.assertMatchesBody(r"<a href='(.*)\/some\/url\#%s'>\1\/some\/url\#%s</a>" % (frag, frag))
loc = self.assertHeader('Location')
assert loc.endswith("#%s" % frag)
self.assertStatus(('302 Found', '303 See Other'))
def test_InternalRedirect(self):
# InternalRedirect
self.getPage("/internalredirect/")
self.assertBody('hello')
self.assertStatus(200)
# Test passthrough
self.getPage("/internalredirect/petshop?user_id=Sir-not-appearing-in-this-film")
self.assertBody('0 images for Sir-not-appearing-in-this-film')
self.assertStatus(200)
# Test args
self.getPage("/internalredirect/petshop?user_id=parrot")
self.assertBody('0 images for slug')
self.assertStatus(200)
# Test POST
self.getPage("/internalredirect/petshop", method="POST",
body="user_id=terrier")
self.assertBody('0 images for fish')
self.assertStatus(200)
# Test ir before body read
self.getPage("/internalredirect/early_ir", method="POST",
body="arg=aha!")
self.assertBody("Something went horribly wrong.")
self.assertStatus(200)
self.getPage("/internalredirect/secure")
self.assertBody('Please log in')
self.assertStatus(200)
# Relative path in InternalRedirect.
# Also tests request.prev.
self.getPage("/internalredirect/relative?a=3&b=5")
self.assertBody("a=3&b=5")
self.assertStatus(200)
# InternalRedirect on error
self.getPage("/internalredirect/login/illegal/extra/vpath/atoms")
self.assertStatus(200)
self.assertBody("Something went horribly wrong.")
def testFlatten(self):
for url in ["/flatten/as_string", "/flatten/as_list",
"/flatten/as_yield", "/flatten/as_dblyield",
"/flatten/as_refyield"]:
self.getPage(url)
self.assertBody('content')
def testErrorHandling(self):
self.getPage("/error/missing")
self.assertStatus(404)
self.assertErrorPage(404, "The path '/error/missing' was not found.")
ignore = helper.webtest.ignored_exceptions
ignore.append(ValueError)
try:
valerr = '\n raise ValueError()\nValueError'
self.getPage("/error/page_method")
self.assertErrorPage(500, pattern=valerr)
self.getPage("/error/page_yield")
self.assertErrorPage(500, pattern=valerr)
self.getPage("/error/page_streamed")
# Because this error is raised after the response body has
# started, the status should not change to an error status.
self.assertStatus(200)
self.assertBody("word upUnrecoverable error in the server.")
# No traceback should be present
self.getPage("/error/cause_err_in_finalize")
msg = "Illegal response status from server ('ZOO' is non-numeric)."
self.assertErrorPage(500, msg, None)
finally:
ignore.pop()
# Test custom error page.
self.getPage("/error/custom")
self.assertStatus(404)
self.assertBody("Hello, world\r\n" + (" " * 499))
# Test error in custom error page (ticket #305).
# Note that the message is escaped for HTML (ticket #310).
self.getPage("/error/noexist")
self.assertStatus(404)
msg = ("No, <b>really</b>, not found!<br />"
"In addition, the custom error page failed:\n<br />"
"[Errno 2] No such file or directory: 'nonexistent.html'")
self.assertInBody(msg)
if (hasattr(self, 'harness') and
"modpython" in self.harness.__class__.__name__.lower()):
pass
else:
# Test throw_errors (ticket #186).
self.getPage("/error/rethrow")
self.assertInBody("raise ValueError()")
def testRanges(self):
self.getPage("/ranges/get_ranges?bytes=3-6")
self.assertBody("[(3, 7)]")
# Test multiple ranges and a suffix-byte-range-spec, for good measure.
self.getPage("/ranges/get_ranges?bytes=2-4,-1")
self.assertBody("[(2, 5), (7, 8)]")
# Get a partial file.
if cherrypy.server.protocol_version == "HTTP/1.1":
self.getPage("/ranges/slice_file", [('Range', 'bytes=2-5')])
self.assertStatus(206)
self.assertHeader("Content-Type", "text/html")
self.assertHeader("Content-Range", "bytes 2-5/14")
self.assertBody("llo,")
# What happens with overlapping ranges (and out of order, too)?
self.getPage("/ranges/slice_file", [('Range', 'bytes=4-6,2-5')])
self.assertStatus(206)
ct = self.assertHeader("Content-Type")
expected_type = "multipart/byteranges; boundary="
self.assert_(ct.startswith(expected_type))
boundary = ct[len(expected_type):]
expected_body = ("\r\n--%s\r\n"
"Content-type: text/html\r\n"
"Content-range: bytes 4-6/14\r\n"
"\r\n"
"o, \r\n"
"--%s\r\n"
"Content-type: text/html\r\n"
"Content-range: bytes 2-5/14\r\n"
"\r\n"
"llo,\r\n"
"--%s--\r\n" % (boundary, boundary, boundary))
self.assertBody(expected_body)
self.assertHeader("Content-Length")
# Test "416 Requested Range Not Satisfiable"
self.getPage("/ranges/slice_file", [('Range', 'bytes=2300-2900')])
self.assertStatus(416)
# "When this status code is returned for a byte-range request,
# the response SHOULD include a Content-Range entity-header
# field specifying the current length of the selected resource"
self.assertHeader("Content-Range", "bytes */14")
elif cherrypy.server.protocol_version == "HTTP/1.0":
# Test Range behavior with HTTP/1.0 request
self.getPage("/ranges/slice_file", [('Range', 'bytes=2-5')])
self.assertStatus(200)
self.assertBody("Hello, world\r\n")
def testExpect(self):
e = ('Expect', '100-continue')
self.getPage("/headerelements/get_elements?headername=Expect", [e])
self.assertBody('100-continue')
self.getPage("/expect/expectation_failed", [('Content-Length', '200'), e])
self.assertStatus(417)
def testHeaderElements(self):
# Accept-* header elements should be sorted, with most preferred first.
h = [('Accept', 'audio/*; q=0.2, audio/basic')]
self.getPage("/headerelements/get_elements?headername=Accept", h)
self.assertStatus(200)
self.assertBody("audio/basic\n"
"audio/*;q=0.2")
h = [('Accept', 'text/plain; q=0.5, text/html, text/x-dvi; q=0.8, text/x-c')]
self.getPage("/headerelements/get_elements?headername=Accept", h)
self.assertStatus(200)
self.assertBody("text/x-c\n"
"text/html\n"
"text/x-dvi;q=0.8\n"
"text/plain;q=0.5")
# Test that more specific media ranges get priority.
h = [('Accept', 'text/*, text/html, text/html;level=1, */*')]
self.getPage("/headerelements/get_elements?headername=Accept", h)
self.assertStatus(200)
self.assertBody("text/html;level=1\n"
"text/html\n"
"text/*\n"
"*/*")
# Test Accept-Charset
h = [('Accept-Charset', 'iso-8859-5, unicode-1-1;q=0.8')]
self.getPage("/headerelements/get_elements?headername=Accept-Charset", h)
self.assertStatus("200 OK")
self.assertBody("iso-8859-5\n"
"unicode-1-1;q=0.8")
# Test Accept-Encoding
h = [('Accept-Encoding', 'gzip;q=1.0, identity; q=0.5, *;q=0')]
self.getPage("/headerelements/get_elements?headername=Accept-Encoding", h)
self.assertStatus("200 OK")
self.assertBody("gzip;q=1.0\n"
"identity;q=0.5\n"
"*;q=0")
# Test Accept-Language
h = [('Accept-Language', 'da, en-gb;q=0.8, en;q=0.7')]
self.getPage("/headerelements/get_elements?headername=Accept-Language", h)
self.assertStatus("200 OK")
self.assertBody("da\n"
"en-gb;q=0.8\n"
"en;q=0.7")
def testHeaders(self):
    # Each response header must appear exactly once, regardless of the
    # case the handler used when setting it.
    self.getPage("/headers/doubledheaders")
    self.assertBody("double header test")
    seen = [name.title() for name, _ in self.headers]
    for hdr in ('Content-Length', 'Content-Type', 'Date',
                'Expires', 'Location', 'Server'):
        self.assertEqual(seen.count(hdr), 1)

    if cherrypy.server.protocol_version == "HTTP/1.1":
        # RFC-2047-encoded request and response header values should be
        # decoded/encoded transparently.
        quoted = "=E2=84=ABngstr=C3=B6m"
        self.getPage("/headers/ifmatch",
                     [('If-Match', '=?utf-8?q?%s?=' % quoted)])
        self.assertBody("u'\\u212bngstr\\xf6m'")
        self.assertHeader("ETag", '=?utf-8?b?4oSrbmdzdHLDtm0=?=')

        # A *LONG* RFC-2047-encoded value must be split across multiple
        # encoded-words on output.
        self.getPage("/headers/ifmatch",
                     [('If-Match', '=?utf-8?q?%s?=' % (quoted * 10))])
        self.assertBody("u'%s'" % ('\\u212bngstr\\xf6m' * 10))
        self.assertHeader("ETag",
                          '=?utf-8?b?4oSrbmdzdHLDtm3ihKtuZ3N0csO2beKEq25nc3Ryw7Zt4oSrbmdzdHLDtm0=?='
                          '=?utf-8?b?4oSrbmdzdHLDtm3ihKtuZ3N0csO2beKEq25nc3Ryw7Zt4oSrbmdzdHLDtm0=?='
                          '=?utf-8?b?4oSrbmdzdHLDtm3ihKtuZ3N0csO2bQ==?=')

    # Two request headers with the same name are collapsed into one.
    # See http://www.cherrypy.org/ticket/542.
    self.getPage("/headers/Accept-Charset",
                 headers=[("Accept-Charset", "iso-8859-5"),
                          ("Accept-Charset", "unicode-1-1;q=0.8")])
    self.assertBody("iso-8859-5, unicode-1-1;q=0.8")

    # With no Content-Type header in the request, it must be absent
    # from cherrypy.request.headers (the handler raises -> 500).
    self.getPage("/headers/Content-Type", headers=[])
    self.assertStatus(500)

    # With Content-Type present, it must be visible to the handler.
    self.getPage("/headers/Content-Type",
                 headers=[("Content-type", "application/json")])
    self.assertBody("application/json")
def testHTTPMethods(self):
    # Exercise every defined HTTP method, plus body handling, custom
    # methods, and method/path-atom collisions.
    helper.webtest.methods_with_bodies = ("POST", "PUT", "PROPFIND")

    # All defined HTTP methods must be dispatchable.
    for method in defined_http_methods:
        self.getPage("/method/", method=method)
        if method == "HEAD":
            # HEAD responses carry no body.
            self.assertBody("")
        elif method == "TRACE":
            # Some HTTP servers (like modpy) have their own TRACE support
            self.assertEqual(self.body[:5], "TRACE")
        else:
            self.assertBody(method)

    # PUT with a form-urlencoded body.
    self.getPage("/method/parameterized", method="PUT",
                 body="data=on+top+of+other+things")
    self.assertBody("on top of other things")

    # PUT with a plain-text file body.
    payload = "one thing on top of another"
    hdrs = [("Content-Type", "text/plain"),
            ("Content-Length", str(len(payload)))]
    self.getPage("/method/request_body", headers=hdrs,
                 method="PUT", body=payload)
    self.assertStatus(200)
    self.assertBody(payload)

    # PUT with no body whatsoever (not an empty one).
    # See http://www.cherrypy.org/ticket/650.
    # Provide a C-T or webtest will provide one (and a C-L) for us.
    self.getPage("/method/reachable",
                 headers=[("Content-Type", "text/plain")], method="PUT")
    self.assertBody("success")

    # A custom (WebDAV) method carrying a request body.
    payload = ('<?xml version="1.0" encoding="utf-8" ?>\n\n'
               '<propfind xmlns="DAV:"><prop><getlastmodified/>'
               '</prop></propfind>')
    hdrs = [('Content-Type', 'text/xml'),
            ('Content-Length', str(len(payload)))]
    self.getPage("/method/request_body", headers=hdrs,
                 method="PROPFIND", body=payload)
    self.assertStatus(200)
    self.assertBody(payload)

    # Disallowed method -> 405; unknown method -> 501.
    self.getPage("/method/", method="LINK")
    self.assertStatus(405)
    self.getPage("/method/", method="SEARCH")
    self.assertStatus(501)

    # For method dispatchers: make sure that an HTTP method doesn't
    # collide with a virtual path atom. If you build HTTP-method
    # dispatching into the core, rewrite these handlers to use
    # your dispatch idioms.
    self.getPage("/divorce/get?ID=13")
    self.assertBody('Divorce document 13: empty')
    self.assertStatus(200)
    self.getPage("/divorce/", method="GET")
    self.assertBody('<h1>Choose your document</h1>\n<ul>\n</ul>')
    self.assertStatus(200)
def testFavicon(self):
    # favicon.ico is served by staticfile; the response body must equal
    # the file's raw bytes.
    icofilename = os.path.join(localDir, "../favicon.ico")
    icofile = open(icofilename, "rb")
    # try/finally so the handle is closed even if read() raises
    # (the original leaked the handle on error; `with` is avoided
    # because this file still supports Python < 2.5).
    try:
        data = icofile.read()
    finally:
        icofile.close()
    self.getPage("/favicon.ico")
    self.assertBody(data)
def testCookies(self):
    # Cookie.py in Python versions before 2.5 emitted a trailing
    # semicolon after each morsel; the expected Set-Cookie values
    # differ only by that suffix, so compute it once.
    import sys
    if sys.version_info >= (2, 5):
        suffix = ''
    else:
        suffix = ';'

    self.getPage("/cookies/single?name=First",
                 [('Cookie', 'First=Dinsdale;')])
    self.assertHeader('Set-Cookie', 'First=Dinsdale' + suffix)

    self.getPage("/cookies/multiple?names=First&names=Last",
                 [('Cookie', 'First=Dinsdale; Last=Piranha;'),
                  ])
    self.assertHeader('Set-Cookie', 'First=Dinsdale' + suffix)
    self.assertHeader('Set-Cookie', 'Last=Piranha' + suffix)
def testMaxRequestSize(self):
    # Requests exceeding the configured size limits must be rejected
    # with 413 (Request Entity Too Large).
    self.getPage("/", headers=[('From', "x" * 500)])
    self.assertStatus(413)

    # Test for http://www.cherrypy.org/ticket/421
    # (Incorrect border condition in readline of SizeCheckWrapper).
    # This hangs in rev 891 and earlier.
    lines256 = "x" * 248
    self.getPage("/",
                 headers=[('Host', '%s:%s' % (self.HOST, self.PORT)),
                          ('From', lines256)])

    # Test upload limits.
    # NOTE(review): multipart syntax requires a blank line between the
    # part headers and the part body — confirm against the original
    # template if upload sizes ever mismatch.
    body = """--x
Content-Disposition: form-data; name="file"; filename="hello.txt"
Content-Type: text/plain

%s
--x--
"""

    def upload(size):
        # POST a multipart body whose file part is `size` bytes of 'x'.
        b = body % ("x" * size)
        h = [("Content-type", "multipart/form-data; boundary=x"),
             # str(len(b)): the original passed an int here, inconsistent
             # with every other test in this file.
             ("Content-Length", str(len(b)))]
        self.getPage('/upload', h, "POST", b)

    upload(96)
    self.assertBody('Size: 96')
    upload(200)
    self.assertStatus(413)
def testEmptyThreadlocals(self):
    # Every request must start with a clean thread-local context:
    # twenty requests in a row should each report "None".
    bodies = []
    for _ in xrange(20):
        self.getPage("/threadlocal/")
        bodies.append(self.body)
    self.assertEqual(bodies, ["None"] * 20)
def testDefaultContentType(self):
    # Out of the box, responses default to text/html.
    self.getPage('/')
    self.assertHeader('Content-Type', 'text/html')
    # Switch the app-wide default to text/plain and verify a subsequent
    # request picks it up (order matters: /defct/plain mutates state).
    self.getPage('/defct/plain')
    self.getPage('/')
    self.assertHeader('Content-Type', 'text/plain')
    # Restore the text/html default so later tests are unaffected.
    self.getPage('/defct/html')
def test_cherrypy_url(self):
    # cherrypy.url path resolution: each case maps a request URL to the
    # expected path suffix appended to self.base().
    absolute_cases = [
        # Input relative to current
        ('/url/leaf?path_info=page1', '/url/page1'),
        ('/url/?path_info=page1', '/url/page1'),
        # Input is 'absolute'; that is, relative to script_name
        ('/url/leaf?path_info=/page1', '/page1'),
        ('/url/?path_info=/page1', '/page1'),
        # Single dots
        ('/url/leaf?path_info=./page1', '/url/page1'),
        ('/url/leaf?path_info=other/./page1', '/url/other/page1'),
        ('/url/?path_info=/other/./page1', '/other/page1'),
        # Double dots
        ('/url/leaf?path_info=../page1', '/page1'),
        ('/url/leaf?path_info=other/../page1', '/url/page1'),
        ('/url/leaf?path_info=/other/../page1', '/page1'),
    ]
    for url, suffix in absolute_cases:
        self.getPage(url)
        self.assertBody('%s%s' % (self.base(), suffix))

    # Output relative to current path or script_name
    relative_cases = [
        ('/url/?path_info=page1&relative=True', 'page1'),
        ('/url/leaf?path_info=/page1&relative=True', '../page1'),
        ('/url/leaf?path_info=../page1&relative=True', '../page1'),
        ('/url/?path_info=other/../page1&relative=True', 'page1'),
    ]
    for url, expected in relative_cases:
        self.getPage(url)
        self.assertBody(expected)
if __name__ == '__main__':
    # Running this module directly: mount the test apps, then hand off
    # to the webtest helper's main loop to execute the suite.
    setup_server()
    helper.testmain()
| 38.370056
| 104
| 0.545289
| 4,490
| 40,749
| 4.888419
| 0.167261
| 0.052622
| 0.023372
| 0.016766
| 0.374869
| 0.331678
| 0.270582
| 0.214042
| 0.208711
| 0.182195
| 0
| 0.024893
| 0.323713
| 40,749
| 1,061
| 105
| 38.406221
| 0.771573
| 0.132028
| 0
| 0.254821
| 0
| 0.006887
| 0.248116
| 0.083966
| 0
| 0
| 0
| 0
| 0.232782
| 1
| 0.114325
| false
| 0.002755
| 0.011019
| 0.03168
| 0.210744
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f52efca4ad0dbdcec53aee2fa61bc784274e7d40
| 1,036
|
py
|
Python
|
day4/solution1.py
|
zirne/aoc19
|
98feea895f0113ef60738723ca976dcbef0629b9
|
[
"MIT"
] | null | null | null |
day4/solution1.py
|
zirne/aoc19
|
98feea895f0113ef60738723ca976dcbef0629b9
|
[
"MIT"
] | null | null | null |
day4/solution1.py
|
zirne/aoc19
|
98feea895f0113ef60738723ca976dcbef0629b9
|
[
"MIT"
] | null | null | null |
# Solution 1
def readInputFile(filename):
    """Return the entire text contents of the file at *filename*.

    Uses a context manager so the file handle is closed even if
    read() raises (the original left the handle open on error).
    """
    with open(filename, "r") as f:
        return f.read()
# Puzzle input is a single "low-high" range, e.g. "137691-569991".
input = readInputFile("input.txt").strip()
print(input)
# Unpack both endpoints in one step; `current` walks the range.
lowest, highest = input.split("-")
current = int(lowest)
print(lowest)
print(highest)
def checkNeverDecreaseRule(n):
    """Return True if the digits of n never decrease left to right."""
    digits = str(n)
    # Compare every adjacent digit pair; all() short-circuits on the
    # first decreasing pair, just as the original returned early.
    return all(int(a) <= int(b) for a, b in zip(digits, digits[1:]))
def checkHasAdjacentSame(n):
    """Return True if n has at least one pair of equal adjacent digits."""
    digits = str(n)
    # any() short-circuits on the first matching pair and returns the
    # boolean directly; the original counted every pair into adjCount
    # and then wrote `if adjCount >= 1: return True else: return False`.
    return any(a == b for a, b in zip(digits, digits[1:]))
# Collect every candidate in [lowest, highest] that satisfies both
# password rules, then report how many there are.
upper = int(highest)  # hoisted: the original re-parsed highest on every iteration
resultArr = []
while current <= upper:
    if checkNeverDecreaseRule(current) and checkHasAdjacentSame(current):
        resultArr.append(current)
    current += 1
print(len(resultArr))
| 18.836364
| 70
| 0.621622
| 145
| 1,036
| 4.441379
| 0.275862
| 0.031056
| 0.018634
| 0.018634
| 0.14441
| 0.14441
| 0.14441
| 0.14441
| 0.14441
| 0.10559
| 0
| 0.021557
| 0.194015
| 1,036
| 55
| 71
| 18.836364
| 0.749701
| 0.189189
| 0
| 0.35
| 0
| 0
| 0.015569
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075
| false
| 0
| 0
| 0
| 0.2
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f52fa19632597f93eba421103fbc7100653b7f9d
| 763
|
py
|
Python
|
e2e/Tests/Transactions/Verify.py
|
rikublock/Meros
|
7a3ae9c78af388eb523bc8a2c840018fc058ef44
|
[
"CC0-1.0"
] | null | null | null |
e2e/Tests/Transactions/Verify.py
|
rikublock/Meros
|
7a3ae9c78af388eb523bc8a2c840018fc058ef44
|
[
"CC0-1.0"
] | null | null | null |
e2e/Tests/Transactions/Verify.py
|
rikublock/Meros
|
7a3ae9c78af388eb523bc8a2c840018fc058ef44
|
[
"CC0-1.0"
] | 1
|
2021-02-08T23:46:35.000Z
|
2021-02-08T23:46:35.000Z
|
#Transactions classes.
from e2e.Classes.Transactions.Transaction import Transaction
from e2e.Classes.Transactions.Transactions import Transactions
#TestError Exception.
from e2e.Tests.Errors import TestError
#RPC class.
from e2e.Meros.RPC import RPC
#Sleep standard function.
from time import sleep
#Verify a single Transaction against the node's view over RPC.
def verifyTransaction(
  rpc: RPC,
  tx: Transaction
) -> None:
  expected = tx.toJSON()
  actual = rpc.call("transactions", "getTransaction", [tx.hash.hex()])
  if actual != expected:
    raise TestError("Transaction doesn't match.")
#Verify every Transaction the local Transactions object knows about.
def verifyTransactions(
  rpc: RPC,
  transactions: Transactions
) -> None:
  #Sleep to ensure data races aren't a problem.
  sleep(2)
  for txHash in transactions.txs:
    verifyTransaction(rpc, transactions.txs[txHash])
| 23.84375
| 80
| 0.756225
| 96
| 763
| 6.010417
| 0.46875
| 0.048527
| 0.048527
| 0.090121
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007634
| 0.141547
| 763
| 31
| 81
| 24.612903
| 0.873282
| 0.214941
| 0
| 0.222222
| 0
| 0
| 0.087838
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.277778
| 0
| 0.388889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|