hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
71dd434a3adaa696a17ec425e131077b6639bbec
| 2,995
|
py
|
Python
|
Chapter_4/lists_data_type.py
|
alenasf/AutomateTheBoringStuff
|
041e56221eb98d9893c24d22497034e6344c0490
|
[
"Apache-2.0"
] | null | null | null |
Chapter_4/lists_data_type.py
|
alenasf/AutomateTheBoringStuff
|
041e56221eb98d9893c24d22497034e6344c0490
|
[
"Apache-2.0"
] | null | null | null |
Chapter_4/lists_data_type.py
|
alenasf/AutomateTheBoringStuff
|
041e56221eb98d9893c24d22497034e6344c0490
|
[
"Apache-2.0"
] | null | null | null |
# Tutorial walk-through of Python list basics (Automate the Boring Stuff, ch. 4).
# Bare expressions like `spam[-1]` are REPL-style demonstrations; the expected
# value is shown in the trailing comment.

# Negative Indexes
spam = ['cat', 'bat', 'rat', 'elephant']
spam[-1]  # 'elephant'
spam[-3]  # 'bat'

# Getting a List from another List with Slices
spam = ['cat', 'bat', 'rat', 'elephant']
spam[0:4]   # ['cat', 'bat', 'rat', 'elephant']
spam[1:3]   # ['bat', 'rat']
spam[0:-1]  # ['cat', 'bat', 'rat']
spam[:2]    # ['cat', 'bat']
spam[1:]    # ['bat', 'rat', 'elephant']
spam[:]     # ['cat', 'bat', 'rat', 'elephant']

# Getting a List's length with the len() Function
spam = ['cat', 'dog', 'moose']
len(spam)  # 3

# Changing Values in a List with Indexes
spam = ['cat', 'bat', 'rat', 'elephant']
spam[1] = 'aardvark'
spam  # ['cat', 'aardvark', 'rat', 'elephant']
spam[2] = spam[1]
spam  # ['cat', 'aardvark', 'aardvark', 'elephant']
spam[-1] = 12345
spam  # ['cat', 'aardvark', 'aardvark', 12345]

# List Concatenation and List Replication
[1, 2, 3] + ['A', 'B', 'C']
# [1, 2, 3, 'A', 'B', 'C']
['X', 'Y', 'Z'] * 3
# ['X', 'Y', 'Z', 'X', 'Y', 'Z', 'X', 'Y', 'Z']
spam = [1, 2, 3]
spam = spam + ['A', 'B', 'C']
# [1, 2, 3, 'A', 'B', 'C']

# Removing Values From Lists with del Statements
spam = ['cat', 'bat', 'rat', 'elephant']
del spam[2]
spam  # ['cat', 'bat', 'elephant']
del spam[2]
spam  # ['cat', 'bat']

# Using for Loops with Lists
for i in range(4):
    print(i)
supplies = ['pens', 'staplers', 'flamethrowers', 'binders']
for i in range(len(supplies)):
    print('Index ' + str(i) + ' in supplies is: ' + supplies[i])

# The in and not in Operators
'howdy' in ['hello', 'hi', 'howdy', 'heyas']  # True
spam = ['hello', 'hi', 'howdy', 'heyas']
'cat' in spam  # False
'howdy' not in spam  # False

# Type in a pet name and then check whether the name is in a list of pets
myPets = ['Zophie', 'Pooka', 'Fat-tail']
print('Enter a pet name:')
name = input()
if name not in myPets:
    print('I do not have a pet named ' + name)
else:
    print(name + ' is my pet.')

# The Multiple Assignment Trick
cat = ['fat', 'gray', 'loud']
size = cat[0]
color = cat[1]
disposition = cat[2]
# type this line
cat = ['fat', 'gray', 'loud']
size, color, disposition = cat

# Using the enumerate() Function with Lists
# enumerate() is useful when you need both the item and the item's index in the loop's block
supplies = ['pens', 'staplers', 'flamethrowers', 'binders']
for index, item in enumerate(supplies):
    print('Index ' + str(index) + ' in supplies is: ' + item)

# Using the random.choice() and random.shuffle() Functions with Lists
import random
pets = ['Dog', 'Cat', 'Moose']
random.choice(pets)
random.choice(pets)
random.choice(pets)
# random.choice(someList) is a shorter form of someList[random.randint(0, len(someList) - 1)]
import random
people = ['Alice', 'Bob', 'Carol', 'David']
random.shuffle(people)
people  # e.g. ['Bob', 'Carol', 'David', 'Alice']
random.shuffle(people)
people  # random order of people

# Augmented Assignment Operators
# Reset spam to a number first: at this point spam was a list of strings, and
# `list += int` raises TypeError, so the demo below would crash without this.
spam = 42
spam += 1  # spam = spam + 1
spam -= 1  # spam = spam - 1
spam *= 1  # spam = spam * 1
spam /= 1  # spam = spam / 1
spam %= 1  # spam = spam % 1
| 26.043478
| 94
| 0.600334
| 452
| 2,995
| 3.977876
| 0.269912
| 0.047275
| 0.050056
| 0.05673
| 0.267519
| 0.228587
| 0.147942
| 0.119021
| 0.048943
| 0.038932
| 0
| 0.023026
| 0.18798
| 2,995
| 114
| 95
| 26.27193
| 0.716283
| 0.435058
| 0
| 0.347826
| 0
| 0
| 0.228623
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.028986
| 0
| 0.028986
| 0.086957
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
71dfbd47e154641ea34b44a5f3aa8459312d608f
| 3,268
|
py
|
Python
|
qemu/scripts/codeconverter/codeconverter/test_patching.py
|
hyunjoy/scripts
|
01114d3627730d695b5ebe61093c719744432ffa
|
[
"Apache-2.0"
] | 44
|
2022-03-16T08:32:31.000Z
|
2022-03-31T16:02:35.000Z
|
qemu/scripts/codeconverter/codeconverter/test_patching.py
|
hyunjoy/scripts
|
01114d3627730d695b5ebe61093c719744432ffa
|
[
"Apache-2.0"
] | 1
|
2022-03-29T02:30:28.000Z
|
2022-03-30T03:40:46.000Z
|
qemu/scripts/codeconverter/codeconverter/test_patching.py
|
hyunjoy/scripts
|
01114d3627730d695b5ebe61093c719744432ffa
|
[
"Apache-2.0"
] | 18
|
2022-03-19T04:41:04.000Z
|
2022-03-31T03:32:12.000Z
|
# Copyright (C) 2020 Red Hat Inc.
#
# Authors:
# Eduardo Habkost <ehabkost@redhat.com>
#
# This work is licensed under the terms of the GNU GPL, version 2. See
# the COPYING file in the top-level directory.
from tempfile import NamedTemporaryFile
from .patching import FileInfo, FileMatch, Patch, FileList
from .regexps import *
class BasicPattern(FileMatch):
    """Test pattern: matches any run of exactly three a/b/c characters."""
    regexp = '[abc]{3}'

    @property
    def name(self):
        # The matched text itself serves as the match's name.
        return self.group(0)

    def replacement(self) -> str:
        # Replace match with the middle character, uppercased, repeated 5 times.
        return self.group(0)[1].upper()*5
def test_pattern_patching():
    """End-to-end check: load a temp file, patch BasicPattern matches, verify output."""
    of = NamedTemporaryFile('wt')
    of.writelines(['one line\n',
                   'this pattern will be patched: defbbahij\n',
                   'third line\n',
                   'another pattern: jihaabfed'])
    of.flush()

    files = FileList()
    f = FileInfo(files, of.name)
    f.load()
    matches = f.matches_of_type(BasicPattern)
    assert len(matches) == 2
    p2 = matches[1]

    # manually add patch, to see if .append() works:
    f.patches.append(p2.append('XXX'))

    # apply all patches:
    f.gen_patches(matches)
    patched = f.get_patched_content()
    assert patched == ('one line\n'+
                       'this pattern will be patched: defBBBBBhij\n'+
                       'third line\n'+
                       'another pattern: jihAAAAAXXXfed')
class Function(FileMatch):
    """Match a 'BEGIN <name> ... END' block (non-greedy across lines)."""
    regexp = S(r'BEGIN\s+', NAMED('name', RE_IDENTIFIER), r'\n',
               r'(.*\n)*?END\n')
class Statement(FileMatch):
    """Match a single '<name>()' statement line (optionally indented)."""
    regexp = S(r'^\s*', NAMED('name', RE_IDENTIFIER), r'\(\)\n')
def test_container_match():
    """Check that Function matches correctly contain their Statement matches."""
    of = NamedTemporaryFile('wt')
    of.writelines(['statement1()\n',
                   'statement2()\n',
                   'BEGIN function1\n',
                   '  statement3()\n',
                   '  statement4()\n',
                   'END\n',
                   'BEGIN function2\n',
                   '  statement5()\n',
                   '  statement6()\n',
                   'END\n',
                   'statement7()\n'])
    of.flush()

    files = FileList()
    f = FileInfo(files, of.name)
    f.load()
    assert len(f.matches_of_type(Function)) == 2
    print(' '.join(m.name for m in f.matches_of_type(Statement)))
    assert len(f.matches_of_type(Statement)) == 7

    f1 = f.find_match(Function, 'function1')
    f2 = f.find_match(Function, 'function2')
    st1 = f.find_match(Statement, 'statement1')
    st2 = f.find_match(Statement, 'statement2')
    st3 = f.find_match(Statement, 'statement3')
    st4 = f.find_match(Statement, 'statement4')
    st5 = f.find_match(Statement, 'statement5')
    st6 = f.find_match(Statement, 'statement6')
    st7 = f.find_match(Statement, 'statement7')

    assert not f1.contains(st1)
    assert not f1.contains(st2)
    # NOTE(review): the next line duplicates the previous assertion — looks
    # like a copy-paste; kept to preserve the original test verbatim.
    assert not f1.contains(st2)
    assert f1.contains(st3)
    assert f1.contains(st4)
    assert not f1.contains(st5)
    assert not f1.contains(st6)
    assert not f1.contains(st7)

    assert not f2.contains(st1)
    assert not f2.contains(st2)
    # NOTE(review): duplicate assertion, as above.
    assert not f2.contains(st2)
    assert not f2.contains(st3)
    assert not f2.contains(st4)
    assert f2.contains(st5)
    assert f2.contains(st6)
    assert not f2.contains(st7)
| 31.12381
| 71
| 0.597613
| 408
| 3,268
| 4.723039
| 0.340686
| 0.056046
| 0.046705
| 0.069019
| 0.265179
| 0.213285
| 0.137519
| 0.11261
| 0.079398
| 0.046705
| 0
| 0.031879
| 0.270502
| 3,268
| 104
| 72
| 31.423077
| 0.776426
| 0.097001
| 0
| 0.2
| 0
| 0
| 0.163265
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.05
| false
| 0
| 0.0375
| 0.025
| 0.1875
| 0.0125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
71e063f198be6d799932aa28d7e46247d3e2c98f
| 634
|
py
|
Python
|
Traversy Media/Python Django Dev to Deployment/Python Fundamentals/Tuples and Sets.py
|
Anim-101/CourseHub
|
570ddc2bca794c14921991d24fdf1b4a7d0beb68
|
[
"MIT"
] | 3
|
2019-11-01T17:07:13.000Z
|
2020-04-01T10:27:05.000Z
|
Traversy Media/Python Django Dev to Deployment/Python Fundamentals/Tuples and Sets.py
|
Anim-101/CourseHub
|
570ddc2bca794c14921991d24fdf1b4a7d0beb68
|
[
"MIT"
] | 18
|
2020-08-10T05:11:24.000Z
|
2021-12-03T15:13:40.000Z
|
Traversy Media/Python Django Dev to Deployment/Python Fundamentals/Tuples and Sets.py
|
Anim-101/CourseHub
|
570ddc2bca794c14921991d24fdf1b4a7d0beb68
|
[
"MIT"
] | null | null | null |
# Tutorial script: tuples (left commented out) and basic set operations.

# # Simple Tuple
# fruits = ('Apple', 'Orange', 'Mango')
# # Using Constructor
# fruits = tuple(('Apple', 'Orange', 'Mango'))
# # Getting a Single Value
# print(fruits[1])

# Trying to change based on position (tuples are immutable -> TypeError)
# fruits[1] = 'Grape'

# Tuples with one value should have trailing comma
# fruits = ('Apple')
# fruits = ('Apple',)

# # Getting length of a tuple
# print(len(fruits))

# ## Set -- the duplicate 'Apple' is stored only once
fruits = {'Apple', 'Orange', 'Mango', 'Apple'}

# Checking if in Set
print('Apple' in fruits)

# Add to Set
fruits.add('Grape')

# Removing from Set
fruits.remove('Grape')

# Clearing Set
fruits.clear()
print(fruits)  # set() -- cleared, but the name still exists

# Delete set: the name is unbound afterwards, so any reference to fruits
# (the original script printed fruits here, which raised NameError).
del fruits
| 16.25641
| 50
| 0.652997
| 85
| 634
| 4.870588
| 0.517647
| 0.10628
| 0.115942
| 0.10628
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003846
| 0.179811
| 634
| 38
| 51
| 16.684211
| 0.792308
| 0.664038
| 0
| 0
| 0
| 0
| 0.193548
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.285714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
71e0e6976164ccf999455f35ac70c3e13a0fe3ef
| 20,146
|
py
|
Python
|
nerblackbox/modules/ner_training/metrics/ner_metrics.py
|
flxst/nerblackbox
|
7612b95850e637be258f6bfb01274453b7372f99
|
[
"Apache-2.0"
] | null | null | null |
nerblackbox/modules/ner_training/metrics/ner_metrics.py
|
flxst/nerblackbox
|
7612b95850e637be258f6bfb01274453b7372f99
|
[
"Apache-2.0"
] | null | null | null |
nerblackbox/modules/ner_training/metrics/ner_metrics.py
|
flxst/nerblackbox
|
7612b95850e637be258f6bfb01274453b7372f99
|
[
"Apache-2.0"
] | null | null | null |
from dataclasses import dataclass
from dataclasses import asdict
from typing import List, Tuple, Callable
import numpy as np
from sklearn.metrics import accuracy_score as accuracy_sklearn
from sklearn.metrics import precision_score as precision_sklearn
from sklearn.metrics import recall_score as recall_sklearn
from sklearn.metrics import precision_recall_fscore_support as prf_sklearn
from sklearn.exceptions import UndefinedMetricWarning
import warnings
from seqeval.metrics import precision_score as precision_seqeval
from seqeval.metrics import recall_score as recall_seqeval
from seqeval.metrics import f1_score as f1_seqeval
from seqeval.scheme import IOB2, BILOU
from nerblackbox.modules.ner_training.annotation_tags.tags import Tags
class NerMetrics:
    """
    On the token level, the tags are evaluated in the given annotation scheme (e.g. plain, BIO)
    On the entity level, the tags are evaluated in the BIO scheme (after converting if needed)
    """

    def __init__(
        self,
        true_flat,
        pred_flat,
        level,
        scheme,
        classes=None,
        class_index=None,
        verbose=False,
    ):
        """
        :param true_flat: [np array] of shape [batch_size * seq_length]
        :param pred_flat: [np array] of shape [batch_size * seq_length]
        :param level: [str] 'token' or 'entity'
        :param scheme: [str] e.g. 'plain', 'bio'
        :param classes: [optional, list] of [str] labels to take into account for metrics -> if level = 'token'
        :param class_index: [optional, int] index to take into account for metrics -> if level = 'entity'
        :param verbose: [optional, bool] if True, show verbose output
        """
        self.true_flat = true_flat  # token -> plain. entity -> plain, bio, bilou
        self.pred_flat = pred_flat  # token -> plain. entity -> plain, bio, bilou
        self.scheme = scheme        # token -> plain. entity -> plain, bio, bilou
        self.classes = classes
        self.class_index = class_index
        self.level = level
        self.verbose = verbose

        # entity-level evaluation always runs in bio or bilou; plain maps to bio
        if self.scheme == "bilou":
            self.scheme_entity = "bilou"
            self.scheme_entity_seqeval = BILOU
        else:  # plain, bio
            self.scheme_entity = "bio"
            self.scheme_entity_seqeval = IOB2

        self.results = Results()
        self.failure_value = -1  # sentinel returned when a metric is undefined

        assert self.level in [
            "token",
            "entity",
        ], f"ERROR! level = {self.level} unknown."

        if self.level == "entity":
            self.true_flat_bio: List[str] = Tags(self.true_flat,).convert_scheme(
                source_scheme=self.scheme, target_scheme=self.scheme_entity
            )  # entity -> bio, bilou
            self.pred_flat_bio: List[str] = Tags(self.pred_flat).convert_scheme(
                source_scheme=self.scheme, target_scheme=self.scheme_entity
            )  # entity -> bio, bilou

            # ASR: annotation-scheme-restored predictions + abidance rate
            self.pred_flat_bio_corrected: List[str]
            self.pred_flat_bio_corrected, self.results.asr_abidance = Tags(
                self.pred_flat_bio
            ).restore_annotation_scheme_consistency(
                scheme=self.scheme_entity
            )  # entity -> bio, bilou

    def results_as_dict(self):
        """Return all computed metrics as a plain dict (dataclass -> dict)."""
        return asdict(self.results)

    def compute(self, _metrics):
        """
        computes selected metrics
        ----------------------------------------------------------
        :param _metrics: [list] of [str], e.g. ['acc, 'precision']
        :return: -
        """
        # turn UndefinedMetricWarning into an exception so it can be caught below
        warnings.filterwarnings("error")

        if "acc" in _metrics:
            self.accuracy()
        if "precision" in _metrics or "recall" in _metrics or "f1" in _metrics:
            self._compute_well_defined_classes()
        if "precision" in _metrics or "f1" in _metrics:
            self.precision()
        if "recall" in _metrics or "f1" in _metrics:
            self.recall()
        if "f1" in _metrics:
            self.f1_score()
        if (
            "asr_abidance" in _metrics
            or "asr_precision" in _metrics
            or "asr_recall" in _metrics
            or "asr_f1" in _metrics
        ):
            self.compute_asr_results()

        warnings.resetwarnings()

    def accuracy(self):
        """
        computes accuracy of predictions (_np_logits) w.r.t. ground truth (_np_label_ids)
        ---------------------------------------------------------------------------------
        :return: acc [np float]
        """
        self.results.acc = accuracy_sklearn(
            self.true_flat, self.pred_flat, normalize=True
        )

    def precision(self):
        """
        computes precision (macro/micro) of predictions (_pred_flat) w.r.t. ground truth (_true_flat)

        Returns:
            precision_micro [np array] for all examples
            precision_macro [np array] for each class, then averaged
        """
        if self.level == "token":
            self.results.precision_micro = self._token_evaluation(
                evaluation_function=precision_sklearn, average="micro"
            )
            self.results.precision_macro = self._token_evaluation(
                evaluation_function=precision_sklearn, average="macro"
            )
        elif self.level == "entity":
            self.results.precision_micro = self._entity_evaluation_micro(
                evaluation_function=precision_seqeval
            )
            self.results.precision_macro = self._entity_evaluation_macro(
                evaluation_function=precision_seqeval,
            )

    def recall(self):
        """
        computes recall (macro/micro) of predictions (_pred_flat) w.r.t. ground truth (_true_flat)

        Returns:
            recall_micro [np array] for all examples
            recall_macro [np array] for each class, then averaged
        """
        if self.level == "token":
            self.results.recall_micro = self._token_evaluation(
                evaluation_function=recall_sklearn, average="micro"
            )
            self.results.recall_macro = self._token_evaluation(
                evaluation_function=recall_sklearn, average="macro"
            )
        elif self.level == "entity":
            self.results.recall_micro = self._entity_evaluation_micro(
                evaluation_function=recall_seqeval
            )
            self.results.recall_macro = self._entity_evaluation_macro(
                evaluation_function=recall_seqeval
            )

    def f1_score(self):
        """
        computes f1 score (macro/micro) of predictions (_pred_flat) w.r.t. ground truth (_true_flat)

        Returns:
            f1_score_micro [np array] for all examples
            f1_score_macro [np array] for each class, then averaged
        """
        if self.level == "token":
            self.results.f1_micro = self._token_evaluation(
                evaluation_function=prf_sklearn, average="micro"
            )
            self.results.f1_macro = self._token_evaluation(
                evaluation_function=prf_sklearn, average="macro"
            )
        elif self.level == "entity":
            self.results.f1_micro, self.results.f1_macro = self._entity_evaluation_f1(
                evaluation_function=f1_seqeval,
            )

    def compute_asr_results(self):
        """
        computes (on the annotation-scheme-restored predictions)
        - self.results.asr_precision_micro
        - self.results.asr_recall_micro
        - self.results.asr_f1_micro
        """

        def _entity_evaluation_micro_asr(evaluation_function: Callable) -> float:
            """helper function"""
            try:
                metric = evaluation_function(
                    [self.true_flat_bio],
                    [self.pred_flat_bio_corrected],  # corrected !!!
                    average="micro",
                    mode="strict",
                    scheme=self.scheme_entity_seqeval,
                )
            except UndefinedMetricWarning as e:
                if self.verbose:
                    print(e)
                metric = self.failure_value
            return metric

        self.results.asr_precision_micro = _entity_evaluation_micro_asr(
            evaluation_function=precision_seqeval
        )
        self.results.asr_recall_micro = _entity_evaluation_micro_asr(
            evaluation_function=recall_seqeval
        )
        self.results.asr_f1_micro = _entity_evaluation_micro_asr(
            evaluation_function=f1_seqeval
        )

    def _token_evaluation(self, evaluation_function: Callable, average: str) -> float:
        """
        compute precision/recall/f1 on token level

        Args:
            evaluation_function: precision_sklearn, recall_sklearn, prf_sklearn
            average: 'micro' or 'macro'

        Returns:
            metric: precision/recall on token level, 'micro' or 'macro' averaged
        """
        assert evaluation_function in [
            precision_sklearn,
            recall_sklearn,
            prf_sklearn,
        ], f"evaluation function = {evaluation_function} unknown / not allowed."
        assert average in ["micro", "macro"], f"average = {average} unknown."

        if self.classes is None or len(self.classes) > 1:  # "all" / "fil"
            if evaluation_function != prf_sklearn:
                metric = evaluation_function(
                    self.true_flat,
                    self.pred_flat,
                    labels=self.classes,
                    average=average,
                    zero_division=0,
                )
            else:
                _, _, metric, _ = prf_sklearn(
                    self.true_flat,
                    self.pred_flat,
                    labels=self.classes,
                    average=average,
                    zero_division=0,
                )
        else:
            # single class: let undefined metrics raise, fall back to failure_value
            try:
                if evaluation_function != prf_sklearn:
                    metric = evaluation_function(
                        self.true_flat,
                        self.pred_flat,
                        labels=self.classes,
                        average=average,
                        zero_division="warn",
                    )
                else:
                    _, _, metric, _ = prf_sklearn(
                        self.true_flat,
                        self.pred_flat,
                        labels=self.classes,
                        average=average,
                        warn_for=("precision", "recall", "f-score"),
                        zero_division="warn",
                    )
            except UndefinedMetricWarning as e:
                if self.verbose:
                    print(e)
                metric = self.failure_value

        return metric

    def _entity_evaluation_micro(self, evaluation_function: Callable) -> float:
        """
        compute precision/recall micro average on entity level

        Args:
            evaluation_function: precision_seqeval, recall_seqeval

        Returns:
            metric: precision/recall on entity level, 'micro' averaged
        """
        assert evaluation_function in [
            precision_seqeval,
            recall_seqeval,
        ], f"evaluation function = {evaluation_function} unknown / not allowed."

        if self.class_index is None:  # "fil"
            try:
                metric = evaluation_function(
                    [self.true_flat_bio],
                    [self.pred_flat_bio],
                    average="micro",
                    mode="strict",
                    scheme=self.scheme_entity_seqeval,
                )
            except UndefinedMetricWarning as e:
                if self.verbose:
                    print(e)
                metric = self.failure_value
        else:  # "ind"
            try:
                metric = evaluation_function(
                    [self.true_flat_bio],
                    [self.pred_flat_bio],
                    mode="strict",
                    scheme=self.scheme_entity_seqeval,
                    average=None,
                    zero_division="warn",
                )[self.class_index]
            except UndefinedMetricWarning:
                # metric undefined: retry with zero_division=0; if the result is 0,
                # disambiguate "really 0" from "undefined" via zero_division=1.
                try:
                    metric = evaluation_function(
                        [self.true_flat_bio],
                        [self.pred_flat_bio],
                        mode="strict",
                        scheme=self.scheme_entity_seqeval,
                        average=None,
                        zero_division=0,
                    )[self.class_index]
                except IndexError:
                    metric = self.failure_value

                if metric == 0:
                    metric = evaluation_function(
                        [self.true_flat_bio],
                        [self.pred_flat_bio],
                        mode="strict",
                        scheme=self.scheme_entity_seqeval,
                        average=None,
                        zero_division=1,
                    )[self.class_index]
                    if metric == 1:
                        metric = self.failure_value
            except IndexError:
                metric = self.failure_value

        return metric

    def _compute_well_defined_classes(self) -> None:
        """
        Created Attributes:
            results.classindices_macro: list of indices of well-defined classes in terms of precision, recall, f1
            results.numberofclasses_macro: number of well-defined classes in terms of precision, recall, f1
        """

        def _get_index_list(
            evaluation_function: Callable, true_array, pred_array, scheme_seqeval=None
        ):
            # seqeval needs mode/scheme kwargs; sklearn takes none
            kwargs = (
                {"mode": "strict", "scheme": scheme_seqeval}
                if scheme_seqeval is not None
                else {}
            )
            try:
                metric_list = evaluation_function(
                    true_array,
                    pred_array,
                    average=None,
                    zero_division="warn",
                    **kwargs,
                )
                index_list = [i for i in range(len(metric_list))]
            except UndefinedMetricWarning:
                metric_list_all = evaluation_function(
                    true_array,
                    pred_array,
                    average=None,
                    zero_division=0,
                    **kwargs,
                )
                index_list = list()
                for index, metric_elem in enumerate(metric_list_all):
                    if metric_elem != 0:
                        index_list.append(index)
                    else:
                        # 0 with zero_division=0 but !=1 with zero_division=1
                        # means the metric is genuinely 0, i.e. well-defined
                        metric_elem_alt = evaluation_function(
                            true_array,
                            pred_array,
                            average=None,
                            zero_division=1,
                            **kwargs,
                        )[index]
                        if metric_elem_alt != 1:
                            index_list.append(index)
            return index_list

        if self.level == "token":
            index_list_precision = _get_index_list(
                evaluation_function=precision_sklearn,
                true_array=self.true_flat,
                pred_array=self.pred_flat,
            )
            index_list_recall = _get_index_list(
                evaluation_function=recall_sklearn,
                true_array=self.true_flat,
                pred_array=self.pred_flat,
            )
        else:
            index_list_precision = _get_index_list(
                evaluation_function=precision_seqeval,
                true_array=[self.true_flat_bio],
                pred_array=[self.pred_flat_bio],
                scheme_seqeval=self.scheme_entity_seqeval,
            )
            index_list_recall = _get_index_list(
                evaluation_function=recall_seqeval,
                true_array=[self.true_flat_bio],
                pred_array=[self.pred_flat_bio],
                scheme_seqeval=self.scheme_entity_seqeval,
            )

        self.results.classindices_macro = tuple(
            [index for index in index_list_precision if index in index_list_recall]
        )
        if self.level == "token":
            self.results.numberofclasses_macro = (
                len(self.results.classindices_macro) - 1
            )  # disregard "O" label
        else:
            self.results.numberofclasses_macro = len(self.results.classindices_macro)

    def _entity_evaluation_macro(
        self,
        evaluation_function: Callable,
    ) -> float:
        """
        compute precision/recall macro average on entity level

        Args:
            evaluation_function: precision_seqeval, recall_seqeval

        Returns:
            metric: precision/recall on entity level, 'macro' averaged on well-defined classes
        """
        assert evaluation_function in [
            precision_seqeval,
            recall_seqeval,
        ], f"evaluation function = {evaluation_function} unknown / not allowed."

        metric = evaluation_function(
            [self.true_flat_bio],
            [self.pred_flat_bio],
            mode="strict",
            scheme=self.scheme_entity_seqeval,
            average="macro",
            zero_division=0,
        )
        return metric

    def _entity_evaluation_f1(
        self, evaluation_function: Callable
    ) -> Tuple[float, float]:
        """
        compute f1 micro or macro average on entity level

        Args:
            evaluation_function: f1_seqeval

        Returns:
            f1_micro: f1 on entity level, 'micro' averaged
            f1_macro: f1 on entity level, 'macro' averaged on well-defined classes
        """
        assert evaluation_function in [
            f1_seqeval
        ], f"evaluation function = {evaluation_function} unknown / not allowed."

        # ensure that precision and recall have been called:
        # self.precision()
        # self.recall()

        # f1_micro
        if (
            self.results.precision_micro == self.failure_value
            or self.results.recall_micro == self.failure_value
        ):
            f1_micro = self.failure_value
        else:
            if self.class_index is None:  # "fil"
                f1_micro = evaluation_function(
                    [self.true_flat_bio],
                    [self.pred_flat_bio],
                    average="micro",
                    mode="strict",
                    scheme=self.scheme_entity_seqeval,
                )
            else:  # "ind"
                f1_micro = evaluation_function(
                    [self.true_flat_bio],
                    [self.pred_flat_bio],
                    mode="strict",
                    scheme=self.scheme_entity_seqeval,
                    average=None,
                    zero_division="warn",
                )[self.class_index]

        # f1_macro
        if (
            self.results.precision_macro == self.failure_value
            or self.results.recall_macro == self.failure_value
        ):
            f1_macro = self.failure_value
        else:
            if self.class_index is None:  # "fil"
                metric_list = evaluation_function(
                    [self.true_flat_bio],
                    [self.pred_flat_bio],
                    mode="strict",
                    scheme=self.scheme_entity_seqeval,
                    average=None,
                )
                f1_macro = np.average(metric_list)
            else:  # "ind"
                f1_macro = self.failure_value

        return f1_micro, f1_macro
@dataclass
class Results:
    """Container for NER metric results; -1 marks unset / not-computable values."""
    acc: float = -1
    precision_micro: float = -1
    precision_macro: float = -1
    recall_micro: float = -1
    recall_macro: float = -1
    f1_micro: float = -1
    f1_macro: float = -1
    classindices_macro: Tuple[float, ...] = ()
    numberofclasses_macro: float = -1
    asr_abidance: float = -1
    asr_precision_micro: float = -1
    asr_recall_micro: float = -1
    asr_f1_micro: float = -1
| 36.299099
| 116
| 0.538469
| 1,971
| 20,146
| 5.236936
| 0.090309
| 0.097655
| 0.027902
| 0.021798
| 0.663244
| 0.594943
| 0.529258
| 0.461442
| 0.391106
| 0.338016
| 0
| 0.005834
| 0.378934
| 20,146
| 554
| 117
| 36.364621
| 0.819134
| 0.162762
| 0
| 0.503704
| 0
| 0
| 0.040738
| 0.005185
| 0
| 0
| 0
| 0
| 0.014815
| 1
| 0.037037
| false
| 0
| 0.037037
| 0.002469
| 0.128395
| 0.007407
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
71e128bd284f8fc2eb997551cf3f8ee9632b562a
| 2,192
|
py
|
Python
|
Assignments/hw4/rank_feat_by_chi_square.py
|
spacemanidol/CLMS572
|
f0380de9912c984ec21607cdb3b1f190853c5ca8
|
[
"MIT"
] | null | null | null |
Assignments/hw4/rank_feat_by_chi_square.py
|
spacemanidol/CLMS572
|
f0380de9912c984ec21607cdb3b1f190853c5ca8
|
[
"MIT"
] | null | null | null |
Assignments/hw4/rank_feat_by_chi_square.py
|
spacemanidol/CLMS572
|
f0380de9912c984ec21607cdb3b1f190853c5ca8
|
[
"MIT"
] | 1
|
2020-12-26T01:28:41.000Z
|
2020-12-26T01:28:41.000Z
|
import sys
def readInput(stream=None):
    """Read labeled feature vectors, one instance per line.

    Line format: "<label> <feature>:<value> ...". Reading stops at the first
    line with fewer than two tokens (blank line or EOF).

    :param stream: optional text stream to read from; defaults to sys.stdin
                   (new parameter, backward-compatible -- makes the function testable).
    :return: ([labels, features], all_features, labelCount) where
             labels is a list of per-instance labels,
             features is a list of per-instance feature-name sets,
             all_features is the set of every feature seen,
             labelCount maps label -> number of instances.
    """
    if stream is None:
        stream = sys.stdin
    labels, features, all_features, labelCount = [], [], [], {}
    tokens = stream.readline().strip().split(' ')
    while len(tokens) > 1:
        label = tokens[0]
        if label not in labelCount:
            labelCount[label] = 0
        labelCount[label] += 1
        labels.append(label)
        currFeat = set()
        for key in tokens[1:]:
            # feature values are ignored: only presence/absence matters for chi^2
            feature, _ = key.split(':')
            all_features.append(feature)
            currFeat.add(feature)
        features.append(currFeat)
        tokens = stream.readline().strip().split(' ')
    return [labels, features], set(all_features), labelCount
def rankByChiSquared(data, features, labelCount):
    """Score every feature with the chi-squared statistic and print the ranking.

    For each feature a 2xC contingency table (occurs / does-not-occur per
    label) is compared against the expected counts under independence.
    Output lines are ``feature chi2 doc_count``, sorted by descending chi2,
    then descending count, then feature name.

    Args:
        data: ``[labels, features]`` — parallel lists of label strings and
            per-vector sets of feature names (as returned by readInput).
        features: iterable of all feature names to rank.
        labelCount: dict mapping label -> number of training vectors.

    Returns:
        None; results are written to stdout.
    """
    labels = labelCount.keys()
    dataLength = len(data[0])
    n = sum(labelCount.values())
    results = []
    for feature in features:
        # Observed counts: how many vectors of each label contain the feature.
        occurrences = {label: 0 for label in labels}
        for i in range(dataLength):
            if feature in data[1][i]:
                occurrences[data[0][i]] += 1
        nonOccurrences = {label: labelCount[label] - occurrences[label]
                          for label in labels}
        totalOcc = sum(occurrences.values())
        totalNonOcc = sum(nonOccurrences.values())
        chi = 0.0
        for label in labels:
            expectedOcc = labelCount[label] * totalOcc / n
            expectedNonOcc = labelCount[label] * totalNonOcc / n
            # Bug fix: a feature present in every (or no) vector makes one of
            # the expected counts 0; the observed count is then 0 too, so the
            # term contributes nothing — guard instead of dividing by zero.
            if expectedOcc > 0:
                chi += (occurrences[label] - expectedOcc) ** 2 / expectedOcc
            if expectedNonOcc > 0:
                chi += (nonOccurrences[label] - expectedNonOcc) ** 2 / expectedNonOcc
        results.append([feature, chi, totalOcc])
    # Plain loop instead of a comprehension-for-side-effect.
    for score in sorted(results, key=lambda x: (-x[1], -x[2], x[0])):
        print('{} {:.5f} {}'.format(*score))
if __name__ == "__main__":
    # Read training vectors from stdin, then print every feature ranked by its
    # chi-squared association with the class labels.
    data, all_features, labelCount = readInput()
    # NOTE: rankByChiSquared prints and returns None; the assignment is kept
    # only to preserve the original structure.
    results = rankByChiSquared(data, all_features, labelCount)
| 56.205128
| 323
| 0.656022
| 242
| 2,192
| 5.884298
| 0.322314
| 0.073736
| 0.058989
| 0.033708
| 0.037921
| 0.037921
| 0
| 0
| 0
| 0
| 0
| 0.009884
| 0.215328
| 2,192
| 39
| 324
| 56.205128
| 0.818023
| 0.102646
| 0
| 0.102564
| 0
| 0
| 0.011723
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.051282
| false
| 0
| 0.025641
| 0
| 0.102564
| 0.025641
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e07b1d529111d4e2e89b3b1cd2c58ff9446e312f
| 6,642
|
py
|
Python
|
fem/fem.py
|
Pengeace/DGP-PDE-FEM
|
64b7f42ca7083b05f05c42baa6cad21084068d8c
|
[
"MIT"
] | 7
|
2019-06-26T07:25:33.000Z
|
2021-06-25T03:40:22.000Z
|
fem/fem.py
|
Pengeace/DGP-PDE-FEM
|
64b7f42ca7083b05f05c42baa6cad21084068d8c
|
[
"MIT"
] | null | null | null |
fem/fem.py
|
Pengeace/DGP-PDE-FEM
|
64b7f42ca7083b05f05c42baa6cad21084068d8c
|
[
"MIT"
] | null | null | null |
import numpy as np
import pyamg
from scipy import sparse
from scipy.spatial import Delaunay
from linsolver import sparse_solver
from triangulation.delaunay import delaunay
class Element:
    """One linear (P1) triangular finite element.

    Given its three vertices and the owning FEM problem, computes the local
    3x3 stiffness matrix and 3-entry load vector used during global assembly.
    """

    def __init__(self, points, global_indexes, fem):
        self.points = np.array(points)
        self.global_indexes = global_indexes
        self.fem = fem
        # Unit reference triangle and the constant gradients of its three
        # linear shape functions.
        self.reference_triangle = np.array([[0, 0], [1., 0], [0, 1.]])
        self.reference_grad = np.array([[-1., -1], [1., 0], [0, 1.]])

    def perform_calculation(self):
        """Compute the affine transform, stiffness matrix and load vector."""
        self._calculate_transform()
        self._calculate_stiffness_matrix()
        self._calculate_load_vector()

    def _calculate_transform(self):
        # Match homogeneous coordinates of the three vertices to recover the
        # affine map taking the reference triangle onto this element.
        ref_h = np.array([self.reference_triangle[:, 0],
                          self.reference_triangle[:, 1],
                          [1] * 3])
        phys_h = np.array([self.points[:, 0], self.points[:, 1], [1] * 3])
        affine = np.dot(phys_h, np.linalg.inv(ref_h))
        self.transform_matrix = affine[0:-1, 0:-1]
        # Triangle area is half the absolute Jacobian determinant.
        self.area = abs(np.linalg.det(self.transform_matrix)) / 2

    def _calculate_stiffness_matrix(self):
        inv_t = np.linalg.inv(self.transform_matrix).T  # hoisted loop invariant
        self.element_stiffness_matrix = np.zeros((3, 3))
        for i in range(3):
            # Diffusion part: A * grad(phi_i) mapped to physical coordinates.
            left_grad = np.dot(np.dot(self.fem.A, inv_t), self.reference_grad[i])
            for j in range(3):
                right_grad = np.dot(inv_t, self.reference_grad[j])
                diffusion = self.area * np.dot(left_grad, right_grad)
                # Mass term of the P1 element: area/6 on the diagonal,
                # area/12 off-diagonal.
                mass = (self.area / 6.0) if i == j else (self.area / 12.0)
                self.element_stiffness_matrix[i, j] = diffusion + self.fem.q * mass

    def _calculate_load_vector(self):
        # Midpoint-style quadrature: mean of f at the vertices times area/3.
        mean_f = np.mean([self.fem.get_func_value(x) for x in self.points])
        self.element_load_vector = np.array([mean_f * self.area / 3] * 3)
class FiniteElement:
    """
    Finite Element Method to solve the 2D Elliptic Partial Differentiation differential Equation with below form:
    div(A grad(u)) + q u = func

    Workflow (see solve): mesh the points, compute per-element matrices,
    assemble the global sparse system in COO triplet form, impose Dirichlet
    boundary values, then solve with either a hand-rolled Gauss-Seidel
    (slow_solver=True) or pyamg (slow_solver=False).
    """

    def __init__(self, points, boundaries, A, q, func, slow_solver=True):
        # points: (N, 2) vertex coordinates; boundaries: rows of
        # (vertex_index, value) Dirichlet conditions.
        self.points = np.array(points)
        self.dirichlet_boundaries = np.array(boundaries)
        self.A = A  # 2x2 diffusion coefficient matrix
        self.q = q  # scalar reaction coefficient
        self.f = func  # right-hand side: callable or dict keyed by point tuple
        self.slow_solver = slow_solver
        self.triangles = []  # filled lazily by _get_mesh
        self.point_num = len(points)

    def solve(self):
        """Run the full pipeline; the result ends up in self.solution."""
        if len(self.triangles) == 0:
            self._get_mesh()
        self._process_each_element()
        self._calculate_global_stiffness_matrix()
        self._calulate_global_load_vector()
        self._deal_with_dirichlet_bound()
        self._solve_linear_equations()

    def update_border_and_func(self, boundaries, func):
        """Swap boundary conditions and RHS while keeping the existing mesh."""
        self.dirichlet_boundaries = np.array(boundaries)
        self.f = func

    def get_func_value(self, x):
        """Evaluate the RHS at point x (dict lookup or function call)."""
        if isinstance(self.f, dict):
            return self.f[tuple(x)]
        else:
            return self.f(x)

    def _get_mesh(self):
        # slow_solver pairs with the project's own Delaunay implementation;
        # otherwise use scipy's.
        if self.slow_solver:
            self.triangles = delaunay(self.points)
        else:
            triangulation = Delaunay(self.points)
            self.triangles = triangulation.simplices

    def _process_each_element(self):
        # Build an Element per triangle and compute its local matrices.
        self.elements = []
        for tri in self.triangles:
            ele = Element(points=[self.points[v] for v in tri], global_indexes=tri, fem=self)
            ele.perform_calculation()
            self.elements.append(ele)

    def _calculate_global_stiffness_matrix(self):
        # Assemble COO triplets; rows belonging to Dirichlet vertices are
        # skipped here and replaced with identity rows later.
        self.global_stiffness_matrix_row = []
        self.global_stiffness_matrix_col = []
        self.global_stiffness_matrix_data = []
        boundary_indexes = set(self.dirichlet_boundaries[:, 0].astype('int'))
        for ele in self.elements:
            for row in range(3):
                if ele.global_indexes[row] not in boundary_indexes:
                    for col in range(3):
                        self.global_stiffness_matrix_row.append(ele.global_indexes[row])
                        self.global_stiffness_matrix_col.append(ele.global_indexes[col])
                        self.global_stiffness_matrix_data.append(ele.element_stiffness_matrix[row, col])

    def _calulate_global_load_vector(self):
        # Scatter-add each element's load contributions into the global vector.
        self.global_load_vector = np.zeros(self.point_num)
        for ele in self.elements:
            for v in range(3):
                self.global_load_vector[ele.global_indexes[v]] += ele.element_load_vector[v]

    def _deal_with_dirichlet_bound(self):
        # For each boundary vertex add an identity row (coefficient 1) and put
        # the prescribed value on the RHS, pinning u[index] = val.
        for index, val in self.dirichlet_boundaries:
            index = int(index)
            self.global_stiffness_matrix_row.append(index)
            self.global_stiffness_matrix_col.append(index)
            self.global_stiffness_matrix_data.append(1)
            self.global_load_vector[index] = val

    def _solve_linear_equations(self):
        """Solve the assembled system, storing the result in self.solution."""
        if not self.slow_solver:
            # Fast path: convert COO triplets to CSR and use pyamg.
            self.global_stiffness_matrix_csr = sparse.coo_matrix((self.global_stiffness_matrix_data, (
                self.global_stiffness_matrix_row, self.global_stiffness_matrix_col))).tocsr()
            self.solution = pyamg.solve(self.global_stiffness_matrix_csr, self.global_load_vector, verb=False,
                                        tol=1e-10)
        else:
            # Slow path: project's sparse Gauss-Seidel on the raw triplets.
            global_stiffness_sparse = [np.array(self.global_stiffness_matrix_row),
                                       np.array(self.global_stiffness_matrix_col),
                                       np.array(self.global_stiffness_matrix_data)]
            self.solution = sparse_solver.sparse_gauss_seidel(global_stiffness_sparse, self.global_load_vector,
                                                              sparse_input=True)
        ## these solver methods are for test
        # self.global_stiffness = sparse.coo_matrix((self.global_stiffness_matrix_data, (
        #     self.global_stiffness_matrix_row, self.global_stiffness_matrix_col))).tocsr()
        # self.solution = linsolver.jacobi(self.global_stiffness.toarray(), self.global_load_vector)
        # self.solution = linsolver.gauss_seidel(self.global_stiffness.toarray(), self.global_load_vector)
        # self.solution = sparse_solver.sparse_jacobi(self.global_stiffness.toarray(), self.global_load_vector, sparse_input=False)
        # self.solution = sparse_solver.sparse_gauss_seidel(self.global_stiffness.toarray(), self.global_load_vector, sparse_input=False)
        # NOTE(review): the solver apparently returns a str on bad input —
        # hence this check; confirm against sparse_solver's contract.
        if isinstance(self.solution, str):
            print("The inputs for linear solver have problems.")
| 40.012048
| 141
| 0.643029
| 839
| 6,642
| 4.79857
| 0.174017
| 0.086935
| 0.117983
| 0.124193
| 0.419771
| 0.316195
| 0.197466
| 0.140089
| 0.140089
| 0.137109
| 0
| 0.009333
| 0.257904
| 6,642
| 165
| 142
| 40.254545
| 0.807466
| 0.116983
| 0
| 0.133929
| 0
| 0
| 0.007881
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.133929
| false
| 0
| 0.053571
| 0
| 0.223214
| 0.008929
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e07c0e48507e0965db82bd0823c76af3d0ebb993
| 2,745
|
py
|
Python
|
custom_components/tahoma/climate_devices/dimmer_exterior_heating.py
|
MatthewFlamm/ha-tahoma
|
794e8e4a54a8e5f55622b88bb1ab5ffc3ecb0d1b
|
[
"MIT"
] | null | null | null |
custom_components/tahoma/climate_devices/dimmer_exterior_heating.py
|
MatthewFlamm/ha-tahoma
|
794e8e4a54a8e5f55622b88bb1ab5ffc3ecb0d1b
|
[
"MIT"
] | null | null | null |
custom_components/tahoma/climate_devices/dimmer_exterior_heating.py
|
MatthewFlamm/ha-tahoma
|
794e8e4a54a8e5f55622b88bb1ab5ffc3ecb0d1b
|
[
"MIT"
] | null | null | null |
"""Support for Atlantic Electrical Heater IO controller."""
import logging
from typing import List
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS
from ..coordinator import TahomaDataUpdateCoordinator
from ..tahoma_entity import TahomaEntity
_LOGGER = logging.getLogger(__name__)

# TaHoma command and state identifiers used by this climate entity.
COMMAND_GET_LEVEL = "getLevel"  # ask the device to publish its current level
COMMAND_SET_LEVEL = "setLevel"  # set the dimmer level
CORE_LEVEL_STATE = "core:LevelState"  # state holding the current level
class DimmerExteriorHeating(TahomaEntity, ClimateEntity):
    """Representation of TaHoma IO Atlantic Electrical Heater."""

    def __init__(self, device_url: str, coordinator: TahomaDataUpdateCoordinator):
        """Init method."""
        super().__init__(device_url, coordinator)
        # The device level is the inverse of the exposed percentage:
        # level 100 means fully off, level 0 means full heat.
        self._saved_level = 100 - self.select_state(CORE_LEVEL_STATE)

    @property
    def supported_features(self) -> int:
        """Return the list of supported features."""
        return SUPPORT_TARGET_TEMPERATURE

    @property
    def temperature_unit(self) -> str:
        """Return the unit of measurement used by the platform."""
        return TEMP_CELSIUS

    @property
    def min_temp(self) -> float:
        """Return minimum percentage."""
        return 0

    @property
    def max_temp(self) -> float:
        """Return maximum percentage."""
        return 100

    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        return 100 - self.select_state(CORE_LEVEL_STATE)

    async def _apply_level(self, level) -> None:
        """Write an inverted level to the device, then refresh its state."""
        await self.async_execute_command(COMMAND_SET_LEVEL, 100 - int(level))
        await self.async_execute_command(COMMAND_GET_LEVEL)

    async def async_set_temperature(self, **kwargs) -> None:
        """Set new target temperature."""
        target = kwargs.get(ATTR_TEMPERATURE)
        if target is None:
            return
        await self._apply_level(target)

    @property
    def hvac_mode(self) -> str:
        """Return hvac operation ie. heat, cool mode."""
        heater_off = self.select_state(CORE_LEVEL_STATE) == 100
        return HVAC_MODE_OFF if heater_off else HVAC_MODE_HEAT

    @property
    def hvac_modes(self) -> List[str]:
        """Return the list of available hvac operation modes."""
        return [HVAC_MODE_OFF, HVAC_MODE_HEAT]

    async def async_set_hvac_mode(self, hvac_mode: str) -> None:
        """Set new target hvac mode."""
        if hvac_mode == HVAC_MODE_HEAT:
            level = self._saved_level
        else:
            # Remember the current percentage so HEAT can restore it later.
            self._saved_level = self.target_temperature
            level = 0
        await self._apply_level(level)
| 31.918605
| 82
| 0.687796
| 326
| 2,745
| 5.515337
| 0.288344
| 0.053393
| 0.026696
| 0.046719
| 0.159622
| 0.159622
| 0.143493
| 0.107898
| 0.107898
| 0.107898
| 0
| 0.009381
| 0.223315
| 2,745
| 85
| 83
| 32.294118
| 0.833959
| 0.146448
| 0
| 0.196429
| 0
| 0
| 0.013914
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.125
| 0
| 0.446429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e07c5c23f946a28e4cc418a3bd4c6debbb0d6123
| 3,271
|
py
|
Python
|
elit/components/mtl/attn/joint_encoder.py
|
emorynlp/stem-cell-hypothesis
|
48a628093d93d653865fbac6409d179cddd99293
|
[
"Apache-2.0"
] | 4
|
2021-09-17T15:23:31.000Z
|
2022-02-28T10:18:04.000Z
|
elit/components/mtl/attn/joint_encoder.py
|
emorynlp/stem-cell-hypothesis
|
48a628093d93d653865fbac6409d179cddd99293
|
[
"Apache-2.0"
] | null | null | null |
elit/components/mtl/attn/joint_encoder.py
|
emorynlp/stem-cell-hypothesis
|
48a628093d93d653865fbac6409d179cddd99293
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2021-03-02 13:32
from typing import Optional, Union, Dict, Any
import torch
from torch import nn
from transformers import PreTrainedTokenizer
from elit.components.mtl.attn.attn import TaskAttention
from elit.components.mtl.attn.transformer import JointEncoder
from elit.layers.embeddings.contextual_word_embedding import ContextualWordEmbeddingModule, ContextualWordEmbedding
from elit.layers.scalar_mix import ScalarMixWithDropoutBuilder
from elit.layers.transformers.utils import pick_tensor_for_each_token
class JointContextualWordEmbeddingModule(ContextualWordEmbeddingModule):
    """Contextual word embedding module backed by a JointEncoder that yields
    one encoder output per task, instead of a single shared output."""

    def __init__(self, field: str, transformer: str, transformer_tokenizer: PreTrainedTokenizer, average_subwords=False,
                 scalar_mix: Union[ScalarMixWithDropoutBuilder, int] = None, word_dropout=None,
                 max_sequence_length=None, ret_raw_hidden_states=False, transformer_args: Dict[str, Any] = None,
                 trainable=True, training=True) -> None:
        """Forward every argument to ContextualWordEmbeddingModule unchanged."""
        super().__init__(field, transformer, transformer_tokenizer, average_subwords, scalar_mix, word_dropout,
                         max_sequence_length, ret_raw_hidden_states, transformer_args, trainable, training)
        # Task-attention adapter; left as None here and presumably assigned by
        # the owning component before use — TODO(review) confirm.
        self.adapter: TaskAttention = None

    def forward(self, batch: dict, mask=None, **kwargs):
        """Encode the batch and return a dict of per-task token embeddings."""
        input_ids: torch.LongTensor = batch[f'{self.field}_input_ids']
        # Sliding-window chunking for over-long inputs is not implemented.
        if self.max_sequence_length and input_ids.size(-1) > self.max_sequence_length:
            raise NotImplementedError('Sentence length exceeded and sliding window has not been implemented yet')
        token_span: torch.LongTensor = batch.get(f'{self.field}_token_span', None)
        token_type_ids: torch.LongTensor = batch.get(f'{self.field}_token_type_ids', None)
        # Attention mask: non-zero ids are real tokens (assumes pad id == 0).
        attention_mask = input_ids.ne(0)
        if self.word_dropout:
            input_ids = self.word_dropout(input_ids)
        # noinspection PyTypeChecker
        transformer: JointEncoder = self.transformer
        encoder_outputs = transformer(input_ids, attention_mask, token_type_ids)
        outputs = dict()
        for task_name, encoder_output in encoder_outputs.items():
            # encoder_output[0] is taken as the hidden-state tensor; subword
            # vectors are then pooled back onto original tokens.
            encoder_output = encoder_output[0]
            outputs[task_name] = pick_tensor_for_each_token(encoder_output, token_span, self.average_subwords)
        return outputs
class JointContextualWordEmbedding(ContextualWordEmbedding):
    """ContextualWordEmbedding variant that builds the joint (per-task) module."""

    def module(self, training=True, **kwargs) -> Optional[nn.Module]:
        """Instantiate a JointContextualWordEmbeddingModule from this config."""
        ctor_args = (
            self.field,
            self.transformer,
            self._transformer_tokenizer,
            self.average_subwords,
            self.scalar_mix,
            self.word_dropout,
            self.max_sequence_length,
            self.ret_raw_hidden_states,
            self.transformer_args,
            self.trainable,
        )
        return JointContextualWordEmbeddingModule(*ctor_args, training=training)
| 53.622951
| 120
| 0.63956
| 327
| 3,271
| 6.134557
| 0.345566
| 0.027916
| 0.042373
| 0.026919
| 0.107677
| 0.037886
| 0.037886
| 0.037886
| 0
| 0
| 0
| 0.006905
| 0.291654
| 3,271
| 60
| 121
| 54.516667
| 0.858869
| 0.025986
| 0
| 0
| 0
| 0
| 0.045269
| 0.022634
| 0
| 0
| 0
| 0
| 0
| 1
| 0.065217
| false
| 0
| 0.195652
| 0.021739
| 0.347826
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e07ce9c764d3c52f1697472892d9c4a14a2d9b6a
| 5,140
|
py
|
Python
|
jaxrl/agents/sac_v1/sac_v1_learner.py
|
anuragajay/jaxrl
|
a37414aea9e281f19719ccfc09702b32e1ef4e44
|
[
"MIT"
] | 157
|
2021-03-12T04:30:53.000Z
|
2021-06-10T11:28:48.000Z
|
jaxrl/agents/sac_v1/sac_v1_learner.py
|
anuragajay/jaxrl
|
a37414aea9e281f19719ccfc09702b32e1ef4e44
|
[
"MIT"
] | 3
|
2021-09-23T21:13:28.000Z
|
2021-11-19T12:32:34.000Z
|
jaxrl/agents/sac_v1/sac_v1_learner.py
|
anuragajay/jaxrl
|
a37414aea9e281f19719ccfc09702b32e1ef4e44
|
[
"MIT"
] | 17
|
2021-06-15T13:38:35.000Z
|
2022-03-17T15:25:23.000Z
|
"""Implementations of algorithms for continuous control."""
import functools
from typing import Optional, Sequence, Tuple
import jax
import jax.numpy as jnp
import numpy as np
import optax
from jaxrl.agents.sac import temperature
from jaxrl.agents.sac.actor import update as update_actor
from jaxrl.agents.sac.critic import target_update
from jaxrl.agents.sac_v1.critic import update_q, update_v
from jaxrl.datasets import Batch
from jaxrl.networks import critic_net, policies
from jaxrl.networks.common import InfoDict, Model, PRNGKey
# NOTE(review): static_argnames=('update_target') is a plain string, not a
# 1-tuple (missing comma); jax.jit accepts a str or an iterable of str for
# static_argnames, so this happens to work as intended.
@functools.partial(jax.jit, static_argnames=('update_target'))
def _update_jit(
        rng: PRNGKey, actor: Model, critic: Model, value: Model,
        target_value: Model, temp: Model, batch: Batch, discount: float,
        tau: float, target_entropy: float, update_target: bool
) -> Tuple[PRNGKey, Model, Model, Model, Model, Model, InfoDict]:
    """One jitted SAC-v1 training step.

    Updates critic (Q), actor, value (V), temperature, and — every
    `update_target` call — the Polyak-averaged target value network. The RNG
    key is split before each stochastic sub-update, so the split order is part
    of the behavior. Returns the new rng, updated models, and a merged info
    dict of all sub-update diagnostics.
    """
    new_critic, critic_info = update_q(critic, target_value, batch, discount)
    rng, key = jax.random.split(rng)
    new_actor, actor_info = update_actor(key, actor, new_critic, temp, batch)
    rng, key = jax.random.split(rng)
    new_value, value_info = update_v(key, new_actor, new_critic, value, temp,
                                     batch, True)
    if update_target:
        # Polyak averaging towards the freshly updated value network.
        new_target_value = target_update(new_value, target_value, tau)
    else:
        new_target_value = target_value
    new_temp, alpha_info = temperature.update(temp, actor_info['entropy'],
                                              target_entropy)
    return rng, new_actor, new_critic, new_value, new_target_value, new_temp, {
        **critic_info,
        **value_info,
        **actor_info,
        **alpha_info
    }
class SACV1Learner(object):
    """Soft Actor-Critic (v1) learner holding actor/critic/value/temp models."""

    def __init__(self,
                 seed: int,
                 observations: jnp.ndarray,
                 actions: jnp.ndarray,
                 actor_lr: float = 3e-4,
                 value_lr: float = 3e-4,
                 critic_lr: float = 3e-4,
                 temp_lr: float = 3e-4,
                 hidden_dims: Sequence[int] = (256, 256),
                 discount: float = 0.99,
                 tau: float = 0.005,
                 target_update_period: int = 1,
                 target_entropy: Optional[float] = None,
                 init_temperature: float = 1.0):
        """
        An implementation of the version of Soft-Actor-Critic described in https://arxiv.org/abs/1801.01290

        `observations` and `actions` are example arrays used only to shape the
        network parameters at initialization.
        """
        action_dim = actions.shape[-1]
        # Default entropy target: -|A|/2 (heuristic; SAC papers often use -|A|).
        if target_entropy is None:
            self.target_entropy = -action_dim / 2
        else:
            self.target_entropy = target_entropy
        self.tau = tau
        self.target_update_period = target_update_period
        self.discount = discount
        rng = jax.random.PRNGKey(seed)
        rng, actor_key, critic_key, temp_key = jax.random.split(rng, 4)
        actor_def = policies.NormalTanhPolicy(hidden_dims, action_dim)
        actor = Model.create(actor_def,
                             inputs=[actor_key, observations],
                             tx=optax.adam(learning_rate=actor_lr))
        critic_def = critic_net.DoubleCritic(hidden_dims)
        critic = Model.create(critic_def,
                              inputs=[critic_key, observations, actions],
                              tx=optax.adam(learning_rate=critic_lr))
        value_def = critic_net.ValueCritic(hidden_dims)
        value = Model.create(value_def,
                             inputs=[critic_key, observations],
                             tx=optax.adam(learning_rate=value_lr))
        # Target V network starts from the same init; it has no optimizer
        # because it is only updated by Polyak averaging.
        target_value = Model.create(value_def,
                                    inputs=[critic_key, observations])
        temp = Model.create(temperature.Temperature(init_temperature),
                            inputs=[temp_key],
                            tx=optax.adam(learning_rate=temp_lr))
        self.actor = actor
        self.critic = critic
        self.value = value
        self.target_value = target_value
        self.temp = temp
        self.rng = rng
        self.step = 1

    def sample_actions(self,
                       observations: np.ndarray,
                       temperature: float = 1.0) -> jnp.ndarray:
        """Sample (or, at temperature 0, select) actions; advances self.rng."""
        rng, actions = policies.sample_actions(self.rng, self.actor.apply_fn,
                                               self.actor.params, observations,
                                               temperature)
        self.rng = rng
        actions = np.asarray(actions)
        # Actions are clipped to the tanh-policy range [-1, 1].
        return np.clip(actions, -1, 1)

    def update(self, batch: Batch) -> InfoDict:
        """Run one jitted SAC step on `batch` and swap in the new models."""
        self.step += 1
        new_rng, new_actor, new_critic, new_value, new_target_value, new_temp, info = _update_jit(
            self.rng, self.actor, self.critic, self.value, self.target_value,
            self.temp, batch, self.discount, self.tau, self.target_entropy,
            self.step % self.target_update_period == 0)
        self.rng = new_rng
        self.actor = new_actor
        self.critic = new_critic
        self.value = new_value
        self.target_value = new_target_value
        self.temp = new_temp
        return info
| 35.694444
| 107
| 0.596887
| 604
| 5,140
| 4.865894
| 0.195364
| 0.052399
| 0.023818
| 0.024498
| 0.139163
| 0.106499
| 0.106499
| 0.06805
| 0.06805
| 0.033345
| 0
| 0.012799
| 0.315953
| 5,140
| 143
| 108
| 35.944056
| 0.823094
| 0.029767
| 0
| 0.055556
| 0
| 0
| 0.004034
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0
| 0.12037
| 0
| 0.194444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e07d1faf3d069567748feca41784098709e225b2
| 1,143
|
py
|
Python
|
quick_pandas.py
|
chenmich/google-ml-crash-course-exercises
|
d610f890d53b1537a3ce80531ce1ff2df1f5dc84
|
[
"MIT"
] | null | null | null |
quick_pandas.py
|
chenmich/google-ml-crash-course-exercises
|
d610f890d53b1537a3ce80531ce1ff2df1f5dc84
|
[
"MIT"
] | null | null | null |
quick_pandas.py
|
chenmich/google-ml-crash-course-exercises
|
d610f890d53b1537a3ce80531ce1ff2df1f5dc84
|
[
"MIT"
] | null | null | null |
# Pandas warm-up exercises (Google ML crash course). Runs top-to-bottom with
# side effects; the commented-out lines are intentionally kept as exercises.
import pandas as pd
print(pd.__version__)
# Two parallel Series: city names and their populations.
city_names = pd.Series(['San Francisco', 'San Jose', 'Sacramento'])
population = pd.Series([852469, 1015785, 485199])
#city_population_table = pd.DataFrame(({'City name': city_names, 'Population': population}))
# Fetches the California housing training set over the network.
california_houseing_dataframe = pd.read_csv("https://storage.googleapis.com/mledu-datasets/california_housing_train.csv", sep=",")
california_houseing_dataframe.describe()  # summary stats; return value unused
california_houseing_dataframe.head()  # return value unused
#some error
#california_houseing_dataframe.hist('housing_median_age')
cities = pd.DataFrame({'City name': city_names, 'Population': population})
#print(type(cities['City name']))
#print(cities['City name'])
#print(type(cities['City name'][1]))
#print(cities['City name'][1])
#print(type(cities[0:2]))
#print(cities[0:2])
#print(population / 1000)
import numpy as np
np.log(population)  # elementwise natural log; result discarded
#print(population.apply(lambda val: val > 10000))
cities['Area square miles'] = pd.Series([46.87, 176.53, 97.92])
#print(cities)
# Derived column: people per square mile.
cities['Population density'] = cities['Population'] / cities['Area square miles']
#print(cities)
print(city_names.index)
# reindex reorders rows according to the given index labels.
print(cities.reindex([2, 0, 1]))
print(cities)
| 40.821429
| 130
| 0.750656
| 159
| 1,143
| 5.251572
| 0.408805
| 0.092216
| 0.129341
| 0.045509
| 0.201198
| 0.11497
| 0.11497
| 0.11497
| 0
| 0
| 0
| 0.047214
| 0.073491
| 1,143
| 28
| 131
| 40.821429
| 0.741265
| 0.366579
| 0
| 0
| 0
| 0
| 0.26264
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.133333
| 0
| 0.133333
| 0.266667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e081143f3b7d183dce44c075a5350bb5aba51e51
| 797
|
py
|
Python
|
backend/app/main.py
|
ianahart/blog
|
fc52e15a8b56bd4c6482065de7e21f8b31f5d765
|
[
"MIT"
] | null | null | null |
backend/app/main.py
|
ianahart/blog
|
fc52e15a8b56bd4c6482065de7e21f8b31f5d765
|
[
"MIT"
] | null | null | null |
backend/app/main.py
|
ianahart/blog
|
fc52e15a8b56bd4c6482065de7e21f8b31f5d765
|
[
"MIT"
] | null | null | null |
from fastapi import FastAPI
from dotenv import load_dotenv
from fastapi.middleware.cors import CORSMiddleware
from app.api.api_v1.api import api_router
from app.core.config import settings
# Application setup: create the FastAPI app, load .env variables, and mount
# the versioned API router under the configured prefix.
app = FastAPI()
load_dotenv()
app.include_router(api_router, prefix=settings.API_V1_STR)
# Set all CORS enabled origins
if settings.BACKEND_CORS_ORIGINS:
    app.add_middleware(
        CORSMiddleware,
        allow_origins=[str(origin)
                       for origin in settings.BACKEND_CORS_ORIGINS],
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )
if __name__ == "__main__":
    # Use this for debugging purposes only
    # pyright: reportGeneralTypeIssues=false
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8001, log_level="debug")
| 27.482759
| 68
| 0.711418
| 103
| 797
| 5.252427
| 0.504854
| 0.011091
| 0.07024
| 0.096118
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015625
| 0.196989
| 797
| 28
| 69
| 28.464286
| 0.829688
| 0.130489
| 0
| 0
| 0
| 0
| 0.03193
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.3
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e083bd5dc380bfdfeec4ef47f0529d4de1bded9d
| 658
|
py
|
Python
|
test_data/samples/alembic_template_output.py
|
goldstar611/ssort
|
05c35ec89dd9ff391ae824c17ed974340e2f5597
|
[
"MIT"
] | 238
|
2021-04-25T11:45:54.000Z
|
2022-03-30T10:49:58.000Z
|
test_data/samples/alembic_template_output.py
|
goldstar611/ssort
|
05c35ec89dd9ff391ae824c17ed974340e2f5597
|
[
"MIT"
] | 54
|
2021-03-29T21:40:00.000Z
|
2022-03-29T20:26:31.000Z
|
test_data/samples/alembic_template_output.py
|
goldstar611/ssort
|
05c35ec89dd9ff391ae824c17ed974340e2f5597
|
[
"MIT"
] | 4
|
2022-02-09T02:37:11.000Z
|
2022-02-23T03:07:50.000Z
|
"""Example revision
Revision ID: fdf0cf6487a3
Revises:
Create Date: 2021-08-09 17:55:19.491713
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "fdf0cf6487a3"
down_revision = None  # no parent: this is the first migration in the chain
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``example`` table with its single integer column."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        "example",
        sa.Column("example_id", sa.Integer(), nullable=False),
    )
    # ### end Alembic commands ###
def downgrade():
    """Revert :func:`upgrade` by dropping the table it created."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Bug fix: this previously dropped "measurements", a table this migration
    # never creates; downgrade must mirror upgrade() and drop "example".
    op.drop_table("example")
    # ### end Alembic commands ###
| 20.5625
| 65
| 0.668693
| 79
| 658
| 5.493671
| 0.582278
| 0.062212
| 0.096774
| 0.105991
| 0.202765
| 0.202765
| 0.202765
| 0.202765
| 0
| 0
| 0
| 0.061069
| 0.203647
| 658
| 31
| 66
| 21.225806
| 0.767176
| 0.433131
| 0
| 0
| 0
| 0
| 0.122024
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0.153846
| 0
| 0.307692
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e085ecf717371ed12e23c9cc1a56cd7685b27bf6
| 790
|
py
|
Python
|
.archived/snakecode/0173.py
|
gearbird/calgo
|
ab48357100de2a5ea47fda2d9f01ced6dc73fa79
|
[
"MIT"
] | 4
|
2022-01-13T03:39:01.000Z
|
2022-03-15T03:16:33.000Z
|
.archived/snakecode/0173.py
|
gearbird/calgo
|
ab48357100de2a5ea47fda2d9f01ced6dc73fa79
|
[
"MIT"
] | null | null | null |
.archived/snakecode/0173.py
|
gearbird/calgo
|
ab48357100de2a5ea47fda2d9f01ced6dc73fa79
|
[
"MIT"
] | 1
|
2021-12-09T12:33:07.000Z
|
2021-12-09T12:33:07.000Z
|
from __future__ import annotations
from typing import Optional
# Definition for a binary tree node.
class TreeNode:
    """A binary tree node holding an int value and optional children."""

    def __init__(self, val: int = 0, left: Optional[TreeNode] = None, right: Optional[TreeNode] = None):
        self.val, self.left, self.right = val, left, right
class BSTIterator:
    """In-order iterator over a BST using an explicit stack.

    Left spines are pushed lazily in hasNext(), so memory is O(tree height).
    """

    def __init__(self, root: Optional[TreeNode]):
        # Ancestors whose values are still pending, smallest on top.
        self.stack: list[TreeNode] = []
        # Subtree whose left spine has not been pushed yet.
        self.cur = root

    def next(self) -> int:
        """Return the next value in ascending order; raise StopIteration if exhausted."""
        if not self.hasNext():
            raise StopIteration()
        node = self.stack.pop()
        self.cur = node.right
        return node.val

    def hasNext(self) -> bool:
        """Push the pending left spine, then report whether a value remains."""
        while self.cur:
            self.stack.append(self.cur)
            self.cur = self.cur.left
        return bool(self.stack)
| 27.241379
| 104
| 0.606329
| 99
| 790
| 4.717172
| 0.424242
| 0.089936
| 0.094218
| 0.068522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003559
| 0.288608
| 790
| 28
| 105
| 28.214286
| 0.827402
| 0.043038
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.181818
| false
| 0
| 0.090909
| 0
| 0.454545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e08673d5cfeabd8f8dd35fbf0c18643dc03a42fd
| 1,933
|
py
|
Python
|
test/msan/lit.cfg.py
|
QuarkTheAwesome/compiler-rt-be-aeabi
|
79e7d2bd981b0f38d60d90f8382c6cd5389b95d0
|
[
"Apache-2.0"
] | 118
|
2016-02-29T01:55:45.000Z
|
2021-11-08T09:47:46.000Z
|
test/msan/lit.cfg.py
|
QuarkTheAwesome/compiler-rt-be-aeabi
|
79e7d2bd981b0f38d60d90f8382c6cd5389b95d0
|
[
"Apache-2.0"
] | 27
|
2016-06-20T23:47:01.000Z
|
2019-10-25T17:41:37.000Z
|
test/msan/lit.cfg.py
|
QuarkTheAwesome/compiler-rt-be-aeabi
|
79e7d2bd981b0f38d60d90f8382c6cd5389b95d0
|
[
"Apache-2.0"
] | 73
|
2016-03-01T00:50:56.000Z
|
2021-12-05T03:30:35.000Z
|
# -*- Python -*-
import os
# MSan lit test-suite configuration. NOTE(review): `config` is not defined in
# this file; it is presumably injected by LLVM's lit test runner before this
# script is exec'd — confirm against the suite's lit.site.cfg.
# Setup config name.
config.name = 'MemorySanitizer' + getattr(config, 'name_suffix', 'default')
# Setup source root.
config.test_source_root = os.path.dirname(__file__)
# Setup default compiler flags used with -fsanitize=memory option.
clang_msan_cflags = (["-fsanitize=memory",
                      "-mno-omit-leaf-frame-pointer",
                      "-fno-omit-frame-pointer",
                      "-fno-optimize-sibling-calls"] +
                     [config.target_cflags] +
                     config.debug_info_flags)
# Some Msan tests leverage backtrace() which requires libexecinfo on FreeBSD.
if config.host_os == 'FreeBSD':
    clang_msan_cflags += ["-lexecinfo", "-fPIC"]
clang_msan_cxxflags = config.cxx_mode_flags + clang_msan_cflags
# Flags for KMSAN invocation. This is C-only, we're not interested in C++.
clang_kmsan_cflags = (["-fsanitize=kernel-memory"] +
                      [config.target_cflags] +
                      config.debug_info_flags)

def build_invocation(compile_flags):
    # Join the compiler and its flags into a substitution string, padded with
    # spaces so it splices cleanly into RUN lines.
    return " " + " ".join([config.clang] + compile_flags) + " "

# Register the %clang_msan / %clangxx_msan / %clang_kmsan RUN-line macros.
config.substitutions.append( ("%clang_msan ", build_invocation(clang_msan_cflags)) )
config.substitutions.append( ("%clangxx_msan ", build_invocation(clang_msan_cxxflags)) )
config.substitutions.append( ("%clang_kmsan ", build_invocation(clang_kmsan_cflags)) )
# Default test suffixes.
config.suffixes = ['.c', '.cc', '.cpp']
# MSan is only supported on these platforms.
if config.host_os not in ['Linux', 'NetBSD', 'FreeBSD']:
    config.unsupported = True
# For mips64, mips64el we have forced store_context_size to 1 because these
# archs use slow unwinder which is not async signal safe. Therefore we only
# check the first frame since store_context size is 1.
if config.host_arch in ['mips64', 'mips64el']:
    config.substitutions.append( ('CHECK-%short-stack', 'CHECK-SHORT-STACK'))
else:
    config.substitutions.append( ('CHECK-%short-stack', 'CHECK-FULL-STACK'))
| 40.270833
| 88
| 0.685463
| 239
| 1,933
| 5.351464
| 0.460251
| 0.049257
| 0.097733
| 0.037529
| 0.173573
| 0.129789
| 0.129789
| 0
| 0
| 0
| 0
| 0.006341
| 0.18417
| 1,933
| 47
| 89
| 41.12766
| 0.804692
| 0.253492
| 0
| 0.148148
| 0
| 0
| 0.227654
| 0.071229
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0
| 0.037037
| 0.037037
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e087918e3b0a051f5fa5fa67e1527b89fc1bd61b
| 9,606
|
py
|
Python
|
dataschema/entity.py
|
vingkan/sql_tools
|
5d6ab6a0ae31dc51e51ac1629f83f7bbf91396c1
|
[
"Apache-2.0"
] | 1
|
2022-03-30T19:47:16.000Z
|
2022-03-30T19:47:16.000Z
|
dataschema/entity.py
|
vingkan/sql_tools
|
5d6ab6a0ae31dc51e51ac1629f83f7bbf91396c1
|
[
"Apache-2.0"
] | null | null | null |
dataschema/entity.py
|
vingkan/sql_tools
|
5d6ab6a0ae31dc51e51ac1629f83f7bbf91396c1
|
[
"Apache-2.0"
] | 1
|
2022-03-30T04:07:12.000Z
|
2022-03-30T04:07:12.000Z
|
#
# nuna_sql_tools: Copyright 2022 Nuna Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Utilityes for checking and."""
import dataclasses
import datetime
import decimal
from types import ModuleType
from typing import NewType, Union
# In your data declaration python modules define a JAVA_PACKAGE
# variable at top level to specify the corresponding Java package of generated
# classes.
JAVA_PACKAGE = 'JAVA_PACKAGE'
def GetJavaPackage(module: ModuleType) -> str:
    """Return the Java package declared by `module`.

    Falls back to the module's own name when no top-level `JAVA_PACKAGE`
    variable is defined in the module.
    """
    return getattr(module, JAVA_PACKAGE, module.__name__)
# Attribute attached to annotated types, holding their schema annotations.
# (The original file assigned this constant twice; the duplicate is removed.)
_SCHEMA_ANNOTATIONS = '__schema_annotations__'
# Keys expected in a plain dataclass `__dict__`; anything else is flagged.
_EXPECTED_DICT_KEYS = set([
    '__module__', '__annotations__', '__doc__', '__dict__', '__weakref__',
    '__dataclass_params__', '__dataclass_fields__', _SCHEMA_ANNOTATIONS
])
# Functions generated by `dataclasses.dataclass`; other callables are flagged.
_EXPECTED_FUNCTIONS = ['__init__', '__repr__', '__eq__', '__hash__']
# Leaf field types accepted in schema dataclasses.
_BASE_TYPES = set([
    int, bytes, str, float, bool, datetime.date, datetime.datetime,
    decimal.Decimal
])
# Monotonic counter used to build unique names for annotated NewTypes.
_CLASS_ID = 0
def _Annotate(cls=None, annotation=None):
    """Attach schema annotation(s) to a class or type.

    Works directly or as a decorator factory: when `cls` is None a wrapper
    is returned that performs the annotation. `annotation` may be a single
    annotation or a list of them (from annotation.py).
    """

    def Wrap(target):
        collected = list(getattr(target, _SCHEMA_ANNOTATIONS, []))
        if isinstance(annotation, list):
            collected.extend(annotation)
        else:
            collected.append(annotation)
        global _CLASS_ID
        _CLASS_ID += 1
        # Unwrap an existing NewType so we re-wrap the underlying type.
        base = getattr(target, '__supertype__', target)
        wrapped = NewType(f'Annotated_{_CLASS_ID}', base)
        setattr(wrapped, _SCHEMA_ANNOTATIONS, collected)
        return wrapped

    if cls is None:
        return Wrap
    return Wrap(cls)
def Annotate(cls, annotation):
    """Attach `annotation` to the field type `cls` and return the result."""
    return _Annotate(cls=cls, annotation=annotation)
def IsAnnotatedType(field_cls: type):
    """Tell whether `field_cls` carries schema annotations."""
    return hasattr(field_cls, _SCHEMA_ANNOTATIONS)
def GetAnnotatedType(field_cls: type):
    """Strip an annotation wrapper, yielding the underlying type."""
    if not IsAnnotatedType(field_cls):
        return field_cls
    return getattr(field_cls, '__supertype__', field_cls)
def IsOptionalType(field_cls: type):
    """Tell whether `field_cls` has the shape Optional[X] (Union[X, None])."""
    origin = getattr(field_cls, '__origin__', None)
    # pylint: disable=comparison-with-callable
    if origin != Union:
        return False
    args = field_cls.__args__
    return len(args) == 2 and args[1] == type(None)
def GetOptionalType(field_cls: type):
    """Return X for Optional[X] fields (behind annotations), else None."""
    stripped = GetAnnotatedType(field_cls)
    return stripped.__args__[0] if IsOptionalType(stripped) else None
def GetOriginalType(field_cls: type):
    """Return the underlying type, unwrapping annotations and Optional."""
    stripped = GetAnnotatedType(field_cls)
    if not IsOptionalType(stripped):
        return stripped
    return stripped.__args__[0]
def GetStructuredTypeName(field_cls: type):
    """Name of the container structure ('dict'/'list'/'set'), or None."""
    origin = getattr(GetAnnotatedType(field_cls), '__origin__', None)
    for container, name in ((dict, 'dict'), (list, 'list'), (set, 'set')):
        if origin is container:
            return name
    return None
def IsBasicType(field_cls: type):
    """Tell whether `field_cls` (behind annotations) is a basic field type.

    Returns:
        True if the unwrapped type is one of _BASE_TYPES, else False.
    """
    # The original fell through and implicitly returned None on the negative
    # path; make the boolean contract explicit (None and False are both
    # falsy, so callers are unaffected).
    return GetAnnotatedType(field_cls) in _BASE_TYPES
# Maximum nesting depth allowed before a field type is declared recursive.
_MAX_DEPTH = 30


class FieldTypeChecker:
    """Checks that the declared type of a dataclass field is acceptable.

    Acceptable types are the basic types, Dict/List/Set of acceptable
    types, Optional of non-structured acceptable types, and nested schema
    dataclasses (validated recursively via DataclassChecker).
    """

    def __init__(self, field_name, field_cls):
        self.field_name = field_name
        self.field_cls = field_cls
        # Types already validated; prevents re-checking (and infinite
        # recursion through) self-referential dataclass types.
        self.checked = set()

    def _check(self, field_cls, depth):
        """Check if the type of a field is acceptable.

        Raises:
            ValueError: when an unacceptable or too deeply nested type is
                encountered.
        """
        if field_cls in self.checked:
            return True
        if depth > _MAX_DEPTH:
            raise ValueError(f'Recursive field type found at {field_cls} '
                             f'for field `{self.field_name}`')
        field_cls = GetAnnotatedType(field_cls)
        if IsBasicType(field_cls):
            return True
        if hasattr(field_cls, '__origin__'):
            # Fix: recurse with depth + 1 — the original passed `depth`
            # unchanged everywhere, so the _MAX_DEPTH guard could never
            # trigger.
            if field_cls.__origin__ is dict:
                self._check(field_cls.__args__[0], depth + 1)
                self._check(field_cls.__args__[1], depth + 1)
            elif field_cls.__origin__ is list:
                self._check(field_cls.__args__[0], depth + 1)
            elif field_cls.__origin__ is set:
                self._check(field_cls.__args__[0], depth + 1)
            elif (  # pylint: disable=comparison-with-callable
                    field_cls.__origin__ == Union and
                    len(field_cls.__args__) == 2 and
                    field_cls.__args__[1] == type(None)):
                # Fix: inspect the Optional's argument. The original called
                # GetStructuredTypeName on the Union itself, whose origin is
                # always Union, so the check below could never fire.
                if GetStructuredTypeName(field_cls.__args__[0]) is not None:
                    raise ValueError('Cannot have Optional structured fields.'
                                     '(e.g. Optional[List or Set or Dict])')
                # Optional[...]
                self._check(field_cls.__args__[0], depth + 1)
            else:
                raise ValueError(f'Invalid origin class for {field_cls}: '
                                 f'`{field_cls.__origin__}`')
        else:
            # Non-generic, non-basic type: must itself be a valid schema
            # dataclass.
            checker = DataclassChecker(field_cls)
            if checker.check_is_dataclass() is not None:
                raise ValueError(
                    f'Invalid type surfaced for field `{self.field_name}`: '
                    f'`{self.field_cls}` - {field_cls} is not acceptable')
            err = checker.check()
            if err:
                errors = '; '.join(err)
                raise ValueError(
                    f'Subfield entity class of field `{self.field_name}` '
                    f'({field_cls}) has type errors: {errors}')
        self.checked.add(field_cls)
        return True

    def check(self):
        """Validate `field_cls`: True on success, ValueError otherwise."""
        return self._check(self.field_cls, 0)
class DataclassChecker:
    """Checks if a python type and its structure conforms to Dataclass specs."""

    def __init__(self, cls: type):
        self.cls = cls
        # (name, DataclassChecker) pairs for nested class declarations.
        self.nested = []

    def _err_class(self):
        """Human-readable identifier of the checked class for error text."""
        return f'dataclass class `{self.cls}` in module `{self.cls.__module__}`'

    def _err_field(self, field: str):
        """Human-readable identifier of a field for error text."""
        return (f'field `{field}` of dataclass class `{self.cls.__name__}` '
                f'in module `{self.cls.__module__}`')

    def check_is_dataclass(self):
        """Returns an error string if `cls` is not a dataclass, else None."""
        if not dataclasses.is_dataclass(self.cls):
            return f'{self._err_class()} is not a dataclass'
        return None

    def _check_type(self, field_name, field_cls):
        """Returns an error string if `field_cls` is invalid, else None."""
        try:
            FieldTypeChecker(field_name, field_cls).check()
            return None
        except ValueError as e:
            return f'{e.args[0]} for {self._err_field(field_name)}'

    def _check_field_type(self, field_name, field_cls):
        # Fix: the original passed `GetOriginalType(field_name)` — applying
        # a type-unwrapping helper to the *name* string (a no-op). Pass the
        # name through untouched.
        return self._check_type(field_name, field_cls)

    def _check_dataclass_members(self):
        """Validate members and fields; returns a list of error strings."""
        err = []
        for key in self.cls.__dict__:
            # pylint: disable=comparison-with-callable,unidiomatic-typecheck
            if type(self.cls.__dict__[key]) == type:
                # Nested class declaration: validated recursively below.
                self.nested.append(
                    (key, DataclassChecker(self.cls.__dict__[key])))
            elif callable(
                    self.cls.__dict__[key]) and key not in _EXPECTED_FUNCTIONS:
                err.append(f'{self._err_class()} has unexpected function '
                           f'member `{key}`')
            elif (key not in _EXPECTED_DICT_KEYS and
                  key not in _EXPECTED_FUNCTIONS and
                  key not in self.cls.__annotations__):
                err.append(f'{self._err_class()} has unexpected / non annotated'
                           f' member `{key}`: {self.cls.__dict__[key]}')
        for field in dataclasses.fields(self.cls):
            field_err = self._check_field_type(field.name, field.type)
            if field_err is not None:
                err.append(field_err)
        for nested in self.nested:
            for nested_err in nested[1].check():
                err.append(f'{nested_err}; for nested sub-class '
                           f'{nested[0]} of {self._err_class()}')
        return err

    def check(self):
        """Full validation; returns a (possibly empty) list of error strings."""
        err_dataclass = self.check_is_dataclass()
        if err_dataclass is not None:
            return [err_dataclass]
        return self._check_dataclass_members()
def SchemaAnnotations(cls: type):
    """Return a (possibly empty) fresh list of schema annotations on `cls`."""
    return list(getattr(cls, _SCHEMA_ANNOTATIONS, []))
| 36.249057
| 80
| 0.636581
| 1,162
| 9,606
| 4.907917
| 0.185886
| 0.098194
| 0.029458
| 0.013677
| 0.256707
| 0.182185
| 0.095388
| 0.0761
| 0.052955
| 0.052955
| 0
| 0.003849
| 0.269727
| 9,606
| 264
| 81
| 36.386364
| 0.809123
| 0.169686
| 0
| 0.233696
| 0
| 0
| 0.142187
| 0.026509
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.027174
| 0.021739
| 0.353261
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e089b61952e1f4d0f2fb6443737c623fe7ff04be
| 10,577
|
py
|
Python
|
jaxline/utils_test.py
|
lorenrose1013/jaxline
|
29fca9944651d42139d4103fe12ef29b24812eb6
|
[
"Apache-2.0"
] | 1
|
2022-01-07T02:44:07.000Z
|
2022-01-07T02:44:07.000Z
|
jaxline/utils_test.py
|
SuperXiang/jaxline
|
f1503f6a06d46aa9eb2eab8eed6130895148ffa2
|
[
"Apache-2.0"
] | null | null | null |
jaxline/utils_test.py
|
SuperXiang/jaxline
|
f1503f6a06d46aa9eb2eab8eed6130895148ffa2
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for jaxline's utils."""
import functools
import itertools as it
import time
from unittest import mock
from absl.testing import absltest
from absl.testing import flagsaver
import jax
import jax.numpy as jnp
from jaxline import utils
import numpy as np
class PyPrefetchTest(absltest.TestCase):
    """Unit tests for utils.py_prefetch."""

    def testEmpty(self):
        """An empty source prefetches to an empty list."""
        self.assertEqual(list(utils.py_prefetch(lambda: ())), [])

    def testBaseCase(self):
        """Prefetching preserves the order and contents of the source."""
        expected = list(range(100))
        produced = list(utils.py_prefetch(lambda: range(100)))
        self.assertEqual(produced, expected)

    def testBadFunction(self):
        """A factory that raises surfaces its error on the first next()."""
        def _raise_immediately():
            raise ValueError
        prefetched = utils.py_prefetch(_raise_immediately)
        with self.assertRaises(ValueError):
            next(prefetched)

    def testBadFunctionIteration(self):
        """An error mid-iteration surfaces after the yielded values."""
        def _yield_then_raise():
            yield 1
            raise ValueError
        prefetched = utils.py_prefetch(_yield_then_raise)
        self.assertEqual(next(prefetched), 1)
        with self.assertRaises(ValueError):
            next(prefetched)
class TreePsumTest(absltest.TestCase):
    """Unit tests for utils.tree_psum under jax.pmap.

    Leaf arrays use leading dimension one because these tests run on a
    single device.
    """

    def _psum(self, data, axis_name="i", pmap_axis_name="i"):
        """Applies tree_psum to `data` through jax.pmap."""
        return jax.pmap(
            lambda x: utils.tree_psum(x, axis_name=axis_name),
            axis_name=pmap_axis_name)(data)

    def testBaseCase(self):
        data = {"a": jnp.array([1]), "b": jnp.array([2])}
        self.assertEqual(self._psum(data), data)

    def testEmpty(self):
        data = {"a": jnp.array([]), "b": jnp.array([])}
        with self.assertRaises(ZeroDivisionError):
            self._psum(data)

    def testSingleLeafTree(self):
        data = jnp.array([1])
        self.assertEqual(self._psum(data), data)

    def testNotNumpy(self):
        with self.assertRaises(ValueError):
            self._psum([1])

    def testNumDevicesMismatch(self):
        # Shape assumes 2 devices, but only 1 is present.
        with self.assertRaises(ValueError):
            self._psum(jnp.array([1, 2]))

    def testNoPmapWrapper(self):
        # Outside pmap, axis_name is undefined.
        with self.assertRaises(NameError):
            utils.tree_psum(jnp.array([1]), axis_name="i")

    def testAxisNameMismatch(self):
        with self.assertRaises(NameError):
            self._psum(jnp.array([1]), axis_name="i", pmap_axis_name="j")
class MakeAsyncTest(absltest.TestCase):
    """Tests for utils.make_async background-thread execution semantics."""

    def testBaseCase(self):
        """Tests correct execution for single call."""
        r = []
        async_fn = utils.make_async()(lambda: r.append("a"))
        async_fn()
        # Sleep so the background thread has a chance to run the append.
        time.sleep(1)
        self.assertListEqual(r, ["a"])

    def testNonBlocking(self):
        """Tests async function doesn't block the main thread."""
        r = []
        async_fn = utils.make_async()(lambda: r.append((time.sleep(5), "a")))
        # "b" is appended on the main thread; the async call then sleeps
        # before appending "a", so "b" must come first in the result.
        r.append((None, "b"))
        async_fn().result()
        self.assertListEqual(r, [(None, "b"), (None, "a")])

    def testSerialExecution(self):
        """Tests multiple calls to async function execute serially."""
        r = []
        a = lambda: r.append((time.sleep(5), "a"))
        b = lambda: r.append((None, "b"))
        async_fn = utils.make_async()(lambda f: f())
        async_fn(a)
        # Although `a` sleeps, `b` must not overtake it: calls are queued.
        async_fn(b).result()
        self.assertListEqual(r, [(None, "a"), (None, "b")])

    def testErrorOnNextCall(self):
        """Tests background thread error raised in main thread on next call."""
        @utils.make_async()
        def async_fn():
            raise ValueError()
        # First call will trigger an error in the background thread.
        async_fn()
        with self.assertRaises(ValueError):
            # Background thread error will be raised in the main thread on next call
            async_fn()

    def testSubsequentCallsDontRun(self):
        """Tests that subsequent calls don't run after an error has occurred."""
        runs = []
        @utils.make_async()
        def async_fn():
            runs.append(None)
            raise ValueError()
        # First call will trigger an error in the background thread.
        async_fn()
        for _ in range(2):
            with self.assertRaises(ValueError):
                # Background thread error will be raised in the main thread on
                # subsequent calls and _bad_function will not be run.
                async_fn()
        # Only the first call actually executed the body.
        self.assertListEqual(runs, [None])

    def testErrorInBackgroundThread(self):
        """Tests background thread raises the error."""
        @utils.make_async()
        def async_fn():
            raise ValueError()
        future = async_fn()  # pylint: disable=assignment-from-no-return
        # The future captures the exception rather than raising inline.
        self.assertIsNotNone(future.exception())
class TestBroadcast(absltest.TestCase):
    """Tests for utils.bcast_local_devices."""

    def test_bcast_local_devices(self):
        """A scalar is broadcast to one entry per local device."""
        n = jax.local_device_count()
        for leaf, expected in ((jnp.zeros([]), jnp.zeros([n])),
                               (jnp.ones([]), jnp.ones([n]))):
            self.assertEqual(utils.bcast_local_devices(leaf), expected)

    def test_bcast_local_devices_empty_tree(self):
        """Empty trees pass through unchanged."""
        self.assertIsNone(utils.bcast_local_devices(None))
        self.assertEqual(utils.bcast_local_devices({}), {})

    def test_bcast_local_devices_tree(self):
        """Broadcasting applies to every leaf of a tree."""
        n = jax.local_device_count()
        broadcast = utils.bcast_local_devices(
            {"ones": jnp.ones([]), "zeros": jnp.zeros([])})
        self.assertEqual(
            broadcast, {"ones": jnp.ones([n]), "zeros": jnp.zeros([n])})
class TestLogActivity(absltest.TestCase):
    """Tests for the utils.log_activity context manager."""

    @mock.patch("jaxline.utils.logging.info")
    def test_log_success(self, mock_info):
        """Tests that logging an activity is successful."""
        with utils.log_activity("for test"):
            pass
        mock_info.assert_any_call("[jaxline] %s starting...", "for test")
        mock_info.assert_any_call("[jaxline] %s finished.", "for test")

    @mock.patch("absl.logging.exception")
    @mock.patch("absl.logging.info")
    def test_log_failure(self, mock_info, mock_exc):
        """Tests that an error thrown by an activity is correctly caught."""
        # Note: decorators apply bottom-up, so mock_info is absl.logging.info
        # and mock_exc is absl.logging.exception.
        with self.assertRaisesRegex(ValueError, "Intentional"):
            with utils.log_activity("for test"):
                raise ValueError("Intentional")
        mock_info.assert_any_call("[jaxline] %s starting...", "for test")
        mock_exc.assert_any_call("[jaxline] %s failed with error.", "for test")
class TestSpecializeRngHostDevice(absltest.TestCase):
    """Tests for utils.specialize_rng_host_device RNG-splitting modes."""

    @classmethod
    def setUpClass(cls):
        super(TestSpecializeRngHostDevice, cls).setUpClass()
        rng = jax.random.PRNGKey(0)
        # One copy of the key per local device, as pmap expects.
        cls.rng = jnp.broadcast_to(
            rng, (jax.local_device_count(),) + rng.shape)

    def test_unique_device(self):
        """Tests that rngs are unique across devices."""
        mode = "unique_host_unique_device"
        host_id_devices = utils.host_id_devices_for_rng(mode)
        specialize_func = jax.pmap(functools.partial(
            utils.specialize_rng_host_device, axis_name="i",
            mode=mode), axis_name="i")
        rng = specialize_func(self.rng, host_id_devices)
        # Every device should end up with a distinct key.
        self.assertEqual(
            np.unique(rng, axis=0).shape[0], jax.local_device_count())

    def test_same_device(self):
        """Tests rngs are same across devices."""
        mode = "unique_host_same_device"
        host_id_devices = utils.host_id_devices_for_rng(mode)
        specialize_func = jax.pmap(functools.partial(
            utils.specialize_rng_host_device, axis_name="i",
            mode=mode), axis_name="i")
        rng = specialize_func(self.rng, host_id_devices)
        # All devices share a single key.
        self.assertEqual(
            np.unique(rng, axis=0).shape[0], 1)

    def test_unique_host(self):
        """Tests rngs unique between hosts."""
        mode = "unique_host_same_device"
        # Simulate two different hosts by patching jax.host_id.
        with mock.patch.object(utils.jax, "host_id", return_value=0):
            host_id_devices = utils.host_id_devices_for_rng(mode)
            specialize_func = jax.pmap(functools.partial(
                utils.specialize_rng_host_device, axis_name="i",
                mode=mode), axis_name="i")
            rng0 = specialize_func(self.rng, host_id_devices)
        with mock.patch.object(utils.jax, "host_id", return_value=1):
            host_id_devices = utils.host_id_devices_for_rng(mode)
            specialize_func = jax.pmap(functools.partial(
                utils.specialize_rng_host_device, axis_name="i",
                mode=mode), axis_name="i")
            rng1 = specialize_func(self.rng, host_id_devices)
        # Two hosts must produce two distinct keys.
        self.assertEqual(
            np.unique(np.concatenate([rng0, rng1], axis=0), axis=0).shape[0], 2)
class TestRendezvous(absltest.TestCase):
    """Smoke test for utils.rendezvous."""

    def test_rendezvous(self):
        """Rendezvous on a single host should complete without raising."""
        utils.rendezvous()
class TestJaxlineDisablePmapJit(absltest.TestCase):
    """Tests for utils.disable_pmap_jit and its controlling flag."""

    @mock.patch.object(utils.chex, "fake_pmap_and_jit", autospec=True)
    def test_pmap_jit_disabled(self, mock_fake_pmap_and_jit):
        """Tests pmap/jit are disabled if --jaxline_disable_pmap_jit is set."""
        with self.subTest("PmapJitNotDisabled"):
            with flagsaver.flagsaver(jaxline_disable_pmap_jit=False):
                utils.disable_pmap_jit(lambda: None)()
                # Flag off: chex faking must not be engaged.
                mock_fake_pmap_and_jit.assert_not_called()
        with self.subTest("PmapJitDisabled"):
            with flagsaver.flagsaver(jaxline_disable_pmap_jit=True):
                utils.disable_pmap_jit(lambda: None)()
                # Flag on: chex fake_pmap_and_jit wraps the call exactly once.
                mock_fake_pmap_and_jit.assert_called_once()
class DoubleBufferTest(absltest.TestCase):
    """Tests that utils.double_buffer cycles between two device buffers."""

    def test_double_buffer(self):
        if jax.default_backend() != "gpu":
            self.skipTest("Only necessary on GPU.")
        n = jax.local_device_count()
        dataset = it.repeat(np.ones([n]))
        iterator = iter(utils.double_buffer(dataset))
        batch_ptrs = []
        while len(batch_ptrs) < 4:
            batch = next(iterator)
            # Record the raw device buffer addresses of this batch.
            ptrs = [b.unsafe_buffer_pointer() for b in batch.device_buffers]
            batch_ptrs.append(ptrs)
            # Drop our reference so the buffer can be reused by the iterator.
            del batch
        # Double buffering alternates between the same two buffers...
        self.assertEqual(batch_ptrs[0], batch_ptrs[2])
        self.assertEqual(batch_ptrs[1], batch_ptrs[3])
        # ...which must be distinct from each other.
        self.assertNotEqual(batch_ptrs[0], batch_ptrs[1])
        self.assertNotEqual(batch_ptrs[2], batch_ptrs[3])
if __name__ == "__main__":
absltest.main()
| 32.345566
| 80
| 0.681573
| 1,411
| 10,577
| 4.924167
| 0.213324
| 0.025331
| 0.025907
| 0.025907
| 0.450921
| 0.382412
| 0.337939
| 0.288716
| 0.266983
| 0.256908
| 0
| 0.006381
| 0.185024
| 10,577
| 326
| 81
| 32.444785
| 0.799652
| 0.174246
| 0
| 0.354067
| 0
| 0
| 0.052894
| 0.013773
| 0
| 0
| 0
| 0
| 0.181818
| 1
| 0.162679
| false
| 0.004785
| 0.047847
| 0
| 0.253589
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e08cc87c4cfc35f91dfef4447a5dc8af61c7fede
| 545
|
py
|
Python
|
problems/108.py
|
mengshun/Leetcode
|
8bb676f2fff093e1417a4bed13d9ad708149be78
|
[
"MIT"
] | null | null | null |
problems/108.py
|
mengshun/Leetcode
|
8bb676f2fff093e1417a4bed13d9ad708149be78
|
[
"MIT"
] | null | null | null |
problems/108.py
|
mengshun/Leetcode
|
8bb676f2fff093e1417a4bed13d9ad708149be78
|
[
"MIT"
] | null | null | null |
"""
108. Convert Sorted Array to Binary Search Tree (将有序数组转换为二叉搜索树)
"""
from TreeNode import TreeNode
class Solution:
    """LeetCode 108: build a height-balanced BST from a sorted array."""

    def sortedArrayToBST(self, nums: [int]) -> TreeNode:
        """Recursively takes the middle element as the subtree root."""

        def build(lo, hi):
            # Empty window: no subtree here.
            if lo > hi:
                return None
            center = lo + (hi - lo) // 2
            node = TreeNode(nums[center])
            node.left = build(lo, center - 1)
            node.right = build(center + 1, hi)
            return node

        return build(0, len(nums) - 1)
# Ad-hoc smoke test: build a BST from a sorted sample list.
t = [-10,-3,0,5,9]
obj = Solution()
node = obj.sortedArrayToBST(t)
# preorderTraversal is defined on the imported TreeNode class — presumably
# walks/prints the tree in preorder (verify in TreeNode module).
node.preorderTraversal()
| 18.793103
| 56
| 0.543119
| 66
| 545
| 4.484848
| 0.484848
| 0.091216
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.038043
| 0.324771
| 545
| 28
| 57
| 19.464286
| 0.766304
| 0.033028
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.0625
| 0
| 0.4375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e091211c57418837730aea76bfdd4d9fd710e048
| 1,978
|
py
|
Python
|
src/riotwatcher/riotwatcher.py
|
TheBoringBakery/Riot-Watcher
|
6e05fffe127530a75fd63e67da37ba81489fd4fe
|
[
"MIT"
] | 2
|
2020-10-06T23:33:01.000Z
|
2020-11-22T01:58:43.000Z
|
src/riotwatcher/riotwatcher.py
|
TheBoringBakery/Riot-Watcher
|
6e05fffe127530a75fd63e67da37ba81489fd4fe
|
[
"MIT"
] | null | null | null |
src/riotwatcher/riotwatcher.py
|
TheBoringBakery/Riot-Watcher
|
6e05fffe127530a75fd63e67da37ba81489fd4fe
|
[
"MIT"
] | null | null | null |
from .Deserializer import Deserializer
from .RateLimiter import RateLimiter
from .Handlers import (
DeprecationHandler,
DeserializerAdapter,
DictionaryDeserializer,
RateLimiterAdapter,
ThrowOnErrorHandler,
TypeCorrectorHandler,
)
from .Handlers.RateLimit import BasicRateLimiter
from ._apis import BaseApi
from ._apis.riot import AccountApi
class RiotWatcher:
    """
    RiotWatcher class is intended to be the main interaction point with the
    generic Riot APIs.
    """

    def __init__(
        self,
        api_key: str,
        timeout: int = None,
        rate_limiter: RateLimiter = None,
        deserializer: Deserializer = None,
    ):
        """
        Initialize a new instance of the RiotWatcher class.

        :param string api_key: the API key to use for this instance
        :param int timeout: Time to wait for a response before timing out a
                            connection to the Riot API
        :param RateLimiter rate_limiter: Instance to be used for rate limiting.
            Defaults to a new Handlers.RateLimit.BasicRateLimiter.
        :param Deserializer deserializer: Instance to be used to deserialize
            responses from the Riot Api. Defaults to a new
            Handlers.DictionaryDeserializer.
        """
        if not api_key:
            raise ValueError("api_key must be set!")

        # Fix: the defaults were previously `BasicRateLimiter()` /
        # `DictionaryDeserializer()` in the signature, created once at class
        # definition time and therefore silently shared between every
        # RiotWatcher instance. Create per-instance defaults instead.
        if rate_limiter is None:
            rate_limiter = BasicRateLimiter()
        if deserializer is None:
            deserializer = DictionaryDeserializer()

        handler_chain = [
            DeserializerAdapter(deserializer),
            ThrowOnErrorHandler(),
            TypeCorrectorHandler(),
            RateLimiterAdapter(rate_limiter),
            DeprecationHandler(),
        ]

        self._base_api = BaseApi(api_key, handler_chain, timeout=timeout)
        self._account = AccountApi(self._base_api)

    @property
    def account(self) -> AccountApi:
        """
        Interface to the Account Endpoint

        :rtype: riot.AccountApi
        """
        return self._account
| 31.396825
| 104
| 0.637513
| 190
| 1,978
| 6.521053
| 0.410526
| 0.029056
| 0.016142
| 0.025827
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.303337
| 1,978
| 62
| 105
| 31.903226
| 0.899129
| 0.35996
| 0
| 0
| 0
| 0
| 0.017544
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057143
| false
| 0
| 0.171429
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e09178ade395a6b6c4b0853c972ab7664e0aa556
| 4,175
|
py
|
Python
|
webots_ros2_core/webots_ros2_core/devices/gps_device.py
|
TaoYibo1866/webots_ros2
|
a72c164825663cebbfd27e0649ea51d3abf9bbed
|
[
"Apache-2.0"
] | 176
|
2019-09-06T07:02:05.000Z
|
2022-03-27T12:41:10.000Z
|
webots_ros2_core/webots_ros2_core/devices/gps_device.py
|
TaoYibo1866/webots_ros2
|
a72c164825663cebbfd27e0649ea51d3abf9bbed
|
[
"Apache-2.0"
] | 308
|
2019-08-20T12:56:23.000Z
|
2022-03-29T09:49:22.000Z
|
webots_ros2_core/webots_ros2_core/devices/gps_device.py
|
omichel/webots_ros2
|
5b59d0b1fbeff4c3f75a447bd152c10853f4691b
|
[
"Apache-2.0"
] | 67
|
2019-11-03T00:58:09.000Z
|
2022-03-18T07:11:28.000Z
|
# Copyright 1996-2021 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Webots GPS device wrapper for ROS2."""
from copy import deepcopy

from rclpy.qos import QoSReliabilityPolicy, qos_profile_sensor_data
from std_msgs.msg import Float32
from sensor_msgs.msg import NavSatFix, NavSatStatus
from geometry_msgs.msg import PointStamped

from .sensor_device import SensorDevice
from controller import GPS
class GpsDevice(SensorDevice):
    """
    ROS2 wrapper for Webots GPS node.

    Creates suitable ROS2 interface based on Webots
    [GPS](https://cyberbotics.com/doc/reference/gps) node instance:

    It allows the following functionalities:
    - Publishes position measurements of type `sensor_msgs::NavSatFix` if WGS84
    - Publishes position measurements of type `geometry_msgs::PointStamped` if LOCAL

    Args:
    ----
    node (WebotsNode): The ROS2 node.
    device_key (str): Unique identifier of the device used for configuration.
    wb_device (Gps): Webots node of type GPS.

    Kwargs:
    params (dict): Inherited from `SensorDevice` + the following::

        dict: {
            'timestep': int,  # Publish period in ms (default 128ms)
        }
    """

    def __init__(self, node, device_key, wb_device, params=None):
        super().__init__(node, device_key, wb_device, params)
        self.__speed_publisher = None
        self.__gps_publisher = None
        self.__coordinate_system = self._wb_device.getCoordinateSystem()

        # Exit if disabled
        if self._disable:
            return

        # Change default timestep
        self._timestep = 128

        # Fix: copy the profile before overriding reliability. The original
        # aliased and mutated the shared module-level
        # `qos_profile_sensor_data` singleton, changing QoS for every other
        # user of that profile in the process.
        qos_sensor_reliable = deepcopy(qos_profile_sensor_data)
        qos_sensor_reliable.reliability = QoSReliabilityPolicy.RELIABLE

        # Create topics
        self.__speed_publisher = node.create_publisher(
            Float32, self._topic_name + '/speed', qos_sensor_reliable)
        if self.__coordinate_system == GPS.WGS84:
            self.__gps_publisher = node.create_publisher(
                NavSatFix, self._topic_name + '/gps', qos_sensor_reliable)
        else:
            self.__gps_publisher = node.create_publisher(
                PointStamped, self._topic_name + '/gps', qos_sensor_reliable)

    def step(self):
        """Publish speed and position when subscribed (or always_publish)."""
        stamp = super().step()
        if not stamp:
            return

        if self.__gps_publisher.get_subscription_count() > 0 or \
                self.__speed_publisher.get_subscription_count() > 0 or \
                self._always_publish:
            self._wb_device.enable(self._timestep)

            msg = Float32()
            msg.data = self._wb_device.getSpeed()
            self.__speed_publisher.publish(msg)

            # Read the position once instead of three separate device calls.
            x, y, z = self._wb_device.getValues()[:3]
            if self.__coordinate_system == GPS.WGS84:
                msg = NavSatFix()
                msg.header.stamp = stamp
                msg.header.frame_id = self._frame_id
                msg.latitude = x
                msg.longitude = y
                msg.altitude = z
                msg.position_covariance_type = NavSatFix.COVARIANCE_TYPE_UNKNOWN
                msg.status.service = NavSatStatus.SERVICE_GPS
                self.__gps_publisher.publish(msg)
            else:
                msg = PointStamped()
                msg.header.stamp = stamp
                msg.header.frame_id = self._frame_id
                msg.point.x = x
                msg.point.y = y
                msg.point.z = z
                self.__gps_publisher.publish(msg)
        else:
            self._wb_device.disable()
| 37.954545
| 84
| 0.648144
| 492
| 4,175
| 5.247967
| 0.359756
| 0.040279
| 0.046476
| 0.048799
| 0.268784
| 0.224632
| 0.091402
| 0.037955
| 0.037955
| 0.037955
| 0
| 0.013789
| 0.270419
| 4,175
| 109
| 85
| 38.302752
| 0.833881
| 0.324072
| 0
| 0.267857
| 0
| 0
| 0.005138
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.107143
| 0
| 0.196429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e091f0178c86f87d30aea273c60c55d5d07a1bdf
| 24,241
|
py
|
Python
|
players/jeff.py
|
jtreim/cant-stop
|
0ef1a2da67e4232a4ad2be150e950e8f1914a851
|
[
"MIT"
] | null | null | null |
players/jeff.py
|
jtreim/cant-stop
|
0ef1a2da67e4232a4ad2be150e950e8f1914a851
|
[
"MIT"
] | null | null | null |
players/jeff.py
|
jtreim/cant-stop
|
0ef1a2da67e4232a4ad2be150e950e8f1914a851
|
[
"MIT"
] | 2
|
2020-12-29T21:30:54.000Z
|
2021-01-02T05:23:23.000Z
|
from .player import Player
class JeffPlayer(Player):
"""
JeffPlayer focuses on the odds for continuing turns.
To pick which move, calculates a move value based on odds of continued
turns, moving forward less likely columns when possible, and winning
columns over opponents.
"""
ODDS = 'odds'
ROLLS = 'rolls'
ONE_COLUMN_ODDS = {
'2': { ODDS: .13, ROLLS: 0 },
'3': { ODDS: .23, ROLLS: 0 },
'4': { ODDS: .36, ROLLS: 0 },
'5': { ODDS: .45, ROLLS: 1 },
'6': { ODDS: .56, ROLLS: 1 },
'7': { ODDS: .64, ROLLS: 2 },
'8': { ODDS: .56, ROLLS: 1 },
'9': { ODDS: .45, ROLLS: 1 },
'10': { ODDS: .36, ROLLS: 0 },
'11': { ODDS: .23, ROLLS: 0 },
'12': { ODDS: .13, ROLLS: 0 },
}
TWO_COLUMN_ODDS = {
'2': {
'3': { ODDS: .32, ROLLS: 0 },
'4': { ODDS: .44, ROLLS: 1 },
'5': { ODDS: .53, ROLLS: 1 },
'6': { ODDS: .63, ROLLS: 2 },
'7': { ODDS: .71, ROLLS: 2 },
'8': { ODDS: .67, ROLLS: 2 },
'9': { ODDS: .56, ROLLS: 1 },
'10': { ODDS: .47, ROLLS: 1 },
'11': { ODDS: .36, ROLLS: 1 },
'12': { ODDS: .26, ROLLS: 0 },
},
'3': {
'4': { ODDS: .47, ROLLS: 1 },
'5': { ODDS: .53, ROLLS: 1 },
'6': { ODDS: .64, ROLLS: 2 },
'7': { ODDS: .71, ROLLS: 2 },
'8': { ODDS: .68, ROLLS: 2 },
'9': { ODDS: .64, ROLLS: 2 },
'10': { ODDS: .56, ROLLS: 1 },
'11': { ODDS: .45, ROLLS: 1 },
'12': { ODDS: .36, ROLLS: 1 },
},
'4': {
'5': { ODDS: .61, ROLLS: 2 },
'6': { ODDS: .72, ROLLS: 3 },
'7': { ODDS: .77, ROLLS: 3 },
'8': { ODDS: .75, ROLLS: 3 },
'9': { ODDS: .68, ROLLS: 3 },
'10': { ODDS: .67, ROLLS: 2 },
'11': { ODDS: .56, ROLLS: 1 },
'12': { ODDS: .47, ROLLS: 1 },
},
'5': {
'6': { ODDS: .73, ROLLS: 3 },
'7': { ODDS: .78, ROLLS: 4 },
'8': { ODDS: .77, ROLLS: 3 },
'9': { ODDS: .75, ROLLS: 2 },
'10': { ODDS: .69, ROLLS: 2 },
'11': { ODDS: .68, ROLLS: 2 },
'12': { ODDS: .64, ROLLS: 1 },
},
'6': {
'7': { ODDS: .84, ROLLS: 5 },
'8': { ODDS: .82, ROLLS: 5 },
'9': { ODDS: .77, ROLLS: 3 },
'10': { ODDS: .75, ROLLS: 3 },
'11': { ODDS: .68, ROLLS: 2 },
'12': { ODDS: .67, ROLLS: 2 },
},
'7': {
'8': { ODDS: .84, ROLLS: 5 },
'9': { ODDS: .78, ROLLS: 4 },
'10': { ODDS: .77, ROLLS: 3 },
'11': { ODDS: .71, ROLLS: 2 },
'12': { ODDS: .71, ROLLS: 2 },
},
'8': {
'9': { ODDS: .73, ROLLS: 3 },
'10': { ODDS: .72, ROLLS: 3 },
'11': { ODDS: .64, ROLLS: 2 },
'12': { ODDS: .63, ROLLS: 2 },
},
'9': {
'10': { ODDS: .61, ROLLS: 2 },
'11': { ODDS: .53, ROLLS: 1 },
'12': { ODDS: .53, ROLLS: 1 },
},
'10': {
'11': { ODDS: .47, ROLLS: 1 },
'12': { ODDS: .44, ROLLS: 1 },
},
'11': {
'12': { ODDS: .32, ROLLS: 0 }
},
}
THREE_COLUMN_ODDS = {
'2': {
'3': {
'4': { ODDS: .52, ROLLS: 1 },
'5': { ODDS: .58, ROLLS: 1 },
'6': { ODDS: .68, ROLLS: 2 },
'7': { ODDS: .75, ROLLS: 3 },
'8': { ODDS: .76, ROLLS: 3 },
'9': { ODDS: .71, ROLLS: 2 },
'10': { ODDS: .63, ROLLS: 2 },
'11': { ODDS: .53, ROLLS: 1 },
'12': { ODDS: .44, ROLLS: 1 },
},
'4': {
'5': { ODDS: .66, ROLLS: 2 },
'6': { ODDS: .76, ROLLS: 3 },
'7': { ODDS: .81, ROLLS: 4 },
'8': { ODDS: .82, ROLLS: 5 },
'9': { ODDS: .76, ROLLS: 3 },
'10': { ODDS: .74, ROLLS: 3 },
'11': { ODDS: .63, ROLLS: 2 },
'12': { ODDS: .55, ROLLS: 1 },
},
'5': {
'6': { ODDS: .77, ROLLS: 3 },
'7': { ODDS: .81, ROLLS: 4 },
'8': { ODDS: .83, ROLLS: 5 },
'9': { ODDS: .76, ROLLS: 3 },
'10': { ODDS: .76, ROLLS: 3 },
'11': { ODDS: .71, ROLLS: 2 },
'12': { ODDS: .63, ROLLS: 2 },
},
'6': {
'7': { ODDS: .86, ROLLS: 6 },
'8': { ODDS: .88, ROLLS: 7 },
'9': { ODDS: .83, ROLLS: 5 },
'10': { ODDS: .81, ROLLS: 4 },
'11': { ODDS: .76, ROLLS: 3 },
'12': { ODDS: .74, ROLLS: 3 },
},
'7': {
'8': { ODDS: .89, ROLLS: 8 },
'9': { ODDS: .84, ROLLS: 5 },
'10': { ODDS: .83, ROLLS: 5 },
'11': { ODDS: .78, ROLLS: 4 },
'12': { ODDS: .78, ROLLS: 4 },
},
'8': {
'9': { ODDS: .71, ROLLS: 2 },
'10': { ODDS: .63, ROLLS: 2 },
'11': { ODDS: .53, ROLLS: 1 },
'12': { ODDS: .44, ROLLS: 1 },
},
'9': {
'10': { ODDS: .71, ROLLS: 2 },
'11': { ODDS: .64, ROLLS: 2 },
'12': { ODDS: .63, ROLLS: 2 },
},
'10': {
'11': { ODDS: .58, ROLLS: 1 },
'12': { ODDS: .55, ROLLS: 1 },
},
'11': {
'12': { ODDS: .44, ROLLS: 1 },
},
},
'3': {
'4': {
'5': { ODDS: .67, ROLLS: 2 },
'6': { ODDS: .74, ROLLS: 3 },
'7': { ODDS: .79, ROLLS: 4 },
'8': { ODDS: .80, ROLLS: 4 },
'9': { ODDS: .78, ROLLS: 4 },
'10': { ODDS: .76, ROLLS: 3 },
'11': { ODDS: .66, ROLLS: 2 },
'12': { ODDS: .58, ROLLS: 1 },
},
'5': {
'6': { ODDS: .77, ROLLS: 3 },
'7': { ODDS: .79, ROLLS: 4 },
'8': { ODDS: .81, ROLLS: 4 },
'9': { ODDS: .78, ROLLS: 4 },
'10': { ODDS: .76, ROLLS: 3 },
'11': { ODDS: .71, ROLLS: 2 },
'12': { ODDS: .64, ROLLS: 2 },
},
'6': {
'7': { ODDS: .86, ROLLS: 6 },
'8': { ODDS: .85, ROLLS: 6 },
'9': { ODDS: .83, ROLLS: 5 },
'10': { ODDS: .82, ROLLS: 5 },
'11': { ODDS: .76, ROLLS: 3 },
'12': { ODDS: .74, ROLLS: 3 },
},
'7': {
'8': { ODDS: .89, ROLLS: 8 },
'9': { ODDS: .84, ROLLS: 5 },
'10': { ODDS: .84, ROLLS: 5 },
'11': { ODDS: .78, ROLLS: 4 },
'12': { ODDS: .78, ROLLS: 4 },
},
'8': {
'9': { ODDS: .84, ROLLS: 5 },
'10': { ODDS: .83, ROLLS: 5 },
'11': { ODDS: .76, ROLLS: 3 },
'12': { ODDS: .76, ROLLS: 3 },
},
'9': {
'10': { ODDS: .78, ROLLS: 4 },
'11': { ODDS: .71, ROLLS: 2 },
'12': { ODDS: .71, ROLLS: 2 },
},
'10': {
'11': { ODDS: .66, ROLLS: 2 },
'12': { ODDS: .63, ROLLS: 2 },
},
'11': {
'12': { ODDS: .53, ROLLS: 1 },
},
},
'4': {
'5': {
'6': { ODDS: .80, ROLLS: 4 },
'7': { ODDS: .85, ROLLS: 6 },
'8': { ODDS: .85, ROLLS: 6 },
'9': { ODDS: .80, ROLLS: 4 },
'10': { ODDS: .82, ROLLS: 5 },
'11': { ODDS: .78, ROLLS: 4 },
'12': { ODDS: .71, ROLLS: 2 },
},
'6': {
'7': { ODDS: .89, ROLLS: 8 },
'8': { ODDS: .91, ROLLS: 10 },
'9': { ODDS: .86, ROLLS: 6 },
'10': { ODDS: .88, ROLLS: 7 },
'11': { ODDS: .83, ROLLS: 5 },
'12': { ODDS: .82, ROLLS: 5 },
},
'7': {
'8': { ODDS: .90, ROLLS: 9 },
'9': { ODDS: .89, ROLLS: 8 },
'10': { ODDS: .88, ROLLS: 7 },
'11': { ODDS: .84, ROLLS: 5 },
'12': { ODDS: .83, ROLLS: 5 },
},
'8': {
'9': { ODDS: .86, ROLLS: 6 },
'10': { ODDS: .88, ROLLS: 7 },
'11': { ODDS: .82, ROLLS: 5 },
'12': { ODDS: .81, ROLLS: 4 },
},
'9': {
'10': { ODDS: .82, ROLLS: 5 },
'11': { ODDS: .76, ROLLS: 3 },
'12': { ODDS: .76, ROLLS: 3 },
},
'10': {
'11': { ODDS: .76, ROLLS: 3 },
'12': { ODDS: .74, ROLLS: 3 },
},
'11': {
'12': { ODDS: .63, ROLLS: 2 },
},
},
'5': {
'6': {
'7': { ODDS: .89, ROLLS: 8 },
'8': { ODDS: .90, ROLLS: 9 },
'9': { ODDS: .87, ROLLS: 7 },
'10': { ODDS: .86, ROLLS: 6 },
'11': { ODDS: .84, ROLLS: 5 },
'12': { ODDS: .82, ROLLS: 5 },
},
'7': {
'8': { ODDS: .91, ROLLS: 10 },
'9': { ODDS: .85, ROLLS: 6 },
'10': { ODDS: .89, ROLLS: 8 },
'11': { ODDS: .84, ROLLS: 5 },
'12': { ODDS: .84, ROLLS: 5 },
},
'8': {
'9': { ODDS: .87, ROLLS: 7 },
'10': { ODDS: .86, ROLLS: 6 },
'11': { ODDS: .83, ROLLS: 5 },
'12': { ODDS: .83, ROLLS: 5 },
},
'9': {
'10': { ODDS: .80, ROLLS: 4 },
'11': { ODDS: .78, ROLLS: 4 },
'12': { ODDS: .76, ROLLS: 3 },
},
'10': {
'11': { ODDS: .78, ROLLS: 4 },
'12': { ODDS: .76, ROLLS: 3 },
},
'11': {
'12': { ODDS: .71, ROLLS: 2 },
},
},
'6': {
'7': {
'8': { ODDS: .92, ROLLS: 12 },
'9': { ODDS: .91, ROLLS: 10 },
'10': { ODDS: .90, ROLLS: 9 },
'11': { ODDS: .89, ROLLS: 8 },
'12': { ODDS: .89, ROLLS: 8 },
},
'8': {
'9': { ODDS: .90, ROLLS: 9 },
'10': { ODDS: .91, ROLLS: 10 },
'11': { ODDS: .85, ROLLS: 6 },
'12': { ODDS: .88, ROLLS: 7 },
},
'9': {
'10': { ODDS: .85, ROLLS: 6 },
'11': { ODDS: .81, ROLLS: 4 },
'12': { ODDS: .83, ROLLS: 5 },
},
'10': {
'11': { ODDS: .80, ROLLS: 4 },
'12': { ODDS: .82, ROLLS: 5 },
},
'11': {
'12': { ODDS: .76, ROLLS: 3 },
},
},
'7': {
'8': {
'9': { ODDS: .89, ROLLS: 8 },
'10': { ODDS: .89, ROLLS: 8 },
'11': { ODDS: .86, ROLLS: 6 },
'12': { ODDS: .86, ROLLS: 6 },
},
'9': {
'10': { ODDS: .85, ROLLS: 6 },
'11': { ODDS: .79, ROLLS: 4 },
'12': { ODDS: .81, ROLLS: 4 },
},
'10': {
'11': { ODDS: .79, ROLLS: 4 },
'12': { ODDS: .81, ROLLS: 4 },
},
'11': {
'12': { ODDS: .75, ROLLS: 3 },
},
},
'8': {
'9': {
'10': { ODDS: .80, ROLLS: 4 },
'11': { ODDS: .77, ROLLS: 3 },
'12': { ODDS: .77, ROLLS: 3 },
},
'10': {
'11': { ODDS: .74, ROLLS: 3 },
'12': { ODDS: .76, ROLLS: 3 },
},
'11': {
'12': { ODDS: .68, ROLLS: 2 },
},
},
'9': {
'10': {
'11': { ODDS: .67, ROLLS: 2 },
'12': { ODDS: .66, ROLLS: 2 },
},
'11': {
'12': { ODDS: .58, ROLLS: 1 },
},
},
'10': {
'11': {
'12': { ODDS: .52, ROLLS: 1 },
},
},
}
NEW_COLUMN_PENALTY = 1
FINISH_COLUMN_REWARD = 1
FAVORITE_COLUMN_THRESHOLD = 2/3
CONTESTED_COLUMN = 1
MY_PROGRESS_MODIFIER = .5
OPPONENT_PROGRESS_MODIFIER = .5
STEP_DIVISOR = .08
ROUGH_ODDS_THRESHOLD = .2
DESPERATION_TURNS = 2
def get_progress(self, board, changes):
    """
    Return (leader_progress, my_progress) for every column on the board.

    leader_progress maps column -> {'leader': name, 'progress': fraction},
    where the leader is the opponent farthest along that column.
    my_progress maps column -> my normalized progress including the pending
    `changes` from the current turn.
    """
    leader_progress = {}
    my_progress = {}
    for key in board.keys():
        leader_progress[key] = {}
        leader = board[key]['players'][0][0]
        lead = board[key]['players'][0][1] / board[key]['steps']
        if leader == self.name:
            leader = board[key]['players'][1][0]
            # BUG FIX: normalize by the column's step count; the raw step
            # count was previously mixed with normalized fractions below.
            lead = board[key]['players'][1][1] / board[key]['steps']
        for player in board[key]['players']:
            progress = player[1] / board[key]['steps']
            if lead < progress and player[0] != self.name:
                leader = player[0]
                lead = progress
            if player[0] == self.name:
                my_progress[key] = player[1] + changes[key]
                my_progress[key] /= board[key]['steps']
        leader_progress[key]['leader'] = leader
        leader_progress[key]['progress'] = lead
    return leader_progress, my_progress
def get_started_columns(self, changes):
    """Return the columns I've started this turn (positive change), sorted numerically."""
    started = [col for col, delta in changes.items()
               if col != 'turn' and delta > 0]
    return sorted(started, key=int)
def get_finished_columns(self, board, my_progress):
    """
    Return a sorted list of all finished columns, including any completed
    by my current (pending) progress.
    """
    finished = []
    for col, info in board.items():
        total = info['steps']
        for entry in info['players']:
            if entry[1] == total:
                finished.append(col)
        if my_progress[col] == 1 and col not in finished:
            finished.append(col)
    return sorted(finished, key=int)
def continue_based_on_odds(self, started, turns):
    """Continue while the optimal roll count for the started columns exceeds turns taken."""
    if len(started) == 3:
        stats = self.THREE_COLUMN_ODDS[started[0]][started[1]][started[2]]
    elif len(started) == 2:
        stats = self.TWO_COLUMN_ODDS[started[0]][started[1]]
    else:
        stats = self.ONE_COLUMN_ODDS[started[0]]
    return stats[self.ROLLS] > turns
def continue_based_on_new_column(self, board, started, finished, turns):
    """
    Continue based on chances of getting a new valid column.
    Rough estimation for converting 2 column odds to 3 columns.
    """
    pair_stats = self.TWO_COLUMN_ODDS[started[0]][started[1]]
    base_odds = pair_stats[self.ODDS]
    base_rolls = pair_stats[self.ROLLS]
    open_cols = [c for c in board.keys() if c not in started and c not in finished]
    combined = sum(base_odds * self.ONE_COLUMN_ODDS[c][self.ODDS] for c in open_cols)
    # Quick and dirty conversion of surplus odds into extra worthwhile rolls.
    bonus_rolls = (combined - self.ROUGH_ODDS_THRESHOLD) / self.STEP_DIVISOR
    return base_rolls + bonus_rolls > turns
def continue_based_on_new_columns(self, board, started, finished, turns):
    """
    Continue based on chances of getting 2 new valid columns.
    Rough estimation for converting 1 column odds to 3 columns.
    """
    single = self.ONE_COLUMN_ODDS[started[0]]
    base_odds = single[self.ODDS]
    base_rolls = single[self.ROLLS]
    open_cols = [c for c in board.keys() if c not in started and c not in finished]
    combined = 0
    for i, first in enumerate(open_cols):
        for second in open_cols[i + 1:]:
            combined += base_odds * self.TWO_COLUMN_ODDS[first][second][self.ODDS]
    # Quick and dirty conversion of surplus odds into extra worthwhile rolls.
    bonus_rolls = (combined - self.ROUGH_ODDS_THRESHOLD) / self.STEP_DIVISOR
    return base_rolls + bonus_rolls > turns
def opponent_might_win(self, leader_progress):
    """Return True if any single opponent already leads two finished columns."""
    finished_count = {}
    for col, info in leader_progress.items():
        who = info['leader']
        if who == self.name:
            continue
        finished_count.setdefault(who, 0)
        if info['progress'] == 1.0:
            finished_count[who] += 1
            if finished_count[who] >= 2:
                return True
    return False
def started_columns_are_contested(
        self, board, changes, my_progress, started):
    """Return True if an opponent is within 1/3 of my progress on any unfinished started column."""
    for col in started:
        scale = 1 / board[col]['steps']
        for entry in board[col]['players']:
            if entry[0] == self.name:
                continue
            gap = abs(my_progress[col] - entry[1] * scale)
            # Opponent is within 1/3 of my progress, and it's not finished
            if gap <= 1/3 and my_progress[col] != 1:
                return True
def did_finish_column(self, started, my_progress):
    """
    Did I finish a column this turn?

    Returns an explicit bool. (The original fell through to an implicit
    None on the negative path; False is falsy-compatible for all callers.)
    """
    return any(my_progress[col] == 1.0 for col in started)
def is_continuing_turn(self, board, changes):
    """
    Decide to continue rolling. Based on if I just won the game,
    optimal rolling turns, I finished a column, and
    number of columns already finished in the game.
    """
    leaders, mine = self.get_progress(board, changes)
    started = self.get_started_columns(changes)
    finished = self.get_finished_columns(board, mine)
    # No reason to stop before starting 3 columns and none are finished.
    if len(started) < 3 and not finished:
        return True
    # Stop if I won.
    if len(self.get_my_finished(mine)) >= 3:
        return False
    # If I finished a column, let's just end there.
    if self.did_finish_column(started, mine):
        return False
    turns = changes['turn']
    # With 3 columns committed, just roll the optimal number of times.
    if len(started) == 3:
        return self.continue_based_on_odds(started, turns)
    # Columns are finished, but fewer than 3 columns started.
    if len(started) == 2:
        return self.continue_based_on_new_column(board, started, finished, turns)
    if len(started) == 1:
        return self.continue_based_on_new_columns(board, started, finished, turns)
    # Shouldn't ever get here...continuing without starting a column...
    return True
def determine_move_value(self, move, leader_progress, my_progress, board, started):
    """
    Assign a move value primarily based on odds of continuing turns, with
    bias towards not starting new columns and finishing columns.
    """
    first = str(move[0])
    if len(move) == 2:
        if move[0] != move[1]:
            value = self.TWO_COLUMN_ODDS[first][str(move[1])][self.ODDS]
        else:
            # Doubling on one column counts its single-column odds twice.
            value = self.ONE_COLUMN_ODDS[first][self.ODDS] * 2
    else:
        value = self.ONE_COLUMN_ODDS[first][self.ODDS]
    for raw in set(move):
        col = str(raw)
        increment = 1 / board[col]['steps']
        if my_progress[col] + increment == 1:
            # Reward for finishing a column.
            value += self.FINISH_COLUMN_REWARD
        if col not in started:
            # Penalize for starting new columns.
            value -= self.NEW_COLUMN_PENALTY
        if len(started) == 3:
            # Less likely columns are desirable when 3 columns have started.
            value += 1 - self.ONE_COLUMN_ODDS[col][self.ODDS]
    return value
def get_my_finished(self, my_progress):
    """Return the columns whose progress for me is complete (== 1)."""
    return [col for col, frac in my_progress.items() if frac == 1]
def look_for_the_win(self, board, my_progress, moves):
    """Return the first move that would finish enough columns to win, else None."""
    already_done = len(self.get_my_finished(my_progress))
    for move in moves:
        newly_finished = 0
        if len(move) == 2 and move[0] == move[1]:
            # Moving twice on the same column advances it two steps.
            col = str(move[0])
            if my_progress[col] + 2 / board[col]['steps'] == 1:
                newly_finished += 1
        else:
            # Otherwise, maybe I can finish two at a time.
            for part in move:
                col = str(part)
                if my_progress[col] + 1 / board[col]['steps'] == 1:
                    newly_finished += 1
        # If finishing these columns wins me the game, let's do it.
        if already_done + newly_finished >= 3:
            return move
    return None
def compare_with_leader(self, leader_progress, my_progress, board, col):
    """Return how many steps ahead of the column leader I am (negative if behind)."""
    one_step = 1 / board[col]['steps']
    return (my_progress[col] - leader_progress[col]['progress']) / one_step
def choose_move(self, moves, board, changes, invalid_move=False):
    """Pick an immediately winning move if one exists, otherwise the highest-value move."""
    leader_progress, my_progress = self.get_progress(board, changes)
    started = self.get_started_columns(changes)
    # Look for moves that let me win.
    winning = self.look_for_the_win(board, my_progress, moves)
    if winning is not None:
        return winning
    # Otherwise pick the move with the best heuristic value
    # (max() keeps the earliest maximum, matching a strict ">" scan).
    return max(moves, key=lambda mv: self.determine_move_value(
        mv, leader_progress, my_progress, board, started))
| 35.806499
| 95
| 0.413143
| 2,681
| 24,241
| 3.648266
| 0.092503
| 0.034352
| 0.020243
| 0.022084
| 0.472344
| 0.381352
| 0.342705
| 0.260812
| 0.227993
| 0.184541
| 0
| 0.092439
| 0.431459
| 24,241
| 676
| 96
| 35.859467
| 0.617254
| 0.083825
| 0
| 0.442857
| 0
| 0
| 0.027876
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026786
| false
| 0
| 0.001786
| 0
| 0.1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e09525abcb7cde902261ff8255cd7d2143781fb5
| 8,471
|
py
|
Python
|
PyMaSC/handler/mappability.py
|
ronin-gw/PyMaSC
|
70c32b647017e162e0b004cadcf4f59a2d4012b6
|
[
"MIT"
] | 2
|
2018-04-20T13:34:16.000Z
|
2021-07-13T16:20:28.000Z
|
PyMaSC/handler/mappability.py
|
ronin-gw/PyMaSC
|
70c32b647017e162e0b004cadcf4f59a2d4012b6
|
[
"MIT"
] | 1
|
2021-03-16T11:08:46.000Z
|
2021-03-16T17:26:15.000Z
|
PyMaSC/handler/mappability.py
|
ronin-gw/PyMaSC
|
70c32b647017e162e0b004cadcf4f59a2d4012b6
|
[
"MIT"
] | null | null | null |
import logging
import os
import json
from multiprocessing import Process, Queue, Lock
import numpy as np
from PyMaSC.core.mappability import MappableLengthCalculator
from PyMaSC.utils.progress import ProgressHook, MultiLineProgressManager
from PyMaSC.utils.compatible import tostr, xrange
from PyMaSC.utils.output import prepare_outdir
from PyMaSC.utils.calc import exec_worker_pool
logger = logging.getLogger(__name__)
class BWIOError(IOError):
    """Raised when the BigWig mappability file is missing or unreadable."""
    pass
class JSONIOError(IOError):
    """Raised when the mappability stats JSON path is unusable (not a file / unwritable)."""
    pass
class NeedUpdate(Exception):
    """Raised when cached stats cover a shorter shift than requested and must be recalculated."""
    pass
class NumpyEncoder(json.JSONEncoder):
    """JSON encoder that converts numpy scalar types to native Python numbers."""

    def default(self, obj):
        # np.floating / np.integer are the abstract scalar bases and cover all
        # widths (float32/64, int32/64, uint...). The original tested
        # np.long/np.float, which were removed from numpy's namespace.
        if isinstance(obj, np.floating):
            return float(obj)
        elif isinstance(obj, np.integer):
            return int(obj)
        else:
            # BUG FIX: the original called super(self, NumpyEncoder) with the
            # arguments reversed, raising TypeError on every fallback instead
            # of delegating to json.JSONEncoder.default.
            return super(NumpyEncoder, self).default(obj)
class MappabilityHandler(MappableLengthCalculator):
    """
    Manage mappable-length statistics for a BigWig mappability track.

    Loads previously saved stats from a JSON file when they are valid and
    cover the requested shift size; otherwise (re)calculates them, possibly
    in parallel via MappabilityCalcWorker processes, and saves the result.
    """

    @staticmethod
    def calc_mappable_len_required_shift_size(readlen, max_shift):
        """Return the shift size actually required for the given read length."""
        return max_shift - readlen + 1 if max_shift > 2*readlen - 1 else readlen

    def __init__(self, path, max_shift=0, readlen=0, map_path=None, nworker=1):
        """
        :param path: BigWig mappability file
        :param max_shift: requested maximum shift size
        :param readlen: read length used to derive the required shift
        :param map_path: stats JSON path (default: '<path>_mappability.json')
        :param nworker: number of parallel worker processes
        :raises BWIOError: when the BigWig file cannot be read
        :raises JSONIOError: when the stats path is unusable
        """
        max_shift = self.calc_mappable_len_required_shift_size(readlen, max_shift)
        self.nworker = nworker

        if not os.access(path, os.R_OK):
            reason = "file is unreadable." if os.path.isfile(path) else "no such file."
            logger.critical("Failed to open '{}': {}".format(path, reason))
            raise BWIOError

        super(MappabilityHandler, self).__init__(path, max_shift)
        self.close()
        self._progress.disable_bar()
        self.need_save_stats = True

        if map_path:
            self.map_path = map_path
        else:
            self.map_path = os.path.splitext(path)[0] + "_mappability.json"

        if not os.path.exists(self.map_path):
            self._check_saving_directory_is_writable()
            logger.info("Calcurate mappable length with max shift size {}.".format(max_shift))
        elif not os.path.isfile(self.map_path):
            logger.critical("Specified path is not file: '{}'".format(self.map_path))
            raise JSONIOError
        elif not os.access(self.map_path, os.R_OK):
            logger.error("Failed to read '{}'".format(self.map_path))
        else:
            self._try_load_mappability_stats()
            if self.need_save_stats:
                self._check_stats_is_overwritable()
                logger.info("Calcurate mappable length with max shift size {}.".format(max_shift))
            else:
                logger.info("Use mappability stats read from '{}'".format(self.map_path))

    def _check_saving_directory_is_writable(self):
        """Ensure the stats output directory exists and is writable."""
        dirname = os.path.dirname(self.map_path)
        dirname = dirname if dirname else '.'
        if not prepare_outdir(dirname, logger):
            raise JSONIOError

    def _try_load_mappability_stats(self):
        """Attempt to load cached stats; any failure leaves need_save_stats True."""
        try:
            stats = self._read_mappability_stats()
        except IOError as e:
            logger.error("Failed to read '{}'".format(self.map_path))
            # BUG FIX: Python 3 OSError has no `.message`; use strerror.
            logger.error("[Errno {}] {}".format(e.errno, e.strerror))
        except (TypeError, OverflowError, ValueError, KeyError, IndexError) as e:
            logger.error("Failed to load json file: '{}'".format(self.map_path))
        except NeedUpdate:
            pass
        else:
            self._load_mappability_stats(stats)

    def _read_mappability_stats(self):
        """Read and validate the stats JSON; raise on any inconsistency."""
        with open(self.map_path) as f:
            stats = json.load(f)

        for k in ("max_shift", "__whole__", "references"):
            if k not in stats:
                logger.error("Mandatory key '{}' not found.".format(k))
                raise KeyError(k)

        if stats["max_shift"] < self.max_shift:
            logger.info("Specified shift length longer than former analysis. The stats will be updated.")
            raise NeedUpdate

        if stats["max_shift"] != len(stats["__whole__"]) - 1:
            logger.error("Max shift length for whole genome unmatched.")
            raise IndexError

        for ref in self.chromsizes:
            if ref not in stats["references"]:
                logger.error("Reference '{}' not found.".format(ref))
                raise KeyError(ref)
            if stats["max_shift"] != len(stats["references"][ref]) - 1:
                # BUG FIX: the format placeholder was missing, so the literal
                # text 'ref' was logged instead of the reference name.
                logger.error("Max shift length for '{}' unmatched.".format(ref))
                raise IndexError

        return stats

    def _load_mappability_stats(self, stats):
        """Adopt validated stats, truncated to the requested shift size."""
        self.mappable_len = stats["__whole__"][:self.max_shift + 1]
        self.chrom2mappable_len = {ref: b[:self.max_shift + 1] for ref, b in stats["references"].items()}
        self.chrom2is_called = {ref: True for ref in self.chromsizes}
        self.is_called = True
        self.need_save_stats = False

    def _check_stats_is_overwritable(self):
        """Verify the existing stats file can be overwritten."""
        if not os.access(self.map_path, os.W_OK):
            logger.critical("Failed to overwrite '{}'".format(self.map_path))
            raise JSONIOError
        else:
            logger.warning("Existing file '{}' will be overwritten.".format(self.map_path))

    def save_mappability_stats(self):
        """Write the current stats to the JSON file unless already up to date."""
        if not self.need_save_stats:
            return logger.info("Mappability stats updating is not required.")

        logger.info("Save mappable length to '{}'".format(self.map_path))
        try:
            with open(self.map_path, 'w') as f:
                json.dump({
                    "max_shift": self.max_shift,
                    "__whole__": self.mappable_len,
                    "references": self.chrom2mappable_len
                }, f, indent=4, sort_keys=True, cls=NumpyEncoder)
        except IOError as e:
            # BUG FIX: Python 3 OSError has no `.message`; use strerror.
            logger.error("Faild to output: {}\n[Errno {}] {}".format(
                e.filename, e.errno, e.strerror))

        self.need_save_stats = False

    def calc_mappability(self):
        """Calculate mappable length for all not-yet-called chromosomes in parallel."""
        target_chroms = [tostr(c) for c, b in self.chrom2is_called.items() if b is False]
        if not target_chroms:
            return self._sumup_mappability()

        order_queue = Queue()
        report_queue = Queue()
        logger_lock = Lock()
        progress = MultiLineProgressManager()

        workers = [MappabilityCalcWorker(self.path, self.max_shift, order_queue, report_queue, logger_lock)
                   for _ in range(min(self.nworker, len(target_chroms)))]

        with exec_worker_pool(workers, target_chroms, order_queue):
            while not self.is_called:
                chrom, obj = report_queue.get()
                if chrom is None:  # update progress
                    chrom, body = obj
                    with logger_lock:
                        progress.update(chrom, body)
                else:
                    length = obj
                    self.chrom2mappable_len[chrom] = tuple(length)
                    self.chrom2is_called[chrom] = True
                    if all(self.chrom2is_called.values()):
                        self.is_called = True
                    with logger_lock:
                        progress.erase(chrom)

        progress.clean()
        self._sumup_mappability()

    def _sumup_mappability(self):
        """Accumulate per-chromosome lengths into the genome-wide totals."""
        for length in self.chrom2mappable_len.values():
            for i in xrange(self.max_shift + 1):
                self.mappable_len[i] += length[i]
class MappabilityCalcWorker(Process):
    """
    Worker process that calculates mappable length one chromosome at a time.

    Receives chromosome names from order_queue (None is the stop sentinel)
    and puts (chrom, mappable_len) results onto report_queue.
    """
    def __init__(self, path, max_shift, order_queue, report_queue, logger_lock):
        super(MappabilityCalcWorker, self).__init__()
        # Each worker owns an independent calculator over the mappability track.
        self.calculator = MappableLengthCalculator(path, max_shift, logger_lock)
        self.calculator._progress.disable_bar()
        self.order_queue = order_queue
        self.report_queue = report_queue
        self.logger_lock = logger_lock
        # Progress updates are forwarded to the parent through the report
        # queue (ProgressHook replaces the bar disabled just above).
        self.calculator._progress = ProgressHook(report_queue)

    def run(self):
        # Runs in the child process; logging is serialized via logger_lock.
        with self.logger_lock:
            logger.debug("{}: Hello. My pid is {}.".format(self.name, os.getpid()))
        while True:
            chrom = self.order_queue.get()
            if chrom is None:
                # Stop sentinel: no more work.
                break
            with self.logger_lock:
                logger.debug("{}: Process {}...".format(self.name, chrom))
            self.calculator.calc_mappability(chrom)
            self.report_queue.put((chrom, self.calculator.chrom2mappable_len[chrom]))
        with self.logger_lock:
            logger.debug("{}: Goodbye.".format(self.name))
        self.calculator.close()
| 37.816964
| 107
| 0.613387
| 1,001
| 8,471
| 4.982018
| 0.1998
| 0.041708
| 0.037497
| 0.027271
| 0.245238
| 0.176058
| 0.097052
| 0.075797
| 0.060156
| 0.025266
| 0
| 0.004285
| 0.283674
| 8,471
| 223
| 108
| 37.986547
| 0.817568
| 0.001771
| 0
| 0.196629
| 0
| 0
| 0.104802
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.073034
| false
| 0.022472
| 0.05618
| 0.005618
| 0.202247
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0986b7dc3912a34a19f7612f40be9b6072d9a7e
| 15,310
|
py
|
Python
|
lib/twitter_utils.py
|
Vman45/ask-alexa-twitter
|
1711005e51db1f66beb2e41e762c39ee003273aa
|
[
"MIT"
] | 310
|
2015-07-30T17:05:06.000Z
|
2020-12-19T18:39:39.000Z
|
lib/twitter_utils.py
|
Vman45/ask-alexa-twitter
|
1711005e51db1f66beb2e41e762c39ee003273aa
|
[
"MIT"
] | 29
|
2015-12-08T22:10:47.000Z
|
2017-10-06T16:40:05.000Z
|
lib/twitter_utils.py
|
Vman45/ask-alexa-twitter
|
1711005e51db1f66beb2e41e762c39ee003273aa
|
[
"MIT"
] | 73
|
2015-11-12T06:56:53.000Z
|
2020-09-13T22:23:44.000Z
|
import requests
import jsonpickle
from requests_oauthlib import OAuth1
from urllib.parse import parse_qs, urlencode
import cherrypy
from collections import defaultdict
import json
import os
import re
from collections import defaultdict
# For readable serializations
jsonpickle.set_encoder_options('json', sort_keys=True, indent=4)
class LocalCache(object):
    """ Generic class for encapsulating twitter credential caching """
    server_data_template = "{}.server"
    user_data_template = "{0}.user.{1}"

    def __init__(self, backup="tmp/twitter.cache"):
        # Unique identifier for the backup of this cache
        self.backup = backup
        self.memcache = {
            "users": defaultdict(lambda: {}),
            "server": defaultdict(lambda: {})
        }
        self.deserialize()

    def users(self):
        """Return the per-user state mapping."""
        return self.memcache['users']

    def set_user_state(self, user_id, state):
        """Replace the cached state dict for a user."""
        self.memcache['users'][user_id] = state

    def update_user_state(self, user_id, state=None):
        # BUG FIX: the original used a mutable default argument (state={}),
        # which is shared across calls; use a None sentinel instead.
        self.memcache['users'][user_id].update({} if state is None else state)

    def get_user_state(self, user_id):
        return self.memcache['users'][user_id]

    def clear_user_state(self, user_id):
        return self.memcache['users'][user_id].clear()

    def update_server_state(self, state_dict):
        self.memcache['server'].update(state_dict)

    def get_server_state(self):
        return self.memcache['server']

    def clear_server_state(self):
        return self.memcache['server'].clear()

    def initialize_user_queue(self, user_id, queue):
        self.memcache['users'][user_id]['user_queue'] = ReadableQueue(queue)

    def user_queue(self, user_id):
        # Returns None implicitly when no queue has been initialized.
        if 'user_queue' in self.memcache['users'][user_id]:
            return self.memcache['users'][user_id]['user_queue']

    def server_fname(self):
        """Backup file path for server-side state."""
        return self.server_data_template.format(self.backup)

    def user_fname(self, user):
        """Backup file path for one user's state."""
        return self.user_data_template.format(self.backup, user)

    def deserialize(self):
        """Reload server and per-user state from backup files, if present."""
        cache_loaded = False
        if os.path.exists(self.server_fname()) and not os.path.isdir(self.backup):
            try:
                # NOTE: replaces the defaultdicts with plain dicts, matching
                # the original behavior on a successful reload.
                self.memcache = {"server": {},
                                 "users": {}}
                with open(self.server_fname()) as backupfile:
                    print ("Attempting to reload cache")
                    self.memcache['server'] = jsonpickle.decode(backupfile.read())
                    print ("Server cache loaded", json.dumps(self.memcache, indent=4))
                for user in self.memcache['server']['user_list']:
                    # Try to load as much user data as possible
                    if os.path.exists(self.user_fname(user)):
                        print ("found path for user", user)
                        with open(self.user_fname(user)) as userfile:
                            user_data = jsonpickle.decode(userfile.read())
                            self.memcache['users'][user] = user_data
                cache_loaded = True
            except Exception as e:
                print ("Cache file corrupted...")
                raise e
        if not cache_loaded:
            print ("Cache could not be loaded")
        else:
            print ("CACHE LOADED SUCCESSFULLY!")

    def serialize(self):
        """Write server state and each user's state to their backup files."""
        json_to_serialize = self.memcache['server']
        user_list = list(self.users().keys())
        json_to_serialize.update({"user_list": user_list})
        with open(self.server_fname(), 'w') as backup_server:
            # Serialize Server:
            backup_server.write(jsonpickle.encode(json_to_serialize))
        for user in user_list:
            user_data = self.get_user_state(user)
            with open(self.user_fname(user), 'w') as userfile:
                userfile.write(jsonpickle.encode(user_data))
class ReadableQueue(object):
    """Cursor over a list of readable items, stored as (index, item) pairs."""

    def __init__(self, queue=None, pos=0):
        # BUG FIX: mutable default argument ([]) replaced with a None sentinel.
        items = [] if queue is None else queue
        self.hashmap = {"queue": [(i, e) for i, e in enumerate(items)],
                        "pos": pos}

    def queue(self):
        return self.hashmap['queue']

    def is_empty(self):
        return len(self.queue()) == 0

    def is_finished(self):
        return self.pos() == len(self.queue())

    def pos(self):
        return self.hashmap['pos']

    def set_pos(self, val):
        self.hashmap['pos'] = val

    def get_next(self, offset=1):
        """Advance the cursor and return up to `offset` items; None when already at the end."""
        if self.pos() < len(self.queue()):
            chunk = self.queue()[self.pos(): self.pos() + offset]
            self.set_pos(min(self.pos() + offset, len(self.queue())))
            return chunk
        # Implicit None when the cursor is already past the last item.

    def read_out_next(self, offset=1):
        return " ".join([readable.read_out(index) for index, readable in self.get_next(offset)])

    def has_prev(self):
        return self.pos() > 0

    def get_prev(self, offset=1):
        """Move the cursor back `offset` items and return them; None if at the start."""
        if self.pos() > 0:
            self.set_pos(self.pos() - offset)
            if self.pos() < 0:
                # [1, current(2), 3] get_prev(offeset=3)
                # pos :=> -2, offset :=> 3-2 = 1, pos :=> 0, then read 0 to 1
                offset = offset + self.pos()
                self.set_pos(0)
            # BUG FIX: the slice previously ended at `offset` (treated as an
            # absolute index), returning too few items whenever pos > 0;
            # it must end at pos + offset.
            return self.queue()[self.pos(): self.pos() + offset]
        return None

    def read_out_prev(self, offset=1):
        return " ".join([readable.read_out() for readable in self.get_prev(offset)])
# Module-level singleton: caches OAuth tokens for different users.
# NOTE: instantiated at import time; deserialize() may read backup files.
local_cache = LocalCache()
def strip_html(text):
    """ Get rid of ugly twitter html """
    def reply_to(text):
        """Convert leading @mentions into a spoken 'Replying to ...' prefix."""
        replying_to = []
        # BUG FIX: message must be initialized; text that is empty or made up
        # entirely of @mentions previously raised UnboundLocalError.
        message = []
        split_text = text.split()
        for index, token in enumerate(split_text):
            if token.startswith('@'):
                replying_to.append(token[1:])
            else:
                message = split_text[index:]
                break
        rply_msg = ""
        if len(replying_to) > 0:
            rply_msg = "Replying to "
            for token in replying_to[:-1]:
                rply_msg += token + ","
            if len(replying_to) > 1:
                rply_msg += 'and '
            rply_msg += replying_to[-1] + ". "
        return rply_msg + " ".join(message)

    text = reply_to(text)
    text = text.replace('@', ' ')
    # Drop raw links from the spoken form.
    return " ".join([token for token in text.split()
                     if ('http:' not in token) and ('https:' not in token)])
class Tweet(object):
    """Thin wrapper around a tweet JSON dict with speech-friendly rendering."""

    def __init__(self, json_obj):
        # assumes json_obj follows the Twitter statuses schema -- TODO confirm
        self.tweet = json_obj

    def get_id(self):
        return self.tweet['id']

    def get_raw_text(self):
        return self.tweet['text']

    def _process_text(self):
        """Strip links/handles and swap screen names for display names."""
        cleaned = strip_html(self.tweet['text'])
        mentions = self.tweet['entities']['user_mentions']
        cleaned = cleaned.replace('@', 'at ')
        for mention in mentions:
            cleaned = cleaned.replace(mention['screen_name'], mention['name'])
        return cleaned

    def get_screen_name(self):
        return self.tweet['user']['screen_name']

    def get_user_name(self):
        return self.tweet['user']['name']

    def read_out(self, index):
        """Render a short spoken form of the tweet, 1-indexed."""
        return "tweet number {num} by {user} : {text} ,".format(
            num=index + 1,
            user=self.get_user_name(),
            text=self._process_text())

    def detailed_description(self):
        """Render a long spoken description including author and reply metadata."""
        parts = ["This tweet was posted by {user_name} whose twitter handle is {screen_name} the account description reads: {description}."
                 .format(screen_name=self.tweet['user']['screen_name'],
                         user_name=self.tweet['user']['name'],
                         description=self.tweet['user']['description'])]
        if self.tweet['retweeted']:
            parts.append("It's been retweeted {} times.".format(self.tweet['retweet_count']))
        if self.tweet['favorited']:
            parts.append("{} people have favorited it.".format(self.tweet['favorites_count']))
        if self.tweet["in_reply_to_screen_name"]:
            parts.append("it was posted in response to user {}.".format(self.tweet['in_reply_to_screen_name']))
        parts.append("the text of the tweet is, {}.".format(self._process_text()))
        return " ".join(parts)

    def user_mentions(self):
        # NOTE(review): reads a top-level 'user_mentions' key, not
        # tweet['entities']['user_mentions'] -- verify against callers.
        return self.tweet['user_mentions']
def get_cached_access_pair(uid):
    """Return (access_token, access_secret) for a cached user; raise ValueError if unknown."""
    if uid not in local_cache.users():
        raise ValueError
    state = local_cache.get_user_state(uid)
    return state['access_token'], state['access_secret']
def get_request_token(callback_url=None):
    """Obtain an OAuth request token/secret pair and cache them server-side."""
    url = "https://api.twitter.com/oauth/request_token"
    consumer_key, consumer_secret = local_cache.get_server_state()['twitter_keys']
    response = requests.post(url,
                             auth=OAuth1(consumer_key, consumer_secret),
                             params={"oauth_callback": callback_url})
    parsed = parse_qs(response.text)
    local_cache.update_server_state({
        "request_token": parsed['oauth_token'][0],
        "request_secret": parsed['oauth_token_secret'][0]})
    return parsed['oauth_token_secret'], parsed['oauth_token']
def authenticate_user_page(callback_url="", metadata=None):
    """Fetch the twitter OAuth authentication page HTML for the user."""
    url = "https://api.twitter.com/oauth/authenticate"
    oauth_secret, oauth_token = get_request_token(callback_url)
    local_cache.update_server_state({'metadata': metadata})
    response = requests.get(url, params={"force_login": True,
                                         "oauth_token": oauth_token})
    return response.text
def post_tweet(user_id, message, additional_params=None):
    """
    Helper function to post a tweet
    """
    url = "https://api.twitter.com/1.1/statuses/update.json"
    params = {"status": message}
    # BUG FIX: avoid a shared mutable default argument for additional_params.
    params.update(additional_params or {})
    r = make_twitter_request(url, user_id, params, request_type='POST')
    print (r.text)
    return "Successfully posted a tweet {}".format(message)
def get_access_token(oauth_token, oauth_verifier):
    """Exchange a verified request token for an access token; return a URL fragment."""
    url = "https://api.twitter.com/oauth/access_token"
    server_state = local_cache.get_server_state()
    consumer_key, consumer_secret = server_state['twitter_keys']
    auth = OAuth1(consumer_key, consumer_secret,
                  server_state['request_token'], server_state['request_secret'])
    response = requests.post(url, params={"oauth_verifier": oauth_verifier}, auth=auth)
    parsed = parse_qs(response.text)
    uid = parsed['oauth_token'][0]
    print ("Access token", uid)
    local_cache.set_user_state(user_id=uid,
                               state={"access_token": parsed['oauth_token'][0],
                                      "access_secret": parsed['oauth_token_secret'][0],
                                      'twitter_user_id': parsed['user_id'][0],
                                      'screen_name': parsed['screen_name'][0]})
    local_cache.serialize()
    return urlencode({"state": local_cache.get_server_state()['metadata']['state'],
                      "access_token": uid,
                      "token_type": "Bearer"})
def get_twitter_auth(user_id):
    """Build an OAuth1 signer for the given cached user."""
    consumer_key, consumer_secret = local_cache.get_server_state()['twitter_keys']
    token, secret = get_cached_access_pair(user_id)
    return OAuth1(consumer_key, consumer_secret, token, secret)
def process_tweets(tweet_list):
    """ Clean tweets and enumerate, preserving only things that we are interested in """
    return list(map(Tweet, tweet_list))
def make_twitter_request(url, user_id, params=None, request_type='GET'):
    """ Generically make a request to twitter API using a particular user's authorization """
    # BUG FIX: mutable default argument replaced with a None sentinel.
    params = {} if params is None else params
    if request_type == "GET":
        return requests.get(url, auth=get_twitter_auth(user_id), params=params)
    elif request_type == "POST":
        return requests.post(url, auth=get_twitter_auth(user_id), params=params)
    # Previously an unknown request_type silently returned None.
    raise ValueError("Unsupported request_type: {}".format(request_type))
def get_user_twitter_details(user_id, params=None):
    """Look up the cached user's twitter profile details."""
    url = "https://api.twitter.com/1.1/users/lookup.json"
    # BUG FIX: the original mutated its mutable default dict via
    # params.update(), leaking state between calls; copy instead.
    query = dict(params or {})
    user_cache = local_cache.get_user_state(user_id)
    query.update({"user_id": user_cache['twitter_user_id']})
    response = make_twitter_request(url, user_id, query)
    return response.json()
def geo_search(user_id, search_location):
    """
    Search for a location - free form
    """
    url = "https://api.twitter.com/1.1/geo/search.json"
    return make_twitter_request(url, user_id, {"query": search_location}).json()
def closest_trend_search(user_id, params={}):
    """Find the trend locations closest to the user."""
    #url = "https://api.twitter.com/1.1/trends/place.json"
    url = "https://api.twitter.com/1.1/trends/closest.json"
    return make_twitter_request(url, user_id, params).json()
def list_trends(user_id, woe_id):
    """List trending topics for a 'where on earth' id."""
    url = "https://api.twitter.com/1.1/trends/place.json"
    return make_twitter_request(url, user_id, {"id": woe_id}).json()
def read_out_tweets(processed_tweets, speech_convertor=None):
    """
    Input - list of processed 'Tweets'
    output - list of spoken responses
    """
    template = "tweet number {num} by {user}. {text}."
    return [template.format(num=position + 1, user=user, text=text)
            for position, (user, text) in enumerate(processed_tweets)]
def request_tweet_list(url, user_id, params=None):
    """Fetch a twitter endpoint and wrap the resulting statuses as Tweets."""
    # BUG FIX: params was accepted but silently dropped (callers such as
    # get_user_latest_tweets pass query params); forward it to the request.
    return process_tweets(make_twitter_request(url, user_id, params or {}).json())
def get_home_tweets(user_id, input_params={}):
    """Fetch the user's home timeline as processed Tweets."""
    url = "https://api.twitter.com/1.1/statuses/home_timeline.json"
    print ("Trying to get home tweets")
    return request_tweet_list(url, user_id)
def get_retweets_of_me(user_id, input_params={}):
    """ returns recently retweeted tweets """
    retweets_url = "https://api.twitter.com/1.1/statuses/retweets_of_me.json"
    print ("trying to get retweets")
    return request_tweet_list(retweets_url, user_id)
def get_my_favourite_tweets(user_id, input_params = {}):
    """ Returns a user's favourite tweets """
    favourites_url = "https://api.twitter.com/1.1/favorites/list.json"
    return request_tweet_list(favourites_url, user_id)
def get_user_latest_tweets(user_id, params={}):
    """Fetch the user's own timeline (trailing '?' kept from the original URL)."""
    timeline_url = "https://api.twitter.com/1.1/statuses/user_timeline.json?"
    return request_tweet_list(timeline_url, user_id, params)
def get_latest_twitter_mentions(user_id):
    """Fetch tweets that mention the user."""
    mentions_url = "https://api.twitter.com/1.1/statuses/mentions_timeline.json"
    return request_tweet_list(mentions_url, user_id)
def search_for_tweets_about(user_id, params):
    """ Search twitter API """
    search_url = "https://api.twitter.com/1.1/search/tweets.json"
    result = make_twitter_request(search_url, user_id, params)
    return process_tweets(result.json()["statuses"])
| 36.279621
| 150
| 0.615741
| 1,914
| 15,310
| 4.697492
| 0.140021
| 0.032032
| 0.021355
| 0.03003
| 0.375264
| 0.286286
| 0.241352
| 0.194194
| 0.164387
| 0.093872
| 0
| 0.005923
| 0.261137
| 15,310
| 421
| 151
| 36.365796
| 0.788897
| 0.052972
| 0
| 0.071918
| 0
| 0.003425
| 0.147986
| 0.003194
| 0
| 0
| 0
| 0
| 0
| 1
| 0.195205
| false
| 0.003425
| 0.034247
| 0.068493
| 0.417808
| 0.034247
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e09899e15fdc6c14c2bf5b2ab6389520f9a3d9b7
| 1,399
|
py
|
Python
|
sundry/serializable.py
|
jamesabel/sundry
|
4f63bfa0624c88a3cd05adf2784e9e3e66e094f4
|
[
"MIT"
] | 2
|
2019-10-02T06:30:27.000Z
|
2021-07-10T22:39:30.000Z
|
sundry/serializable.py
|
jamesabel/sundry
|
4f63bfa0624c88a3cd05adf2784e9e3e66e094f4
|
[
"MIT"
] | 3
|
2019-03-13T17:15:58.000Z
|
2019-06-04T20:26:57.000Z
|
sundry/serializable.py
|
jamesabel/sundry
|
4f63bfa0624c88a3cd05adf2784e9e3e66e094f4
|
[
"MIT"
] | 1
|
2019-03-08T21:37:29.000Z
|
2019-03-08T21:37:29.000Z
|
import json
from enum import Enum
from decimal import Decimal
def convert_serializable_special_cases(o):
    """
    Convert an object to a type that is fairly generally serializable (e.g. json serializable).
    This only handles the cases that need converting.  The json module handles all the rest.
    For JSON, with json.dump or json.dumps with argument default=convert_serializable.
    Example:
    json.dumps(my_animal, indent=4, default=_convert_serializable)
    :param o: object to be converted to a type that is serializable
    :return: a serializable representation
    """
    if isinstance(o, Enum):
        return o.value
    if isinstance(o, Decimal):
        # decimal.Decimal (e.g. in AWS DynamoDB): use an int when the value
        # is integral, otherwise fall back to a float.
        return int(o) if o % 1 == 0 else float(o)
    raise NotImplementedError(f"can not serialize {o} since type={type(o)}")
def make_serializable(o):
    """Return a copy of *o* reduced to generally-serializable types via a JSON round trip."""
    encoded = json.dumps(o, default=convert_serializable_special_cases, sort_keys=True)
    return json.loads(encoded)
| 36.815789
| 97
| 0.709078
| 187
| 1,399
| 5.219251
| 0.427807
| 0.133197
| 0.021516
| 0.033811
| 0.164959
| 0.151639
| 0.151639
| 0.151639
| 0.151639
| 0.151639
| 0
| 0.00276
| 0.223016
| 1,399
| 37
| 98
| 37.810811
| 0.895124
| 0.501787
| 0
| 0.125
| 0
| 0
| 0.06422
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.1875
| 0.0625
| 0.4375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e09a68dcd0689137530fb16dbc35c12c92deee70
| 36,880
|
py
|
Python
|
yggdrasil/drivers/MatlabModelDriver.py
|
astro-friedel/yggdrasil
|
5ecbfd083240965c20c502b4795b6dc93d94b020
|
[
"BSD-3-Clause"
] | null | null | null |
yggdrasil/drivers/MatlabModelDriver.py
|
astro-friedel/yggdrasil
|
5ecbfd083240965c20c502b4795b6dc93d94b020
|
[
"BSD-3-Clause"
] | null | null | null |
yggdrasil/drivers/MatlabModelDriver.py
|
astro-friedel/yggdrasil
|
5ecbfd083240965c20c502b4795b6dc93d94b020
|
[
"BSD-3-Clause"
] | null | null | null |
import subprocess
import uuid as uuid_gen
import logging
from datetime import datetime
import os
import psutil
import warnings
import weakref
from yggdrasil import backwards, tools, platform, serialize
from yggdrasil.languages import get_language_dir
from yggdrasil.config import ygg_cfg
from yggdrasil.drivers.InterpretedModelDriver import InterpretedModelDriver
from yggdrasil.tools import TimeOut, sleep
logger = logging.getLogger(__name__)
# Importing matlab.engine is skipped entirely when Matlab support is disabled
# in the yggdrasil config or when running on Windows; in those cases the
# module-level flag _matlab_engine_installed records that the engine is absent.
try:  # pragma: matlab
    # Config value is stored as a string; compare case-insensitively.
    disable_engine = ygg_cfg.get('matlab', 'disable_engine', 'False').lower()
    if platform._is_win or (disable_engine == 'true'):
        _matlab_engine_installed = False
        if not tools.is_subprocess():
            logger.debug("matlab.engine disabled")
    else:
        import matlab.engine
        _matlab_engine_installed = True
except ImportError:  # pragma: no matlab
    logger.debug("Could not import matlab.engine. "
                 + "Matlab support for using a sharedEngine will be disabled.")
    _matlab_engine_installed = False
# Directory containing yggdrasil's Matlab language files
_top_lang_dir = get_language_dir('matlab')
# Presumably maps Matlab release -> supported Python versions — TODO confirm
# against the MathWorks compatibility matrix; not read anywhere in this file.
_compat_map = {
    'R2015b': ['2.7', '3.3', '3.4'],
    'R2017a': ['2.7', '3.3', '3.4', '3.5'],
    'R2017b': ['2.7', '3.3', '3.4', '3.5', '3.6'],
    'R2018b': ['2.7', '3.3', '3.4', '3.5', '3.6']}
def kill_all():
    r"""Kill all Matlab shared engines."""
    if platform._is_win:  # pragma: windows
        command = 'taskkill /F /IM matlab.engine.shareEngine /T'
    else:
        command = 'pkill -f matlab.engine.shareEngine'
    os.system(command)
def locate_matlab_engine_processes():  # pragma: matlab
    r"""Get all of the active matlab sharedEngine processes.
    Returns:
        list: Active matlab sharedEngine processes.
    """
    engines = []
    for proc in psutil.process_iter():
        # Cache the queried attributes on the process object so callers can
        # read proc.info, mirroring psutil's documented .info convention.
        proc.info = proc.as_dict(attrs=['name', 'pid', 'cmdline'])
        named_matlab = (proc.info['name'] == 'MATLAB')
        shares_engine = ('matlab.engine.shareEngine' in proc.info['cmdline'])
        if named_matlab and shares_engine:
            engines.append(proc)  # proc.info['pid'])
    return engines
def is_matlab_running():
    r"""Determine if there is a Matlab engine running.
    Returns:
        bool: True if there is a Matlab engine running, False otherwise.
    """
    if not _matlab_engine_installed:  # pragma: no matlab
        return False
    # pragma: matlab — find_matlab() lists the names of shared engines.
    return len(matlab.engine.find_matlab()) != 0
def locate_matlabroot():  # pragma: matlab
    r"""Find directory that servers as matlab root.
    Returns:
        str: Full path to matlabroot directory.
    """
    matlab_info = MatlabModelDriver.get_matlab_info()
    return matlab_info[0]
def install_matlab_engine():  # pragma: matlab
    r"""Install the MATLAB engine API for Python.

    Runs ``python setup.py install`` inside Matlab's bundled
    ``extern/engines/python`` directory when the engine is not yet importable.
    """
    if not _matlab_engine_installed:
        mtl_root = locate_matlabroot()
        mtl_setup = os.path.join(mtl_root, 'extern', 'engines', 'python')
        # BUG FIX: the command must be an argument list. Passing the whole
        # command as one string without shell=True makes subprocess look for
        # an executable literally named "python setup.py install" on POSIX,
        # raising FileNotFoundError.
        cmd = ['python', 'setup.py', 'install']
        result = subprocess.check_output(cmd, cwd=mtl_setup)
        print(result)
def start_matlab_engine(skip_connect=False, timeout=None):  # pragma: matlab
    r"""Start a Matlab shared engine session inside a detached screen
    session.
    Args:
        skip_connect (bool, optional): If True, the engine is not connected.
            Defaults to False.
        timeout (int, optional): Time (in seconds) that should be waited for
            Matlab to start up. Defaults to None and is set from the config
            option ('matlab', 'startup_waittime_s').
    Returns:
        tuple: Information on the started session including the name of the
            screen session running matlab, the created engine object, the name
            of the matlab session, and the matlab engine process.
    Raises:
        RuntimeError: If Matlab is not installed.
    """
    if not _matlab_engine_installed:  # pragma: no matlab
        raise RuntimeError("Matlab engine is not installed.")
    if timeout is None:
        timeout = float(ygg_cfg.get('matlab', 'startup_waittime_s', 10))
    # Snapshot existing engines/processes so the new ones can be identified
    # by set difference after startup.
    old_process = set(locate_matlab_engine_processes())
    old_matlab = set(matlab.engine.find_matlab())
    # Session name encodes a timestamp plus a per-run counter for uniqueness.
    screen_session = str('ygg_matlab' + datetime.today().strftime("%Y%j%H%M%S")
                         + '_%d' % len(old_matlab))
    try:
        # Launch Matlab detached inside GNU screen so it outlives this call;
        # -r runs the command that publishes the shared engine.
        args = ['screen', '-dmS', screen_session, '-c',
                os.path.join(_top_lang_dir, 'matlab_screenrc'),
                'matlab', '-nodisplay', '-nosplash', '-nodesktop', '-nojvm',
                '-r', '"matlab.engine.shareEngine"']
        subprocess.call(' '.join(args), shell=True)
        T = TimeOut(timeout)
        # Poll until a new shared engine appears or the timeout expires.
        while ((len(set(matlab.engine.find_matlab()) - old_matlab) == 0)
               and not T.is_out):
            logger.debug('Waiting for matlab engine to start')
            sleep(1)  # Usually 3 seconds
    except KeyboardInterrupt:  # pragma: debug
        # Tear down the screen session before propagating the interrupt.
        args = ['screen', '-X', '-S', screen_session, 'quit']
        subprocess.call(' '.join(args), shell=True)
        raise
    if (len(set(matlab.engine.find_matlab()) - old_matlab) == 0):  # pragma: debug
        raise Exception("start_matlab timed out at %f s" % T.elapsed)
    new_matlab = list(set(matlab.engine.find_matlab()) - old_matlab)[0]
    new_process = list(set(locate_matlab_engine_processes()) - old_process)[0]
    # Connect to the engine
    matlab_engine = None
    if not skip_connect:
        matlab_engine = connect_matlab_engine(new_matlab, first_connect=True)
    return screen_session, matlab_engine, new_matlab, new_process
def connect_matlab_engine(matlab_session, first_connect=False):  # pragma: matlab
    r"""Connect to Matlab engine.
    Args:
        matlab_session (str): Name of the Matlab session that should be
            connected to.
        first_connect (bool, optional): If True, this is the first time
            Python is connecting to the Matlab shared engine and certain
            environment variables should be set. Defaults to False.
    Returns:
        MatlabEngine: Matlab engine that was connected.
    """
    matlab_engine = matlab.engine.connect_matlab(matlab_session)
    # Drop any stale class definitions left over from a previous model run.
    matlab_engine.eval('clear classes;', nargout=0)
    err = backwards.StringIO()
    try:
        # Probe for the yggdrasil interface; an error means the interface
        # paths have not yet been added to this session.
        matlab_engine.eval("YggInterface('YGG_MSG_MAX');", nargout=0,
                           stderr=err)
    except BaseException:
        for x in MatlabModelDriver.paths_to_add:
            matlab_engine.addpath(x, nargout=0)
    # Expose Python's os module inside Matlab's embedded Python interpreter.
    matlab_engine.eval("os = py.importlib.import_module('os');", nargout=0)
    if not first_connect:
        if backwards.PY2:
            matlab_engine.eval("py.reload(os);", nargout=0)
        else:
            # matlab_engine.eval("py.importlib.reload(os);", nargout=0)
            pass
    return matlab_engine
def stop_matlab_engine(screen_session, matlab_engine, matlab_session,
                       matlab_process, keep_engine=False):  # pragma: matlab
    r"""Stop a Matlab shared engine session running inside a detached screen
    session.
    Args:
        screen_session (str): Name of the screen session that the shared
            Matlab session was started in.
        matlab_engine (MatlabEngine): Matlab engine that should be stopped.
        matlab_session (str): Name of Matlab session that the Matlab engine is
            connected to.
        matlab_process (psutil.Process): Process running the Matlab shared engine.
        keep_engine (bool, optional): If True, the references to the engine will be
            removed so it is not deleted. Defaults to False.
    Raises:
        RuntimeError: If Matlab is not installed.
    """
    if not _matlab_engine_installed:  # pragma: no matlab
        raise RuntimeError("Matlab engine is not installed.")
    if keep_engine and (matlab_engine is not None):
        # Quit only if the engine still holds a live connection handle.
        if '_matlab' in matlab_engine.__dict__:
            matlab_engine.quit()
        return
    # Remove weakrefs to engine to prevent stopping engine more than once
    if matlab_engine is not None:
        # Remove weak references so engine not deleted on exit
        eng_ref = weakref.getweakrefs(matlab_engine)
        for x in eng_ref:
            if x in matlab.engine._engines:
                matlab.engine._engines.remove(x)
        # Either exit the engine or remove its reference
        if matlab_session in matlab.engine.find_matlab():
            try:
                matlab_engine.eval('exit', nargout=0)
            except BaseException:
                # 'exit' tears down the engine, so an error here is expected.
                pass
        else:  # pragma: no cover
            matlab_engine.__dict__.pop('_matlab', None)
    # Stop the screen session containing the Matlab shared session
    if screen_session is not None:
        if matlab_session in matlab.engine.find_matlab():
            os.system(('screen -X -S %s quit') % screen_session)
        T = TimeOut(5)
        while ((matlab_session in matlab.engine.find_matlab())
               and not T.is_out):
            logger.debug("Waiting for matlab engine to exit")
            sleep(1)
        if (matlab_session in matlab.engine.find_matlab()):  # pragma: debug
            # Graceful shutdown timed out; kill the process directly.
            if matlab_process is not None:
                matlab_process.terminate()
                logger.error("stop_matlab_engine timed out at %f s. " % T.elapsed
                             + "Killed Matlab sharedEngine process.")
class MatlabProcess(tools.YggClass):  # pragma: matlab
    r"""Add features to mimic subprocess.Popen while running Matlab function
    asynchronously.
    Args:
        target (func): Matlab function that should be called.
        args (list, tuple): Arguments that should be passed to target.
        kwargs (dict, optional): Keyword arguments that should be passed to
            target. Defaults to empty dict.
        name (str, optional): A name for the process. Generated if not provided.
        matlab_engine (MatlabEngine, optional): MatlabEngine that should be used
            to get errors. Defaults to None and errors will not be recovered
            unless passed through stdout and stderr before shutdown.
    Attributes:
        stdout (StringIO): File like string buffer that stdout from target will
            be written to.
        stderr (StringIO): File like string buffer that stderr from target will
            be written to.
        target (func): Matlab function that should be called.
        args (list, tuple): Arguments that should be passed to target.
        kwargs (dict): Keyword arguments that should be passed to target.
        future (MatlabFutureResult): Future result from async function. This
            will be None until start is called.
        matlab_engine (MatlabEngine): MatlabEngine that should be used to get
            errors.
    Raises:
        RuntimeError: If Matlab is not installed.
    """
    def __init__(self, target, args, kwargs=None, name=None, matlab_engine=None):
        if not _matlab_engine_installed:  # pragma: no matlab
            raise RuntimeError("Matlab engine is not installed.")
        if kwargs is None:
            kwargs = {}
        self.stdout = backwards.sio.StringIO()
        self.stderr = backwards.sio.StringIO()
        # Lazily-populated caches of the buffered output; see stdout_line.
        self._stdout_line = None
        self._stderr_line = None
        self.target = target
        self.args = args
        self.kwargs = kwargs
        self.kwargs.update(nargout=0, stdout=self.stdout, stderr=self.stderr)
        self.kwargs['async'] = True  # For python 3.7 where async is reserved
        self.future = None
        self.matlab_engine = matlab_engine
        self._returncode = None
        super(MatlabProcess, self).__init__(name)
    def poll(self, *args, **kwargs):
        r"""Fake poll."""
        return self.returncode
    @property
    def stdout_line(self):
        r"""str: Output to stdout from function call."""
        if self._stdout_line is None:
            if self.stdout is not None:
                line = self.stdout.getvalue()
                if line:
                    self._stdout_line = line
        return self._stdout_line
    @property
    def stderr_line(self):
        r"""str: Output to stderr from function call."""
        if self._stderr_line is None:
            if self.stderr is not None:
                line = self.stderr.getvalue()
                if line:
                    self._stderr_line = line
        return self._stderr_line
    def print_output(self):
        r"""Print output from stdout and stderr."""
        if self.stdout_line:
            self.print_encoded(self.stdout_line, end="")
        if self.stderr_line:
            self.print_encoded(self.stderr_line, end="")
    def start(self):
        r"""Start asynchronous call."""
        self.future = self.target(*self.args, **self.kwargs)
    def is_started(self):
        r"""bool: Has start been called."""
        return (self.future is not None)
    def is_cancelled(self):
        r"""bool: Was the async call cancelled or not."""
        if self.is_started():
            try:
                return self.future.cancelled()
            except matlab.engine.EngineError:
                # The engine died; treat the call as cancelled.
                self.on_matlab_error()
                return True
            except BaseException:
                return True
        return False
    def is_done(self):
        r"""bool: Is the async call still running."""
        if self.is_started():
            try:
                return self.future.done() or self.is_cancelled()
            except matlab.engine.EngineError:
                self.on_matlab_error()
                return True
            except BaseException:
                return True
        return False
    def is_alive(self):
        r"""bool: Is the async call running."""
        if self.is_started():
            return (not self.is_done())
        return False
    @property
    def returncode(self):
        r"""int: Return code."""
        if self.is_done():
            # Any stderr output from the call is treated as failure.
            if self.stderr_line:  # or self.is_cancelled():
                return -1
            else:
                return 0
        else:
            return self._returncode
    def kill(self, *args, **kwargs):
        r"""Cancel the async call."""
        if self.is_alive():
            try:
                out = self.future.cancel()
                self.debug("Result of cancelling Matlab call?: %s", out)
            except matlab.engine.EngineError as e:
                self.debug('Matlab Engine Error: %s' % e)
                self.on_matlab_error()
            except BaseException as e:
                self.debug('Other error on kill: %s' % e)
        self.print_output()
        if self.is_alive():
            # Cancelling failed; shut down the whole engine as a last resort.
            self.info('Error killing Matlab script.')
            self.matlab_engine.quit()
            self.future = None
            self._returncode = -1
        assert(not self.is_alive())
    def on_matlab_error(self):
        r"""Actions performed on error in Matlab engine."""
        # self.print_output()
        self.debug('')
        if self.matlab_engine is not None:
            try:
                # Ask Matlab to report the most recent exception.
                self.matlab_engine.eval('exception = MException.last;', nargout=0)
                self.matlab_engine.eval('getReport(exception)')
            except matlab.engine.EngineError:
                pass
class MatlabModelDriver(InterpretedModelDriver):  # pragma: matlab
    r"""Base class for running Matlab models.
    Args:
        name (str): Driver name.
        args (str or list): Argument(s) for running the model in matlab.
            Generally, this should be the full path to a Matlab script.
        **kwargs: Additional keyword arguments are passed to parent class's
            __init__ method.
    Attributes:
        started_matlab (bool): True if the driver had to start a new matlab
            engine. False otherwise.
        screen_session (str): Screen session that Matlab was started in.
        mlengine (object): Matlab engine used to run script.
        mlsession (str): Name of the Matlab session that was started.
    Raises:
        RuntimeError: If Matlab is not installed.
    .. note:: Matlab models that call exit will shut down the shared engine.
    """
    _schema_subtype_description = ('Model is written in Matlab.')
    language = 'matlab'
    language_ext = '.m'
    base_languages = ['python']
    # Flags used when invoking the Matlab interpreter directly (no engine).
    default_interpreter_flags = ['-nodisplay', '-nosplash', '-nodesktop',
                                 '-nojvm', '-batch']
    version_flags = ["fprintf('R%s', version('-release')); exit();"]
    path_env_variable = 'MATLABPATH'
    comm_linger = (os.environ.get('YGG_MATLAB_ENGINE', '').lower() == 'true')
    send_converters = {'pandas': serialize.consolidate_array,
                       'table': serialize.consolidate_array}
    recv_converters = {'pandas': 'array'}
    # Mapping from yggdrasil type names to the Matlab types used for them.
    type_map = {
        'int': 'intX',
        'float': 'single, double',
        'string': 'char',
        'array': 'cell',
        'object': 'containers.Map',
        'boolean': 'logical',
        'null': 'NaN',
        'uint': 'uintX',
        'complex': 'complex',
        'bytes': 'char (utf-8)',
        'unicode': 'char',
        '1darray': 'mat',
        'ndarray': 'mat',
        'ply': 'containers.Map',
        'obj': 'containers.Map',
        'schema': 'containers.Map'}
    # Templates used to generate Matlab code for model wrappers.
    function_param = {
        'input': '{channel} = YggInterface(\'YggInput\', \'{channel_name}\');',
        'output': '{channel} = YggInterface(\'YggOutput\', \'{channel_name}\');',
        'recv': '[{flag_var}, {recv_var}] = {channel}.recv();',
        'send': '{flag_var} = {channel}.send({send_var});',
        'function_call': '{output_var} = {function_name}({input_var});',
        'define': '{variable} = {value};',
        'comment': '%',
        'true': 'true',
        'not': 'not',
        'indent': 2 * ' ',
        'quote': '\'',
        'print': 'disp(\'{message}\');',
        'fprintf': 'fprintf(\'{message}\', {variables});',
        'error': 'error(\'{error_msg}\');',
        'block_end': 'end;',
        'if_begin': 'if ({cond})',
        'for_begin': 'for {iter_var} = {iter_begin}:{iter_end}',
        'while_begin': 'while ({cond})',
        'break': 'break;',
        'try_begin': 'try',
        'try_except': 'catch {error_var}',
        'assign': '{name} = {value};'}
    def __init__(self, name, args, **kwargs):
        self.using_matlab_engine = _matlab_engine_installed
        if self.using_matlab_engine:
            # The engine runs the model itself, so the interpreter check
            # performed by the parent class is unnecessary.
            kwargs['skip_interpreter'] = True
        self.model_wrapper = None
        super(MatlabModelDriver, self).__init__(name, args, **kwargs)
        self.started_matlab = False
        self.screen_session = None
        self.mlengine = None
        self.mlsession = None
        self.mlprocess = None
    def parse_arguments(self, args):
        r"""Sort model arguments to determine which one is the executable
        and which ones are arguments.
        Args:
            args (list): List of arguments provided.
        """
        super(MatlabModelDriver, self).parse_arguments(args)
        model_base, model_ext = os.path.splitext(os.path.basename(self.model_file))
        wrap_base = 'wrapped_%s_%s' % (model_base, self.uuid.replace('-', '_'))
        # Matlab has a variable name limit of 62
        wrap_base = wrap_base[:min(len(wrap_base), 60)]
        self.model_wrapper = os.path.join(self.model_dir, wrap_base + model_ext)
        self.wrapper_products.append(self.model_wrapper)
    @classmethod
    def write_error_wrapper(cls, fname, try_lines, matlab_engine=None):
        r"""Write a wrapper for the model that encloses it in a try except so
        that the error can be propagated appropriately.
        Args:
            fname (str): File where the wrapper should be written.
            try_lines (list): List of lines to go in the try block.
            matlab_engine (MatlabEngine, optional): Matlab engine that will be
                used to call the wrapper. If not provided, it is assumed the
                error will be called using the Matlab interpreter on the command
                line. Defaults to None.
        Returns:
            list: The wrapper lines, but only when fname is None; otherwise
                the lines are written to fname and nothing is returned.
        """
        # Create lines based on use of engine or not
        if matlab_engine is not None:
            catch_block = ["error(e.message);"]
        else:
            catch_block = ["rethrow(e);"]
            # catch_block = ["fprintf('MATLAB ERROR:\\n%s\\n', e.message);",
            #                "disp(e.identifier);",
            #                "disp(e.stack);",
            #                "exit(0);"]
        lines = cls.write_try_except(try_lines, catch_block)
        if matlab_engine is None:
            lines.append("exit(0);")
        # Write lines
        logger.debug('Wrapper:\n\t%s', '\n\t'.join(lines))
        if fname is None:
            return lines
        else:
            if os.path.isfile(fname):  # pragma: debug
                os.remove(fname)
            with open(fname, 'w') as fd:
                fd.write('\n'.join(lines))
            logger.debug("Wrote wrapper to: %s" % fname)
    @classmethod
    def run_executable(cls, args, dont_wrap_error=False, fname_wrapper=None,
                       matlab_engine=None, **kwargs):
        r"""Run a program using the executable for this language and the
        provided arguments.
        Args:
            args (list): The program that should be run and any arguments
                that should be provided to it.
            dont_wrap_error (bool, optional): If False, the executable will be
                wrapped in a try/catch block to prevent errors from stopping
                Matlab shutdown. If True, the command will be executed as is
                with the Matlab interpreter. Defaults to False.
            fname_wrapper (str, optional): File where wrapper should be saved.
                If not provided, one is created. Defaults to None.
            matlab_engine (MatlabEngine, optional): Matlab engine that should be
                used to run the command. If not provided, the Matlab interpreter
                is used instead. Defaults to None.
            **kwargs: Additional keyword arguments are passed to
                cls.executable_command and tools.popen_nobuffer.
        Returns:
            str: Output to stdout from the run command.
        Raises:
            RuntimeError: If the language is not installed.
            RuntimeError: If there is an error when running the command.
        """
        # Strip file if first argument is a file
        if os.path.isfile(args[0]):
            kwargs.setdefault('working_dir', os.path.dirname(args[0]))
            args = [os.path.splitext(os.path.basename(args[0]))[0]] + args[1:]
        # Write wrapper
        if (not dont_wrap_error) and (len(args) > 0):
            if len(args) == 1:
                # TODO: Will this work if there is a function defined in the
                # script?
                try_block = [args[0]]
                if not try_block[0].endswith(';'):
                    try_block[0] += ';'
            else:
                # Put quotes around arguments since they would be strings when
                # passed from the command line
                func_call = "%s('%s'" % (args[0], args[1])
                for a in args[2:]:
                    func_call += (", '%s'" % a)
                func_call += ');'
                try_block = [func_call]
            if fname_wrapper is None:
                fname_wrapper = 'wrapper_%s%s' % (str(uuid_gen.uuid4()),
                                                  cls.language_ext[0])
                fname_wrapper = fname_wrapper.replace('-', '_')
                working_dir = kwargs.get('working_dir', kwargs.get('cwd', None))
                if working_dir is not None:
                    fname_wrapper = os.path.join(working_dir, fname_wrapper)
            cls.write_error_wrapper(fname_wrapper, try_block,
                                    matlab_engine=matlab_engine)
            assert(os.path.isfile(fname_wrapper))
            # Matlab is invoked with the wrapper's basename (no extension).
            args = [os.path.splitext(os.path.basename(fname_wrapper))[0]]
        # Call base, catching error to remove temp wrapper
        try:
            if matlab_engine is None:
                kwargs['for_matlab'] = True
                out = super(MatlabModelDriver, cls).run_executable(args, **kwargs)
            else:
                if kwargs.get('debug_flags', None):  # pragma: debug
                    logger.warn("Debugging via valgrind, strace, etc. disabled "
                                "for Matlab when using a Matlab shared engine.")
                assert(kwargs.get('return_process', False))
                # Add environment variables
                env = kwargs.get('env', {})
                old_env = {}
                new_env_str = ''
                for k, v in env.items():
                    old_env[k] = matlab_engine.getenv(k)
                    matlab_engine.setenv(k, v, nargout=0)
                    new_env_str += "'%s', %s, " % (k, repr(v))
                # Mirror the env changes into Matlab's embedded Python.
                matlab_engine.eval('new_env = py.dict(pyargs(%s));'
                                   % new_env_str[:-2], nargout=0)
                matlab_engine.eval('os.environ.update(new_env);', nargout=0)
                # Create matlab process using Matlab engine
                out = MatlabProcess(name=args[0] + '.MatlabProcess',
                                    target=getattr(matlab_engine, args[0]),
                                    args=args[1:], matlab_engine=matlab_engine)
                out.start()
        finally:
            if (((not kwargs.get('return_process', False))
                 and (fname_wrapper is not None))):
                os.remove(fname_wrapper)
        return out
    @classmethod
    def language_version(cls):
        r"""Determine the version of this language.
        Returns:
            str: Version of compiler/interpreter for this language.
        """
        return cls.get_matlab_info()[1]
    @classmethod
    def executable_command(cls, args, **kwargs):
        r"""Compose a command for running a program in this language with the
        provied arguments. If not already present, the interpreter command and
        interpreter flags are prepended to the provided arguments.
        Args:
            args (list): The program that returned command should run and any
                arguments that should be provided to it.
            **kwargs: Additional keyword arguments are ignored.
        Returns:
            list: Arguments composing the command required to run the program
                from the command line using the interpreter for this language.
        """
        # if kwargs.get('exec_type', 'interpreter') == 'interpreter':
        #     args = ["\"%s\"" % (' '.join(args))]
        return super(MatlabModelDriver, cls).executable_command(args, **kwargs)
    @classmethod
    def configure(cls, cfg):
        r"""Add configuration options for this language. This includes locating
        any required external libraries and setting option defaults.
        Args:
            cfg (YggConfigParser): Config class that options should be set for.
        Returns:
            list: Section, option, description tuples for options that could not
                be set.
        """
        out = InterpretedModelDriver.configure.__func__(cls, cfg)
        # NOTE(review): the adjacent-literal concatenation below is missing a
        # space between 'start' and 'before' in the rendered description.
        opts = {
            'startup_waittime_s': [('The time allowed for a Matlab engine to start'
                                    'before timing out and reporting an error.'),
                                   '10'],
            'version': ['The version (release number) of installed Matlab.', ''],
            'matlabroot': ['The path to the default installation of matlab.', '']}
        if cfg.get(cls.language, 'disable', 'False').lower() != 'true':
            try:
                opts['matlabroot'][1], opts['version'][1] = cls.get_matlab_info()
            except RuntimeError:  # pragma: no matlab
                pass
        for k in opts.keys():
            if not cfg.has_option(cls.language, k):
                if opts[k][1]:  # pragma: matlab
                    cfg.set(cls.language, k, opts[k][1])
                else:
                    out.append((cls.language, k, opts[k][0]))
        return out
    @classmethod
    def get_matlab_info(cls):  # pragma: matlab
        r"""Determine the root directory where Matlab is installed and the version
        that is installed (if Matlab is installed at all). This will fail if Matlab
        is not installed, cannot be started, or does not operate as expected.
        Returns:
            tuple: Matlab root directory and Matlab version string.
        Raises:
            RuntimeError: If Matlab cannot be started or the root directory or
                release cannot be determiend.
        """
        # Sentinel string used to delimit matlabroot/version in the output.
        mtl_id = '=MATLABROOT='
        cmd = ("fprintf('" + mtl_id + "%s" + mtl_id + "R%s" + mtl_id + "'"
               + ",matlabroot,version('-release'));")
        mtl_proc = cls.run_executable([cmd])
        mtl_id = backwards.match_stype(mtl_proc, mtl_id)
        if mtl_id not in mtl_proc:  # pragma: debug
            raise RuntimeError(("Could not locate ID string (%s) in "
                                "output (%s).") % (mtl_id, mtl_proc))
        parts = mtl_proc.split(mtl_id)
        if len(parts) < 3:  # pragma: debug
            raise RuntimeError(("Could not get matlabroot/version from "
                                "output (%s).") % (mtl_proc))
        matlabroot = backwards.as_str(parts[-3])
        release = backwards.as_str(parts[-2])
        return matlabroot, release
    def start_matlab_engine(self):
        r"""Start matlab session and connect to it."""
        ml_attr = ['screen_session', 'mlengine', 'mlsession', 'mlprocess']
        attempt_connect = (len(matlab.engine.find_matlab()) != 0)
        # Connect to matlab if a session exists
        if attempt_connect:
            for mlsession in matlab.engine.find_matlab():
                try:
                    self.debug("Trying to connect to session %s", mlsession)
                    self.mlengine = connect_matlab_engine(mlsession)
                    self.mlsession = mlsession
                    self.debug("Connected to existing shared engine: %s",
                               self.mlsession)
                    break
                except matlab.engine.EngineError:
                    pass
        # Start if not running or connect failed
        if self.mlengine is None:
            if attempt_connect:
                self.debug("Starting a matlab shared engine (connect failed)")
            else:
                self.debug("Starting a matlab shared engine (none existing)")
            out = start_matlab_engine()
            for i, attr in enumerate(ml_attr):
                setattr(self, attr, out[i])
            self.started_matlab = True
        # Add things to Matlab environment
        self.mlengine.addpath(self.model_dir, nargout=0)
        self.debug("Connected to matlab session '%s'" % self.mlsession)
    def before_start(self):
        r"""Actions to perform before the run loop."""
        kwargs = dict(fname_wrapper=self.model_wrapper)
        if self.using_matlab_engine:
            self.start_matlab_engine()
            kwargs.update(matlab_engine=self.mlengine,
                          no_queue_thread=True)
        else:
            kwargs.update(working_dir=self.model_dir)
        with self.lock:
            if self.using_matlab_engine and (self.mlengine is None):  # pragma: debug
                self.debug('Matlab engine not set. Stopping')
                return
            super(MatlabModelDriver, self).before_start(**kwargs)
    def run_loop(self):
        r"""Loop to check if model is still running and forward output."""
        if self.using_matlab_engine:
            self.model_process.print_output()
            self.periodic_debug('matlab loop', period=100)('Looping')
            if self.model_process.is_done():
                self.model_process.print_output()
                self.set_break_flag()
                try:
                    # result() re-raises any error from the Matlab call.
                    self.model_process.future.result()
                    self.model_process.print_output()
                except matlab.engine.EngineError:
                    self.model_process.print_output()
                except BaseException:
                    self.model_process.print_output()
                    self.exception("Error running model.")
            else:
                self.sleep()
        else:
            super(MatlabModelDriver, self).run_loop()
    def after_loop(self):
        r"""Actions to perform after run_loop has finished. Mainly checking
        if there was an error and then handling it."""
        if self.using_matlab_engine:
            if (self.model_process is not None) and self.model_process.is_alive():
                self.info("Model process thread still alive")
                self.kill_process()
                return
        super(MatlabModelDriver, self).after_loop()
        if self.using_matlab_engine:
            with self.lock:
                self.cleanup()
    def cleanup(self):
        r"""Close the Matlab session and engine."""
        if self.using_matlab_engine:
            try:
                stop_matlab_engine(self.screen_session, self.mlengine,
                                   self.mlsession, self.mlprocess,
                                   keep_engine=(not self.started_matlab))
            # NOTE(review): SystemError is already a subclass of Exception,
            # so the tuple is redundant.
            except (SystemError, Exception) as e:  # pragma: debug
                self.error('Failed to exit matlab engine')
                self.raise_error(e)
            self.debug('Stopped Matlab')
            self.screen_session = None
            self.mlsession = None
            self.started_matlab = False
            self.mlengine = None
            self.mlprocess = None
        super(MatlabModelDriver, self).cleanup()
    def check_exits(self):
        r"""Check to make sure the program dosn't contain any exits as exits
        will shut down the Matlab engine as well as the program.
        Raises:
            RuntimeError: If there are any exit calls in the file.
        """
        has_exit = False
        with open(self.raw_model_file, 'r') as fd:
            for i, line in enumerate(fd):
                if line.strip().startswith('exit'):
                    has_exit = True
                    break
        if self.using_matlab_engine and has_exit:
            warnings.warn(
                "Line %d in '%s' contains an " % (
                    i, self.raw_model_file)
                + "'exit' call which will exit the MATLAB engine "
                + "such that it cannot be reused. Please replace 'exit' "
                + "with a return or error.")
    def set_env(self):
        r"""Get environment variables that should be set for the model process.
        Returns:
            dict: Environment variables for the model process.
        """
        out = super(MatlabModelDriver, self).set_env()
        if self.using_matlab_engine:
            out['YGG_MATLAB_ENGINE'] = 'True'
        # TODO: Move the following to InterpretedModelDriver once another
        # language sets path_env_variable
        path_list = []
        prev_path = out.pop(self.path_env_variable, '')
        if prev_path:
            path_list.append(prev_path)
        if isinstance(self.paths_to_add, list):
            for x in self.paths_to_add:
                if x not in prev_path:
                    path_list.append(x)
        path_list.append(self.model_dir)
        if path_list:
            out[self.path_env_variable] = os.pathsep.join(path_list)
        return out
    @classmethod
    def comm_atexit(cls, comm):
        r"""Operations performed on comm at exit including draining receive.
        Args:
            comm (CommBase): Communication object.
        """
        if comm.direction == 'recv':
            # Drain any pending messages before closing.
            while comm.recv(timeout=0)[0]:
                comm.sleep()
        else:
            comm.send_eof()
        comm.linger_close()
    @classmethod
    def decode_format(cls, format_str):
        r"""Method for decoding format strings created in this language.
        Args:
            format_str (str): Encoded format string.
        Returns:
            str: Decoded format string.
        """
        return backwards.decode_escape(format_str)
    @classmethod
    def prepare_output_variables(cls, vars_list):
        r"""Concatenate a set of output variables such that it can be passed as
        a single string to the function_call parameter.
        Args:
            vars_list (list): List of variable names to concatenate as output
                from a function call.
        Returns:
            str: Concatentated variables list.
        """
        out = super(MatlabModelDriver, cls).prepare_output_variables(vars_list)
        # Matlab requires brackets around multiple output variables.
        if isinstance(vars_list, list) and (len(vars_list) > 1):
            out = '[%s]' % out
        return out
| 40.086957
| 85
| 0.585195
| 4,360
| 36,880
| 4.81445
| 0.140596
| 0.076033
| 0.009718
| 0.011529
| 0.209852
| 0.146777
| 0.104235
| 0.073889
| 0.053404
| 0.0454
| 0
| 0.005067
| 0.315049
| 36,880
| 919
| 86
| 40.130577
| 0.825898
| 0.285466
| 0
| 0.226446
| 0
| 0.001653
| 0.135531
| 0.012921
| 0
| 0
| 0
| 0.002176
| 0.004959
| 1
| 0.064463
| false
| 0.008264
| 0.028099
| 0
| 0.171901
| 0.023141
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e09ce665126d9b3d2e1a629422eb3823667146fa
| 3,453
|
py
|
Python
|
ted_sws/mapping_suite_processor/services/conceptual_mapping_generate_sparql_queries.py
|
meaningfy-ws/ted-sws
|
d1e351eacb2900f84ec7edc457e49d8202fbaff5
|
[
"Apache-2.0"
] | 1
|
2022-03-21T12:32:52.000Z
|
2022-03-21T12:32:52.000Z
|
ted_sws/mapping_suite_processor/services/conceptual_mapping_generate_sparql_queries.py
|
meaningfy-ws/ted-sws
|
d1e351eacb2900f84ec7edc457e49d8202fbaff5
|
[
"Apache-2.0"
] | 24
|
2022-02-10T10:43:56.000Z
|
2022-03-29T12:36:21.000Z
|
ted_sws/mapping_suite_processor/services/conceptual_mapping_generate_sparql_queries.py
|
meaningfy-ws/ted-sws
|
d1e351eacb2900f84ec7edc457e49d8202fbaff5
|
[
"Apache-2.0"
] | null | null | null |
import pathlib
from typing import Iterator
import pandas as pd
from ted_sws.resources.prefixes import PREFIXES_DEFINITIONS
import re
# Worksheet in conceptual_mappings.xlsx that holds the mapping rules
CONCEPTUAL_MAPPINGS_RULES_SHEET_NAME = "Rules"
# Column headers of the "Rules" sheet ((M)/(O) presumably mark
# mandatory/optional columns — TODO confirm against the workbook)
RULES_SF_FIELD_ID = 'Standard Form Field ID (M)'
RULES_SF_FIELD_NAME = 'Standard Form Field Name (M)'
RULES_E_FORM_BT_ID = 'eForm BT-ID (O)'
RULES_E_FORM_BT_NAME = 'eForm BT Name (O)'
RULES_BASE_XPATH = 'Base XPath (for anchoring) (M)'
RULES_FIELD_XPATH = 'Field XPath (M)'
RULES_CLASS_PATH = 'Class path (M)'
RULES_PROPERTY_PATH = 'Property path (M)'
# Default file-name stem for generated SPARQL query files
DEFAULT_RQ_NAME = 'sparql_query_'
# Matches a SPARQL prefix name ("foo" in "foo:bar") at the start of the
# string or after whitespace. The capture group is optional, so a bare ":"
# (default prefix) yields an empty-string match.
SPARQL_PREFIX_PATTERN = re.compile('(?:\\s+|^)(\\w+)?:')
# Template for one SPARQL PREFIX declaration line
SPARQL_PREFIX_LINE = 'PREFIX {prefix}: <{value}>'
def get_sparql_prefixes(sparql_q: str) -> set:
    """Return the set of prefix names referenced in a SPARQL query string."""
    prefix_names = SPARQL_PREFIX_PATTERN.findall(sparql_q)
    return set(prefix_names)
def sparql_validation_generator(data: pd.DataFrame) -> Iterator[str]:
    """
    This function generates SPARQL ASK queries based on data in the dataframe.

    Each dataframe row is one conceptual-mapping rule; the yielded string is a
    complete .rq file body: a #title line, a #description line, the PREFIX
    declarations needed by the rule's property path, and the ASK query itself.

    :param data: rules dataframe whose columns are the RULES_* header constants
    :return: iterator of SPARQL query texts, one per rule
    """
    for index, row in data.iterrows():
        sf_field_id = row[RULES_SF_FIELD_ID]
        sf_field_name = row[RULES_SF_FIELD_NAME]
        e_form_bt_id = row[RULES_E_FORM_BT_ID]
        e_form_bt_name = row[RULES_E_FORM_BT_NAME]
        base_xpath = row[RULES_BASE_XPATH]
        field_xpath = row[RULES_FIELD_XPATH]
        class_path = row[RULES_CLASS_PATH]
        property_path = row[RULES_PROPERTY_PATH]
        # One PREFIX line per prefix used by the property path; unknown
        # prefixes render as <None> (PREFIXES_DEFINITIONS.get returns None).
        prefixes = [SPARQL_PREFIX_LINE.format(
            prefix=prefix, value=PREFIXES_DEFINITIONS.get(prefix)
        ) for prefix in get_sparql_prefixes(property_path)]
        yield f"#title: {sf_field_id} - {sf_field_name}\n" \
              f"#description: “{sf_field_id} - {sf_field_name}” in SF corresponds to “{e_form_bt_id} {e_form_bt_name}” in eForms. The corresponding XML element is {base_xpath}{field_xpath}. The expected ontology instances are epo: {class_path} .\n" \
              "\n" + "\n".join(prefixes) + "\n\n" \
              f"ASK WHERE {{ {property_path} }}"
def mapping_suite_processor_generate_sparql_queries(conceptual_mappings_file_path: pathlib.Path,
                                                    output_sparql_queries_folder_path: pathlib.Path,
                                                    rq_name: str = DEFAULT_RQ_NAME):
    """
    Read the "Rules" sheet of conceptual_mappings.xlsx and write one SPARQL
    validation query file (<rq_name><index>.rq) per rule into the given folder.

    :param conceptual_mappings_file_path: path to conceptual_mappings.xlsx
    :param output_sparql_queries_folder_path: destination folder (created if missing)
    :param rq_name: file-name prefix for the generated .rq files
    :return: None
    """
    with open(conceptual_mappings_file_path, 'rb') as excel_file:
        rules_df = pd.read_excel(excel_file, sheet_name=CONCEPTUAL_MAPPINGS_RULES_SHEET_NAME)
    # The first spreadsheet row carries the real column headers; promote it
    # and drop it from the data, then keep only rows that define a property path.
    rules_df.columns = rules_df.iloc[0]
    rules_df = rules_df[1:]
    rules_df = rules_df[rules_df[RULES_PROPERTY_PATH].notnull()]
    output_sparql_queries_folder_path.mkdir(parents=True, exist_ok=True)
    for query_index, sparql_query in enumerate(sparql_validation_generator(rules_df)):
        query_file_path = output_sparql_queries_folder_path / f"{rq_name}{query_index}.rq"
        query_file_path.write_text(sparql_query)
| 46.662162
| 250
| 0.705763
| 470
| 3,453
| 4.780851
| 0.261702
| 0.12016
| 0.112595
| 0.100134
| 0.223409
| 0.129506
| 0.073431
| 0.073431
| 0
| 0
| 0
| 0.000728
| 0.20417
| 3,453
| 73
| 251
| 47.30137
| 0.816958
| 0.091804
| 0
| 0
| 0
| 0.019231
| 0.18102
| 0.00845
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057692
| false
| 0
| 0.096154
| 0
| 0.173077
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0a0f22bfda8fa26025e7c4065f5e2b941f28ecf
| 3,892
|
py
|
Python
|
src/controllers/serie.py
|
igormotta92/gta-desafio-python-flask-api
|
7c048239359e8a21d777109bdb0d58b6c2c18450
|
[
"MIT"
] | null | null | null |
src/controllers/serie.py
|
igormotta92/gta-desafio-python-flask-api
|
7c048239359e8a21d777109bdb0d58b6c2c18450
|
[
"MIT"
] | null | null | null |
src/controllers/serie.py
|
igormotta92/gta-desafio-python-flask-api
|
7c048239359e8a21d777109bdb0d58b6c2c18450
|
[
"MIT"
] | null | null | null |
# https://stackoverflow.com/questions/3300464/how-can-i-get-dict-from-sqlite-query
# from flask import Flask
from flask_restful import Resource, reqparse
from src.model.serie import SerieModel
from src.server.instance import server
from db import db
# books_db = [{"id": 0, "title": "War and Peace"}, {"id": 1, "title": "Clean Code"}]
api = server.api
class SeriesController(Resource):
    """Registers the serie resources on the shared Flask-RESTful API object."""

    @classmethod
    def routes(cls):
        """Attach the /series endpoints to the API (call once at startup)."""
        # `cls`, not `self`: this is a classmethod (idiom fix; callers invoke
        # SeriesController.routes() with no arguments, so nothing changes).
        api.add_resource(Series, "/series/<int:id>")
        api.add_resource(SeriesList, "/series")
class Series(Resource):
    """CRUD endpoints for a single serie addressed by its integer id."""

    def get(self, id):
        """Return the serie with the given id, or an empty 204 if absent."""
        SerieModel.setConnectDataBase(db)
        serie = SerieModel.find_by_id(id)
        if not serie:
            # BUG FIX: was `return {serie}, 204` — a *set literal* containing
            # the serie, which is not JSON-serializable (and raises TypeError
            # if `serie` is a dict, since dicts are unhashable).  204 means
            # "no content", so return no body, matching put().
            return None, 204
        return serie

    def put(self, id):
        """Update a serie; 204 when missing, 200 + Location header on success."""
        SerieModel.setConnectDataBase(db)
        serie = SerieModel.find_by_id_build(id)
        if not serie:
            return None, 204
        # Expected columns: "title" str, "resume" str, "genre" str,
        # "rating" int (1..5), "season" int
        parser = reqparse.RequestParser()
        parser.add_argument(
            "title", type=str, required=True, help="Title cannot be blank"
        )
        parser.add_argument(
            "resume", type=str, required=True, help="Resume cannot be blank"
        )
        parser.add_argument(
            "rating",
            type=int,
            choices=range(1, 6),
            required=True,
            help="rating cannot be blank or range invalided",
        )
        parser.add_argument(
            "genre", type=str, required=True, help="Genre cannot be blank"
        )
        parser.add_argument(
            "season", type=int, required=True, help="Season cannot be blank"
        )
        data = parser.parse_args()
        # Copy the validated fields onto the model instance before persisting.
        serie.title = data.title
        serie.resume = data.resume
        serie.genre = data.genre
        serie.rating = data.rating
        serie.season = data.season
        try:
            serie.update()
        except Exception as error:
            return {"Error": str(error)}, 400
        return None, 200, {"Location": f"http://127.0.0.1:5000/series/{id}"}

    def delete(self, id):
        """Delete a serie; returns the deleted record, or empty 204 if absent."""
        SerieModel.setConnectDataBase(db)
        serie = SerieModel.find_by_id_build(id)
        if not serie:
            return {}, 204
        serie.delete()
        return serie.to_dict(), 200
class SeriesList(Resource):
    """Collection endpoints: list all series and create a new serie."""

    def get(self):
        """Return every serie, or a 400 with the error message on failure."""
        SerieModel.setConnectDataBase(db)
        try:
            series = SerieModel.find_all()
        except Exception as error:
            return {"Error": str(error)}, 400
        return series

    def post(self):
        """Create a serie; 201 + Location header of the new resource."""
        SerieModel.setConnectDataBase(db)
        # Expected columns: "title" str, "resume" str, "genre" str,
        # "rating" int (1..5), "season" int
        parser = reqparse.RequestParser()
        parser.add_argument(
            "title", type=str, required=True, help="Title cannot be blank"
        )
        parser.add_argument(
            "resume", type=str, required=True, help="Resume cannot be blank"
        )
        parser.add_argument(
            "genre", type=str, required=True, help="Genre cannot be blank"
        )
        parser.add_argument(
            "rating",
            type=int,
            required=True,
            choices=range(1, 6),
            help="rating cannot be blank or range invalided",
        )
        parser.add_argument(
            # CONSISTENCY FIX: was type=str here while Series.put() and the
            # documented schema declare "season" as an int.
            "season", type=int, required=True, help="Season cannot be blank"
        )
        data = parser.parse_args()
        serie = SerieModel().build(
            data.title, data.resume, data.genre, data.rating, data.season
        )
        try:
            lastid = serie.insert().lastrowid
        except Exception as error:
            return {"Error": str(error)}, 400
        return None, 201, {"Location": f"http://127.0.0.1:5000/series/{lastid}"}
| 29.938462
| 93
| 0.573741
| 442
| 3,892
| 4.975113
| 0.230769
| 0.040928
| 0.077308
| 0.060482
| 0.589359
| 0.570714
| 0.570714
| 0.570714
| 0.570714
| 0.532515
| 0
| 0.022305
| 0.308839
| 3,892
| 129
| 94
| 30.170543
| 0.795167
| 0.095067
| 0
| 0.494949
| 0
| 0
| 0.123717
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.060606
| false
| 0
| 0.040404
| 0
| 0.242424
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0a7f01f58bb59078e58b00112eda388117b8294
| 1,590
|
py
|
Python
|
python-3.6.0/Doc/includes/email-unpack.py
|
emacslisp/python
|
5b89ddcc504108f0dfa1081e338e6475cf6ccd2f
|
[
"Apache-2.0"
] | 854
|
2017-09-11T16:42:28.000Z
|
2022-03-27T14:17:09.000Z
|
python-3.6.0/Doc/includes/email-unpack.py
|
emacslisp/python
|
5b89ddcc504108f0dfa1081e338e6475cf6ccd2f
|
[
"Apache-2.0"
] | 164
|
2017-09-24T20:40:32.000Z
|
2021-10-30T01:35:05.000Z
|
python-3.6.0/Doc/includes/email-unpack.py
|
emacslisp/python
|
5b89ddcc504108f0dfa1081e338e6475cf6ccd2f
|
[
"Apache-2.0"
] | 73
|
2017-09-13T18:07:48.000Z
|
2022-03-17T13:02:29.000Z
|
#!/usr/bin/env python3
"""Unpack a MIME message into a directory of files."""
import os
import email
import mimetypes
from email.policy import default
from argparse import ArgumentParser
def main():
    """Unpack each non-container part of a MIME message into its own file."""
    parser = ArgumentParser(description="""\
Unpack a MIME message into a directory of files.
""")
    parser.add_argument('-d', '--directory', required=True,
                        help="""Unpack the MIME message into the named
                        directory, which will be created if it doesn't already
                        exist.""")
    parser.add_argument('msgfile')
    args = parser.parse_args()

    # Parse the message up front so a bad input fails before any mkdir.
    with open(args.msgfile, 'rb') as source:
        message = email.message_from_binary_file(source, policy=default)

    # EAFP: attempt the mkdir and tolerate a pre-existing directory.
    try:
        os.mkdir(args.directory)
    except FileExistsError:
        pass

    part_number = 1
    for part in message.walk():
        # multipart/* are just containers
        if part.get_content_maintype() == 'multipart':
            continue
        # Applications should really sanitize the given filename so that an
        # email message can't be used to overwrite important files
        out_name = part.get_filename()
        if not out_name:
            guessed = mimetypes.guess_extension(part.get_content_type())
            # Use a generic bag-of-bits extension when the type is unknown.
            out_name = 'part-%03d%s' % (part_number, guessed if guessed else '.bin')
            part_number += 1
        with open(os.path.join(args.directory, out_name), 'wb') as sink:
            sink.write(part.get_payload(decode=True))


if __name__ == '__main__':
    main()
| 29.444444
| 78
| 0.611321
| 195
| 1,590
| 4.876923
| 0.538462
| 0.029443
| 0.047319
| 0.037855
| 0.082019
| 0.082019
| 0.082019
| 0.082019
| 0.082019
| 0
| 0
| 0.004429
| 0.289937
| 1,590
| 53
| 79
| 30
| 0.83791
| 0.164151
| 0
| 0
| 0
| 0
| 0.193328
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027778
| false
| 0.027778
| 0.138889
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0a80310257c1b06b4c2e9dcba5929214b903c35
| 1,400
|
py
|
Python
|
src/streetview/logging_facility.py
|
juliantrue/Streetview-Segmenting
|
337740e6ebd2284c880ace09a11032c5914b39a4
|
[
"MIT"
] | 1
|
2021-02-27T07:39:05.000Z
|
2021-02-27T07:39:05.000Z
|
src/streetview/logging_facility.py
|
juliantrue/Streetview-Segmenting
|
337740e6ebd2284c880ace09a11032c5914b39a4
|
[
"MIT"
] | null | null | null |
src/streetview/logging_facility.py
|
juliantrue/Streetview-Segmenting
|
337740e6ebd2284c880ace09a11032c5914b39a4
|
[
"MIT"
] | 1
|
2021-12-06T23:35:34.000Z
|
2021-12-06T23:35:34.000Z
|
import sys, os
import logging
import datetime
# Logger name shared by the whole streetview package.
module_name = 'Streetview_Module'
debug_mode = True


class LoggingWrapper(object):
    """Configure the module-wide logger with a DEBUG file/stream handler and
    an ERROR console handler.

    NOTE(review): each instantiation adds another pair of handlers to the same
    named logger, so constructing this more than once duplicates log lines —
    presumably it is intended to be created exactly once.
    """

    def __init__(self, log_folder_path=None):
        """Set up handlers.

        :param log_folder_path: directory for the .log file; when None the
            detailed handler writes to stderr instead of a file.
        """
        self.debug_mode = debug_mode
        # Create (or fetch) the logger with the module name.
        logger = logging.getLogger(module_name)
        logger.setLevel(logging.DEBUG)
        # File handler which logs even debug messages.
        now = datetime.datetime.now()
        # BUG FIX: the original '{}{}{}{}{}{}'.format(...) did not zero-pad,
        # so e.g. 2021-11-2 01:02:03 and 2021-1-12 01:02:03 could produce
        # ambiguous/colliding names.  strftime pads every component.
        log_file = now.strftime('%Y%m%d%H%M%S') + '.log'
        # If no folder provided, output to stderr instead of a file.
        if log_folder_path is None:
            fh = logging.StreamHandler(sys.stderr)
        else:
            log_file = os.path.join(log_folder_path, log_file)
            fh = logging.FileHandler(log_file)
        fh.setLevel(logging.DEBUG)
        # Console handler with a higher log level.
        ch = logging.StreamHandler()
        ch.setLevel(logging.ERROR)
        # Same formatter on both handlers.
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        fh.setFormatter(formatter)
        ch.setFormatter(formatter)
        logger.addHandler(fh)
        logger.addHandler(ch)
| 32.55814
| 93
| 0.597857
| 160
| 1,400
| 5.10625
| 0.43125
| 0.034272
| 0.047736
| 0.041616
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.31
| 1,400
| 42
| 94
| 33.333333
| 0.845756
| 0.173571
| 0
| 0
| 0
| 0
| 0.073913
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0
| 0.111111
| 0
| 0.185185
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0ab0941f48814dab8b198e84e5c5153cca3e066
| 7,827
|
py
|
Python
|
sandbox_api/asandbox.py
|
PremierLangage/sandbox-api
|
7150ddcb92ac2304ff1d7b23571ec5e20459747b
|
[
"MIT"
] | 4
|
2020-01-27T19:06:05.000Z
|
2021-06-01T08:27:30.000Z
|
sandbox_api/asandbox.py
|
qcoumes/sandbox-api
|
7150ddcb92ac2304ff1d7b23571ec5e20459747b
|
[
"MIT"
] | null | null | null |
sandbox_api/asandbox.py
|
qcoumes/sandbox-api
|
7150ddcb92ac2304ff1d7b23571ec5e20459747b
|
[
"MIT"
] | null | null | null |
# asandbox.py
#
# Authors:
# - Coumes Quentin <coumes.quentin@gmail.com>
"""An asynchronous implementation of the Sandbox API."""
import io
import json
import os
from contextlib import AbstractAsyncContextManager
from typing import BinaryIO, Optional, Union
import aiohttp
from .exceptions import status_exceptions
from .utils import ENDPOINTS
class ASandbox(AbstractAsyncContextManager):
    """Interface a Sandbox server asynchronously."""

    def __init__(self, url: str, total: Optional[float] = 60, connect: Optional[float] = None,
                 sock_connect: Optional[float] = None, sock_read: Optional[float] = None):
        """Initialize a sandbox with the given URL.

        Default timeout for the whole operation is one minute, use the following
        argument to override :

        * total : The whole operation time including connection
          establishment, request sending and response reading.
        * connect : The time consists connection establishment for a new
          connection or waiting for a free connection from a pool if
          pool connection limits are exceeded.
        * sock_connect : A timeout for connecting to a peer for a new
          connection, not given from a pool.
        * sock_read : The maximum allowed timeout for period between reading
          a new data portion from a peer.
        """
        self.url = url
        self.session = aiohttp.ClientSession(
            timeout=aiohttp.ClientTimeout(total, connect, sock_connect, sock_read)
        )

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        await self.close()

    async def close(self):
        """Close the aiohttp ClientSession."""
        await self.session.close()

    async def _build_url(self, endpoint: str, *args: str):
        """Build the url corresponding to <endpoint> with the given <args>."""
        return os.path.join(self.url, ENDPOINTS[endpoint] % tuple(args))

    async def _get_json(self, endpoint: str) -> dict:
        """GET <endpoint> and return the response's json; raise on non-200."""
        async with self.session.get(await self._build_url(endpoint)) as response:
            if response.status != 200:
                raise status_exceptions(response)
            return await response.json()

    async def _post_json(self, endpoint: str, data: "aiohttp.FormData") -> dict:
        """POST <data> to <endpoint> and return the response's json; raise on non-200."""
        async with self.session.post(await self._build_url(endpoint), data=data) as response:
            if response.status != 200:
                raise status_exceptions(response)
            return await response.json()

    async def libraries(self) -> dict:
        """Asynchronously retrieve libraries installed in the containers of the
        sandbox."""
        return await self._get_json("libraries")

    async def specifications(self) -> dict:
        """Asynchronously retrieve specifications of the sandbox."""
        return await self._get_json("specifications")

    async def usage(self) -> dict:
        """Asynchronously retrieve current usage stats of the sandbox."""
        return await self._get_json("usages")

    async def download(self, uuid: str, path: str = None) -> BinaryIO:
        """Asynchronously download an environment or, when <path> is given, a
        specific file inside that environment."""
        if path is None:
            url = await self._build_url("environments", uuid)
        else:
            url = await self._build_url("files", uuid, path)
        async with self.session.get(url) as response:
            if response.status != 200:
                raise status_exceptions(response)
            return io.BytesIO(await response.read())

    async def check(self, uuid: str, path: str = None) -> int:
        """Asynchronously check if an environment or a specific file inside an
        environment exists.

        Returns 0 when absent, otherwise the size in bytes."""
        if path is None:
            url = await self._build_url("environments", uuid)
        else:
            url = await self._build_url("files", uuid, path)
        async with self.session.head(url) as response:
            if response.status not in [200, 404]:  # pragma: no cover
                raise status_exceptions(response)
            # BUG FIX: header values are strings; cast so the declared
            # `-> int` contract actually holds for callers comparing sizes.
            return 0 if response.status == 404 else int(response.headers["Content-Length"])

    async def execute(self, config: dict, environ: Optional[BinaryIO] = None) -> dict:
        """Asynchronously execute commands on the sandbox according to <config>
        and <environ>, returning the response's json as a dict.

        <environ>, if not None, will be consumed and closed and shall not be
        used further."""
        data = aiohttp.FormData()
        data.add_field("config", json.dumps(config))
        if environ is not None:
            data.add_field("environment", environ)
        return await self._post_json("execute", data)

    async def load(self, environ: dict) -> dict:
        """Asynchronously load <environ> on the sandbox (load/fr endpoint),
        returning the response's json as a dict."""
        data = aiohttp.FormData()
        data.add_field("data", json.dumps(environ))
        return await self._post_json("load/fr", data)

    async def demo(self, environ: dict) -> dict:
        """Asynchronously send <environ> to the sandbox demo endpoint,
        returning the response's json as a dict."""
        data = aiohttp.FormData()
        data.add_field("data", json.dumps(environ))
        data.add_field("demo", True)
        return await self._post_json("demo", data)

    async def playexo(self, config: dict, environ: dict) -> dict:
        """Asynchronously run an exercise on the sandbox with <config> and
        <environ>, returning the response's json as a dict."""
        data = aiohttp.FormData()
        data.add_field("data", json.dumps(environ))
        data.add_field("config", json.dumps(config))
        return await self._post_json("exo", data)

    async def exec(self, datas: Optional[dict] = None) -> dict:
        """Asynchronously execute <datas> on the sandbox exec endpoint,
        returning the response's json as a dict."""
        # BUG FIX: the default was the mutable literal `{}`, shared across
        # calls; use None + a fresh dict per call (behaviour-compatible).
        if datas is None:
            datas = {}
        data = aiohttp.FormData()
        data.add_field("data", json.dumps(datas))
        for key, value in datas.items():
            data.add_field(str(key), value)
        return await self._post_json("exec", data)
| 38.55665
| 94
| 0.609046
| 916
| 7,827
| 5.135371
| 0.194323
| 0.026786
| 0.035714
| 0.043367
| 0.60034
| 0.580357
| 0.564201
| 0.555272
| 0.555272
| 0.509141
| 0
| 0.007147
| 0.302798
| 7,827
| 202
| 95
| 38.747525
| 0.854865
| 0.110515
| 0
| 0.473684
| 0
| 0
| 0.028543
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.010526
| false
| 0
| 0.084211
| 0
| 0.221053
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0ab889f41c5f27938c1c1068877196809ff21fd
| 4,928
|
py
|
Python
|
api/services/usuarios_services.py
|
jhonnattan123/fastapi_crud_example
|
24e1c295d41ad364ef839a4756e85b5bd640385a
|
[
"MIT"
] | 1
|
2022-03-25T17:37:46.000Z
|
2022-03-25T17:37:46.000Z
|
api/services/usuarios_services.py
|
jhonnattan123/fastapi_crud_example
|
24e1c295d41ad364ef839a4756e85b5bd640385a
|
[
"MIT"
] | null | null | null |
api/services/usuarios_services.py
|
jhonnattan123/fastapi_crud_example
|
24e1c295d41ad364ef839a4756e85b5bd640385a
|
[
"MIT"
] | null | null | null |
import datetime
from uuid import UUID
from api.actions import storage
from fastapi import HTTPException
from api.models.usuario import Usuario
from starlette.requests import Request
from api.dependencies import validar_email, validar_formato_fecha,validar_edad
# Expected wire format for fecha_nacimiento (ISO calendar date).
FORMATO_FECHA = "%Y-%m-%d"
# Inclusive age bounds accepted for a user.
EDAD_MINIMA = 18
EDAD_MAXIMA = 100
class Usuarios_Services:
    """Service layer holding the business logic for user management.

    Relies on module-level constants:
        FORMATO_FECHA (str): date format used to validate fecha_nacimiento
        EDAD_MINIMA (int): minimum valid age
        EDAD_MAXIMA (int): maximum valid age
    """

    def _validar_usuario(self, usuario: Usuario) -> None:
        """Validate email, birth-date format and age of <usuario>.

        Side effect: normalizes usuario.fecha_nacimiento from string to
        datetime.  Raises HTTPException(400) on any validation failure.
        (Extracted: this logic was duplicated verbatim in agregar_usuario
        and editar_usuario.)
        """
        if not validar_email(getattr(usuario, "email")):
            raise HTTPException(
                status_code=400,
                detail="El email no es válido"
            )
        fecha_nacimiento = usuario.fecha_nacimiento
        if not validar_formato_fecha(fecha_nacimiento, FORMATO_FECHA):
            raise HTTPException(
                status_code=400,
                detail="El formato de la fecha de nacimiento no es válida"
            )
        usuario.fecha_nacimiento = datetime.datetime.strptime(fecha_nacimiento, FORMATO_FECHA)
        if not validar_edad(usuario.fecha_nacimiento, EDAD_MINIMA, EDAD_MAXIMA):
            raise HTTPException(
                status_code=400,
                detail="La edad no es válida"
            )

    def agregar_usuario(self, usuario: Usuario, request: Request) -> dict:
        """Add a user to the storage backend.

        :param usuario: user to add
        :param request: FastAPI request
        :return: dict with the new user's ID
        """
        try:
            self._validar_usuario(usuario)
            usuario_id = storage.add(usuario, request)
            return {"ID": usuario_id}
        except Exception as e:
            print("Error al agregar usuario: {}".format(str(e)))
            raise e

    def editar_usuario(self, usuario_id: UUID, usuario: Usuario, request: Request) -> dict:
        """Edit an existing user.

        :param usuario_id: ID of the user to edit
        :param usuario: new user data
        :param request: FastAPI request
        :return: dict with the edited user's ID
        """
        try:
            self._validar_usuario(usuario)
            storage.update(usuario_id, usuario, request)
            return {"ID": usuario_id}
        except Exception as e:
            print("Error al editar usuario: {}".format(str(e)))
            raise e

    def eliminar_usuario(self, usuario_id: UUID, request: Request) -> dict:
        """Delete a user from the storage backend.

        :param usuario_id: ID of the user to delete
        :param request: FastAPI request
        :return: dict with the deleted user's ID
        """
        try:
            storage.delete(Usuario, usuario_id, request)
            return {"ID": usuario_id}
        except Exception as e:
            print("Error al eliminar usuario: {}".format(str(e)))
            raise e

    def listar_usuarios(self, pagina: int, cantidad: int, order_by: str, sort: str, request: Request) -> dict:
        """Return a paginated, sorted list of users.

        :param pagina: page number to return
        :param cantidad: number of users per page
        :param order_by: field to sort the list by
        :param sort: ascending or descending order
        :param request: FastAPI request
        """
        try:
            return storage.get_all(Usuario, pagina, cantidad, request, order_by, sort)
        except Exception as e:
            print("Error al listar usuarios: {}".format(str(e)))
            raise e

    def obtener_usuario(self, usuario_id: UUID, request: Request) -> Usuario:
        """Return a user by its ID.

        :param usuario_id: ID of the user to fetch
        :param request: FastAPI request
        """
        try:
            usuario = storage.get_by_id(Usuario, usuario_id, request)
            return usuario
        except Exception as e:
            print("Error al obtener usuario: {}".format(str(e)))
            raise e
| 32.421053
| 109
| 0.584416
| 557
| 4,928
| 5.046679
| 0.197487
| 0.041622
| 0.025614
| 0.059765
| 0.630381
| 0.583067
| 0.519032
| 0.433298
| 0.433298
| 0.433298
| 0
| 0.007101
| 0.342735
| 4,928
| 152
| 110
| 32.421053
| 0.860759
| 0.199269
| 0
| 0.582278
| 0
| 0
| 0.092473
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.063291
| false
| 0
| 0.088608
| 0
| 0.227848
| 0.063291
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0ad08c2a04080e6246b307168d37bc9b104e50c
| 10,204
|
py
|
Python
|
certau/util/taxii/client.py
|
thisismyrobot/cti-toolkit
|
faf6e912af69376f5c55902c1592f7eeb0ce03dd
|
[
"BSD-3-Clause"
] | 12
|
2016-07-11T07:53:05.000Z
|
2021-07-19T12:20:21.000Z
|
certau/util/taxii/client.py
|
thisismyrobot/cti-toolkit
|
faf6e912af69376f5c55902c1592f7eeb0ce03dd
|
[
"BSD-3-Clause"
] | null | null | null |
certau/util/taxii/client.py
|
thisismyrobot/cti-toolkit
|
faf6e912af69376f5c55902c1592f7eeb0ce03dd
|
[
"BSD-3-Clause"
] | 4
|
2016-11-13T22:38:10.000Z
|
2022-01-15T08:21:15.000Z
|
import os
import logging
import dateutil
import pickle
from six.moves.urllib.parse import urlparse
from libtaxii import get_message_from_http_response, VID_TAXII_XML_11
from libtaxii.messages_11 import PollRequest, PollFulfillmentRequest
from libtaxii.messages_11 import PollResponse, generate_message_id
from libtaxii.clients import HttpClient
from certau import version_string
class SimpleTaxiiClient(HttpClient):
    """A simple interface to libtaxii for sending TAXII client messages.

    Args:
        username: a username for HTTP basic authentication
        password: a password for HTTP basic authentication
        key_file: a file containing a private key
            (for SSL certificate-based authentication)
        cert_file: a file containing a certificate
            (for SSL certificate-based authentication)
        ca_file: a file containing the CA's certificate
            (for verifying the server's certificate)
    """

    def __init__(self, username=None, password=None,
                 key_file=None, cert_file=None, ca_file=None):
        super(SimpleTaxiiClient, self).__init__()
        self._logger = logging.getLogger()
        self.username = username
        self.password = password
        self.key_file = key_file
        self.cert_file = cert_file
        self.ca_file = ca_file

    def setup_authentication(self, use_ssl):
        """Setup the appropriate credentials and authentication type.

        Initialises the authentication settings for the connection.

        Args:
            use_ssl: should this connection use SSL
        """
        self.set_use_https(use_ssl)
        credentials = dict()
        if self.username and self.password:
            credentials['username'] = self.username
            credentials['password'] = self.password
        if use_ssl and self.key_file and self.cert_file:
            credentials['key_file'] = self.key_file
            credentials['cert_file'] = self.cert_file
        if credentials:
            self.set_auth_credentials(credentials)
        # Pick the auth type matching whichever credentials are present.
        if self.username and self.password:
            if use_ssl and self.key_file and self.cert_file:
                self.set_auth_type(HttpClient.AUTH_CERT_BASIC)
                self._logger.debug("TAXII authentication using private key "
                                   "(%s), certificate (%s), and credentials "
                                   "for user '%s'", self.key_file,
                                   self.cert_file, self.username)
            else:
                self.set_auth_type(HttpClient.AUTH_BASIC)
                self._logger.debug("TAXII authentication using credentials "
                                   "for user '%s'", self.username)
        elif use_ssl and self.key_file and self.cert_file:
            self.set_auth_type(HttpClient.AUTH_CERT)
            self._logger.debug("TAXII authentication using private key (%s) "
                               "and certificate (%s) only", self.key_file,
                               self.cert_file)
        else:
            self.set_auth_type(HttpClient.AUTH_NONE)
            self._logger.debug("no TAXII authentication")
        # CA certificate verification
        if use_ssl and self.ca_file:
            self.set_verify_server(verify_server=True, ca_file=self.ca_file)
            self._logger.debug("SSL - verification using CA file (%s)",
                               self.ca_file)

    @staticmethod
    def create_poll_request(collection, subscription_id=None,
                            begin_timestamp=None, end_timestamp=None):
        """Create a poll request message using supplied parameters."""
        request_kwargs = dict(
            message_id=generate_message_id(),
            collection_name=collection,
            exclusive_begin_timestamp_label=begin_timestamp,
            inclusive_end_timestamp_label=end_timestamp,
        )
        # A subscription id and explicit poll parameters are mutually exclusive.
        if subscription_id:
            request_kwargs['subscription_id'] = subscription_id
        else:
            request_kwargs['poll_parameters'] = PollRequest.PollParameters()
        return PollRequest(**request_kwargs)

    @staticmethod
    def create_fulfillment_request(collection, result_id, part_number):
        """Create a poll fulfillment request for one result part."""
        return PollFulfillmentRequest(
            message_id=generate_message_id(),
            collection_name=collection,
            result_id=result_id,
            result_part_number=part_number,
        )

    def send_taxii_message(self, request, host, path, port):
        """Send the request message and return the parsed TAXII response."""
        http_response = self.call_taxii_service2(
            host=host,
            path=path,
            message_binding=VID_TAXII_XML_11,
            post_data=request.to_xml(),
            port=port,
            user_agent='{} (libtaxii)'.format(version_string)
        )
        response = get_message_from_http_response(
            http_response=http_response,
            in_response_to=request.message_id,
        )
        return response

    @staticmethod
    def get_poll_time(filename, poll_url, collection):
        """Return the saved end time of the last poll for (poll_url, collection),
        or None when no state has been recorded."""
        if os.path.isfile(filename):
            with open(filename, 'rb') as state_file:
                poll_state = pickle.load(state_file)
            if isinstance(poll_state, dict) and poll_url in poll_state:
                if collection in poll_state[poll_url]:
                    time_string = poll_state[poll_url][collection]
                    return dateutil.parser.parse(time_string)
        return None

    @staticmethod
    def save_poll_time(filename, poll_url, collection, timestamp):
        """Persist <timestamp> as the last poll end time for (poll_url, collection)."""
        if timestamp is not None:
            poll_state = dict()
            if os.path.isfile(filename):
                with open(filename, 'rb') as state_file:
                    poll_state = pickle.load(state_file)
                if not isinstance(poll_state, dict):
                    raise Exception('unexpected content encountered when '
                                    'reading TAXII poll state file')
            if poll_url not in poll_state:
                poll_state[poll_url] = dict()
            poll_state[poll_url][collection] = str(timestamp)
            with open(filename, 'wb') as state_file:
                pickle.dump(poll_state, state_file, protocol=2)

    def poll(self, poll_url, collection, subscription_id=None,
             begin_timestamp=None, end_timestamp=None, state_file=None):
        """Send the TAXII poll request to the server using the given URL.

        Yields content blocks, transparently issuing fulfillment requests
        while the server reports more parts.  When <state_file> is given,
        the poll window resumes from (and is saved to) that file.
        """
        # Parse the poll_url to get the parts required by libtaxii
        url_parts = urlparse(poll_url)
        # Allow credentials to be provided in poll_url
        if url_parts.username and url_parts.password:
            self.username = url_parts.username
            self.password = url_parts.password
            self._logger.debug('updating username and password from poll_url')
        if url_parts.scheme not in ['http', 'https']:
            # BUG FIX: the extra argument was previously passed to Exception()
            # instead of being %-formatted into the message, so the URL never
            # appeared in the rendered error text.
            raise Exception('invalid scheme in poll_url (%s); expected '
                            '"http" or "https"' % poll_url)
        use_ssl = url_parts.scheme == 'https'
        # Initialise the authentication settings
        self.setup_authentication(use_ssl)
        if state_file and not begin_timestamp:
            begin_timestamp = self.get_poll_time(
                filename=state_file,
                poll_url=poll_url,
                collection=collection,
            )
        request = self.create_poll_request(
            collection=collection,
            subscription_id=subscription_id,
            begin_timestamp=begin_timestamp,
            end_timestamp=end_timestamp,
        )
        self._logger.debug('sending poll request (url=%s, collection=%s)',
                           poll_url, collection)
        response = self.send_taxii_message(
            request=request,
            host=url_parts.hostname,
            path=url_parts.path,
            port=url_parts.port,
        )
        first = True
        poll_end_time = None
        while True:
            if not isinstance(response, PollResponse):
                raise Exception('didn\'t get a poll response')
            self._logger.debug('received poll response '
                               '(content_blocks=%d, result_id=%s, more=%s)',
                               len(response.content_blocks),
                               response.result_id,
                               'True' if response.more else 'False')
            # Save end timestamp from first PollResponse
            if first:
                poll_end_time = response.inclusive_end_timestamp_label
            if len(response.content_blocks) == 0:
                if first:
                    self._logger.info('poll response contained '
                                      'no content blocks')
                break
            for content_block in response.content_blocks:
                yield content_block
            if not response.more:
                break
            # Send a fulfilment request
            if first:
                # Initialise fulfilment request values
                part_number = response.result_part_number
                result_id = response.result_id
                first = False
            part_number += 1
            request = self.create_fulfillment_request(
                collection=collection,
                result_id=result_id,
                part_number=part_number,
            )
            self._logger.debug('sending fulfilment request '
                               '(result_id=%s, part_number=%d)',
                               result_id, part_number)
            response = self.send_taxii_message(
                request=request,
                host=url_parts.hostname,
                path=url_parts.path,
                port=url_parts.port,
            )
        # Update the timestamp for the latest poll
        if state_file and poll_end_time:
            self.save_poll_time(
                filename=state_file,
                poll_url=poll_url,
                collection=collection,
                timestamp=poll_end_time,
            )
| 38.217228
| 78
| 0.591141
| 1,106
| 10,204
| 5.209765
| 0.169078
| 0.024297
| 0.023429
| 0.011107
| 0.292954
| 0.219368
| 0.191947
| 0.171989
| 0.154634
| 0.137279
| 0
| 0.001782
| 0.340063
| 10,204
| 266
| 79
| 38.360902
| 0.853876
| 0.112995
| 0
| 0.237113
| 0
| 0
| 0.084712
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041237
| false
| 0.041237
| 0.051546
| 0.005155
| 0.123711
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0ada93a5debd6b2509b477f0b39c69cfae7e923
| 768
|
py
|
Python
|
tutorials/registration/data.py
|
YipengHu/MPHY0041
|
6e9706eba2b9f9a2449539d7dea5f91dde807584
|
[
"Apache-2.0"
] | 1
|
2022-02-21T23:05:49.000Z
|
2022-02-21T23:05:49.000Z
|
tutorials/registration/data.py
|
YipengHu/MPHY0041
|
6e9706eba2b9f9a2449539d7dea5f91dde807584
|
[
"Apache-2.0"
] | 2
|
2022-01-07T11:43:06.000Z
|
2022-03-17T02:11:58.000Z
|
tutorials/registration/data.py
|
YipengHu/MPHY0041
|
6e9706eba2b9f9a2449539d7dea5f91dde807584
|
[
"Apache-2.0"
] | null | null | null |
import os
import zipfile
import requests
DATA_PATH = './data'
RESULT_PATH = './result'

# Download and unpack the head-neck 2D CT dataset on first run only
# (the presence of DATA_PATH is used as the "already downloaded" marker).
if not os.path.exists(DATA_PATH):
    os.makedirs(DATA_PATH)
    print('Downloading and extracting data...')
    url = 'https://weisslab.cs.ucl.ac.uk/WEISSTeaching/datasets/-/archive/hn2dct/datasets-hn2dct.zip'
    r = requests.get(url, allow_redirects=True)
    # Fail fast on an HTTP error instead of trying to unzip an error page.
    r.raise_for_status()
    temp_file = 'temp.zip'
    # Context manager guarantees the file handle is closed before unzipping;
    # the original open(...).write(...) left the handle open.
    with open(temp_file, 'wb') as f:
        f.write(r.content)
    with zipfile.ZipFile(temp_file, 'r') as zip_obj:
        zip_obj.extractall(DATA_PATH)
    os.remove(temp_file)
    print('Done.')
    print('Head-neck 2D CT data downloaded: %s' % os.path.abspath(os.path.join(DATA_PATH, 'datasets-hn2dct')))

# Ensure the output directory for results exists.
if not os.path.exists(RESULT_PATH):
    os.makedirs(RESULT_PATH)
    print('Result directory created: %s' % os.path.abspath(RESULT_PATH))
| 27.428571
| 105
| 0.736979
| 119
| 768
| 4.613445
| 0.453782
| 0.07286
| 0.025501
| 0.040073
| 0.061931
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005822
| 0.105469
| 768
| 27
| 106
| 28.444444
| 0.793304
| 0
| 0
| 0
| 0
| 0.05
| 0.301173
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.15
| 0
| 0.15
| 0.2
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0ae282e70b49bf571087a6d88c319ae9d3cc9d4
| 3,774
|
py
|
Python
|
insights/parsers/tests/test_freeipa_healthcheck_log.py
|
lhuett/insights-core
|
1c84eeffc037f85e2bbf60c9a302c83aa1a50cf8
|
[
"Apache-2.0"
] | null | null | null |
insights/parsers/tests/test_freeipa_healthcheck_log.py
|
lhuett/insights-core
|
1c84eeffc037f85e2bbf60c9a302c83aa1a50cf8
|
[
"Apache-2.0"
] | null | null | null |
insights/parsers/tests/test_freeipa_healthcheck_log.py
|
lhuett/insights-core
|
1c84eeffc037f85e2bbf60c9a302c83aa1a50cf8
|
[
"Apache-2.0"
] | null | null | null |
import doctest
from insights.parsers import freeipa_healthcheck_log
from insights.parsers.freeipa_healthcheck_log import FreeIPAHealthCheckLog
from insights.tests import context_wrap
LONG_FREEIPA_HEALTHCHECK_LOG_OK = """
[{"source": "ipahealthcheck.ipa.roles", "check": "IPACRLManagerCheck",
"result": "SUCCESS", "uuid": "1f4177a4-0ddb-4e4d-8258-a5cd5f4638fc",
"when": "20191203122317Z", "duration": "0.002254",
"kw": {"key": "crl_manager", "crlgen_enabled": true}}]
""".strip()
LONG_FREEIPA_HEALTHCHECK_LOG_FAILURES = """
[{"source": "ipahealthcheck.system.filesystemspace",
"check": "FileSystemSpaceCheck",
"result": "ERROR", "uuid": "90ed8765-6ad7-425c-abbd-b07a652649cb",
"when": "20191203122221Z", "duration": "0.000474", "kw": {
"msg": "/var/log/audit/: free space under threshold: 14 MiB < 512 MiB",
"store": "/var/log/audit/", "free_space": 14, "threshold": 512}}]
""".strip()
FREEIPA_HEALTHCHECK_LOG_DOCS_EXAMPLE = '''
[
{
"source": "ipahealthcheck.ipa.roles",
"check": "IPACRLManagerCheck",
"result": "SUCCESS",
"uuid": "1f4177a4-0ddb-4e4d-8258-a5cd5f4638fc",
"when": "20191203122317Z",
"duration": "0.002254",
"kw": {
"key": "crl_manager",
"crlgen_enabled": true
}
},
{
"source": "ipahealthcheck.ipa.roles",
"check": "IPARenewalMasterCheck",
"result": "SUCCESS",
"uuid": "1feb7f99-2e98-4e37-bb52-686896972022",
"when": "20191203122317Z",
"duration": "0.018330",
"kw": {
"key": "renewal_master",
"master": true
}
},
{
"source": "ipahealthcheck.system.filesystemspace",
"check": "FileSystemSpaceCheck",
"result": "ERROR",
"uuid": "90ed8765-6ad7-425c-abbd-b07a652649cb",
"when": "20191203122221Z",
"duration": "0.000474",
"kw": {
"msg": "/var/log/audit/: free space under threshold: 14 MiB < 512 MiB",
"store": "/var/log/audit/",
"free_space": 14,
"threshold": 512
}
}
]
'''.strip()
FREEIPA_HEALTHCHECK_LOG_OK = "".join(LONG_FREEIPA_HEALTHCHECK_LOG_OK.splitlines())
FREEIPA_HEALTHCHECK_LOG_FAILURES = "".join(LONG_FREEIPA_HEALTHCHECK_LOG_FAILURES.splitlines())
def test_freeipa_healthcheck_log_ok():
    # A fully-successful healthcheck run must parse with zero issues.
    parsed = FreeIPAHealthCheckLog(context_wrap(FREEIPA_HEALTHCHECK_LOG_OK))
    assert len(parsed.issues) == 0
def test_freeipa_healthcheck_log_not_ok():
    # The failing run yields issues, all from the filesystem-space check.
    parsed = FreeIPAHealthCheckLog(context_wrap(FREEIPA_HEALTHCHECK_LOG_FAILURES))
    assert len(parsed.issues) > 0
    for entry in parsed.issues:
        assert entry['check'] == 'FileSystemSpaceCheck'
        assert entry['source'] == 'ipahealthcheck.system.filesystemspace'
def test_freeipa_healthcheck_get_results_ok():
    # No failing results should match when the run succeeded.
    parsed = FreeIPAHealthCheckLog(context_wrap(FREEIPA_HEALTHCHECK_LOG_OK))
    matches = parsed.get_results('ipahealthcheck.system.filesystemspace', 'FileSystemSpaceCheck')
    assert len(matches) == 0
def test_freeipa_healthcheck_get_results_not_ok():
    # Exactly one failing result is expected, and it must be the
    # filesystem-space ERROR from the fixture.
    parsed = FreeIPAHealthCheckLog(context_wrap(FREEIPA_HEALTHCHECK_LOG_FAILURES))
    matches = parsed.get_results('ipahealthcheck.system.filesystemspace', 'FileSystemSpaceCheck')
    assert len(matches) == 1
    for entry in matches:
        assert entry['result'] in ['ERROR', 'CRITICAL']
        assert entry['check'] == 'FileSystemSpaceCheck'
        assert entry['source'] == 'ipahealthcheck.system.filesystemspace'
def test_freeipa_healthcheck_log__documentation():
    # Run the parser module's doctest examples against the docs fixture.
    globs = {
        'healthcheck': FreeIPAHealthCheckLog(context_wrap(FREEIPA_HEALTHCHECK_LOG_DOCS_EXAMPLE)),
    }
    failed, total = doctest.testmod(freeipa_healthcheck_log, globs=globs)
    assert failed == 0
| 35.942857
| 98
| 0.673026
| 388
| 3,774
| 6.296392
| 0.25
| 0.14736
| 0.154728
| 0.056488
| 0.749488
| 0.65002
| 0.591895
| 0.591895
| 0.537863
| 0.537863
| 0
| 0.074651
| 0.183625
| 3,774
| 104
| 99
| 36.288462
| 0.718273
| 0
| 0
| 0.235955
| 0
| 0.022472
| 0.54266
| 0.163222
| 0
| 0
| 0
| 0
| 0.11236
| 1
| 0.05618
| false
| 0
| 0.044944
| 0
| 0.101124
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0aedd632aed5a57b006b298a3c339eedfc172f6
| 3,484
|
py
|
Python
|
recipes/recipes/windows_image_builder/winpe_customization.py
|
xswz8015/infra
|
f956b78ce4c39cc76acdda47601b86794ae0c1ba
|
[
"BSD-3-Clause"
] | null | null | null |
recipes/recipes/windows_image_builder/winpe_customization.py
|
xswz8015/infra
|
f956b78ce4c39cc76acdda47601b86794ae0c1ba
|
[
"BSD-3-Clause"
] | 4
|
2022-03-17T18:58:21.000Z
|
2022-03-17T18:58:22.000Z
|
recipes/recipes/windows_image_builder/winpe_customization.py
|
xswz8015/infra
|
f956b78ce4c39cc76acdda47601b86794ae0c1ba
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from recipe_engine import post_process
from PB.recipes.infra.windows_image_builder import windows_image_builder as wib
from PB.recipes.infra.windows_image_builder import actions
from PB.recipes.infra.windows_image_builder import sources
from recipe_engine.post_process import DropExpectation, StatusSuccess
from RECIPE_MODULES.infra.windows_scripts_executor import test_helper as t
# Recipe modules this recipe depends on (resolved by the recipe engine).
DEPS = [
    'depot_tools/gitiles',
    'recipe_engine/platform',
    'recipe_engine/properties',
    'recipe_engine/raw_io',
    'recipe_engine/json',
    'windows_adk',
    'windows_scripts_executor',
]
# Run this recipe under Python 3.
PYTHON_VERSION_COMPATIBILITY = 'PY3'
# Input properties proto: the Windows image description to build.
PROPERTIES = wib.Image
def RunSteps(api, image):
    """Execute the offline WinPE customizations described by *image*.

    Raises AssertionError when not running on Windows, or when any
    customization in the image is not an offline_winpe_customization.
    """
    if not api.platform.is_win:
        raise AssertionError('This recipe can only run on windows')
    # Only offline winpe customizations are supported by this recipe.
    for customization in image.customizations:
        assert customization.WhichOneof('customization') == 'offline_winpe_customization'
    # Set up the executor, then pin refs and derive unique keys.
    api.windows_scripts_executor.init()
    customizations = api.windows_scripts_executor.init_customizations(image)
    customizations = api.windows_scripts_executor.process_customizations(customizations)
    # Fetch every package the customizations reference.
    api.windows_scripts_executor.download_all_packages(customizations)
    # Make sure the Windows ADK and WinPE tooling are installed.
    api.windows_adk.ensure()
    # Run the customizations themselves.
    api.windows_scripts_executor.execute_customizations(customizations)
# Shared fixture values for GenTests below.
wpe_image = 'wpe_image'  # image name used in the test properties
wpe_cust = 'generic'  # customization name
arch = 'x86'
# Pre-computed customization key expected by the mock helpers
# (presumably the hash of the pinned customization — TODO confirm derivation).
key = '9055a3e678be47d58bb860d27b85adbea41fd2ef3e22c5b7cb3180edf358de90'
def GenTests(api):
    """Yield test cases: non-Windows rejection, and a happy-path WinPE build."""
    # actions for adding files from git
    ACTION_ADD_STARTNET = actions.Action(
        add_file=actions.AddFile(
            name='add_startnet_file',
            src=sources.Src(
                git_src=sources.GITSrc(
                    repo='chromium.dev',
                    ref='HEAD',
                    src='windows/artifacts/startnet.cmd'),),
            dst='Windows\\System32',
        ))
    # URL the executor is expected to resolve the pinned file to.
    STARTNET_URL = 'chromium.dev/+/ef70cb069518e6dc3ff24bfae7f195de5099c377/' +\
                   'windows/artifacts/startnet.cmd'
    # Running anywhere but Windows must raise AssertionError (see RunSteps).
    yield (api.test('not_run_on_windows', api.platform('linux', 64)) +
           api.expect_exception('AssertionError') +
           api.post_process(DropExpectation))
    yield (api.test('happy path', api.platform('win', 64)) + api.properties(
        t.WPE_IMAGE(wpe_image, wib.ARCH_X86, wpe_cust, 'happy test',
                    [ACTION_ADD_STARTNET])) +
           # mock all the init and deinit steps
           t.MOCK_WPE_INIT_DEINIT_SUCCESS(api, key, arch, wpe_image, wpe_cust) +
           # mock git pin file
           t.GIT_PIN_FILE(api, wpe_cust, 'HEAD', 'windows/artifacts/startnet.cmd',
                          'HEAD') +
           # mock add file to wpe_image mount dir step
           t.ADD_FILE(api, wpe_image, wpe_cust, STARTNET_URL) +
           # assert that the generated wpe_image was uploaded
           # NOTE(review): '\[' below is not a recognized Python escape (it stays
           # a literal backslash-bracket); a raw string would be clearer.
           t.CHECK_GCS_UPLOAD(
               api, wpe_image, wpe_cust,
               '\[CLEANUP\]\\\\{}\\\\workdir\\\\gcs.zip'.format(wpe_cust),
               'gs://chrome-gce-images/WIB-WIM/{}.zip'.format(key)) +
           api.post_process(StatusSuccess) + api.post_process(DropExpectation))
| 35.55102
| 80
| 0.705798
| 436
| 3,484
| 5.431193
| 0.364679
| 0.030405
| 0.065034
| 0.052787
| 0.108953
| 0.054476
| 0.054476
| 0.054476
| 0
| 0
| 0
| 0.026466
| 0.197474
| 3,484
| 97
| 81
| 35.917526
| 0.820458
| 0.188289
| 0
| 0
| 0
| 0
| 0.227564
| 0.136396
| 0
| 0
| 0
| 0
| 0.05
| 1
| 0.033333
| false
| 0
| 0.1
| 0
| 0.133333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0afd7d06dd45ec0003e8757b057e5c949b8d859
| 374
|
py
|
Python
|
back/lollangCompiler/main.py
|
wonjinYi/lollang-playground
|
2df07ccc2518e6dc9f9aa00b2f38ad8d62cdb507
|
[
"MIT"
] | 11
|
2022-03-12T06:41:29.000Z
|
2022-03-15T06:15:52.000Z
|
back/lollangCompiler/main.py
|
wonjinYi/lollang-playground
|
2df07ccc2518e6dc9f9aa00b2f38ad8d62cdb507
|
[
"MIT"
] | 4
|
2022-03-14T12:01:09.000Z
|
2022-03-26T20:19:52.000Z
|
back/lollangCompiler/main.py
|
wonjinYi/lollang-playground
|
2df07ccc2518e6dc9f9aa00b2f38ad8d62cdb507
|
[
"MIT"
] | null | null | null |
from lollangCompiler.compiler import Compiler
import argparse
if __name__ == "__main__":
    # Command-line front end: compile a lollang source file to Python.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--file", required=True, help="컴파일할 파일을 선택해주세요.")
    arg_parser.add_argument("--out", default="out.py", help="목적 파이썬 파일경로를 선택해주세요")
    cli_args = arg_parser.parse_args()
    compiler = Compiler()
    compiler.compileFile(cli_args.file, cli_args.out)
| 37.4
| 78
| 0.708556
| 47
| 374
| 5.404255
| 0.617021
| 0.110236
| 0.133858
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.149733
| 374
| 10
| 79
| 37.4
| 0.798742
| 0
| 0
| 0
| 0
| 0
| 0.16
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.222222
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0b0e0083223143424e08a5e2722940882568d5e
| 2,174
|
py
|
Python
|
src/add_2_zip_imports.py
|
goubertbrent/oca-backend
|
b9f59cc02568aecb55d4b54aec05245790ea25fd
|
[
"Apache-2.0"
] | null | null | null |
src/add_2_zip_imports.py
|
goubertbrent/oca-backend
|
b9f59cc02568aecb55d4b54aec05245790ea25fd
|
[
"Apache-2.0"
] | null | null | null |
src/add_2_zip_imports.py
|
goubertbrent/oca-backend
|
b9f59cc02568aecb55d4b54aec05245790ea25fd
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
from google.appengine.api import users as gusers
from mcfw.cache import CachedModelMixIn
from mcfw.consts import MISSING
from mcfw.restapi import register_postcall_hook, INJECTED_FUNCTIONS
from mcfw.rpc import serialize_value, get_type_details
from rogerthat.rpc import users
from rogerthat.utils import OFFLOAD_TYPE_WEB, offload
from rogerthat.utils.transactions import on_trans_committed
def dummy():
    """No-op placeholder (PEP 8 E731: prefer ``def`` over assigning a lambda)."""
    return None
def log_restapi_call_result(function, success, kwargs, result_or_error):
    """Post-call hook: offload a record of one REST API call for logging.

    function: the wrapped REST handler; its ``meta`` dict supplies the
        'silent', 'silent_result', 'kwarg_types' and 'uri' entries read below.
    success: whether the call succeeded.
    kwargs: keyword arguments the handler was called with.
    result_or_error: the return value, or the raised Exception instance.
    """
    if function.meta['silent']:
        # Silent endpoints never log their request payload.
        request_data = "****"
    else:
        kwarg_types = function.meta[u"kwarg_types"]
        request_data = dict()
        # Python 2 dict iteration (iteritems); skip framework-internal and
        # MISSING-valued arguments before serializing.
        for arg, value in kwargs.iteritems():
            if arg == 'accept_missing':
                continue
            if value == MISSING:
                continue
            request_data[arg] = serialize_value(value, *get_type_details(kwarg_types[arg], value), skip_missing=True)
    if function.meta['silent_result']:
        # Endpoint opted out of result logging.
        result = "****"
    elif isinstance(result_or_error, Exception):
        result = unicode(result_or_error)  # Python 2 unicode()
    else:
        result = result_or_error
    # Offload under whichever user framework currently has one authenticated.
    offload(users.get_current_user() or gusers.get_current_user(), OFFLOAD_TYPE_WEB, request_data,
            result, function.meta['uri'], success)
# Wire the logging hook into the REST framework.
register_postcall_hook(log_restapi_call_result)
# mcfw needs a session resolver; delegate to rogerthat's implementation.
INJECTED_FUNCTIONS.get_current_session = users.get_current_session
# The hook registry keeps its own reference; drop the module-level name.
del log_restapi_call_result
# Route cache invalidation through rogerthat's transaction-commit helper.
CachedModelMixIn.on_trans_committed = lambda self, f, *args, **kwargs: on_trans_committed(f, *args, **kwargs)
| 36.233333
| 117
| 0.731831
| 296
| 2,174
| 5.179054
| 0.472973
| 0.039139
| 0.03392
| 0.039139
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00619
| 0.182613
| 2,174
| 59
| 118
| 36.847458
| 0.8565
| 0.278749
| 0
| 0.121212
| 0
| 0
| 0.035461
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030303
| false
| 0
| 0.242424
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0b1290a0ccf26bc0c338627492bdd788761baa7
| 8,396
|
py
|
Python
|
lib/galaxy/model/migrate/versions/0026_cloud_tables.py
|
Galaxyinternship/Galaxy
|
204be086a8c16d6684584cefa9053ed7c86a1784
|
[
"CC-BY-3.0"
] | null | null | null |
lib/galaxy/model/migrate/versions/0026_cloud_tables.py
|
Galaxyinternship/Galaxy
|
204be086a8c16d6684584cefa9053ed7c86a1784
|
[
"CC-BY-3.0"
] | null | null | null |
lib/galaxy/model/migrate/versions/0026_cloud_tables.py
|
Galaxyinternship/Galaxy
|
204be086a8c16d6684584cefa9053ed7c86a1784
|
[
"CC-BY-3.0"
] | null | null | null |
"""
This script adds tables needed for Galaxy cloud functionality.
"""
from __future__ import print_function
import datetime
import logging
from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer, MetaData, Table, TEXT
# Timestamp factory used for create_time/update_time column defaults.
now = datetime.datetime.utcnow
log = logging.getLogger( __name__ )
metadata = MetaData()
# Machine images instances can be started from.
CloudImage_table = Table( "cloud_image", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "create_time", DateTime, default=now ),
    Column( "update_time", DateTime, default=now, onupdate=now ),
    Column( "provider_type", TEXT ),
    Column( "image_id", TEXT, nullable=False ),
    Column( "manifest", TEXT ),
    Column( "state", TEXT ),
    Column( "architecture", TEXT ),
    Column( "deleted", Boolean, default=False ) )
""" UserConfiguredInstance (UCI) table """
UCI_table = Table( "cloud_uci", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "create_time", DateTime, default=now ),
    Column( "update_time", DateTime, default=now, onupdate=now ),
    Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True, nullable=False ),
    Column( "credentials_id", Integer, ForeignKey( "cloud_user_credentials.id" ), index=True ),
    Column( "key_pair_name", TEXT ),
    Column( "key_pair_material", TEXT ),
    Column( "name", TEXT ),
    Column( "state", TEXT ),
    Column( "error", TEXT ),
    Column( "total_size", Integer ),
    Column( "launch_time", DateTime ),
    Column( "deleted", Boolean, default=False ) )
# Individual cloud instances launched for a UCI.
CloudInstance_table = Table( "cloud_instance", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "create_time", DateTime, default=now ),
    Column( "update_time", DateTime, default=now, onupdate=now ),
    Column( "launch_time", DateTime ),
    Column( "stop_time", DateTime ),
    Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True, nullable=False ),
    Column( "uci_id", Integer, ForeignKey( "cloud_uci.id" ), index=True ),
    Column( "type", TEXT ),
    Column( "reservation_id", TEXT ),
    Column( "instance_id", TEXT ),
    Column( "mi_id", Integer, ForeignKey( "cloud_image.id" ), index=True ),
    Column( "state", TEXT ),
    Column( "error", TEXT ),
    Column( "public_dns", TEXT ),
    Column( "private_dns", TEXT ),
    Column( "security_group", TEXT ),
    Column( "availability_zone", TEXT ) )
# Persistent storage volumes attached to instances.
CloudStore_table = Table( "cloud_store", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "create_time", DateTime, default=now ),
    Column( "update_time", DateTime, default=now, onupdate=now ),
    Column( "attach_time", DateTime ),
    Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True, nullable=False ),
    Column( "uci_id", Integer, ForeignKey( "cloud_uci.id" ), index=True, nullable=False ),
    Column( "volume_id", TEXT ),
    Column( "size", Integer, nullable=False ),
    Column( "availability_zone", TEXT ),
    Column( "inst_id", Integer, ForeignKey( "cloud_instance.id" ) ),
    Column( "status", TEXT ),
    Column( "device", TEXT ),
    Column( "space_consumed", Integer ),
    Column( "error", TEXT ),
    Column( "deleted", Boolean, default=False ) )
# Point-in-time snapshots of storage volumes.
CloudSnapshot_table = Table( "cloud_snapshot", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "create_time", DateTime, default=now ),
    Column( "update_time", DateTime, default=now, onupdate=now ),
    Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True, nullable=False ),
    Column( "uci_id", Integer, ForeignKey( "cloud_uci.id" ), index=True ),
    Column( "store_id", Integer, ForeignKey( "cloud_store.id" ), index=True, nullable=False ),
    Column( "snapshot_id", TEXT ),
    Column( "status", TEXT ),
    Column( "description", TEXT ),
    Column( "error", TEXT ),
    Column( "deleted", Boolean, default=False ) )
# Per-user credentials for a cloud provider.
CloudUserCredentials_table = Table( "cloud_user_credentials", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "create_time", DateTime, default=now ),
    Column( "update_time", DateTime, default=now, onupdate=now ),
    Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True, nullable=False ),
    Column( "provider_id", Integer, ForeignKey( "cloud_provider.id" ), index=True, nullable=False ),
    Column( "name", TEXT ),
    Column( "access_key", TEXT ),
    Column( "secret_key", TEXT ),
    Column( "deleted", Boolean, default=False ) )
# Cloud provider connection configuration (region, endpoint, proxy, ...).
CloudProvider_table = Table( "cloud_provider", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "create_time", DateTime, default=now ),
    Column( "update_time", DateTime, default=now, onupdate=now ),
    Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True, nullable=False ),
    Column( "type", TEXT, nullable=False ),
    Column( "name", TEXT ),
    Column( "region_connection", TEXT ),
    Column( "region_name", TEXT ),
    Column( "region_endpoint", TEXT ),
    Column( "is_secure", Boolean ),
    Column( "host", TEXT ),
    Column( "port", Integer ),
    Column( "proxy", TEXT ),
    Column( "proxy_port", TEXT ),
    Column( "proxy_user", TEXT ),
    Column( "proxy_pass", TEXT ),
    Column( "debug", Integer ),
    Column( "https_connection_factory", TEXT ),
    Column( "path", TEXT ),
    Column( "deleted", Boolean, default=False ) )
def upgrade(migrate_engine):
    """Create all cloud_* tables, logging (not raising) on failure."""
    metadata.bind = migrate_engine
    print(__doc__)
    # Reflect the live schema so existing tables are known to the metadata.
    metadata.reflect()
    # Creation order matters: referenced tables come before their referrers.
    creation_order = (
        CloudProvider_table,
        CloudUserCredentials_table,
        CloudImage_table,
        UCI_table,
        CloudInstance_table,
        CloudStore_table,
        CloudSnapshot_table,
    )
    try:
        for table in creation_order:
            table.create()
    except Exception:
        log.exception("Creating cloud tables failed.")
def downgrade(migrate_engine):
    """Drop the cloud_* tables in reverse creation order; log on failure."""
    metadata.bind = migrate_engine
    metadata.reflect()
    # Drop referrers before the tables they reference.
    drop_order = (
        CloudSnapshot_table,
        CloudStore_table,
        CloudInstance_table,
        UCI_table,
        CloudImage_table,
        CloudUserCredentials_table,
        CloudProvider_table,
    )
    try:
        for table in drop_order:
            table.drop()
    except Exception:
        log.exception("Dropping cloud tables failed.")
| 54.167742
| 132
| 0.488328
| 694
| 8,396
| 5.723343
| 0.168588
| 0.100705
| 0.066969
| 0.077543
| 0.501511
| 0.473061
| 0.395015
| 0.376888
| 0.353223
| 0.353223
| 0
| 0
| 0.402454
| 8,396
| 154
| 133
| 54.519481
| 0.791708
| 0.010005
| 0
| 0.422222
| 0
| 0
| 0.140644
| 0.008594
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014815
| false
| 0.007407
| 0.02963
| 0
| 0.044444
| 0.014815
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0b5c736d3e79ca55e6b015bca8f2bcfa9bec4d1
| 30,844
|
py
|
Python
|
image_misc.py
|
frankgh/deep-visualization-toolbox
|
c9bb26eacae0b4d1a25d3844538c2830026add76
|
[
"MIT"
] | null | null | null |
image_misc.py
|
frankgh/deep-visualization-toolbox
|
c9bb26eacae0b4d1a25d3844538c2830026add76
|
[
"MIT"
] | null | null | null |
image_misc.py
|
frankgh/deep-visualization-toolbox
|
c9bb26eacae0b4d1a25d3844538c2830026add76
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
import cv2
import matplotlib.pyplot as plt
import skimage
import skimage.io
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.pyplot import cm
from mpl_toolkits.axes_grid1 import make_axes_locatable
from numpy import arange, array, newaxis, tile, linspace, pad, expand_dims, \
fromstring, ceil, dtype, float32, sqrt, dot, zeros
from misc import WithTimer
def norm01(arr):
    """Return a copy of *arr* linearly rescaled onto [0, 1].

    The input array is not mutated; the 1e-10 term guards against
    division by zero for constant arrays.
    """
    scaled = arr.copy()
    low = scaled.min()
    scaled -= low
    span = scaled.max() + 1e-10
    scaled /= span
    return scaled
def norm01c(arr, center):
    """Map *arr* into [0, 1] so that *center* lands exactly on 0.5.

    Values are shifted by *center*, then scaled symmetrically so the
    larger of the positive/negative extremes reaches the interval edge.
    """
    shifted = arr.copy()
    shifted -= center
    # Symmetric bound: twice the larger absolute extreme (plus epsilon).
    bound = max(2 * shifted.max(), -2 * shifted.min()) + 1e-10
    shifted /= bound
    shifted += .5
    assert shifted.min() >= 0
    assert shifted.max() <= 1
    return shifted
def norm0255(arr):
    """Return *arr* rescaled to the [0, 255] range as a uint8 array."""
    stretched = arr.copy()
    stretched -= stretched.min()
    # Scale the maximum up to (just under) 255; epsilon avoids divide-by-zero.
    stretched *= 255.0 / (stretched.max() + 1e-10)
    return array(stretched, 'uint8')
def cv2_read_cap_rgb(cap, saveto=None):
    """Read one frame from an OpenCV capture and return it as 3-channel RGB.

    When *saveto* is given, the raw (BGR) frame is also written to disk.
    """
    rval, frame = cap.read()
    if saveto:
        cv2.imwrite(saveto, frame)
    if len(frame.shape) == 2:
        # Single-channel grayscale: add a trailing channel axis.
        frame = frame[:, :, newaxis]
    if frame.shape[2] == 1:
        # Replicate the lone channel into R, G and B.
        frame = tile(frame, (1, 1, 3))
    if frame.shape[2] > 3:
        # Drop any transparency channel.
        frame = frame[:, :, :3]
    # OpenCV delivers BGR; reverse the channel order to get RGB.
    return frame[:, :, ::-1]
def plt_plot_signal(data, labels, zoom_level=-1, offset=0, markers=None, title=None):
    """Render stacked line plots (one subplot per column) and return an RGB image.

    data: 1-D or 2-D array; 1-D input is promoted to a single column.
    labels: per-column legend labels, or None for 'Signal N' defaults.
    zoom_level: number of samples to display (-1 means all rows).
    offset: index of the first displayed sample.
    markers: optional dict-like {column index: iterable of x positions}
        drawn as vertical lines (only positions inside the visible window).
    title: optional figure title.
    Returns an (h, w, 3) uint8 array of the rendered figure.
    """
    fig = Figure(figsize=(5, 5))
    canvas = FigureCanvas(fig)
    ax = None
    if len(data.shape) == 1:
        data = expand_dims(data, axis=1)
    if zoom_level == -1:
        # Default: show the full signal.
        zoom_level = data.shape[0]
    # One distinct rainbow colour per column.
    color = iter(cm.rainbow(linspace(0, 1, data.shape[1])))
    s = offset
    e = s + zoom_level
    x = arange(s, e)
    for i in range(data.shape[1]):
        c = next(color)
        label = labels[i] if labels is not None else 'Signal {}'.format(i + 1)
        # Subplots share the x axis so the signals stay aligned vertically.
        ax = fig.add_subplot(data.shape[1], 1, (i + 1), sharex=ax)
        ax.plot(x, data[s:e, i], lw=1, label=label, c=c)
        # # ax.set_adjustable('box-forced')
        # ax.set_xlim(left=0, right=zoom_level)
        # ax.get_xaxis().set_visible(i == data.shape[1] - 1)
        # ax.xaxis.set_ticks(arange(s, e + 1, (e - s) / 10.0))
        # ax.xaxis.set_major_formatter(ticker.FormatStrFormatter('%0.1f'))
        ax.legend(loc='lower right')
        if markers is not None and i in markers:
            for val in markers[i]:
                if val >= s and val < e:
                    ax.axvline(x=val)
    if title is not None:
        fig.suptitle(title)
    fig.tight_layout()
    fig.subplots_adjust(hspace=0)
    canvas.draw()  # draw the canvas, cache the renderer
    l, b, w, h = fig.bbox.bounds
    w, h = int(w), int(h)
    # Rasterize the Agg canvas into a numpy RGB image.
    im = fromstring(canvas.tostring_rgb(), dtype='uint8')
    im.shape = h, w, 3
    return im
def plt_plot_heatmap(data,
                     shape,
                     rows,
                     cols,
                     title=None,
                     x_axis_label=None,
                     y_axis_label=None,
                     x_axis_values=None,
                     y_axis_values=None,
                     hide_axis=True,
                     vmin=None,
                     vmax=None):
    """Render each frame of *data* as a heatmap and return all frames as RGB images.

    data: 3-D array of frames; data[i] is one 2-D heatmap (inferred from the
        data.shape[1]/data.shape[2] indexing below).
    shape: (height, width) pixel hint used to size the figure per grid cell.
    rows, cols: grid dimensions the per-cell figure size is derived from.
    vmin/vmax: colour limits; default to the global min/max of *data*.
    Returns an array of (h, w, 3) uint8 images, one per frame, rendered with
    matplotlib blitting (background cached once, only the image redrawn).
    """
    res = []
    shape = (max(2, ceil(shape[1] / 80 / cols)), max(2, ceil(shape[0] / 80 / rows)))
    fig, ax = plt.subplots(1, 1, figsize=shape)
    canvas = FigureCanvas(fig)
    # for i in xrange(y.shape[0]):
    # sns.heatmap(y[i], ax=ax, vmin=minn, vmax=maxx)
    # canvas.draw() # draw the canvas, cache the renderer
    #
    # l, b, w, h = fig.bbox.bounds
    # w, h = int(w), int(h)
    # im = fromstring(canvas.tostring_rgb(), dtype='uint8')
    # im.shape = h, w, 3
    # res.append(im)
    # Seed the artist with zeros; per-frame data is blitted in below.
    img = ax.imshow(
        zeros((data.shape[1], data.shape[2])),
        cmap='viridis',
        vmin=vmin if vmin is not None else data.min(),
        vmax=vmax if vmax is not None else data.max(),
        interpolation='none',
        aspect='auto'
    )
    # get rid of spines and fix range of axes, rotate x-axis labels
    ax.spines['left'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.spines['bottom'].set_visible(False)
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')
    if hide_axis:
        ax.set_xticks([])
        ax.set_yticks([])
        ax.xaxis.set_ticklabels([])
        ax.yaxis.set_ticklabels([])
        fig.subplots_adjust(left=0.05, bottom=0.05, right=0.95, top=0.95, hspace=0, wspace=0)
    else:
        if title is not None:
            plt.title(title)
        if x_axis_label is not None:
            ax.set_xlabel(x_axis_label)
        if y_axis_label is not None:
            ax.set_ylabel(y_axis_label)
        if x_axis_values is not None:
            # Tick every third cell, centred (+0.5) on the cell.
            a = arange(0, x_axis_values.shape[0], 3) + 0.5
            b = arange(x_axis_values.min(), x_axis_values.max() + 1.5, 1.5)
            ax.set_xticks(a)
            ax.set_xticklabels(b, rotation=90)
        if y_axis_values is not None:
            a = arange(0, y_axis_values.shape[0], 3) + 0.5
            # c = roundup((y_axis_values.max() - y_axis_values.min()) / 11)
            # b = arange(y_axis_values.min(), y_axis_values.max(), c)
            b = linspace(y_axis_values.min(), y_axis_values.max(), num=10, dtype=int)
            ax.set_yticks(a)
            ax.set_yticklabels(b)
            # for tick in ax.get_xticklabels():
            # tick.set_rotation(90)
    if not hide_axis:
        divider = make_axes_locatable(ax)
        # colorbar on the right of ax. Colorbar width in % of ax and space between them is defined by pad in inches
        cax = divider.append_axes('right', size='5%', pad=0.07)
        cb = fig.colorbar(img, cax=cax)
        # remove colorbar frame/spines
        cb.outline.set_visible(False)
    # don't stop after each subfigure change
    plt.show(block=False)
    if not hide_axis:
        fig.tight_layout()
    canvas.draw()  # draw the canvas, cache the renderer
    # keep bg in memory
    background = fig.canvas.copy_from_bbox(ax.bbox)
    # start = time.time()
    for i in xrange(data.shape[0]):
        img.set_array(data[i])
        # restore background
        fig.canvas.restore_region(background)
        ax.draw_artist(img)
        # fill in the axes rectangle
        fig.canvas.blit(ax.bbox)
        # loop through array
        # for i in xrange(data.shape[0]):
        # time.sleep(0.005)
        # img.set_array(data[i])
        # canvas.draw()
        l, b, w, h = fig.bbox.bounds
        w, h = int(w), int(h)
        im = fromstring(canvas.tostring_rgb(), dtype='uint8')
        im.shape = h, w, 3
        res.append(im)
    fig.clf()
    plt.clf()
    plt.close()
    return array(res)
def plt_plot_filter(x, y, title, x_axis_label, y_axis_label, log_scale):
    """Render a single 4x4-inch line plot of *y* and return it as an RGB array.

    x: x positions, or None to use 0..len(y)-1.
    log_scale: 1 selects a logarithmic y axis; anything else plots linearly.
    Returns an (h, w, 3) uint8 image of the rendered figure.
    """
    fig, ax = plt.subplots(1, 1, figsize=(4, 4))
    canvas = FigureCanvas(fig)
    xs = arange(0, y.shape[0]) if x is None else x
    if log_scale == 1:
        ax.semilogy(xs, y, lw=2)
    else:
        ax.plot(xs, y, lw=2)
    ax.set(xlabel=x_axis_label, ylabel=y_axis_label, title=title)
    fig.tight_layout()
    canvas.draw()  # render so the RGB buffer is populated
    _, _, width, height = fig.bbox.bounds
    width, height = int(width), int(height)
    img = fromstring(canvas.tostring_rgb(), dtype='uint8')
    img.shape = height, width, 3
    # Release figure resources before returning the rasterized image.
    fig.clf()
    plt.clf()
    plt.close()
    return img
def plt_plot_filters_blit(y, x, shape, rows, cols,
                          title=None,
                          x_axis_label=None,
                          y_axis_label=None,
                          log_scale=0,
                          hide_axis=False):
    """Render each row of *y* as one frame of a line plot, using blitting.

    y: 2-D array; y[i] is one curve (all frames share axis limits).
    x: shared x positions, or None for 0..y.shape[1]-1.
    shape, rows, cols: pixel hint and grid used to size the figure.
    Returns an array of (h, w, 3) uint8 images, one per row of *y*.
    Note: log_scale is currently unused (the handling below is commented out).
    """
    res = []
    x = arange(0, y.shape[1]) if x is None else x
    # if log_scale == 1:
    #     y = log(y)
    # elif log_scale == 2:
    #     x = log(x)
    # elif log_scale == 3:
    #     x = log(x)
    #     y = log(y)
    shape = (max(2, ceil(shape[1] / 80 / cols)), max(2, ceil(shape[0] / 80 / rows)))
    fig, ax = plt.subplots(1, 1, figsize=shape)
    canvas = FigureCanvas(fig)
    # Fixed limits across frames so blitted redraws stay consistent.
    ax.set_xlim(min(x), max(x))
    ax.set_ylim(y.min(), y.max())
    if hide_axis:
        ax.xaxis.set_ticklabels([])
        ax.yaxis.set_ticklabels([])
        fig.subplots_adjust(left=0.05, bottom=0.05, right=0.95, top=0.95, hspace=0, wspace=0)
    else:
        if x_axis_label is not None:
            ax.set_xlabel(x_axis_label)
        if y_axis_label is not None:
            ax.set_ylabel(y_axis_label)
        if title is not None:
            plt.title(title)
    # Empty line artist; its data is swapped per frame below.
    line, = ax.plot([], [], lw=2)
    if not hide_axis:
        fig.tight_layout()
    canvas.draw()  # draw the canvas, cache the renderer
    # keep bg in memory
    background = fig.canvas.copy_from_bbox(ax.bbox)
    for i in xrange(y.shape[0]):
        line.set_data(x, y[i])
        # line.set_color()
        # restore background
        fig.canvas.restore_region(background)
        # redraw just the points
        ax.draw_artist(line)
        # fill in the axes rectangle
        fig.canvas.blit(ax.bbox)
        l, b, w, h = fig.bbox.bounds
        w, h = int(w), int(h)
        im = fromstring(canvas.tostring_rgb(), dtype='uint8')
        im.shape = h, w, 3
        res.append(im)
    fig.clf()
    plt.clf()
    plt.close()
    return array(res)
def plt_plot_filters_fast(y, x, shape, rows, cols,
                          title=None,
                          x_axis_label=None,
                          y_axis_label=None,
                          share_axes=True,
                          log_scale=0):
    """Render cumulative line plots of the rows of *y*, one snapshot per row.

    Unlike the blitting variant, curves accumulate on the same axes: frame i
    contains rows 0..i. share_axes fixes the limits up front so frames align.
    Returns an array of (h, w, 3) uint8 images.
    """
    res = []
    shape = (ceil(shape[1] / 80 / cols), ceil(shape[0] / 80 / rows))
    fig, ax = plt.subplots(1, 1, figsize=shape)
    canvas = FigureCanvas(fig)
    # ax.set_aspect('equal')
    if share_axes:
        if x is not None:
            min_x, max_x = min(x), max(x)
        else:
            min_x, max_x = 0, y.shape[1]
        min_y, max_y = y.min(), y.max()
        ax.set_xlim(min_x, max_x)
        ax.set_ylim(min_y, max_y)
    # ax.hold(True)
    plt.subplots_adjust(left=0.185, bottom=0.125, right=0.98, top=0.98)
    # plt.show(False)
    # plt.draw()
    # background = fig.canvas.copy_from_bbox(ax.bbox)
    # points = ax.plot(x[0], linewidth=1)[0]
    for i in xrange(y.shape[0]):
        # Add this row's curve (log y-axis when log_scale == 1).
        if x is not None:
            if log_scale == 1:
                ax.semilogy(x, y[i], linewidth=1)
            else:
                ax.plot(x, y[i], linewidth=1)
        else:
            if log_scale == 1:
                ax.semilogy(y[i], linewidth=1)
            else:
                ax.plot(y[i], linewidth=1)
        if x_axis_label is not None:
            ax.set_xlabel(x_axis_label)
        if y_axis_label is not None:
            ax.set_ylabel(y_axis_label)
        if title is not None:
            plt.title(title)
        # plt.autoscale(enable=True, axis='y', tight=True)
        # plt.tight_layout()
        # Turn off axes and set axes limits
        # ax.axis('off')
        canvas.draw()  # draw the canvas, cache the renderer
        l, b, w, h = fig.bbox.bounds
        w, h = int(w), int(h)
        im = fromstring(canvas.tostring_rgb(), dtype='uint8')
        im.shape = h, w, 3
        res.append(im)
        # ax.cla()
    fig.clf()
    return array(res)
def plt_plot_filters(x, y, shape, rows, cols,
                     selected_unit=None,
                     selected_unit_color=None,
                     title=None,
                     x_axis_label=None,
                     y_axis_label=None,
                     share_axes=True,
                     log_scale=0):
    """Render a rows x cols grid of line plots (one per row of *x*) as one RGB image.

    x: 2-D array; x[i] is the curve for grid cell i (row-major).
    y: shared x-axis positions, or None to plot against the sample index.
    selected_unit: optional cell index to outline in *selected_unit_color*;
        the neighbours below and to the right get a matching partial border.
    Returns an (h, w, 3) uint8 image of the full grid figure.
    """
    shape = (ceil(shape[1] / 80), ceil(shape[0] / 80))
    fig = Figure(figsize=shape)
    canvas = FigureCanvas(fig)
    ax, highlighted_ax, right_ax, bottom_ax, curr, right, bottom = None, None, None, None, None, None, None
    if selected_unit is not None:
        # NOTE(review): Python 2 integer division; under Python 3 this '/' yields
        # a float (row is only used in the comparison below, so it still works).
        row = selected_unit / cols
        col = selected_unit % cols
        curr = selected_unit
        # Indices of the cells below and to the right of the selection, if any.
        bottom = (selected_unit + cols) if row < rows - 1 else None
        right = (selected_unit + 1) if col < cols - 1 else None
    for i in xrange(x.shape[0]):
        if share_axes:
            ax = fig.add_subplot(rows, cols, (i + 1), axisbelow=False, sharex=ax, sharey=ax)
        else:
            ax = fig.add_subplot(rows, cols, (i + 1), axisbelow=False)
        if y is not None:
            if log_scale == 1:
                ax.semilogy(y, x[i], linewidth=1)
            else:
                ax.plot(y, x[i], linewidth=1)
        else:
            if log_scale == 1:
                ax.semilogy(x[i], linewidth=1)
            else:
                ax.plot(x[i], linewidth=1)
        ax.set_xlim(left=0, right=x.shape[1] - 1)
        # Only the bottom row shows x ticks; only the left column shows y ticks.
        ax.get_xaxis().set_visible(i >= ((rows - 1) * cols))
        ax.get_yaxis().set_visible(i % cols == 0)
        if i == curr:
            highlighted_ax = ax
        if i == bottom:
            bottom_ax = ax
        if i == right:
            right_ax = ax
        if x_axis_label is not None:
            ax.set_xlabel(x_axis_label)
        if y_axis_label is not None:
            ax.set_ylabel(y_axis_label)
    # Outline the selected cell; shared spines of its neighbours are tinted
    # too so the highlight reads as a full box despite hspace/wspace of 0.
    if highlighted_ax is not None:
        for axis in ['top', 'bottom', 'left', 'right']:
            highlighted_ax.spines[axis].set_linewidth(2.5)
            highlighted_ax.spines[axis].set_color(selected_unit_color)
    if bottom_ax is not None:
        bottom_ax.spines['top'].set_linewidth(2)
        bottom_ax.spines['top'].set_color(selected_unit_color)
    if right_ax is not None:
        right_ax.spines['left'].set_linewidth(2)
        right_ax.spines['left'].set_color(selected_unit_color)
    if title is not None:
        fig.suptitle(title)
    fig.tight_layout()
    fig.subplots_adjust(hspace=0, wspace=0)
    canvas.draw()  # draw the canvas, cache the renderer
    l, b, w, h = fig.bbox.bounds
    w, h = int(w), int(h)
    im = fromstring(canvas.tostring_rgb(), dtype='uint8')
    im.shape = h, w, 3
    return im
def cv2_read_file_rgb(filename):
    '''Reads an image from file. Always returns (x,y,3) RGB uint8.

    Raises IOError if the file is missing or cannot be decoded.
    '''
    im = cv2.imread(filename)
    if im is None:
        # cv2.imread silently returns None on missing/unreadable files; fail
        # loudly with the offending path instead of an opaque AttributeError
        # on im.shape below.
        raise IOError('Could not read image file: %s' % filename)
    if len(im.shape) == 2:
        # Upconvert single channel grayscale to color
        im = im[:, :, newaxis]
    if im.shape[2] == 1:
        # Replicate the single channel into 3
        im = tile(im, (1, 1, 3))
    if im.shape[2] > 3:
        # Chop off transparency
        im = im[:, :, :3]
    return cv2.cvtColor(im, cv2.COLOR_BGR2RGB)  # Convert native OpenCV BGR -> RGB
def crop_to_square(frame):
    '''Center-crop an (i, j, c) image to a square along its longer axis.

    Returns a view of frame with shape (s, s, c) where s = min(i, j).
    '''
    i_size, j_size = frame.shape[0], frame.shape[1]
    if j_size > i_size:
        # landscape: trim equal amounts from left and right.
        # Floor division: slice indices must be ints (plain / breaks on Python 3).
        offset = (j_size - i_size) // 2
        return frame[:, offset:offset + i_size, :]
    else:
        # portrait (or already square): trim top and bottom
        offset = (i_size - j_size) // 2
        return frame[offset:offset + j_size, :, :]
def cv2_imshow_rgb(window_name, img):
    '''Display an RGB image in a named OpenCV window (cv2.imshow expects BGR).'''
    # The BGR<->RGB channel swap is its own inverse, so BGR2RGB works here too.
    bgr = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    cv2.imshow(window_name, bgr)
def caffe_load_image(filename, color=True, as_uint=False):
    '''
    Copied from Caffe to simplify potential import problems.
    Load an image converting from grayscale or alpha as needed.
    Take
    filename: string
    color: flag for color format. True (default) loads as RGB while False
    loads as intensity (if image is already grayscale).
    as_uint: if True, keep the raw integer pixel values instead of
    converting to float32 in [0, 1].
    Give
    image: an image with type float32 in range [0, 1]
    of size (H x W x 3) in RGB or
    of size (H x W x 1) in grayscale.
    '''
    with WithTimer('imread', quiet=True):
        if as_uint:
            # Raw pixel values as stored in the file
            img = skimage.io.imread(filename)
        else:
            # Convert to float32 scaled into [0, 1]
            img = skimage.img_as_float(skimage.io.imread(filename)).astype(float32)
    if img.ndim == 2:
        # Grayscale: promote (H, W) -> (H, W, 1)
        img = img[:, :, newaxis]
        if color:
            # Replicate the single channel to produce 3-channel "RGB"
            img = tile(img, (1, 1, 3))
    elif img.shape[2] == 4:
        # RGBA: drop the alpha channel
        img = img[:, :, :3]
    return img
def get_tiles_height_width(n_tiles, desired_width=None):
    '''Get a height x width size that will fit n_tiles tiles.

    If desired_width is None, returns the smallest square grid that fits;
    otherwise uses the given width with just enough rows.
    Returns (height, width).
    '''
    if desired_width is None:
        # square
        width = int(ceil(sqrt(n_tiles)))
        height = width
    else:
        assert isinstance(desired_width, int)
        width = desired_width
        height = int(ceil(float(n_tiles) / width))
    return height, width
def get_tiles_height_width_ratio(n_tiles, width_ratio=1.0):
    '''Get a height x width size that will fit n_tiles tiles.

    width_ratio widens (>1) or narrows (<1) the grid relative to a square.
    '''
    # Scale the square layout's side by sqrt of the requested aspect ratio
    desired = int(ceil(sqrt(n_tiles * width_ratio)))
    return get_tiles_height_width(n_tiles, desired_width=desired)
def tile_images_normalize(data, c01=False, boost_indiv=0.0, boost_gamma=1.0, single_tile=False, scale_range=1.0,
                          neg_pos_colors=None):
    '''Normalize a batch of images into [0, scale_range] for tiled display.

    data: batch of images, b01 or b01c layout (see c01/single_tile).
    c01: if True, input is channel-first bc01 and is transposed to b01c.
    boost_indiv: in [0, 1]; per-image contrast boost exponent (0 disables).
    boost_gamma: gamma applied to the whole batch (1.0 disables).
    single_tile: if True, input is a single image; it is promoted to a batch of one.
    scale_range: upper bound of the output value range.
    neg_pos_colors: optional (neg_rgb, pos_rgb) pair; single-channel data is
        colored by sign using these two colors.
    Returns a b01c float batch; single-channel input comes back 3-channel.
    '''
    data = data.copy()  # never mutate the caller's array
    if single_tile:
        # promote 2D image -> 3D batch (01 -> b01) or 3D image -> 4D batch (01c -> b01c OR c01 -> bc01)
        data = data[newaxis]
    if c01:
        # Convert bc01 -> b01c
        assert len(data.shape) == 4, 'expected bc01 data'
        data = data.transpose(0, 2, 3, 1)
    if neg_pos_colors:
        neg_clr, pos_clr = neg_pos_colors
        neg_clr = array(neg_clr).reshape((1, 3))
        pos_clr = array(pos_clr).reshape((1, 3))
        # Keep 0 at 0
        data /= max(data.max(), -data.min()) + 1e-10  # Map data to [-1, 1]
        # data += .5 * scale_range  # now in [0, scale_range]
        # assert data.min() >= 0
        # assert data.max() <= scale_range
        if len(data.shape) == 3:
            # Add a trailing singleton channel axis
            data = data.reshape(data.shape + (1,))
        assert data.shape[3] == 1, 'neg_pos_color only makes sense if color data is not provided (channels should be 1)'
        # Positive values blend toward pos_clr, negative toward neg_clr
        data = dot((data > 0) * data, pos_clr) + dot((data < 0) * -data, neg_clr)
    # Shift and scale the whole batch into [0, scale_range]
    data -= data.min()
    data *= scale_range / (data.max() + 1e-10)
    # sqrt-scale (0->0, .1->.3, 1->1)
    assert boost_indiv >= 0 and boost_indiv <= 1, 'boost_indiv out of range'
    # print 'using boost_indiv:', boost_indiv
    if boost_indiv > 0:
        # Divide each image by (its max)^boost_indiv so dim images are brightened
        if len(data.shape) == 4:
            mm = (data.max(-1).max(-1).max(-1) + 1e-10) ** -boost_indiv
        else:
            mm = (data.max(-1).max(-1) + 1e-10) ** -boost_indiv
        # Transpose trick broadcasts the per-image factor over trailing axes
        data = (data.T * mm).T
    if boost_gamma != 1.0:
        data = data ** boost_gamma
    # Promote single-channel data to 3 channel color
    if len(data.shape) == 3:
        # b01 -> b01c
        data = tile(data[:, :, :, newaxis], 3)
    return data
def tile_images_make_tiles(data, padsize=1, padval=0, hw=None, highlights=None):
    '''Lay out a batch of images into one big tiled image.

    data: batch of images, index 0 is the tile index (b01 or b01c layout).
    padsize: border width in pixels drawn around each tile.
    padval: border color - grayscale scalar/len-1 tuple, or an RGB triple.
    hw: optional (height, width) tile grid; computed from the batch size if None.
    highlights: optional per-tile border colors; list of length data.shape[0]
        with None entries meaning "no highlight".
    Returns ((height, width), tiled_image).
    '''
    if hw:
        height, width = hw
    else:
        height, width = get_tiles_height_width(data.shape[0])
    assert height * width >= data.shape[0], '{} rows x {} columns cannot fit {} tiles'.format(height, width,
                                                                                             data.shape[0])
    # First iteration: one-way padding, no highlights
    # padding = ((0, width*height - data.shape[0]), (0, padsize), (0, padsize)) + ((0, 0),) * (data.ndim - 3)
    # data = pad(data, padding, mode='constant', constant_values=(padval, padval))
    # Second iteration: padding with highlights
    # padding = ((0, width*height - data.shape[0]), (padsize, padsize), (padsize, padsize)) + ((0, 0),) * (data.ndim - 3)
    # print 'tile_images: data min,max =', data.min(), data.max()
    # padder = SmartPadder()
    ##data = pad(data, padding, mode=jy_pad_fn)
    # data = pad(data, padding, mode=padder.pad_function)
    # print 'padder.calls =', padder.calls
    # Third iteration: two-way padding with highlights
    if highlights is not None:
        assert len(highlights) == data.shape[0]
    # Pad the tile count up to height*width and add a padsize border on both
    # sides of each spatial axis; channel axis (if any) is left unpadded.
    padding = ((0, width * height - data.shape[0]), (padsize, padsize), (padsize, padsize)) + ((0, 0),) * (
        data.ndim - 3)
    # First pad with constant vals
    try:
        len(padval)
    except:
        # Scalar padval -> len-1 tuple so the len checks below work
        padval = tuple((padval,))
    assert len(padval) in (1, 3), 'padval should be grayscale (len 1) or color (len 3)'
    if len(padval) == 1:
        data = pad(data, padding, mode='constant', constant_values=(padval, padval))
    else:
        # pad() only takes scalar fill values, so pad with 0 then paint each
        # channel of the border with the requested color.
        data = pad(data, padding, mode='constant', constant_values=(0, 0))
        for cc in (0, 1, 2):
            # Replace 0s with proper color in each channel
            data[:padding[0][0], :, :, cc] = padval[cc]
            if padding[0][1] > 0:
                data[-padding[0][1]:, :, :, cc] = padval[cc]
            data[:, :padding[1][0], :, cc] = padval[cc]
            if padding[1][1] > 0:
                data[:, -padding[1][1]:, :, cc] = padval[cc]
            data[:, :, :padding[2][0], cc] = padval[cc]
            if padding[2][1] > 0:
                data[:, :, -padding[2][1]:, cc] = padval[cc]
    if highlights is not None:
        # Then highlight if necessary
        for ii, highlight in enumerate(highlights):
            if highlight is not None:
                data[ii, :padding[1][0], :, :] = highlight
                if padding[1][1] > 0:
                    data[ii, -padding[1][1]:, :, :] = highlight
                data[ii, :, :padding[2][0], :] = highlight
                if padding[2][1] > 0:
                    data[ii, :, -padding[2][1]:, :] = highlight
    # tile the filters into an image
    data = data.reshape((height, width) + data.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))
    data = data.reshape((height * data.shape[1], width * data.shape[3]) + data.shape[4:])
    data = data[0:-padsize, 0:-padsize]  # remove excess padding
    return (height, width), data
def to_255(vals_01):
    '''Convert vals in [0,1] to [0,255].

    Accepts a scalar, list, or tuple; tuples come back as tuples, other
    iterables as lists, scalars as scalars.
    '''
    try:
        scaled = [component * 255 for component in vals_01]
    except TypeError:
        # Not iterable (single int or float)
        return vals_01 * 255
    # Preserve tuple-ness of the input; other iterables come back as a list
    return tuple(scaled) if type(vals_01) is tuple else scaled
def ensure_uint255_and_resize_to_fit(img, out_max_shape,
                                     shrink_interpolation=cv2.INTER_LINEAR,
                                     grow_interpolation=cv2.INTER_NEAREST):
    '''Convert img to uint8 (floats are scaled by 255) and resize it so it
    fits within out_max_shape. See resize_to_fit for the fitting rules.'''
    converted = ensure_uint255(img)
    return resize_to_fit(converted, out_max_shape,
                         dtype_out='uint8',
                         shrink_interpolation=shrink_interpolation,
                         grow_interpolation=grow_interpolation)
def ensure_uint255(arr):
    '''If data is float, multiply by 255 and convert to uint8. Else leave as uint8.

    Raises on any dtype other than uint8 / float32 / float64.
    '''
    if arr.dtype == 'uint8':
        # Already in the target representation; return unchanged
        return arr
    if arr.dtype in ('float32', 'float64'):
        # print 'extra check...'
        # assert arr.max() <= 1.1
        return array(arr * 255, dtype='uint8')
    raise Exception('ensure_uint255 expects uint8 or float input but got %s with range [%g,%g,].' % (
        arr.dtype, arr.min(), arr.max()))
def ensure_float01(arr, dtype_preference='float32'):
    '''If data is uint, convert to float and divide by 255. Else leave at float.

    dtype_preference: float dtype used when converting from uint8.
    Raises on any dtype other than uint8 / float32 / float64.
    '''
    # The two accepted dtype families are disjoint, so branch order is free
    if arr.dtype in ('float32', 'float64'):
        return arr
    if arr.dtype == 'uint8':
        # print 'extra check...'
        # assert arr.max() <= 256
        return array(arr, dtype=dtype_preference) / 255
    raise Exception('ensure_float01 expects uint8 or float input but got %s with range [%g,%g,].' % (
        arr.dtype, arr.min(), arr.max()))
def resize_to_fit(img, out_max_shape,
                  dtype_out=None,
                  shrink_interpolation=cv2.INTER_LINEAR,
                  grow_interpolation=cv2.INTER_NEAREST):
    '''Resizes to fit within out_max_shape. If ratio is different,
    returns an image that fits but is smaller along one of the two
    dimensions.

    img: (H, W) or (H, W, C) array.
    out_max_shape: (max_H, max_W) bound on the output size.
    dtype_out: optional output dtype; the conversion is performed on
        whichever side of the resize touches fewer bytes (see below).
    shrink_interpolation / grow_interpolation: cv2 interpolation flags
        used when scaling down / up respectively.

    If one of the out_max_shape dimensions is None, then use only the other dimension to perform resizing.

    Timing info on MBP Retina with OpenBlas:
    - conclusion: uint8 is always tied or faster. float64 is slower.

    Scaling down:
    In [79]: timeit.Timer('resize_to_fit(aa, (200,200))', setup='from kerasvis.app import resize_to_fit; import numpy as np; aa = array(np.random.uniform(0,255,(1000,1000,3)), dtype="uint8")').timeit(100)
    Out[79]: 0.04950380325317383
    In [77]: timeit.Timer('resize_to_fit(aa, (200,200))', setup='from kerasvis.app import resize_to_fit; import numpy as np; aa = array(np.random.uniform(0,255,(1000,1000,3)), dtype="float32")').timeit(100)
    Out[77]: 0.049156904220581055
    In [76]: timeit.Timer('resize_to_fit(aa, (200,200))', setup='from kerasvis.app import resize_to_fit; import numpy as np; aa = array(np.random.uniform(0,255,(1000,1000,3)), dtype="float64")').timeit(100)
    Out[76]: 0.11808204650878906

    Scaling up:
    In [68]: timeit.Timer('resize_to_fit(aa, (2000,2000))', setup='from kerasvis.app import resize_to_fit; import numpy as np; aa = array(np.random.uniform(0,255,(1000,1000,3)), dtype="uint8")').timeit(100)
    Out[68]: 0.4357950687408447
    In [70]: timeit.Timer('resize_to_fit(aa, (2000,2000))', setup='from kerasvis.app import resize_to_fit; import numpy as np; aa = array(np.random.uniform(0,255,(1000,1000,3)), dtype="float32")').timeit(100)
    Out[70]: 1.3411099910736084
    In [73]: timeit.Timer('resize_to_fit(aa, (2000,2000))', setup='from kerasvis.app import resize_to_fit; import numpy as np; aa = array(np.random.uniform(0,255,(1000,1000,3)), dtype="float64")').timeit(100)
    Out[73]: 2.6078310012817383
    '''
    if dtype_out is not None and img.dtype != dtype_out:
        # Convert before the resize when the output dtype is smaller than the
        # input dtype (cheaper to resize fewer bytes), otherwise convert after.
        dtype_in_size = img.dtype.itemsize
        dtype_out_size = dtype(dtype_out).itemsize
        convert_early = (dtype_out_size < dtype_in_size)
        convert_late = not convert_early
    else:
        convert_early = False
        convert_late = False
    # Choose the single scale factor that fits every constrained dimension
    if img.shape[0] == 0 and img.shape[1] == 0:
        # Degenerate empty image: nothing to scale
        scale = 1
    elif out_max_shape[0] is None or img.shape[0] == 0:
        scale = float(out_max_shape[1]) / img.shape[1]
    elif out_max_shape[1] is None or img.shape[1] == 0:
        scale = float(out_max_shape[0]) / img.shape[0]
    else:
        scale = min(float(out_max_shape[0]) / img.shape[0],
                    float(out_max_shape[1]) / img.shape[1])
    if convert_early:
        img = array(img, dtype=dtype_out)
    out = cv2.resize(img,
                     (int(img.shape[1] * scale), int(img.shape[0] * scale)),  # in (c,r) order
                     interpolation=grow_interpolation if scale > 1 else shrink_interpolation)
    if convert_late:
        out = array(out, dtype=dtype_out)
    return out
class FormattedString(object):
    '''A piece of text plus its cv2.putText formatting.

    Any formatting argument left falsy falls back to the matching entry in
    the defaults dict ('align' falls back to 'left' when absent there).
    width=None means the rendered width is computed automatically.
    '''

    def __init__(self, string, defaults, face=None, fsize=None, clr=None, thick=None, align=None, width=None):
        self.string = string
        # `x or default` matches the original truthiness-based fallback
        self.face = face or defaults['face']
        self.fsize = fsize or defaults['fsize']
        self.clr = clr or defaults['clr']
        self.thick = thick or defaults['thick']
        self.width = width  # if None: calculate width automatically
        self.align = align or defaults.get('align', 'left')
def cv2_typeset_text(data, lines, loc, between=' ', string_spacing=0, line_spacing=0, wrap=False):
    '''Typesets mutliple strings on multiple lines of text, where each string may have its own formatting.

    Given:
    data: as in cv2.putText
    loc: as in cv2.putText
    lines: list of lists of FormattedString objects, may be modified by this function!
    between: what to insert between each string on each line, ala str.join
    string_spacing: extra spacing to insert between strings on a line
    line_spacing: extra spacing to insert between lines
    wrap: if true, wraps words to next line

    Returns:
    locy: new y location = loc[1] + y-offset resulting from lines of text
    '''
    data_width = data.shape[1]
    # lines_modified = False
    # lines = lines_in  # will be deepcopied if modification is needed later
    # Normalize input: a single FormattedString or a single flat line becomes
    # the canonical list-of-lines format.
    if isinstance(lines, FormattedString):
        lines = [lines]
    assert isinstance(lines,
                      list), 'lines must be a list of lines or list of FormattedString objects or a single FormattedString object'
    if len(lines) == 0:
        return loc[1]
    if not isinstance(lines[0], list):
        # If a single line of text is given as a list of strings, convert to multiline format
        lines = [lines]
    locy = loc[1]
    # Indexed while-loop rather than a for-loop because wrapping inserts new
    # lines into `lines` below the current one.
    line_num = 0
    while line_num < len(lines):
        line = lines[line_num]
        maxy = 0
        locx = loc[0]
        for ii, fs in enumerate(line):
            last_on_line = (ii == len(line) - 1)
            if not last_on_line:
                # NOTE: mutates the caller's FormattedString objects
                fs.string += between
            boxsize, _ = cv2.getTextSize(fs.string, fs.face, fs.fsize, fs.thick)
            if fs.width is not None:
                # Fixed-width field: shift the start so text lands right/center-aligned
                if fs.align == 'right':
                    locx += fs.width - boxsize[0]
                elif fs.align == 'center':
                    locx += (fs.width - boxsize[0]) / 2
            # print 'right boundary is', locx + boxsize[0], '(%s)' % fs.string
            # print 'HERE'
            right_edge = locx + boxsize[0]
            if wrap and ii > 0 and right_edge > data_width:
                # Wrap rest of line to the next line
                # if not lines_modified:
                #     lines = deepcopy(lines_in)
                #     lines_modified = True
                new_this_line = line[:ii]
                new_next_line = line[ii:]
                lines[line_num] = new_this_line
                lines.insert(line_num + 1, new_next_line)
                break
                ###line_num += 1
                ###continue
            cv2.putText(data, fs.string, (locx, locy), fs.face, fs.fsize, fs.clr, fs.thick)
            maxy = max(maxy, boxsize[1])
            if fs.width is not None:
                # Advance to the end of the fixed-width field, accounting for
                # the alignment shift applied before drawing.
                if fs.align == 'right':
                    locx += boxsize[0]
                elif fs.align == 'left':
                    locx += fs.width
                elif fs.align == 'center':
                    locx += fs.width - (fs.width - boxsize[0]) / 2
            else:
                locx += boxsize[0]
            locx += string_spacing
        line_num += 1
        locy += maxy + line_spacing
    return locy
def saveimage(filename, im):
    '''Saves an image with pixel values in [0,1]'''
    # matplotlib.image.imsave(filename, im)
    scaled = 255 * im
    if len(im.shape) == 3:
        # Reverse RGB to OpenCV BGR order for color images
        scaled = scaled[:, :, ::-1]
    cv2.imwrite(filename, scaled)
def saveimagesc(filename, im):
    # Rescale im to span the full [0, 1] range (via norm01) before saving.
    saveimage(filename, norm01(im))
def saveimagescc(filename, im, center):
    # Normalize im symmetrically about `center` (via norm01c) before saving.
    saveimage(filename, norm01c(im, center))
| 34.617284
| 208
| 0.57191
| 4,396
| 30,844
| 3.895814
| 0.130346
| 0.009634
| 0.016817
| 0.00654
| 0.421581
| 0.366986
| 0.317879
| 0.284713
| 0.250204
| 0.246701
| 0
| 0.041885
| 0.299475
| 30,844
| 890
| 209
| 34.65618
| 0.750729
| 0.234827
| 0
| 0.355009
| 0
| 0
| 0.031495
| 0
| 0
| 0
| 0
| 0
| 0.017575
| 1
| 0.049209
| false
| 0
| 0.017575
| 0
| 0.119508
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0b78d074db83725adcb792c0532db942f29eb42
| 5,702
|
py
|
Python
|
py/test/selenium/webdriver/common/window_tests.py
|
ey-advisory-technology-testing/selenium
|
7e342d3b8eb913a9626475a158c4bc6ae5d68315
|
[
"Apache-2.0"
] | 1
|
2020-10-06T16:55:46.000Z
|
2020-10-06T16:55:46.000Z
|
py/test/selenium/webdriver/common/window_tests.py
|
ey-advisory-technology-testing/selenium
|
7e342d3b8eb913a9626475a158c4bc6ae5d68315
|
[
"Apache-2.0"
] | 2
|
2020-10-12T13:27:19.000Z
|
2020-10-12T15:32:45.000Z
|
py/test/selenium/webdriver/common/window_tests.py
|
ey-advisory-technology-testing/selenium
|
7e342d3b8eb913a9626475a158c4bc6ae5d68315
|
[
"Apache-2.0"
] | 1
|
2019-03-18T14:38:08.000Z
|
2019-03-18T14:38:08.000Z
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import pytest
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.support.wait import WebDriverWait
# @pytest.mark.xfail_ie
# @pytest.mark.xfail_chromiumedge(reason="Fails on Travis")
# @pytest.mark.xfail_firefox(reason="Fails on Travis")
# @pytest.mark.xfail_remote(reason="Fails on Travis")
# def testShouldMaximizeTheWindow(driver):
# resize_timeout = 5
# wait = WebDriverWait(driver, resize_timeout)
# old_size = driver.get_window_size()
# driver.set_window_size(200, 200)
# wait.until(
# lambda dr: dr.get_window_size() != old_size if old_size["width"] != 200 and old_size["height"] != 200 else True)
# size = driver.get_window_size()
# driver.maximize_window()
# wait.until(lambda dr: dr.get_window_size() != size)
# new_size = driver.get_window_size()
# assert new_size["width"] > size["width"]
# assert new_size["height"] > size["height"]
def test_should_get_the_size_of_the_current_window(driver):
    """The reported window size has strictly positive width and height."""
    dimensions = driver.get_window_size()
    assert dimensions.get('width') > 0
    assert dimensions.get('height') > 0
def test_should_set_the_size_of_the_current_window(driver):
    """Shrinking the window by 20px per dimension is reflected by get_window_size."""
    original = driver.get_window_size()
    target_width = original.get('width') - 20
    target_height = original.get('height') - 20
    driver.set_window_size(width=target_width, height=target_height)
    resized = driver.get_window_size()
    assert resized.get('width') == target_width
    assert resized.get('height') == target_height
def test_should_get_the_position_of_the_current_window(driver):
    """The window position has non-negative x and y coordinates."""
    pos = driver.get_window_position()
    assert pos.get('x') >= 0
    assert pos.get('y') >= 0
def test_should_set_the_position_of_the_current_window(driver):
    """Moving the window by (10, 10) is reflected by get_window_position."""
    start = driver.get_window_position()
    target_x = start.get('x') + 10
    target_y = start.get('y') + 10
    driver.set_window_position(x=target_x, y=target_y)
    # Window managers apply moves asynchronously; wait until both coords change.
    WebDriverWait(driver, 2).until(
        lambda d: d.get_window_position()['x'] != start['x'] and d.get_window_position()['y'] != start['y'])
    moved = driver.get_window_position()
    assert moved.get('x') == target_x
    assert moved.get('y') == target_y
@pytest.mark.xfail_safari(raises=WebDriverException,
                          reason='Get Window Rect command not implemented')
def test_should_get_the_rect_of_the_current_window(driver):
    """The window rect reports non-negative position and size."""
    rect = driver.get_window_rect()
    for key in ('x', 'y', 'width', 'height'):
        assert rect.get(key) >= 0
@pytest.mark.xfail_safari(raises=WebDriverException,
                          reason='Get Window Rect command not implemented')
def test_should_set_the_rect_of_the_current_window(driver):
    """Setting a window rect moved and grown by 10 is reflected by get_window_rect."""
    old = driver.get_window_rect()
    target = {key: old.get(key) + 10 for key in ('x', 'y', 'width', 'height')}
    driver.set_window_rect(**target)
    # Wait for the asynchronous move to land before reading the rect back.
    WebDriverWait(driver, 2).until(
        lambda d: d.get_window_position()['x'] != old['x'] and d.get_window_position()['y'] != old['y'])
    new_rect = driver.get_window_rect()
    for key in ('x', 'y', 'width', 'height'):
        assert new_rect.get(key) == target[key]
# @pytest.mark.xfail_safari(raises=WebDriverException,
# reason='Fullscreen command not implemented')
# @pytest.mark.skipif(os.environ.get('TRAVIS') == 'true',
# reason='Fullscreen command causes Travis to hang')
# @pytest.mark.no_driver_after_test
# def test_should_fullscreen_the_current_window(driver):
# start_width = driver.execute_script('return window.innerWidth;')
# start_height = driver.execute_script('return window.innerHeight;')
# driver.fullscreen_window()
# WebDriverWait(driver, 2)\
# .until(lambda d: driver.execute_script('return window.innerWidth;') > start_width)
# end_width = driver.execute_script('return window.innerWidth;')
# end_height = driver.execute_script('return window.innerHeight;')
# driver.quit() # Kill driver so we aren't running fullscreen after
# assert end_width > start_width
# assert end_height > start_height
# @pytest.mark.xfail_safari(raises=WebDriverException,
# reason='Minimize command not implemented')
# @pytest.mark.skipif(os.environ.get('TRAVIS') == 'true',
# reason='Minimize command causes Travis to hang')
# @pytest.mark.no_driver_after_test
# def test_should_minimize_the_current_window(driver):
# driver.minimize_window()
# minimized = driver.execute_script('return document.hidden;')
# driver.quit() # Kill driver so we aren't running minimized after
# assert minimized is True
| 38.268456
| 122
| 0.708699
| 782
| 5,702
| 4.939898
| 0.200767
| 0.046596
| 0.046596
| 0.04556
| 0.53663
| 0.528346
| 0.412374
| 0.345845
| 0.273363
| 0.234533
| 0
| 0.009344
| 0.174149
| 5,702
| 148
| 123
| 38.527027
| 0.811
| 0.518239
| 0
| 0.222222
| 0
| 0
| 0.06108
| 0
| 0
| 0
| 0
| 0
| 0.296296
| 1
| 0.111111
| false
| 0
| 0.074074
| 0
| 0.185185
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0b811e924c93fc02a9d9d5f223ad493413f5e6c
| 21,307
|
py
|
Python
|
psydac/cad/geometry.py
|
mayuri-dhote/psydac
|
01ddbe2d049a599684c45060912d01c2658160a3
|
[
"MIT"
] | 5
|
2018-03-13T13:50:26.000Z
|
2018-12-22T14:04:11.000Z
|
psydac/cad/geometry.py
|
mayuri-dhote/psydac
|
01ddbe2d049a599684c45060912d01c2658160a3
|
[
"MIT"
] | 3
|
2019-02-08T13:29:47.000Z
|
2019-03-06T17:23:08.000Z
|
psydac/cad/geometry.py
|
mayuri-dhote/psydac
|
01ddbe2d049a599684c45060912d01c2658160a3
|
[
"MIT"
] | 1
|
2018-12-15T09:55:12.000Z
|
2018-12-15T09:55:12.000Z
|
# coding: utf-8
#
# a Geometry class contains the list of patches and additional information about
# the topology i.e. connectivity, boundaries
# For the moment, it is used as a container, that can be loaded from a file
# (hdf5)
from itertools import product
from collections import abc
import numpy as np
import string
import random
import h5py
import yaml
import os
import string
import random
from mpi4py import MPI
from psydac.fem.splines import SplineSpace
from psydac.fem.tensor import TensorFemSpace
from psydac.mapping.discrete import SplineMapping, NurbsMapping
from sympde.topology import Domain, Line, Square, Cube, NCubeInterior
from sympde.topology.basic import Union
#==============================================================================
class Geometry( object ):
    """Container for a (possibly multi-patch) discrete geometry.

    Holds the topological domain, one mapping per interior patch, and an
    MPI communicator.  Can be built from a (domain, mappings) pair, read
    from an HDF5 geometry file, created from a single discrete mapping,
    or from a purely topological domain.
    """
    _ldim     = None   # logical (parametric) dimension
    _pdim     = None   # physical dimension
    _patches  = []
    _topology = None

    #--------------------------------------------------------------------------
    # Option [1]: from a (domain, mappings) or a file
    #--------------------------------------------------------------------------
    def __init__( self, domain=None, mappings=None,
                  filename=None, comm=MPI.COMM_WORLD ):
        """Build from a (domain, mappings) pair, or read from an HDF5 file.

        Exactly one of `filename` / `domain` must be given; `mappings` is a
        dict {interior patch name: mapping} and is required with `domain`.
        """
        # ... read the geometry if the filename is given
        if not( filename is None ):
            self.read(filename, comm=comm)

        elif not( domain is None ):
            assert( isinstance( domain, Domain ) )
            assert( not( mappings is None ))
            assert isinstance( mappings, dict)

            # ... check sanity: exactly one mapping per interior patch name
            interior_names = sorted(domain.interior_names)
            mappings_keys  = sorted(list(mappings.keys()))

            assert( interior_names == mappings_keys )
            # ...

            self._domain   = domain
            self._ldim     = domain.dim
            self._pdim     = domain.dim  # TODO must be given => only dim is defined for a Domain
            self._mappings = mappings

        else:
            raise ValueError('Wrong input')
        # ...

        self._comm = comm

    #--------------------------------------------------------------------------
    # Option [2]: from a discrete mapping
    #--------------------------------------------------------------------------
    @classmethod
    def from_discrete_mapping( cls, mapping, comm=None ):
        """Create a geometry from one discrete mapping."""
        # Only 2D/3D are supported; the logical domain is a Square/Cube
        # named 'Omega'.
        if mapping.ldim in [1]:
            raise NotImplementedError('')

        if mapping.ldim == 2:
            domain = Square(name='Omega')
            mappings = {'Omega': mapping}
            return Geometry(domain=domain, mappings=mappings, comm=comm)

        elif mapping.ldim == 3:
            domain = Cube(name='Omega')
            mappings = {'Omega': mapping}
            return Geometry(domain=domain, mappings=mappings, comm=comm)

    #--------------------------------------------------------------------------
    # Option [3]: discrete topological line/square/cube
    #--------------------------------------------------------------------------
    @classmethod
    def from_topological_domain(cls, domain, ncells, comm=None):
        """Create a mapping-less geometry from an NCube topological domain.

        ncells: number of cells per direction, stored on the result.
        """
        interior = domain.interior
        if not isinstance(interior, Union):
            interior = [interior]

        for itr in interior:
            if not isinstance(itr, NCubeInterior):
                msg = "Topological domain must be an NCube;"\
                      " got {} instead.".format(type(itr))
                raise TypeError(msg)

        # No discrete mappings: each patch maps to None
        mappings = {itr.name: None for itr in interior}
        geo = Geometry(domain=domain, mappings=mappings, comm=comm)
        geo.ncells = ncells

        return geo

    #--------------------------------------------------------------------------
    @property
    def ldim(self):
        # Logical (parametric) dimension
        return self._ldim

    @property
    def pdim(self):
        # Physical dimension
        return self._pdim

    @property
    def comm(self):
        # MPI communicator (may be None)
        return self._comm

    @property
    def domain(self):
        # Topological domain
        return self._domain

    @property
    def mappings(self):
        # Dict {patch name: mapping}
        return self._mappings

    def __len__(self):
        # Number of patches, delegated to the topological domain
        return len(self.domain)

    def read( self, filename, comm=MPI.COMM_WORLD ):
        """Load dimensions, domain and patch mappings from an HDF5 geometry file."""
        # ... check extension of the file
        basename, ext = os.path.splitext(filename)
        if not(ext == '.h5'):
            raise ValueError('> Only h5 files are supported')
        # ...

        # read the topological domain
        domain = Domain.from_file(filename)

        # Open with the parallel (mpio) driver only when more than one rank
        if not(comm is None):
            kwargs = dict( driver='mpio', comm=comm ) if comm.size > 1 else {}
        else:
            kwargs = {}

        h5  = h5py.File( filename, mode='r', **kwargs )
        yml = yaml.load( h5['geometry.yml'][()], Loader=yaml.SafeLoader )

        ldim = yml['ldim']
        pdim = yml['pdim']

        n_patches = len( yml['patches'] )

        # ...
        if n_patches == 0:
            h5.close()
            raise ValueError( "Input file contains no patches." )
        # ...

        # ... read patchs
        mappings = {}
        for i_patch in range( n_patches ):

            item       = yml['patches'][i_patch]
            patch_name = item['name']
            mapping_id = item['mapping_id']
            dtype      = item['type']
            patch      = h5[mapping_id]
            if dtype in ['SplineMapping', 'NurbsMapping']:

                degree   = [int (p) for p in patch.attrs['degree' ]]
                periodic = [bool(b) for b in patch.attrs['periodic']]
                knots    = [patch['knots_{}'.format(d)][:] for d in range( ldim )]

                # Rebuild the tensor-product spline space of this patch
                spaces = [SplineSpace( degree=p, knots=k, periodic=b )
                          for p,k,b in zip( degree, knots, periodic )]

                tensor_space = TensorFemSpace( *spaces, comm=comm )

                if dtype == 'SplineMapping':
                    mapping = SplineMapping.from_control_points( tensor_space,
                                                                 patch['points'][..., :pdim] )

                elif dtype == 'NurbsMapping':
                    mapping = NurbsMapping.from_control_points_weights( tensor_space,
                                                                        patch['points'][..., :pdim],
                                                                        patch['weights'] )

                mapping.set_name( item['name'] )

                mappings[patch_name] = mapping
        # ...

        # ... close the h5 file
        h5.close()
        # ...

        # ...
        self._ldim     = ldim
        self._pdim     = pdim
        self._mappings = mappings
        self._domain   = domain
        # ...

    def export( self, filename ):
        """
        Write this geometry (metadata, topology and patch data) to HDF5.

        Parameters
        ----------
        filename : str
          Name of HDF5 output file.
        """
        # ...
        comm = self.comm
        # ...

        # Create dictionary with geometry metadata
        yml = {}
        yml['ldim'] = self.ldim
        yml['pdim'] = self.pdim

        # ... information about the patches
        if not( self.mappings ):
            raise ValueError('No mappings were found')

        patches_info = []
        i_mapping    = 0
        for patch_name, mapping in self.mappings.items():
            name       = '{}'.format( patch_name )
            mapping_id = 'mapping_{}'.format( i_mapping )
            dtype      = '{}'.format( type( mapping ).__name__ )

            patches_info += [{'name': name,
                              'mapping_id': mapping_id,
                              'type': dtype}]

            i_mapping += 1

        yml['patches'] = patches_info
        # ...

        # ... topology
        topo_yml = self.domain.todict()
        # ...

        # Create HDF5 file (in parallel mode if MPI communicator size > 1)
        if not(comm is None) and comm.size > 1:
            kwargs = dict( driver='mpio', comm=comm )
        else:
            kwargs = {}

        h5 = h5py.File( filename, mode='w', **kwargs )
        # ...

        # Dump geometry metadata to string in YAML file format
        geo = yaml.dump( data = yml, sort_keys=False)
        # Write geometry metadata as fixed-length array of ASCII characters
        h5['geometry.yml'] = np.array( geo, dtype='S' )
        # ...

        # ...
        # Dump geometry metadata to string in YAML file format
        geo = yaml.dump( data = topo_yml, sort_keys=False)
        # Write topology metadata as fixed-length array of ASCII characters
        h5['topology.yml'] = np.array( geo, dtype='S' )
        # ...

        i_mapping = 0
        for patch_name, mapping in self.mappings.items():

            space = mapping.space

            # Create group for patch 0
            group = h5.create_group( yml['patches'][i_mapping]['mapping_id'] )
            group.attrs['shape'    ] = space.vector_space.npts
            group.attrs['degree'   ] = space.degree
            group.attrs['rational' ] = False  # TODO remove
            group.attrs['periodic' ] = space.periodic
            for d in range( self.ldim ):
                group['knots_{}'.format( d )] = space.spaces[d].knots

            # Collective: create dataset for control points
            shape = [n for n in space.vector_space.npts] + [self.pdim]
            dtype = space.vector_space.dtype
            dset  = group.create_dataset( 'points', shape=shape, dtype=dtype )

            # Independent: write control points to dataset
            starts = space.vector_space.starts
            ends   = space.vector_space.ends
            index  = [slice(s, e+1) for s, e in zip(starts, ends)] + [slice(None)]
            index  = tuple( index )
            dset[index] = mapping.control_points[index]

            # case of NURBS
            if isinstance(mapping, NurbsMapping):
                # Collective: create dataset for weights
                shape = [n for n in space.vector_space.npts]
                dtype = space.vector_space.dtype
                dset  = group.create_dataset( 'weights', shape=shape, dtype=dtype )

                # Independent: write weights to dataset
                starts = space.vector_space.starts
                ends   = space.vector_space.ends
                index  = [slice(s, e+1) for s, e in zip(starts, ends)]
                index  = tuple( index )
                dset[index] = mapping.weights[index]

            i_mapping += 1

        # Close HDF5 file
        h5.close()
#==============================================================================
def export_nurbs_to_hdf5(filename, nurbs, periodic=None, comm=None ):
    """
    Export a single-patch igakit NURBS object to a Psydac geometry file in HDF5 format

    Parameters
    ----------
    filename : <str>
      Name of output geometry file, e.g. 'geo.h5'

    nurbs : <igakit.nurbs.NURBS>
      igakit geometry nurbs object

    periodic : <list | None>
      per-direction periodicity flags; defaults to all False

    comm : <MPI.COMM>
      mpi communicator
    """
    import os.path
    import igakit
    assert isinstance(nurbs, igakit.nurbs.NURBS)

    extension = os.path.splitext(filename)[-1]
    if not extension == '.h5':
        raise ValueError('> Only h5 extension is allowed for filename')

    # Geometry metadata: a single patch whose logical and physical
    # dimensions both equal the NURBS parametric dimension
    yml = {}
    yml['ldim'] = nurbs.dim
    yml['pdim'] = nurbs.dim

    patches_info = []
    i_mapping    = 0
    i            = 0

    # Rational (true NURBS) iff some weight differs from 1
    rational = not abs(nurbs.weights-1).sum()<1e-15

    patch_name = 'patch_{}'.format(i)
    name       = '{}'.format( patch_name )
    mapping_id = 'mapping_{}'.format( i_mapping )
    dtype      = 'NurbsMapping' if rational else 'SplineMapping'

    patches_info += [{'name': name , 'mapping_id':mapping_id, 'type':dtype}]

    yml['patches'] = patches_info
    # ...

    # Create HDF5 file (in parallel mode if MPI communicator size > 1)
    if not(comm is None) and comm.size > 1:
        kwargs = dict( driver='mpio', comm=comm )
    else:
        kwargs = {}

    h5 = h5py.File( filename, mode='w', **kwargs )
    # ...

    # Dump geometry metadata to string in YAML file format
    geom = yaml.dump( data = yml, sort_keys=False)
    # Write geometry metadata as fixed-length array of ASCII characters
    h5['geometry.yml'] = np.array( geom, dtype='S' )
    # ...

    # ... topology: build the logical Line/Square/Cube from the breakpoints
    if nurbs.dim == 1:
        bounds1 = (float(nurbs.breaks(0)[0]), float(nurbs.breaks(0)[-1]))
        domain = Line(patch_name, bounds1=bounds1)

    elif nurbs.dim == 2:
        bounds1 = (float(nurbs.breaks(0)[0]), float(nurbs.breaks(0)[-1]))
        bounds2 = (float(nurbs.breaks(1)[0]), float(nurbs.breaks(1)[-1]))
        domain = Square(patch_name, bounds1=bounds1, bounds2=bounds2)

    elif nurbs.dim == 3:
        bounds1 = (float(nurbs.breaks(0)[0]), float(nurbs.breaks(0)[-1]))
        bounds2 = (float(nurbs.breaks(1)[0]), float(nurbs.breaks(1)[-1]))
        bounds3 = (float(nurbs.breaks(2)[0]), float(nurbs.breaks(2)[-1]))
        domain = Cube(patch_name, bounds1=bounds1, bounds2=bounds2, bounds3=bounds3)

    topo_yml = domain.todict()

    # Dump geometry metadata to string in YAML file format
    geom = yaml.dump( data = topo_yml, sort_keys=False)
    # Write topology metadata as fixed-length array of ASCII characters
    h5['topology.yml'] = np.array( geom, dtype='S' )

    # Patch group: degree/rational/periodic attributes, knots, points, weights
    group = h5.create_group( yml['patches'][i]['mapping_id'] )
    group.attrs['degree'   ] = nurbs.degree
    group.attrs['rational' ] = rational
    group.attrs['periodic' ] = tuple( False for d in range( nurbs.dim ) ) if periodic is None else periodic

    for d in range( nurbs.dim ):
        group['knots_{}'.format( d )] = nurbs.knots[d]

    # Control points restricted to the parametric dimension; weights only
    # stored for rational geometries
    group['points'] = nurbs.points[...,:nurbs.dim]
    if rational:
        group['weights'] = nurbs.weights

    h5.close()
#==============================================================================
def refine_nurbs(nrb, ncells=None, degree=None, multiplicity=None, tol=1e-9):
    """
    Refine a NURBS geometry by knot insertion and degree elevation.

    A new grid is constructed from the requested number of cells, and the new
    break points are inserted into the knot sequence of ``nrb`` so that the
    total number of cells matches ``ncells``.  Knot insertion is used, hence
    the refined geometry is identical to the original one.  The degree can
    also be elevated, and interior knots repeated up to ``multiplicity``.

    Parameters
    ----------
    nrb : <igakit.nurbs.NURBS>
        geometry nurbs object
    ncells : <list>
        total number of cells in each direction
    degree : <list>
        degree in each direction
    multiplicity : <list>
        multiplicity of each knot in the knot sequence in each direction
    tol : <float>
        Minimum distance between two break points.

    Returns
    -------
    nrb : <igakit.nurbs.NURBS>
        the refined geometry nurbs object
    """
    if multiplicity is None:
        multiplicity = [1] * nrb.dim

    # Work on a copy so the caller's object is left untouched.
    nrb = nrb.clone()

    if ncells is not None:
        for axis in range(nrb.dim):
            ub = nrb.breaks(axis)[0]
            ue = nrb.breaks(axis)[-1]
            # Candidate break points of the new uniform grid.
            knots = np.linspace(ub, ue, ncells[axis] + 1)
            # Discard candidates that already exist (up to tol) in the knot
            # vector, to avoid raising their multiplicity unintentionally.
            index = nrb.knots[axis].searchsorted(knots)
            nrb_knots = nrb.knots[axis][index]
            for m, (nrb_k, k) in enumerate(zip(nrb_knots, knots)):
                if abs(k - nrb_k) < tol:
                    knots[m] = np.nan
            knots = knots[~np.isnan(knots)]
            # Keep exactly as many new knots as needed to reach ncells cells.
            indices = np.round(np.linspace(0, len(knots) - 1, ncells[axis] + 1 - len(nrb.breaks(axis)))).astype(int)
            knots = knots[indices]
            if len(knots) > 0:
                nrb.refine(axis, knots)

    if degree is not None:
        for axis in range(nrb.dim):
            d = degree[axis] - nrb.degree[axis]
            if d < 0:
                raise ValueError('The degree {} must be >= {}'.format(degree, nrb.degree))
            nrb.elevate(axis, times=d)

    # Raise the multiplicity of every knot up to the requested value.
    # ``decimals`` (significant digits implied by tol) is loop-invariant,
    # so it is hoisted out of the per-axis loop.
    decimals = abs(np.floor(np.log10(np.abs(tol))).astype(int))
    for axis in range(nrb.dim):
        knots, counts = np.unique(nrb.knots[axis].round(decimals=decimals), return_counts=True)
        counts = multiplicity[axis] - counts
        counts[counts < 0] = 0
        knots = np.repeat(knots, counts)
        nrb = nrb.refine(axis, knots)
    return nrb
def refine_knots(knots, ncells, degree, multiplicity=None, tol=1e-9):
    """
    Refine a set of knot sequences by knot insertion and degree elevation.

    A temporary NURBS object is built from ``knots``; new break points are
    inserted so that the total number of cells per direction equals
    ``ncells``, the degree is elevated to ``degree``, and interior knots are
    repeated up to ``multiplicity``.  Only the refined knot vectors are
    returned.

    Parameters
    ----------
    knots : <list>
        list of knot sequences in each direction
    ncells : <list>
        total number of cells in each direction
    degree : <list>
        degree in each direction
    multiplicity : <list>
        multiplicity of each knot in the knot sequence in each direction
    tol : <float>
        Minimum distance between two break points.

    Returns
    -------
    knots : <list>
        the refined knot sequences in each direction
    """
    from igakit.nurbs import NURBS

    dim = len(ncells)
    if multiplicity is None:
        multiplicity = [1] * dim
    assert len(knots) == dim

    nrb = NURBS(knots)

    # Knot insertion to reach the requested number of cells.
    # ``new_knots`` replaces the original loop variable that shadowed the
    # ``knots`` parameter.
    for axis in range(dim):
        ub = nrb.breaks(axis)[0]
        ue = nrb.breaks(axis)[-1]
        new_knots = np.linspace(ub, ue, ncells[axis] + 1)
        # Discard candidates already present (up to tol) in the knot vector.
        index = nrb.knots[axis].searchsorted(new_knots)
        nrb_knots = nrb.knots[axis][index]
        for m, (nrb_k, k) in enumerate(zip(nrb_knots, new_knots)):
            if abs(k - nrb_k) < tol:
                new_knots[m] = np.nan
        new_knots = new_knots[~np.isnan(new_knots)]
        # Keep exactly as many new knots as needed to reach ncells cells.
        indices = np.round(np.linspace(0, len(new_knots) - 1, ncells[axis] + 1 - len(nrb.breaks(axis)))).astype(int)
        new_knots = new_knots[indices]
        if len(new_knots) > 0:
            nrb.refine(axis, new_knots)

    # Degree elevation.
    for axis in range(dim):
        d = degree[axis] - nrb.degree[axis]
        if d < 0:
            raise ValueError('The degree {} must be >= {}'.format(degree, nrb.degree))
        nrb.elevate(axis, times=d)

    # Raise knot multiplicity; ``decimals`` is loop-invariant, hoisted out.
    decimals = abs(np.floor(np.log10(np.abs(tol))).astype(int))
    for axis in range(dim):
        unique_knots, counts = np.unique(nrb.knots[axis].round(decimals=decimals), return_counts=True)
        counts = multiplicity[axis] - counts
        counts[counts < 0] = 0
        extra = np.repeat(unique_knots, counts)
        nrb = nrb.refine(axis, extra)

    return nrb.knots
#==============================================================================
def import_geopdes_to_nurbs(filename):
    """
    Read a geopdes geometry file and convert it to an igakit nurbs object.

    Parameters
    ----------
    filename : <str>
        the filename of the geometry file

    Returns
    -------
    nrb : <igakit.nurbs.NURBS>
        the geometry nurbs object

    Raises
    ------
    ValueError
        if the file does not have a .txt extension.
    """
    extension = os.path.splitext(filename)[-1]
    if not extension == '.txt':
        raise ValueError('> Expected .txt extension')

    # Context manager guarantees the file is closed even if parsing fails.
    with open(filename) as f:
        lines = f.readlines()

    # Drop comment lines.  NOTE(review): only lines whose *first* character
    # is '#' are filtered; indented comments would be kept — confirm this
    # matches the geopdes format.
    lines = [line for line in lines if line[0].strip() != "#"]

    data = _read_header(lines[0])
    n_dim = data[0]
    r_dim = data[1]  # physical-space dimension (parsed but unused here)
    n_patchs = data[2]

    # Each patch occupies 3*n_dim + 1 data lines:
    # degree, shape, n_dim knot vectors, n_dim coordinate rows, weights.
    n_lines_per_patch = 3*n_dim + 1

    list_begin_line = _get_begin_line(lines, n_patchs)
    # Only the first patch is read, matching the original behavior.
    nrb = _read_patch(lines, 1, n_lines_per_patch, list_begin_line)
    return nrb
def _read_header(line):
chars = line.split(" ")
data = []
for c in chars:
try:
data.append(int(c))
except:
pass
return data
def _extract_patch_line(lines, i_patch):
text = "PATCH " + str(i_patch)
for i_line,line in enumerate(lines):
r = line.find(text)
if r != -1:
return i_line
return None
def _get_begin_line(lines, n_patchs):
    """Collect the starting line index of each patch, in patch order.

    Raises ValueError if any patch marker cannot be found.
    """
    list_begin_line = []
    for i_patch in range(1, n_patchs + 1):
        position = _extract_patch_line(lines, i_patch)
        if position is None:
            raise ValueError(" could not parse the input file")
        list_begin_line.append(position)
    return list_begin_line
def _read_line(line):
chars = line.split(" ")
data = []
for c in chars:
try:
data.append(int(c))
except:
try:
data.append(float(c))
except:
pass
return data
def _read_patch(lines, i_patch, n_lines_per_patch, list_begin_line):
    """Build an igakit NURBS object from the data lines of patch ``i_patch``.

    The patch data starts right after its marker line and consists of:
    degree, shape, one knot vector per axis, one coordinate row per axis,
    and finally the weights.
    """
    from igakit.nurbs import NURBS

    start = list_begin_line[i_patch - 1]
    data_patch = [_read_line(lines[k])
                  for k in range(start + 1, start + n_lines_per_patch + 1)]

    degree = data_patch[0]
    shape = data_patch[1]
    ndim = len(degree)

    # Knot vectors, control-point coordinate rows and weights, in that order.
    knots = [np.array(row) for row in data_patch[2:2 + ndim]]
    coords = [np.array(row) for row in data_patch[2 + ndim:2 + 2 * ndim]]
    weights = np.array(data_patch[2 + 2 * ndim])

    # Data is stored Fortran-ordered in the file.
    X = [c.reshape(shape, order='F') for c in coords]
    W = weights.reshape(shape, order='F')

    # Control points always carry 3 spatial components; unused ones stay 0.
    points = np.zeros((*shape, 3))
    for axis in range(len(shape)):
        points[..., axis] = X[axis]

    return NURBS(knots, control=points, weights=W)
| 31.659733
| 112
| 0.546487
| 2,533
| 21,307
| 4.512041
| 0.129491
| 0.008575
| 0.016799
| 0.008925
| 0.505994
| 0.474057
| 0.418497
| 0.407122
| 0.400822
| 0.366874
| 0
| 0.01093
| 0.304407
| 21,307
| 672
| 113
| 31.706845
| 0.760205
| 0.220397
| 0
| 0.386667
| 0
| 0
| 0.049562
| 0
| 0
| 0
| 0
| 0.002976
| 0.016
| 1
| 0.053333
| false
| 0.005333
| 0.056
| 0.016
| 0.170667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0b8df2c0cc835ac66fd2676a3d3a8a967b603f8
| 6,098
|
py
|
Python
|
utils.py
|
ok1zjf/AMNet
|
51b163eec63d6d1e2e3dbc140d19afdc7b4273ee
|
[
"MIT"
] | 40
|
2018-06-20T20:33:38.000Z
|
2022-03-21T02:00:34.000Z
|
utils.py
|
RMSnow/AMNet-Rumor
|
95321bb30a303994cfae769801207bbde91d77fb
|
[
"MIT"
] | 5
|
2018-07-26T17:23:07.000Z
|
2020-05-05T15:30:18.000Z
|
utils.py
|
RMSnow/AMNet-Rumor
|
95321bb30a303994cfae769801207bbde91d77fb
|
[
"MIT"
] | 10
|
2018-04-10T09:42:55.000Z
|
2021-04-19T19:01:27.000Z
|
__author__ = 'Jiri Fajtl'
__email__ = 'ok1zjf@gmail.com'
__version__= '2.2'
__status__ = "Research"
__date__ = "28/1/2018"
__license__= "MIT License"
import os
import numpy as np
import glob
import subprocess
import platform
import sys
import pkg_resources
import torch
import PIL as Image
try:
import cv2
except:
print("WARNING: Could not load OpenCV python package. Some functionality may not be available.")
def list_files(path, extensions=(), sort=True, max_len=-1):
    """Return full paths of files in ``path`` ending with one of ``extensions``.

    Fixes the mutable default argument (``extensions=[]`` -> ``()``) and
    replaces the inner list with a generator inside ``any``.

    Parameters
    ----------
    path : str
        Directory to scan (non-recursive); prints an error and returns []
        when it is not a directory.
    extensions : sequence of str
        Accepted filename suffixes; an empty sequence matches nothing.
    sort : bool
        Sort the result lexicographically.
    max_len : int
        If > -1, truncate the result to at most ``max_len`` entries.
    """
    if not os.path.isdir(path):
        print("ERROR. ", path, ' is not a directory!')
        return []

    filenames = [os.path.join(path, fn) for fn in os.listdir(path)
                 if any(fn.endswith(ext) for ext in extensions)]
    if sort:
        filenames.sort()
    if max_len > -1:
        filenames = filenames[:max_len]
    return filenames
def get_video_list(video_path, max_len=-1):
    """List video files (avi/flv/mpg/mp4) under ``video_path``, sorted."""
    video_extensions = ['avi', 'flv', 'mpg', 'mp4']
    return list_files(video_path, extensions=video_extensions, sort=True, max_len=max_len)
def get_image_list(video_path, max_len=-1):
    """List image files (jpg/jpeg/png) under ``video_path``, sorted."""
    image_extensions = ['jpg', 'jpeg', 'png']
    return list_files(video_path, extensions=image_extensions, sort=True, max_len=max_len)
def get_split_files(dataset_path, splits_path, split_name, absolute_path=False):
    """Glob split files matching ``split_name`` under dataset/splits.

    Parameters
    ----------
    dataset_path, splits_path : str
        Joined to form the directory searched.
    split_name : str
        Glob pattern for the split files, e.g. ``'test_*.txt'``.
    absolute_path : bool
        When True return the full matched paths, otherwise just basenames.
    """
    pattern = os.path.join(dataset_path, splits_path, split_name)
    files = sorted(glob.glob(pattern))
    if absolute_path:
        return files
    # Basename comprehension replaces the original manual split loop.
    return [os.path.basename(file) for file in files]
def get_max_rc_weights(experiment_path):
    """Find the checkpoint with the highest validation RC in a training log.

    Scans ``train_log_0.csv`` for 'val' rows (format: val,epoch,...,rc at
    column 4,...,mse at column 6) and returns the checkpoint of the best-RC
    epoch.

    Returns
    -------
    tuple
        (checkpoint_path, max_rc, max_mse, max_epoch), or ('', 0, 0, 0) when
        the log or the checkpoint file is missing.
    """
    log_filename = 'train_log_0.csv'
    log_path = os.path.join(experiment_path, log_filename)
    try:
        max_rc = 0
        max_epoch = -1
        max_mse = -1
        # with-statement closes the log even if parsing fails mid-file
        # (the original leaked the handle on any exception).
        with open(log_path, 'rt') as f:
            for line in f:
                toks = line.split(',')
                if toks[0] != 'val':
                    continue
                try:
                    rc = float(toks[4])
                    if rc > max_rc:
                        max_rc = rc
                        max_epoch = int(toks[1])
                        max_mse = float(toks[6])
                except (ValueError, IndexError):
                    # Header or malformed row; skip it.
                    pass
        chkpt_file = experiment_path + '/' + 'weights_' + str(max_epoch) + '.pkl'
        if not os.path.isfile(chkpt_file):
            print("WARNING: File ", chkpt_file, " does not exists!")
            return '', 0, 0, 0
        return chkpt_file, max_rc, max_mse, max_epoch
    except OSError:
        print('WARNING: Could not open ' + log_path)
        return '', 0, 0, 0
def get_split_index(split_filename):
    """Return the trailing integer index of a split filename.

    E.g. ``'test_5.txt'`` -> ``5``.  Renames the local that shadowed the
    builtin ``id``.
    """
    stem, _ = os.path.splitext(split_filename)
    split_index = int(stem.split('_')[-1])
    return split_index
def get_weight_files(split_files, experiment_name, max_rc_checkpoints=True):
    """Locate the trained weight checkpoint for each split file.

    For every split, looks under ``data/<experiment_name>_train_<id>/`` for
    ``.pkl`` checkpoints.  With several candidates, either the highest-RC
    checkpoint (via get_max_rc_weights) or the last-epoch one is chosen.
    Splits without weights yield an empty string.
    """
    data_dir = 'data'
    weight_files = []
    for split_filename in split_files:
        split_name, _ = os.path.splitext(split_filename)
        _, split_id = split_name.split('_')
        weight_files_all = os.path.join(data_dir, experiment_name + '_train_' + split_id + '/*.pkl')
        files = glob.glob(weight_files_all)
        if len(files) == 0:
            # No trained model weights for this split
            weight_files.append('')
            continue
        elif len(files) == 1:
            weight_files.append(files[0])
        else:
            # Multiple weights
            if max_rc_checkpoints:
                weights_dir = os.path.join(data_dir, experiment_name + '_train_' + split_id)
                print("Selecting model weights with the highest RC on validation set in ", weights_dir)
                weight_file, max_rc, max_mse, max_epoch = get_max_rc_weights(weights_dir)
                if weight_file != '':
                    # BUG FIX: the MSE value printed here used to be max_rc.
                    print('Found: ', weight_file, ' RC=', max_rc, ' MSE=', max_mse, ' epoch=', max_epoch)
                    weight_files.append(weight_file)
                    continue
            # Get the weights from the last training epoch
            files.sort(key=lambda x: get_split_index(x), reverse=True)
            weight_file = files[0]
            weight_files.append(weight_file)
    return weight_files
def run_command(command):
    """Run ``command`` and return its combined stdout/stderr output,
    one tab-indented line per output line.

    Uses subprocess.run, which waits for and reaps the child process; the
    original Popen + readlines left the process unreaped (zombie) and could
    deadlock on large stderr output.
    """
    result = subprocess.run(command.split(),
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    output = result.stdout.decode("utf-8")
    return '\n'.join('\t' + line.strip() for line in output.splitlines())
def ge_pkg_versions():
    """Collect versions of the display driver, CUDA stack and key Python
    packages into a dictionary keyed by component name."""
    versions = {}

    # NVIDIA display driver, read from procfs.
    versions['display'] = run_command('cat /proc/driver/nvidia/version')

    # CUDA toolkit version, if a version.txt is present under CUDA_HOME.
    versions['cuda'] = 'NA'
    cuda_home = os.environ.get('CUDA_HOME', '/usr/local/cuda/')
    version_file = cuda_home + '/version.txt'
    if os.path.isfile(version_file):
        versions['cuda'] = run_command('cat ' + version_file)

    versions['cudnn'] = torch.backends.cudnn.version()
    versions['platform'] = platform.platform()
    versions['python'] = sys.version_info[0]
    versions['torch'] = torch.__version__
    versions['numpy'] = np.__version__
    versions['PIL'] = Image.VERSION
    versions['OpenCV'] = 'NA'
    # cv2 is imported at module level inside a try block; only report its
    # version when the import actually succeeded.
    if 'cv2' in sys.modules:
        versions['OpenCV'] = cv2.__version__
    versions['torchvision'] = pkg_resources.get_distribution("torchvision").version
    return versions
def print_pkg_versions():
    """Pretty-print the component versions gathered by ge_pkg_versions()."""
    print("Packages & system versions:")
    print("----------------------------------------------------------------------")
    for name, version in ge_pkg_versions().items():
        print(name, ": ", version)
    print("")
    return
if __name__ == "__main__":
    # Smoke test: report environment/package versions, then resolve the
    # trained weight checkpoints for every LaMem test split.
    print_pkg_versions()
    split_files = get_split_files('datasets/lamem', 'splits', 'test_*.txt')
    print(split_files)
    weight_files = get_weight_files(split_files, experiment_name='lamem_ResNet50FC_lstm3_last', max_rc_checkpoints=True)
    # weight_files = get_weight_files(split_files, experiment_name='lamem_ResNet50FC_lstm3')
    print(weight_files)
| 30.49
| 120
| 0.611512
| 774
| 6,098
| 4.51938
| 0.250646
| 0.044025
| 0.017153
| 0.012007
| 0.218982
| 0.173242
| 0.156089
| 0.132075
| 0.094911
| 0.094911
| 0
| 0.009991
| 0.261397
| 6,098
| 200
| 121
| 30.49
| 0.766652
| 0.03083
| 0
| 0.095238
| 0
| 0
| 0.125804
| 0.020996
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068027
| false
| 0.006803
| 0.068027
| 0.013605
| 0.231293
| 0.095238
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0b9af8b61e8657e680602511286b7396f0d35fe
| 1,075
|
py
|
Python
|
axelrod/tests/strategies/test_mystrategy.py
|
AleksaLuka/Axelrod
|
5f2fefcb2bf8f371ef489382f90f116b46ac1023
|
[
"MIT"
] | null | null | null |
axelrod/tests/strategies/test_mystrategy.py
|
AleksaLuka/Axelrod
|
5f2fefcb2bf8f371ef489382f90f116b46ac1023
|
[
"MIT"
] | null | null | null |
axelrod/tests/strategies/test_mystrategy.py
|
AleksaLuka/Axelrod
|
5f2fefcb2bf8f371ef489382f90f116b46ac1023
|
[
"MIT"
] | null | null | null |
import axelrod as axl
from .test_player import TestPlayer
C, D = axl.Action.C, axl.Action.D
class TestMyStrategy(TestPlayer):
    """Unit tests for the MyStrategy player."""

    name = "MyStrategy"
    player = axl.mystrategy
    # Classifier the strategy is expected to advertise.
    # NOTE(review): test_strategy says the first move is random and plays
    # differ per seed — "stochastic": False may be wrong; confirm against
    # the strategy implementation.
    expected_classifier = {
        "memory_depth": 1,
        "stochastic": False,
        "long_run_time": False,
        "inspects_source": False,
        "manipulates_source": False,
        "manipulates_state": False,
    }

    def test_strategy(self):
        """Check play sequences vs Alternator, Cooperator and Defector
        under fixed seeds."""
        # First move is random.
        actions = [(C, C), (C, D), (D, C)]
        self.versus_test(
            opponent=axl.Alternator(), expected_actions=actions, seed=1
        )
        actions = [(C, C), (C, D), (D, C)]
        self.versus_test(
            opponent=axl.Alternator(), expected_actions=actions, seed=2
        )
        # Mutual cooperation is sustained against a cooperator.
        actions = [(C, C), (C, C), (C, C)]
        self.versus_test(
            opponent=axl.Cooperator(), expected_actions=actions, seed=1
        )
        # Defection is answered with defection.
        actions = [(C, D), (D, D), (D, D)]
        self.versus_test(
            opponent=axl.Defector(), expected_actions=actions, seed=2
        )
| 28.289474
| 71
| 0.563721
| 125
| 1,075
| 4.712
| 0.352
| 0.03056
| 0.03056
| 0.149406
| 0.455008
| 0.356537
| 0.312394
| 0.251273
| 0.251273
| 0.251273
| 0
| 0.006631
| 0.298605
| 1,075
| 37
| 72
| 29.054054
| 0.774536
| 0.019535
| 0
| 0.193548
| 0
| 0
| 0.090304
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032258
| false
| 0
| 0.064516
| 0
| 0.225806
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0be43ac7d66987096cd0a5bf59621233ca9d1a8
| 37,056
|
py
|
Python
|
src/audio_korpora_pipeline/inputadapter/adapters.py
|
WernerDreier/audio-korpora-pipeline
|
ac171cdfb0663c7b6250c06cc9c70a951b908251
|
[
"MIT"
] | 1
|
2020-09-11T05:27:58.000Z
|
2020-09-11T05:27:58.000Z
|
src/audio_korpora_pipeline/inputadapter/adapters.py
|
WernerDreier/audio-korpora-pipeline
|
ac171cdfb0663c7b6250c06cc9c70a951b908251
|
[
"MIT"
] | null | null | null |
src/audio_korpora_pipeline/inputadapter/adapters.py
|
WernerDreier/audio-korpora-pipeline
|
ac171cdfb0663c7b6250c06cc9c70a951b908251
|
[
"MIT"
] | null | null | null |
import concurrent
import os
import re
import shutil
import xml.etree.ElementTree as ET # TODO do we have this as requirement?
from concurrent.futures import as_completed
from concurrent.futures._base import as_completed
from pathlib import Path
import ffmpeg
import pandas as pd
import webrtcvad
from audio_korpora_pipeline.baseobjects import FileHandlingObject
from audio_korpora_pipeline.inputadapter.audiosplit.splitter import Splitter
from audio_korpora_pipeline.metamodel.mediasession import MediaAnnotationBundle, \
MediaAnnotationBundleWithoutTranscription, WrittenResource, MediaFile, \
MediaSessionActor, Sex, \
MediaSessionActors, MediaSession
class Adapter(FileHandlingObject):
    """Base class for corpus input adapters.

    Subclasses convert a raw corpus into the media-session metamodel via
    ``toMetamodel``.
    """

    def __init__(self, config):
        super(Adapter, self).__init__()
        # BUG FIX: store the config — skipAlreadyProcessedFiles reads
        # self.config, but the original __init__ discarded the argument and
        # relied on every subclass setting self.config itself.
        self.config = config

    def toMetamodel(self) -> MediaSession:
        """Convert the corpus to a MediaSession; must be overridden."""
        raise NotImplementedError("Please use a subclass")

    def skipAlreadyProcessedFiles(self):
        """Return the configured skip flag, defaulting to True when unset."""
        skip = self.config['global']['skipAlreadyProcessedFiles']
        if not skip:
            self.logger.warn("No config setting for skipAlreadyProcessedFiles set. Assuming True")
            return True
        return skip
class UntranscribedMediaSplittingAdapter(Adapter):
    """Shared base for adapters that convert raw media files to mono wav and
    split them into voice-activity chunks without transcriptions.

    Workflow (see _preprocess_workflow_with_splitting): convert media to
    mono 16 kHz wav via ffmpeg, split each wav into chunks with webrtcvad,
    then wrap the chunks into MediaAnnotationBundles inside a MediaSession.
    """

    AUDIO_SPLIT_AGRESSIVENESS = 3  # webrtcvad 1 (low), 3 (max)
    ADAPTERNAME = "MediaSplittingAdapter"
    # NOTE(review): these are *class* attributes, shared by all instances of
    # all subclasses — bundles/actors accumulate across adapter instances.
    # Confirm only one adapter instance is live at a time.
    mediaAnnotationBundles = []
    mediaSessionActors = set()  # using a set so we don't have duplets

    def __init__(self, config):
        super(UntranscribedMediaSplittingAdapter, self).__init__(config=config)
        self.config = config
        # These corpora carry no speaker metadata: one unknown actor.
        self.mediaSessionActors.add(MediaSessionActor("UNKNOWN", Sex.UNKNOWN, None))

    def _splitMonoRawAudioToVoiceSectionsThread(self, file, outputpath):
        # Worker thread: split one mono wav into VAD chunks.
        # Returns (success, original file path, list of chunk paths).
        self.logger.debug("Splitting file into chunks: {}".format(self._getFilenameWithExtension(file)))
        splitter = Splitter()
        vad = webrtcvad.Vad(int(self.AUDIO_SPLIT_AGRESSIVENESS))
        basename = self._getFilenameWithoutExtension(file)
        audiochunkPathsForThisfile = []
        try:
            audio, sample_rate = splitter.read_wave(file)
            # 30 ms frames, 300 ms padding window for the VAD collector.
            frames = splitter.frame_generator(30, audio, sample_rate)
            frames = list(frames)
            segments = splitter.vad_collector(sample_rate, 30, 300, vad, frames)
            for i, segment in enumerate(segments):
                path = os.path.join(outputpath, basename + '_chunk_{:05d}.wav'.format(i))
                self.logger.debug("Write chunk {} of file {}".format(i, file))
                splitter.write_wave(path, segment, sample_rate)
                audiochunkPathsForThisfile.append(path)
            # write staging complete file
            # (empty marker file; its presence means "fully chunked")
            stagingPath = os.path.join(outputpath, basename + ".stagingComplete")
            with open(stagingPath, 'a'):
                os.utime(stagingPath, None)
            self.logger.debug("Finished splitting file {}".format(file))
        except Exception as excep:
            self.logger.warn("Could split file into chunks {}. Skipping".format(file), exc_info=excep)
            return (False, str(file), [])  # returning an empty list, as no success here
        return (True, str(file), audiochunkPathsForThisfile)

    def _convertMediafileToMonoAudioThread(self, filenumber, totalNumberOfFiles, singleFilepathToProcess, outputPath):
        # Worker thread: convert one media file to mono 16 kHz 16-bit wav.
        # Returns (success, input path, output wav path).
        self.logger.debug(
            "Processing file {}/{} on path {}".format(filenumber + 1, totalNumberOfFiles, singleFilepathToProcess))
        nextFilename = os.path.join(outputPath, self._getFilenameWithoutExtension(singleFilepathToProcess) + ".wav")
        try:
            (ffmpeg
             .input(singleFilepathToProcess)
             .output(nextFilename, format='wav', acodec='pcm_s16le', ac=1, ar='16k')
             .overwrite_output()
             .run()
             )
        except ffmpeg.Error as ffmpgError:
            self.logger.warn("Ffmpeg rose an error", exc_info=ffmpgError)
            self.logger.warn("Due to error of ffmpeg skipped file {}".format(singleFilepathToProcess))
            return (False, str(singleFilepathToProcess), str(nextFilename))
        except Exception as e:
            self.logger.warn("Got an error while using ffmpeg for file {}".format(singleFilepathToProcess), exc_info=e)
            return (False, str(singleFilepathToProcess), str(nextFilename))
        return (True, str(singleFilepathToProcess), str(nextFilename))

    def createMediaSession(self, bundles):
        """Wrap annotation bundles into a MediaSession for this adapter."""
        session = MediaSession(self.ADAPTERNAME, self.mediaSessionActors, bundles)
        return session

    def createMediaAnnotationBundles(self, audiochunks):
        """Create one transcription-less annotation bundle per audio chunk."""
        annotationBundles = []
        for index, filepath in enumerate(audiochunks):
            bundle = MediaAnnotationBundleWithoutTranscription(identifier=filepath)  # we do not have any written ressources
            bundle.setMediaFile(filepath)
            annotationBundles.append(bundle)
        return annotationBundles

    def splitAudioToChunks(self, filesToChunk, outputPath):
        """Split all given wav files into VAD chunks using a thread pool.

        Returns the chunk paths of all files that split successfully;
        failed files are logged and dropped.
        """
        if ((filesToChunk == None) or (len(filesToChunk) == 0)):
            self.logger.info("Nothing to split, received empty wav-filenamelist")
            return []
        successfullyChunkedFiles = []
        with concurrent.futures.ThreadPoolExecutor(max_workers=None) as executor:
            futures = []
            for filenumber, file in enumerate(filesToChunk):
                futures.append(
                    executor.submit(self._splitMonoRawAudioToVoiceSectionsThread, file, outputPath))
            for future in as_completed(futures):
                if (future.result()[0] == False):
                    self.logger.warning("Couldnt split audiofile {}, removing from list".format(future.result()[1]))
                else:
                    successfullyChunkedFiles.extend(future.result()[2])
                self.logger.debug("Splitting Audio is done {}".format(future.result()))
        self.logger.debug("Finished splitting {} wav files".format(len(filesToChunk)))
        return successfullyChunkedFiles

    def determineWavFilesToChunk(self, baseFilesToChunk, stagingChunkPath):
        """Partition base wavs into (not yet chunked, already chunked).

        A base file counts as chunked when its ``.stagingComplete`` marker
        exists; its existing chunk files are matched by filename prefix.
        Returns (files still to chunk, existing chunk paths).
        """
        allStageIndicatorFilesFullpath = set(self._getAllMediaFilesInBasepath(stagingChunkPath, {".stagingComplete"}))
        allExistingChunkedFilesFullpath = set(self._getAllMediaFilesInBasepath(stagingChunkPath, {".wav"}))
        allStageIndicatorFilesDictionary = self._toFilenameDictionary(allStageIndicatorFilesFullpath)
        allBaseFilesDictionary = self._toFilenameDictionary(baseFilesToChunk)
        stagingCompleteCorrectKeys = set(allBaseFilesDictionary.keys()).intersection(
            set(allStageIndicatorFilesDictionary.keys()))
        stagingIncompleteCorrectKeys = set(allBaseFilesDictionary.keys()).difference(
            set(allStageIndicatorFilesDictionary.keys()))
        stagingComplete = []
        for fullpath in allExistingChunkedFilesFullpath:
            if any(self._getFilenameWithoutExtension(fullpath).startswith(cm) for cm in stagingCompleteCorrectKeys):
                stagingComplete.append(fullpath)
        stagingIncomplete = [allBaseFilesDictionary[key] for key in stagingIncompleteCorrectKeys]
        self.logger.debug("Got {} files not yet chunked".format(len(stagingIncomplete)))
        self.logger.debug("Got {} files chunked".format(len(stagingComplete)))
        return stagingIncomplete, stagingComplete

    def convertMediaFilesToMonoAudio(self, filesToProcess, outputpath, adapterName):
        """Convert all given media files to mono wav using a thread pool.

        Returns the output paths of successful conversions; failures are
        logged and dropped.
        """
        if (filesToProcess == None or len(filesToProcess) == 0):
            self.logger.debug("No files to convert for {}, skipping".format(adapterName))
            return []
        successfulFilenames = []
        with concurrent.futures.ThreadPoolExecutor(max_workers=None) as executor:
            futures = []
            for filenumber, currentFile in enumerate(filesToProcess):
                futures.append(
                    executor.submit(self._convertMediafileToMonoAudioThread, filenumber, len(filesToProcess),
                                    currentFile, outputpath))
            for future in as_completed(futures):
                if (future.result()[0] == False):
                    self.logger.warning("Couldnt process audiofile {}, removing from list".format(future.result()[1]))
                else:
                    successfulFilenames.append(future.result()[2])
                self.logger.debug("Processing Audio is done {} for Converter {}".format(future.result(), adapterName))
        return successfulFilenames

    def _toFilenameDictionary(self, list):
        # Map extension-less basename -> full path.
        # NOTE(review): the parameter shadows the builtin ``list``.
        if (list == None or len(list) == 0):
            self.logger.debug("Got nothing in list, returning empty dictionary")
            return dict()
        listDict = dict()
        for fullpath in list:
            listDict[self._getFilenameWithoutExtension(fullpath)] = fullpath
        self.logger.debug("Created dictionary of files of length {}".format(len(listDict)))
        return listDict

    def determineFilesToConvertToMonoFromGivenLists(self, alreadyStagedFiles, originalFiles, adaptername):
        """Compare originals against staged files by basename and return
        (original paths not yet processed, staged paths already processed)."""
        dictionaryOfOriginalFilepaths = self._toFilenameDictionary(originalFiles)
        dictionaryOfStagedFilepaths = self._toFilenameDictionary(alreadyStagedFiles)
        notYetProcessedKeys = set(dictionaryOfOriginalFilepaths.keys()).difference(set(dictionaryOfStagedFilepaths.keys()))
        alreadyProcessedKeys = set(dictionaryOfOriginalFilepaths.keys()).intersection(
            set(dictionaryOfStagedFilepaths.keys()))
        fullpathsToNotYetProcessed = [dictionaryOfOriginalFilepaths[key] for key in notYetProcessedKeys]
        fullpathsProcessed = [dictionaryOfStagedFilepaths[key] for key in alreadyProcessedKeys]
        self.logger.debug("Got {} files not yet processed for corpus {}".format(len(notYetProcessedKeys), adaptername))
        self.logger.debug("Got {} files already processed for corpus {}".format(len(alreadyProcessedKeys), adaptername))
        return fullpathsToNotYetProcessed, fullpathsProcessed

    def _preprocess_workflow_with_splitting(self, filesAlreadyProcessed, filesToProcess, monoPath, chunkPath,
                                            adaptername):
        # Full pipeline: convert -> chunk -> bundle -> MediaSession.
        filesSuccessfullyProcessed = self.convertMediaFilesToMonoAudio(filesToProcess, monoPath, adaptername)
        baseFilesToChunk = []
        baseFilesToChunk = baseFilesToChunk + filesSuccessfullyProcessed + filesAlreadyProcessed
        # split mono audio to chunks
        filesToChunk, filesAlreadyChunked = self.determineWavFilesToChunk(baseFilesToChunk,
                                                                          chunkPath)
        filesSuccessfullyChunked = self.splitAudioToChunks(filesToChunk, chunkPath)
        # add chunks to media session
        mediaBundleFiles = [] + filesSuccessfullyChunked + filesAlreadyChunked
        mediaAnnotationbundles = self.createMediaAnnotationBundles(mediaBundleFiles)
        mediaSession = self.createMediaSession(mediaAnnotationbundles)
        return mediaSession
class UntranscribedVideoAdapter(UntranscribedMediaSplittingAdapter):
    """Adapter for an untranscribed video corpus: converts .mp4 files to
    mono wav and splits them into voice chunks."""

    ADAPTERNAME = "UntranscribedVideoAdapter"

    def __init__(self, config):
        super(UntranscribedVideoAdapter, self).__init__(config=config)
        self.config = config

    def toMetamodel(self):
        """Run the full convert-and-split workflow; returns a MediaSession."""
        self.logger.debug("Untranscribed Video Korpus")
        # convert video to mono audio
        filesToProcess, filesAlreadyProcessed = self._determineVideoFilesToConvertToMono()
        return self._preprocess_workflow_with_splitting(filesAlreadyProcessed, filesToProcess,
                                                        self._validateStagingMonoPath(), self._validateStagingChunksPath(),
                                                        self.ADAPTERNAME)

    def _validateKorpusPath(self):
        """Return the corpus root from config; raise IOError when missing."""
        korpus_path = self.config['untranscribed_videos_input_adapter']['korpus_path']
        if not os.path.isdir(korpus_path):
            raise IOError("Could not read korpus path" + korpus_path)
        return korpus_path

    def _validateStagingPath(self, subdir):
        # Shared helper replacing the duplicated mono/chunks validation code:
        # ensure the global workdir exists and create the staging subdir.
        workdir = self.config['global']['workdir']
        if not os.path.isdir(workdir):
            raise IOError("Could not read workdir path" + workdir)
        staging = Path(workdir).joinpath(subdir)
        staging.mkdir(parents=True, exist_ok=True)
        return str(staging)

    def _validateStagingMonoPath(self):
        """Return (and create) the staging dir for mono wav files."""
        return self._validateStagingPath("untranscribed_video_staging_mono")

    def _validateStagingChunksPath(self):
        """Return (and create) the staging dir for VAD chunks."""
        return self._validateStagingPath("untranscribed_video_staging_chunks")

    def _determineVideoFilesToConvertToMono(self):
        # Compare original .mp4 files against already-staged .wav files.
        originalFiles = set(self._getAllMediaFilesInBasepath(self._validateKorpusPath(), {".mp4"}))
        alreadyStagedFiles = set(self._getAllMediaFilesInBasepath(self._validateStagingMonoPath(), {".wav"}))
        self.logger.debug("Got {} original untranscribed mp4 files to process".format(len(originalFiles)))
        return self.determineFilesToConvertToMonoFromGivenLists(alreadyStagedFiles, originalFiles, self.ADAPTERNAME)
class ChJugendspracheAdapter(UntranscribedMediaSplittingAdapter):
    """Adapter for the CH-Jugendsprache corpus: converts the original WAV
    recordings to mono and splits them into voice chunks."""

    ADAPTERNAME = "CHJugendspracheAdapter"

    def __init__(self, config):
        super(ChJugendspracheAdapter, self).__init__(config=config)
        self.config = config

    def toMetamodel(self):
        """Run the full convert-and-split workflow; returns a MediaSession."""
        self.logger.debug("CH-Jugendsprache Korpus")
        # convert audio to mono audio
        filesToProcess, filesAlreadyProcessed = self._determineChJugendspracheFilesToConvertToMono()
        return self._preprocess_workflow_with_splitting(filesAlreadyProcessed, filesToProcess,
                                                        self._validateStagingMonoPath(), self._validateStagingChunksPath(),
                                                        self.ADAPTERNAME)

    def _determineChJugendspracheFilesToConvertToMono(self):
        # Both upper- and lower-case wav extensions occur in the corpus.
        originalFiles = set(self._getAllMediaFilesInBasepath(self._validateKorpusPath(), {".WAV", ".wav"}))
        alreadyStagedFiles = set(self._getAllMediaFilesInBasepath(self._validateStagingMonoPath(), {".wav"}))
        self.logger.debug("Got {} original jugendsprache files to process".format(len(originalFiles)))
        return self.determineFilesToConvertToMonoFromGivenLists(alreadyStagedFiles, originalFiles, self.ADAPTERNAME)

    def _validateStagingPath(self, subdir):
        # Shared helper replacing the duplicated mono/chunks validation code:
        # ensure the global workdir exists and create the staging subdir.
        workdir = self.config['global']['workdir']
        if not os.path.isdir(workdir):
            raise IOError("Could not read workdir path" + workdir)
        staging = Path(workdir).joinpath(subdir)
        staging.mkdir(parents=True, exist_ok=True)
        return str(staging)

    def _validateStagingMonoPath(self):
        """Return (and create) the staging dir for mono wav files."""
        return self._validateStagingPath("ch_jugensprache_staging_mono")

    def _validateStagingChunksPath(self):
        """Return (and create) the staging dir for VAD chunks."""
        return self._validateStagingPath("ch_jugensprache_staging_chunks")

    def _validateKorpusPath(self):
        """Return the corpus root from config; raise IOError when missing."""
        korpus_path = self.config['ch_jugendsprache_input_adapter']['korpus_path']
        if not os.path.isdir(korpus_path):
            raise IOError("Could not read korpus path" + korpus_path)
        return korpus_path
class ArchimobAdapter(UntranscribedMediaSplittingAdapter):
"""
ArchimobAdapter
"""
ADAPTERNAME = "Archimob"
def __init__(self, config):
super(ArchimobAdapter, self).__init__(config=config)
self.config = config
def _validateKorpusPath(self):
korpus_path = self.config['archimob_input_adapter']['korpus_path']
if not os.path.isdir(korpus_path):
raise IOError("Could not read korpus path" + korpus_path)
return korpus_path
def _transcription_pause_tag_symbol(self):
symbol = self.config['archimob_input_adapter']['transcription_pause_tag_symbol']
if not symbol:
self.logger.warn("No symbol for transcription pause tag configured, falling back to default, which is '@'-Symbol")
symbol = '@'
return symbol
def _transcription_vocal_tag_symbol(self):
symbol = self.config['archimob_input_adapter']['transcription_vocal_tag_symbol']
if not symbol:
self.logger.warn("No symbol for transcription pause tag configured, falling back to default, which is '#'-Symbol")
symbol = '#'
return symbol
def _validateWorkdir(self):
workdir = self.config['global']['workdir']
if not os.path.isdir(workdir):
raise IOError("Could not read workdir path" + workdir)
workdir = Path(workdir).joinpath("archimob_staging")
workdir.mkdir(parents=True, exist_ok=True)
return str(workdir)
def _determineArchimobFilesToProcess(self):
originalFiles = set(self._getAllMediaFilesInBasepath(self._validateKorpusPath(), {".wav"}))
originalFiles = self._fixOriginalDatasetFlawsIfNecessary(originalFiles)
alreadyStagedFiles = set(self._getAllMediaFilesInBasepath(self._validateWorkdir(), {".wav"}))
self.logger.debug("Got {} original archimob files to process".format(len(originalFiles)))
return self.determineFilesToConvertToMonoFromGivenLists(alreadyStagedFiles, originalFiles, self.ADAPTERNAME)
def toMetamodel(self):
self.logger.debug("Archimob V2 Korpus")
# convert chunks to mono audio
filesToProcess, filesAlreadyProcessed = self._determineArchimobFilesToProcess()
filesSuccessfullyProcessed = self.convertMediaFilesToMonoAudio(filesToProcess, self._validateWorkdir(),
self.ADAPTERNAME)
filesForMediaBundle = []
filesForMediaBundle = filesForMediaBundle + filesSuccessfullyProcessed + filesAlreadyProcessed
# add chunks to media session
mediaAnnotationbundles = self.createMediaAnnotationBundles(filesForMediaBundle)
mediaSession = self.createMediaSession(mediaAnnotationbundles)
return mediaSession
def createMediaSession(self, bundles):
actors = self._createMediaSessionActorsFromBundles(bundles)
session = MediaSession(self.ADAPTERNAME, actors, bundles)
return session
def createMediaAnnotationBundles(self, filesForMediaBundle):
allXmlOriginalTranscriptionFiles = self._archimobOriginalTranscriptionFiles(self._validateKorpusPath())
transcriptionsPerSpeaker = self._extract(allXmlOriginalTranscriptionFiles)
mediaFilesAndTranscription = self._onlyTranscriptionsWithMediaFilesAndViceVersa(transcriptionsPerSpeaker,
filesForMediaBundle)
mediaAnnotationBundles = self._createActualMediaAnnotationBundles(mediaFilesAndTranscription)
return mediaAnnotationBundles
def _fixOriginalDatasetFlawsIfNecessary(self, originalFiles):
# As of Archimobe release V2 there are some minor flaws in the data, which are treated sequentially
if (self._fixForDuplicateWavs1063Necessary(originalFiles)):
originalFiles = self._fixForDuplicateWavs1063(originalFiles)
if (self._fixForWrongFilenames1082Necessary(originalFiles)):
originalFiles = self._fixForWrongFilenames1082(originalFiles)
return originalFiles
def _fixForDuplicateWavs1063Necessary(self, originalFiles):
# This flaw is simply, that within 1063 there exists another folder 1063 containing all files again
existingPathsForDoubled1063 = list(
filter(lambda file: os.path.sep + "1063" + os.path.sep + "1063" + os.path.sep in file, originalFiles))
fixNecessary = len(existingPathsForDoubled1063) > 0
self.logger.info("Found {} files of speaker 1063 which are duplicates. They will be ignored".format(
len(existingPathsForDoubled1063)))
return fixNecessary
def _fixForDuplicateWavs1063(self, originalFiles):
# fix is simply by removing the files in question from list
pathsWithout1063duplicates = list(
filter(lambda file: not (os.path.sep + "1063" + os.path.sep + "1063" + os.path.sep in file), originalFiles))
originalFiles = pathsWithout1063duplicates
return originalFiles
def _fixForWrongFilenames1082Necessary(self, originalFiles):
regexForFindingWrongNames = "(^\d{4}_\d)(d\d{4}_.*\.wav)" # like 1082_2d1082_2_TLI_3.wav
onlyFilenames = [os.path.basename(filename) for filename in originalFiles]
for filename in onlyFilenames:
m = re.search(regexForFindingWrongNames, filename)
if (not (m is None)):
return True
return False
def _fixForWrongFilenames1082(self, originalFiles):
fixedFiles = originalFiles.copy()
regexForFindingWrongFullpaths = "(.*\\" + os.path.sep + ")(\d{4}_\d)(d\d{4}_.*\.wav)" # like /home/somebody/files/1082/1082_2d1082_2_TLI_3.wav
for filename in originalFiles:
m = re.search(regexForFindingWrongFullpaths, filename)
if (not (m is None)):
newFilename = m.group(1) + m.group(3)
self.logger.debug(
"Fix 1082: Renaming file {} from {} to {}".format(m.group(2) + m.group(3), filename, newFilename))
try:
shutil.move(filename, newFilename)
fixedFiles.append(newFilename)
except Exception as inst:
self.logger.warn(
"Could not move file {} to {}, skipping and just removing from usable filenames".format(filename,
newFilename),
exc_info=inst)
fixedFiles.remove(filename)
return fixedFiles
def _archimobOriginalTranscriptionFiles(self, path):
xmlOriginalFiles = list(Path(path).glob("**/*.xml"))
self.logger.debug("Found {} original xml files for archimob".format(len(xmlOriginalFiles)))
return xmlOriginalFiles
def _extract(self, allXmlOriginalTranscriptionFiles):
transcriptionsPerSpeaker = []
with concurrent.futures.ThreadPoolExecutor(max_workers=None) as executor:
futures = []
for filenumber, file in enumerate(allXmlOriginalTranscriptionFiles):
futures.append(executor.submit(self._extractSingleXmlFileThread, file))
for future in as_completed(futures):
if (future.result()[0] == False):
self.logger.warning("Couldnt extract metadata for file {}, removing from list".format(future.result()[1]))
else:
transcriptionsPerSpeaker.append(
(future.result()[1], future.result()[2])) # tuple of original file and transcription dataframe
self.logger.debug("Extracting metadata for speaker finished {}".format(future.result()))
self.logger.debug("Finished metadata extraction for all {} xml files".format(len(allXmlOriginalTranscriptionFiles)))
return transcriptionsPerSpeaker
def _extractSingleXmlFileThread(self, xmlFile):
namespaceprefix = "{http://www.tei-c.org/ns/1.0}"
try:
tree = ET.parse(xmlFile)
root = tree.getroot()
ch_datacolumns = pd.DataFrame(columns=['Filename', 'transcript'])
transcriptionForSpeaker = pd.DataFrame(columns=ch_datacolumns.columns)
tagsToIgnore = set([namespaceprefix + tag for tag in {"gap", "incident", "kinesic", "other"}])
for utteranceTag in root.iter(namespaceprefix + 'u'):
media = utteranceTag.attrib['start']
filename = media.split('#')[1]
ch_transcript = [""]
for element in utteranceTag:
extractedWord = ""
if (namespaceprefix + "w" == element.tag):
extractedWord = self._extractWordTag(element)
if (namespaceprefix + "pause" == element.tag):
extractedWord = self._extractPauseTag(element)
if (namespaceprefix + "vocal" == element.tag):
extractedWord = self._extractVocalTag(namespaceprefix, element)
if (namespaceprefix + "del" == element.tag):
extractedWord = self._extractDeletionTag(element)
if (namespaceprefix + "unclear" == element.tag):
extractedWord = self._extractUnclearTag(namespaceprefix, element)
if (element.tag in tagsToIgnore):
self.logger.debug(
"Found tag {} which is in ignore list, ignoring the whole utterance {}".format(element.tag, filename))
break
if (extractedWord):
cleanedWord = self._cleanExtractedWord(extractedWord)
if (cleanedWord):
ch_transcript.append(cleanedWord)
try:
actualTranscript = " ".join(ch_transcript).strip()
if (not actualTranscript or (self._transcription_pause_tag_symbol() == actualTranscript)):
self.logger.debug("Skipping empty transcription for filename {}".format(filename))
continue
transcriptionForSpeaker = transcriptionForSpeaker.append(
{'Filename': filename, 'transcript': actualTranscript}, ignore_index=True)
transcriptionForSpeaker = self._cleanSpecialCaseWhereTwoSentencesPerFileExist(transcriptionForSpeaker)
except Exception as e:
self.logger.warn("Couldn't append single utterance for filename {}".format(filename), exc_info=e)
continue
# writing is just for manual checking
transcriptionForSpeaker.to_csv(
os.path.join(self._getFullFilenameWithoutExtension(xmlFile) + "_transcript_CH.csv"),
header=True, index=False, encoding='utf-8')
return True, xmlFile, transcriptionForSpeaker
except Exception as e:
self.logger.warn("Couldn't extract metadata for xml file {}".format(xmlFile), exc_info=e)
return False, xmlFile, None
def _extractWordTag(self, element):
return element.text
def _extractPauseTag(self, element):
return self._transcription_pause_tag_symbol()
def _extractVocalTag(self, namespaceprefix, element):
desc = element.find(namespaceprefix + "desc")
if desc is not None:
return self._transcription_vocal_tag_symbol() + desc.text
return ""
def _extractDeletionTag(self, element):
truncatedTextWithPotentialSlash = element.text
if truncatedTextWithPotentialSlash:
truncatedText = truncatedTextWithPotentialSlash.replace("/", "")
return truncatedText
return ""
def _extractUnclearTag(self, namespaceprefix, element):
if element is not None:
wordsWithinUnclearTag = element.findall(namespaceprefix + 'w')
unclearText = []
for word in wordsWithinUnclearTag:
unclearText.append(word.text)
return " ".join(unclearText)
return ""
def _cleanExtractedWord(self, extractedWord):
# replace all tokens with gravis with their counterpart
# remove all chars not in allowed list
# Note: q,x and y are not allowed, as thos are not existing within transcription of archimob!
allowed_chars = {
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'r', 's', 't', 'u', 'v', 'w', 'z',
'ä', 'ö', 'ü',
' '
}
allowed_chars.add(self._transcription_pause_tag_symbol())
allowed_chars.add(self._transcription_vocal_tag_symbol())
whitespace_regex = re.compile(r'[ \t]+')
extractedWord = extractedWord.lower()
extractedWord = extractedWord.replace('á', 'a')
extractedWord = extractedWord.replace('à', 'a')
extractedWord = extractedWord.replace('â', 'a')
extractedWord = extractedWord.replace('ç', 'c')
extractedWord = extractedWord.replace('é', 'e')
extractedWord = extractedWord.replace('è', 'e')
extractedWord = extractedWord.replace('ê', 'e')
extractedWord = extractedWord.replace('í', 'i')
extractedWord = extractedWord.replace('ì', 'i')
extractedWord = extractedWord.replace('î', 'i')
extractedWord = extractedWord.replace('ñ', 'n')
extractedWord = extractedWord.replace('ó', 'o')
extractedWord = extractedWord.replace('ò', 'o')
extractedWord = extractedWord.replace('ô', 'o')
extractedWord = extractedWord.replace('ú', 'u')
extractedWord = extractedWord.replace('ù', 'u')
extractedWord = extractedWord.replace('ǜ', 'u')
extractedWord = extractedWord.replace('û', 'u')
extractedWord = extractedWord.replace('ș', 's')
extractedWord = extractedWord.replace('ş', 's')
extractedWord = extractedWord.replace('ß', 'ss')
extractedWord = extractedWord.replace('-', ' ')
# Those should not exist anymore, however, be safe
extractedWord = extractedWord.replace('–', ' ')
extractedWord = extractedWord.replace('/', ' ')
extractedWord = whitespace_regex.sub(' ', extractedWord)
extractedWord = ''.join([char for char in extractedWord if char in allowed_chars])
extractedWord = whitespace_regex.sub(' ', extractedWord)
extractedWord = extractedWord.strip()
return extractedWord
def _onlyTranscriptionsWithMediaFilesAndViceVersa(self, transcriptionsPerSpeaker, filesForMediaBundle):
if not transcriptionsPerSpeaker or not filesForMediaBundle:
return []
existingMediaFilesTuples = [(self._getFilenameWithoutExtension(mediafile), mediafile) for mediafile in
filesForMediaBundle]
existingMediaFiles, existingMediaFilesFullpath = zip(*existingMediaFilesTuples)
# combine all transcriptions
allTranscriptions = pd.concat([transcription[1] for transcription in transcriptionsPerSpeaker])
if any("-" in filename for filename in allTranscriptions.Filename) \
and not any("-" in filename for filename in existingMediaFiles):
self.logger.debug(
"Found filenames with dash (-) instead of underscore (_) but only filenames with underscore. Automatically fixing this...")
allTranscriptions.Filename = allTranscriptions.Filename.str.replace("-", "_")
# Find all files that exist in both sets
# TODO: Performance not good for 70k files
allMatchingTranscriptions = allTranscriptions[allTranscriptions.Filename.isin(existingMediaFiles)].copy()
allMatchingTranscriptions["FullpathFilename"] = ""
allMatchingTranscriptions.set_index("Filename", inplace=True)
for filenumber, existingFile in enumerate(existingMediaFiles):
allMatchingTranscriptions.loc[existingFile, "FullpathFilename"] = existingMediaFilesFullpath[filenumber]
return allMatchingTranscriptions[["FullpathFilename", "transcript"]].copy()
def _createActualMediaAnnotationBundles(self, mediaFilesAndTranscription):
bundles = []
for fileAndTranscription in mediaFilesAndTranscription.itertuples(index=False):
bundle = MediaAnnotationBundle(fileAndTranscription.FullpathFilename)
speakerId = self._speakerIdFromFullpath(fileAndTranscription.FullpathFilename)
bundle.setMediaFile(MediaFile(speakerId))
written_resource = WrittenResource(fileAndTranscription.transcript, speakerId, languageCode="CH",
annotationType=WrittenResource.DIETH_WITHOUT_GRAVIS)
bundle.setWrittenResource(written_resource)
bundles.append(bundle)
self.logger.debug("Created {} mediaAnnotationBundles out of {} transcriptions".format(len(bundles), len(
mediaFilesAndTranscription)))
return bundles
def _speakerIdFromFullpath(self, fullpathFilename):
return self._getFilenameWithoutExtension(fullpathFilename).split("_")[0]
def _createMediaSessionActorsFromBundles(self, bundles):
speakerIds = set([speaker.writtenResource.actorRef for speaker in bundles])
actors = [MediaSessionActor(speakerId, Sex.UNKNOWN, None) for speakerId in speakerIds]
return MediaSessionActors(actors)
def _cleanSpecialCaseWhereTwoSentencesPerFileExist(self, transcriptionForSpeaker):
if transcriptionForSpeaker is None or len(transcriptionForSpeaker) < 2:
return transcriptionForSpeaker
lastFilename = transcriptionForSpeaker.iloc[-1]["Filename"]
filenameBefore = transcriptionForSpeaker.iloc[-2]["Filename"]
if lastFilename == filenameBefore:
lastTranscription = transcriptionForSpeaker.iloc[-1]["transcript"]
transcriptionBefore = transcriptionForSpeaker.iloc[-2]["transcript"]
newTranscript = transcriptionBefore + " " + lastTranscription
transcriptionForSpeaker.drop(transcriptionForSpeaker.tail(2).index, inplace=True)
transcriptionForSpeaker = transcriptionForSpeaker.append(
{'Filename': lastFilename, 'transcript': newTranscript}, ignore_index=True)
self.logger.info(
"Found a case {} where two sentences '{}' and '{}' are within one audio-file, merging them together".format(
lastFilename,
transcriptionBefore, lastTranscription))
return transcriptionForSpeaker
class CommonVoiceAdapter(Adapter):
  """Adapter that maps the Mozilla CommonVoice German corpus onto the metamodel.

  Reads the mp3 clips and the tsv metadata files of a CommonVoice release and
  produces a MediaSession with one MediaAnnotationBundle per audio file.
  """

  RELATIVE_PATH_TO_AUDIO = "clips"  # clip folder relative to the korpus path
  LANGUAGECODE_DE = "de_DE"
  ADAPTERNAME = "CommonVoiceDE"

  def __init__(self, config):
    super(CommonVoiceAdapter, self).__init__(config=config)
    self.config = config
    # Instance-level state. These used to be class attributes, which meant all
    # adapter instances shared (and kept appending to) the same list and set.
    self.mediaAnnotationBundles = []
    self.mediaSessionActors = set()  # using a set so we don't have duplets

  def toMetamodel(self):
    """Entry point: read audio files + speaker metadata and return the media session."""
    self.logger.debug("Created CommonVoice Adapter")
    # NOTE(review): both _read* methods work via side effects on self and
    # return None; the two assignments only mirror that (kept for compatibility).
    self.audiofilenames = self._readExistingAudioFiles()
    self.speakermetadata = self._readExistingSpeakerMetadata()
    self._persistMetamodel()
    self._buildMediaSession()
    return self.mediaSession

  def _validateKorpusPath(self):
    """Return the configured korpus path, raising IOError if it is not a directory."""
    korpus_path = self.config['common_voice_input_adapter']['korpus_path']
    if not os.path.isdir(korpus_path):
      raise IOError("Could not read korpus path" + korpus_path)
    return korpus_path

  def _existingAudioFileFullpath(self, filename):
    """Return the absolute path of an audio clip inside the clips folder."""
    return os.path.join(self._validateKorpusPath(), self.RELATIVE_PATH_TO_AUDIO, filename)

  def _readExistingAudioFiles(self):
    """Collect all mp3 files of the korpus as MediaAnnotationBundles (side effect on self)."""
    fullpath = os.path.join(self._validateKorpusPath(), self.RELATIVE_PATH_TO_AUDIO)
    for file in os.listdir(fullpath):
      if file.endswith(".mp3"):
        self.mediaAnnotationBundles.append(MediaAnnotationBundle(self._existingAudioFileFullpath(file)))
    self.logger.debug("Found {} audiofiles to process".format(len(self.mediaAnnotationBundles)))

  def _readExistingSpeakerMetadata(self):
    """Read the tsv metadata, enrich the bundles with transcriptions and collect actors."""
    existing_audio_identifier = self._getFilenamesFromMediaAnnotationBundles()
    common_voice_valid_metadata = self._getCommonVoiceValidMetadata(
      existing_audio_identifier, self._validateKorpusPath())
    self._enrichWithTranscription(common_voice_valid_metadata)
    self._extractMediaSessionActors(common_voice_valid_metadata)

  def _enrichWithTranscription(self, common_voice_valid_metadata):
    """Attach transcription and speaker info from the metadata rows to the bundles."""
    # Lookup tables by identifier with and without file extension, because the
    # CommonVoice tsv format changed between releases
    # (see _fixChangeInDataFormatCommonVoice).
    self.mediaAnnotationBundles_dictionary_withoutExtension = {
      self._getFilenameWithoutExtension(x.identifier): x for x in self.mediaAnnotationBundles}
    self.mediaAnnotationBundles_dictionary_withExtension = {
      self._getFilenameWithExtension(x.identifier): x for x in self.mediaAnnotationBundles}
    common_voice_valid_metadata.apply(self._enrichWithTranscriptionInner, axis=1)

  def _enrichWithTranscriptionInner(self, row):
    """Enrich the bundle matching a single metadata row (called via DataFrame.apply)."""
    currentMediaAnnotationBundle = self.mediaAnnotationBundles_dictionary_withoutExtension.get(
      row.path, self.mediaAnnotationBundles_dictionary_withExtension.get(row.path))
    currentMediaAnnotationBundle.setWrittenResource(
      WrittenResource(row.sentence, row.client_id, self.LANGUAGECODE_DE))
    currentMediaAnnotationBundle.setMediaFile(MediaFile(row.client_id))
    self.logger.debug(
      "Found matching media-annotation bundle for identifier {} and path {}".format(row.client_id, row.path))

  def _extractMediaSessionActors(self, common_voice_valid_metadata):
    """Collect one MediaSessionActor per metadata row (the set removes duplicates)."""
    common_voice_valid_metadata.apply(self._createMediaSessionActorFromRow, axis=1)
    self.logger.debug("Found {} Speakers".format(len(self.mediaSessionActors)))

  def _createMediaSessionActorFromRow(self, row):
    """Create an actor from a single metadata row and add it to the actor set."""
    self.mediaSessionActors.add(MediaSessionActor(row.client_id, Sex.toSexEnum(row.gender), row.age))

  def _getCommonVoiceValidMetadata(self, existing_audio_identifier,
                                   korpus_path):
    """Concatenate all relevant tsv files and keep rows whose audio file exists."""
    commonvoice_valid_metadatafilenames = ["dev.tsv", "test.tsv", "train.tsv", "validated.tsv"]
    combined_csv = pd.concat(
      [pd.read_csv(os.path.join(korpus_path, f), sep="\t", header=0) for f in commonvoice_valid_metadatafilenames])
    common_voice_valid_metadata = combined_csv[combined_csv.path.isin(existing_audio_identifier)]
    common_voice_valid_metadata = self._fixChangeInDataFormatCommonVoice(common_voice_valid_metadata, combined_csv)
    return common_voice_valid_metadata

  def _getFilenamesFromMediaAnnotationBundles(self):
    """Return all bundle identifiers without path and without extension."""
    return [os.path.splitext(os.path.basename(base.identifier))[0] for base in
            self.mediaAnnotationBundles]

  def _getFilenamesFromMediaAnnotationBundlesWithExtension(self):
    """Return all bundle identifiers without path but with extension."""
    return [os.path.basename(base.identifier) for base in self.mediaAnnotationBundles]

  def _persistMetamodel(self):
    """Persist the metamodel as json (not implemented yet)."""
    # TODO actual persisting of working json
    # Actual json output
    # print(json.dumps(self.mediaAnnotationBundles, default=lambda o: o.__dict__, sort_keys=True, indent=4))
    pass

  def _buildMediaSession(self):
    """Assemble the MediaSession from the collected actors and bundles (sets self.mediaSession)."""
    actors = MediaSessionActors(self.mediaSessionActors)
    session = MediaSession(self.ADAPTERNAME, actors, self.mediaAnnotationBundles)
    # TODO Validate
    self.mediaSession = session

  def _fixChangeInDataFormatCommonVoice(self, common_voice_valid_metadata, combined_csv):
    """Retry the path matching with file extension for the newer tsv format."""
    if (len(common_voice_valid_metadata) == 0):
      self.logger.debug(
        "CommonVoice tsv-files seem to have filename-extension set (new fileformat). Trying matching with extension")
      common_voice_valid_metadata = combined_csv[
        combined_csv.path.isin(self._getFilenamesFromMediaAnnotationBundlesWithExtension())]
    self.logger.debug(
      "CommonVoice Valid metadata length is: {}".format(len(common_voice_valid_metadata)))
    return common_voice_valid_metadata
| 48.312907
| 152
| 0.725658
| 3,491
| 37,056
| 7.576626
| 0.185907
| 0.018904
| 0.019282
| 0.014518
| 0.278034
| 0.233611
| 0.192741
| 0.17569
| 0.16276
| 0.15913
| 0
| 0.005745
| 0.178028
| 37,056
| 766
| 153
| 48.375979
| 0.862602
| 0.037106
| 0
| 0.245161
| 0
| 0.001613
| 0.106961
| 0.014984
| 0
| 0
| 0
| 0.002611
| 0
| 1
| 0.112903
| false
| 0.009677
| 0.022581
| 0.009677
| 0.275806
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0beee4ee459f085172a97b3c88ddde9059df51b
| 14,085
|
py
|
Python
|
development/multiImage_pytorch/experiment.py
|
anaikawadi/svbrdf-estimation
|
c977aa8448b2131af3960895afd1105d29e5484a
|
[
"MIT"
] | null | null | null |
development/multiImage_pytorch/experiment.py
|
anaikawadi/svbrdf-estimation
|
c977aa8448b2131af3960895afd1105d29e5484a
|
[
"MIT"
] | null | null | null |
development/multiImage_pytorch/experiment.py
|
anaikawadi/svbrdf-estimation
|
c977aa8448b2131af3960895afd1105d29e5484a
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import math
import shutil
import torch
from accelerate import Accelerator
from tensorboardX import SummaryWriter
from cli import parse_args
from dataset import SvbrdfDataset
from losses import MixedLoss, MixedLoss2, MixedLoss3
from models import MultiViewModel, SingleViewModel
from pathlib import Path
from persistence import Checkpoint
from renderers import LocalRenderer, RednerRenderer
import utils
import environment as env
import numpy as np
import sys
from PIL import Image
class Identity(torch.nn.Module):
    """A no-op module that returns its input unchanged."""

    def __init__(self):
        super(Identity, self).__init__()

    def forward(self, x):
        return x
# --- Setup: arguments, checkpoint, device, model and dataset -----------------
args = parse_args()

# Retraining means: ignore any existing checkpoint and start from scratch.
clean_training = args.mode == 'train' and args.retrain

# Load the checkpoint
checkpoint_dir = Path(args.model_dir)
checkpoint = Checkpoint()
if not clean_training:
    checkpoint = Checkpoint.load(checkpoint_dir)

# Immediately restore the arguments if we have a valid checkpoint
if checkpoint.is_valid():
    args = checkpoint.restore_args(args)

# Make the result reproducible
utils.enable_deterministic_random_engine()

# Determine the device (CPU/GPU selection is delegated to accelerate)
accelerator = Accelerator()
device = accelerator.device

# Create the model and restore its weights from the checkpoint if available
model = MultiViewModel(use_coords=args.use_coords).to(device)
if checkpoint.is_valid():
    model = checkpoint.restore_model_state(model)
elif args.mode == 'test':
    # Testing without trained weights is impossible - bail out.
    print("No model found in the model directory but it is required for testing.")
    exit(1)

# TODO: Choose a random number for the used input image count if we are training and we don't request it to be fix (see fixImageNb for reference)
data = SvbrdfDataset(data_directory=args.input_dir,
                     image_size=args.image_size, scale_mode=args.scale_mode, input_image_count=args.image_count, used_input_image_count=args.used_image_count,
                     use_augmentation=True, mix_materials=args.mode == 'train',
                     no_svbrdf=args.no_svbrdf_input, is_linear=args.linear_input)

epoch_start = 0

# NOTE(review): leftover experiment code for truncating the generator - kept commented out.
# model.generator.delete()
# model = torch.nn.Sequential(
#     *list(model.children())[:-8],
# )
# print(*list(model.parameters()))
if args.mode == 'train':
    # --- Split data and build dataloaders ------------------------------------
    # Hold back a small fraction of the samples for validation.
    validation_split = 0.01
    print("Using {:.2f} % of the data for validation".format(
        round(validation_split * 100.0, 2)))
    training_data, validation_data = torch.utils.data.random_split(data, [int(math.ceil(
        len(data) * (1.0 - validation_split))), int(math.floor(len(data) * validation_split))])
    print("Training samples: {:d}.".format(len(training_data)))
    print("Validation samples: {:d}.".format(len(validation_data)))
    training_dataloader = torch.utils.data.DataLoader(
        training_data, batch_size=8, pin_memory=True, shuffle=True)
    validation_dataloader = torch.utils.data.DataLoader(
        validation_data, batch_size=8, pin_memory=True, shuffle=False)
    batch_count = int(math.ceil(len(training_data) /
                                training_dataloader.batch_size))
    # Train as many epochs as specified
    epoch_end = args.epochs
    print("Training from epoch {:d} to {:d}".format(epoch_start, epoch_end))
    # Set up the optimizer
    # TODO: Use betas=(0.5, 0.999)
    # Random light (L) and view (V) candidates: sampled in [0.2, 1.0]^3,
    # L2-normalized per row, then x/y remapped to [-1, 1] (z stays positive).
    L = torch.FloatTensor(5, 3).uniform_(0.2, 1.0)
    L = L / torch.linalg.norm(L, ord=2, dim=-1, keepdim=True)
    L[:, :2] = 2.0 * L[:, :2] - 1.0
    V = torch.FloatTensor(1, 3).uniform_(0.2, 1.0)
    V = V / torch.linalg.norm(V, ord=2, dim=-1, keepdim=True)
    V[:, :2] = 2.0 * V[:, :2] - 1.0
    scenes = env.generate_specific_scenes(5, L, L)
    # The light positions themselves are the optimized parameters here,
    # not the model weights.
    L.requires_grad = True
    VIP = [L]
    # V.requires_grad = True
    optimizer = torch.optim.Adam(VIP, lr=0.1)
    model, optimizer, training_dataloader, validation_dataloader = accelerator.prepare(
        model, optimizer, training_dataloader, validation_dataloader)
    # print("scene", scene.camera)
    # TODO: Use scheduler if necessary
    #scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min')
    # Set up the loss
    loss_renderer = LocalRenderer()
    loss_function = MixedLoss2(loss_renderer, scenes)
    # Setup statistics stuff
    statistics_dir = checkpoint_dir / "logs"
    if clean_training and statistics_dir.exists():
        # Nuke the stats dir
        shutil.rmtree(statistics_dir)
    statistics_dir.mkdir(parents=True, exist_ok=True)
    writer = SummaryWriter(str(statistics_dir.absolute()))
    last_batch_inputs = None
    # Clear checkpoint in order to free up some memory
    checkpoint.purge()
    # --- Training loop --------------------------------------------------------
    lights = []   # light position of the first candidate after each batch (for plotting)
    losses = []   # (epoch, loss) per batch
    for epoch in range(epoch_start, epoch_end):
        for i, batch in enumerate(training_dataloader):
            # Unique index of this batch
            # NOTE(review): .detach().numpy() assumes L lives on the CPU - confirm on GPU runs.
            print("Ldet", (L.detach().numpy())[0])
            lights.append(((L.detach().numpy())[0]).tolist())
            # Rebuild the scenes every step so they use the current light positions.
            scenes = env.generate_specific_scenes(5, L, L)
            print("L", L)
            # if(epoch_end - epoch < 3):
            loss_function = MixedLoss2(loss_renderer, scenes)
            # else:
            #     loss_function = MixedLoss2(loss_renderer, scene[0])
            batch_index = epoch * batch_count + i
            # Construct inputs
            batch_inputs = batch["inputs"].to(device)
            batch_svbrdf = batch["svbrdf"].to(device)
            # Perform a step
            optimizer.zero_grad()
            outputs = model(batch_inputs)
            print("batch_inputs", batch_inputs.size())
            print("batch_svbrdfs", batch_svbrdf.size())
            print("batch_outputs", outputs.size())
            loss = loss_function(outputs, batch_svbrdf)
            accelerator.backward(loss)
            optimizer.step()
            print("Epoch {:d}, Batch {:d}, loss: {:f}".format(
                epoch, i + 1, loss.item()))
            losses.append((epoch, loss.item()))
            # Statistics
            writer.add_scalar("loss", loss.item(), batch_index)
            last_batch_inputs = batch_inputs
    # --- Post-training diagnostics (uses leftovers of the LAST batch) ---------
    # Record the final light position as well.
    lights.append(((L.detach().numpy())[0]).tolist())
    # NOTE(review): all output paths below are hard-coded for a Colab
    # environment (/content/experiment1/...) - adjust when running elsewhere.
    with open('/content/experiment1/losses/loss.txt', "w") as text_file:
        text_file.write(str(losses))
    print("lights1", lights)
    # print(len(lights))
    # Subsample every 10th light position for the trajectory scatter plot.
    lights2 = []
    for j in range(len(lights)):
        if j%10 == 0:
            lights2.append(lights[j])
    # print("lights2", lights)
    # l=np.array(lights)
    l = np.array(lights2)
    # Render the ground-truth svbrdf under the final (optimized) light.
    renderer = LocalRenderer()
    rendered_scene = env.generate_specific_scenes(1, L.detach(), L.detach())
    img = renderer.render(rendered_scene[0], batch_svbrdf[0])
    fig = plt.figure(frameon=False)
    # fig.set_size_inches(w,h)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.set_axis_off()
    fig.add_axes(ax)
    # print("shape", img.size())
    ax.imshow(img[0].detach().permute(1,2,0), aspect='auto')
    fig.savefig('/content/experiment1/figures/render1.png')
    # Render the predicted svbrdf under the same light.
    img = renderer.render(rendered_scene[0], outputs[0])
    fig = plt.figure(frameon=False)
    # fig.set_size_inches(w,h)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.set_axis_off()
    fig.add_axes(ax)
    # print("shape", img.size())
    ax.imshow(img[0].detach().permute(1,2,0), aspect='auto')
    fig.savefig('/content/experiment1/figures/render2.png')
    # print("size", batch_inputs.size())
    # NOTE(review): torch.add(L, 5) is NOT in-place and its result is discarded,
    # so this line is a no-op - presumably L = L + 5 (or L.add_(5)) was intended
    # to shift the light before the next render.
    torch.add(L, 5)
    print("L", L)
    rendered_scene = env.generate_specific_scenes(1, L, L)
    img = renderer.render(rendered_scene[0], batch_svbrdf[0])
    fig = plt.figure(frameon=False)
    # fig.set_size_inches(w,h)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.set_axis_off()
    fig.add_axes(ax)
    # print("shape", img.size())
    ax.imshow(img[0].detach().permute(1,2,0), aspect='auto')
    fig.savefig('/content/experiment1/figures/render3.png')
    # Save the first input image of the last batch.
    print("size", batch_inputs[0][0].size())
    img = batch_inputs[0][0]
    fig = plt.figure(frameon=False)
    # fig.set_size_inches(w,h)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.set_axis_off()
    fig.add_axes(ax)
    # print("shape", img.size())
    ax.imshow(img.detach().permute(1,2,0), aspect='auto')
    fig.savefig('/content/experiment1/figures/render4.png')
    # Save the four predicted svbrdf maps (normals/diffuse/roughness/specular).
    print("size", batch_inputs[0][0].size())
    normals, diffuse, roughness, specular = utils.unpack_svbrdf(outputs[0])
    img = normals
    fig = plt.figure(frameon=False)
    # fig.set_size_inches(w,h)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.set_axis_off()
    fig.add_axes(ax)
    # print("shape", img.size())
    ax.imshow(img.detach().permute(1,2,0), aspect='auto')
    fig.savefig('/content/experiment1/figures/output_normal.png')
    img = diffuse
    fig = plt.figure(frameon=False)
    # fig.set_size_inches(w,h)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.set_axis_off()
    fig.add_axes(ax)
    # print("shape", img.size())
    ax.imshow(img.detach().permute(1,2,0), aspect='auto')
    fig.savefig('/content/experiment1/figures/output_diffuse.png')
    img = roughness
    fig = plt.figure(frameon=False)
    # fig.set_size_inches(w,h)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.set_axis_off()
    fig.add_axes(ax)
    # print("shape", img.size())
    ax.imshow(img.detach().permute(1,2,0), aspect='auto')
    fig.savefig('/content/experiment1/figures/output_roughness.png')
    img = specular
    fig = plt.figure(frameon=False)
    # fig.set_size_inches(w,h)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.set_axis_off()
    fig.add_axes(ax)
    # print("shape", img.size())
    ax.imshow(img.detach().permute(1,2,0), aspect='auto')
    fig.savefig('/content/experiment1/figures/output_specular.png')
    # Save the four ground-truth svbrdf maps.
    print("size", batch_inputs[0][0].size())
    normals, diffuse, roughness, specular = utils.unpack_svbrdf(batch_svbrdf[0])
    img = normals
    fig = plt.figure(frameon=False)
    # fig.set_size_inches(w,h)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.set_axis_off()
    fig.add_axes(ax)
    # print("shape", img.size())
    ax.imshow(img.detach().permute(1,2,0), aspect='auto')
    fig.savefig('/content/experiment1/figures/target_normal.png')
    img = diffuse
    fig = plt.figure(frameon=False)
    # fig.set_size_inches(w,h)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.set_axis_off()
    fig.add_axes(ax)
    # print("shape", img.size())
    ax.imshow(img.detach().permute(1,2,0), aspect='auto')
    fig.savefig('/content/experiment1/figures/target_diffuse.png')
    img = roughness
    fig = plt.figure(frameon=False)
    # fig.set_size_inches(w,h)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.set_axis_off()
    fig.add_axes(ax)
    # print("shape", img.size())
    ax.imshow(img.detach().permute(1,2,0), aspect='auto')
    fig.savefig('/content/experiment1/figures/target_roughness.png')
    img = specular
    fig = plt.figure(frameon=False)
    # fig.set_size_inches(w,h)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.set_axis_off()
    fig.add_axes(ax)
    # print("shape", img.size())
    ax.imshow(img.detach().permute(1,2,0), aspect='auto')
    fig.savefig('/content/experiment1/figures/target_specular.png')
    # Stitch the four target maps horizontally into a single overview image.
    images = [Image.open(x) for x in ['/content/experiment1/figures/target_normal.png', '/content/experiment1/figures/target_diffuse.png', '/content/experiment1/figures/target_roughness.png', '/content/experiment1/figures/target_specular.png']]
    widths, heights = zip(*(i.size for i in images))
    total_width = sum(widths)
    max_height = max(heights)
    new_im = Image.new('RGB', (total_width, max_height))
    x_offset = 0
    for im in images:
        new_im.paste(im, (x_offset,0))
        x_offset += im.size[0]
    new_im.save('/content/experiment1/figures/target_svbrdf.png')
    # Same stitching for the four predicted maps.
    images = [Image.open(x) for x in ['/content/experiment1/figures/output_normal.png', '/content/experiment1/figures/output_diffuse.png', '/content/experiment1/figures/output_roughness.png', '/content/experiment1/figures/output_specular.png']]
    widths, heights = zip(*(i.size for i in images))
    total_width = sum(widths)
    max_height = max(heights)
    new_im = Image.new('RGB', (total_width, max_height))
    x_offset = 0
    for im in images:
        new_im.paste(im, (x_offset,0))
        x_offset += im.size[0]
    new_im.save('/content/experiment1/figures/output_svbrdf.png')
    # NOTE(review): this re-saves the predicted normal map a second time,
    # overwriting the output_normal.png written above - looks redundant.
    print("size", batch_inputs[0][0].size())
    normals, diffuse, roughness, specular = utils.unpack_svbrdf(outputs[0])
    img = normals
    fig = plt.figure(frameon=False)
    # fig.set_size_inches(w,h)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.set_axis_off()
    fig.add_axes(ax)
    # print("shape", img.size())
    ax.imshow(img.detach().permute(1,2,0), aspect='auto')
    fig.savefig('/content/experiment1/figures/output_normal.png')
    # 3D scatter plot of the light position trajectory (origin marked in red).
    print("lights3", l)
    fig = plt.figure()
    ax = fig.add_subplot(projection='3d')
    ax.scatter([0.0], [0.0], [0.0], marker='o', c='r')
    # v = V.detach().numpy()
    ax.scatter(l[:,0], l[:,1], l[:,2], marker='.', c='g')
    # ax.scatter(v[:,0], v[:,1], v[:,2], marker='^', c='b')
    ax.set_xlim(-8, 8)
    ax.set_ylim(-8, 8)
    ax.set_zlim(-8., 8.)
    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('Z')
    # plt.show()
    plt.savefig('/content/experiment1/figures/light.png')
    plt.show()
    # Disabled checkpointing/validation code from the original training loop:
    # if epoch % args.save_frequency == 0:
    #     Checkpoint.save(checkpoint_dir, args, model, optimizer, epoch)
    # if epoch % args.validation_frequency == 0 and len(validation_data) > 0:
    #     model.eval()
    #     val_loss = 0.0
    #     batch_count_val = 0
    #     for batch in validation_dataloader:
    #         # Construct inputs
    #         batch_inputs = batch["inputs"].to(device)
    #         batch_svbrdf = batch["svbrdf"].to(device)
    #         outputs = model(batch_inputs)
    #         val_loss += loss_function(outputs, batch_svbrdf).item()
    #         batch_count_val += 1
    #     val_loss /= batch_count_val
    #     print("Epoch {:d}, validation loss: {:f}".format(epoch, val_loss))
    #     writer.add_scalar("val_loss", val_loss, epoch * batch_count)
    #     model.train()
| 34.437653
| 244
| 0.637061
| 1,956
| 14,085
| 4.443763
| 0.166155
| 0.051772
| 0.069029
| 0.051542
| 0.518178
| 0.503106
| 0.431661
| 0.424528
| 0.396341
| 0.396341
| 0
| 0.022544
| 0.209514
| 14,085
| 408
| 245
| 34.522059
| 0.758128
| 0.177778
| 0
| 0.436508
| 0
| 0
| 0.133664
| 0.098443
| 0
| 0
| 0
| 0.002451
| 0
| 1
| 0.007937
| false
| 0
| 0.071429
| 0.003968
| 0.087302
| 0.071429
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0c1ea88aed755291844e1e991a6d2f5cdb34cdd
| 8,924
|
py
|
Python
|
advent_of_code/2019/11_space_police/aoc_2019_11.py
|
thanosa/coding-challenges
|
a10b0de51da076a4bcc798b4a3d5a08e29c5af01
|
[
"MIT"
] | null | null | null |
advent_of_code/2019/11_space_police/aoc_2019_11.py
|
thanosa/coding-challenges
|
a10b0de51da076a4bcc798b4a3d5a08e29c5af01
|
[
"MIT"
] | null | null | null |
advent_of_code/2019/11_space_police/aoc_2019_11.py
|
thanosa/coding-challenges
|
a10b0de51da076a4bcc798b4a3d5a08e29c5af01
|
[
"MIT"
] | null | null | null |
''' Advent of code 2019 Day 11 - Space police '''
from typing import NamedTuple
from enum import Enum
INPUT_FILE=__file__.replace('.py', '.dat')
def to_number(digits: list) -> int:
    """Concatenate a list of digits into the integer they spell."""
    return int("".join(str(digit) for digit in digits))
def to_list(number: int) -> list:
    """Split a non-negative integer into the list of its decimal digits."""
    return [int(digit) for digit in str(number)]
def get_modes(instruction: int, parameter_count: int = 3) -> list:
    """Return the parameter modes of an intcode instruction, least significant first."""
    mode_digits = str(instruction // 100).zfill(parameter_count)
    return [int(digit) for digit in reversed(mode_digits)]
def get_dict(lst: list):
    """Map each list index to its element (intcode memory as a sparse dict)."""
    return dict(enumerate(lst))
def get_value(code: dict, key: int):
    """Return the memory value at key, defaulting to 0 for untouched addresses."""
    # dict.get avoids the double lookup of the explicit membership test.
    return code.get(key, 0)
def run_program(code: dict, inputs: list) -> int:
    """Execute an intcode program as a generator.

    Yields every value produced by an output instruction (opcode 4).
    *inputs* is consumed from the front by input instructions (opcode 3);
    the caller may keep appending to it between ``next()`` calls.

    Fixes over the previous version: the bare ``except:`` used to fetch
    parameters is replaced by ``dict.get`` (same result: missing cells read
    as None and are only dereferenced when actually used), the unused
    ``output``/``counter`` locals are removed, and the nine copies of the
    parameter-mode resolution code are deduplicated into ``read``/``write``.
    """
    code = code.copy()  # never mutate the caller's program
    pos = 0   # instruction pointer
    base = 0  # relative base for mode-2 addressing
    while (code[pos] % 100) != 99:
        instruction = code[pos]
        # Up to three raw parameters; absent cells read as None and are
        # only dereferenced when the opcode actually uses them.
        params = [code.get(pos + 1 + i) for i in range(3)]
        operation = instruction % 100
        # Parameter modes: digits above the two opcode digits,
        # least significant first (0=position, 1=immediate, 2=relative).
        modes = [((instruction // 100) // (10 ** i)) % 10 for i in range(3)]

        def read(i):
            # Resolve parameter i to a value according to its mode.
            if modes[i] == 0:
                return code.get(params[i], 0)
            if modes[i] == 1:
                return params[i]
            if modes[i] == 2:
                return code.get(params[i] + base, 0)
            raise RuntimeError(f"unknown mode: {modes[i]}")

        def write(i, value):
            # Store *value* at the address named by parameter i
            # (mode 0 = absolute address, otherwise relative to base).
            if modes[i] == 0:
                code[params[i]] = value
            else:
                code[params[i] + base] = value

        if operation == 1:    # addition
            write(2, read(0) + read(1))
            pos += 4
        elif operation == 2:  # multiplication
            write(2, read(0) * read(1))
            pos += 4
        elif operation == 3:  # store next input value
            if modes[0] in (0, 2):
                write(0, inputs.pop(0))
            else:
                raise RuntimeError("fail")
            pos += 2
        elif operation == 4:  # produce an output value
            yield read(0)
            pos += 2
        elif operation == 5:  # jump if true
            pos = read(1) if read(0) != 0 else pos + 3
        elif operation == 6:  # jump if false
            pos = read(1) if read(0) == 0 else pos + 3
        elif operation == 7:  # less than
            write(2, 1 if read(0) < read(1) else 0)
            pos += 4
        elif operation == 8:  # equals
            write(2, 1 if read(0) == read(1) else 0)
            pos += 4
        elif operation == 9:  # relative base shift
            base += read(0)
            pos += 2
        else:
            raise RuntimeError(f"error in operation: {pos}")
class Point(NamedTuple):
    """Immutable (X, Y) grid coordinate used as a panel key."""
    X: int
    Y: int
class Direction(Enum):
    """Robot heading, ordered so (value + 1) % 4 turns left and
    (value - 1) % 4 turns right (see run_robot)."""
    UP = 0
    LEFT = 1
    DOWN = 2
    RIGHT = 3
def run_robot(code: dict, start_on_white: bool = False) -> dict:
    """Run the painting robot and return the panels it painted.

    The robot runs *code* as its brain (an intcode generator): each step it
    is fed the color of the current panel (0 = black, 1 = white, black by
    default) and receives back a color to paint and a turn direction
    (0 = left, 1 = right), then moves one step forward.

    Returns a dict mapping Point -> last painted color.  (The previous
    ``-> int`` annotation was wrong, the unused ``finished`` flag and the
    dead ``paint == ""`` check — the brain yields ints — are removed.)
    """
    DIRECTIONS_COUNT = 4
    direction = Direction.UP
    panels = {}
    seen = set()
    color = []  # input queue shared with the brain generator
    position = Point(0, 0)
    if start_on_white:
        panels[position] = 1
    brain = run_program(code, color)
    while True:
        try:
            # Sense the color on the point. Default is black (0).
            color.append(panels.get(position, 0))
            paint = next(brain)
            rotation = next(brain)
            # Paints the panel and keeps track of all visited points.
            panels[position] = paint
            seen.add(position)
            # Turn left (0) or right (1).
            if rotation == 0:
                direction = Direction((direction.value + 1) % DIRECTIONS_COUNT)
            elif rotation == 1:
                direction = Direction((direction.value - 1) % DIRECTIONS_COUNT)
            # Move a step forward.
            if direction == Direction.UP:
                position = Point(position.X, position.Y - 1)
            elif direction == Direction.LEFT:
                position = Point(position.X - 1, position.Y)
            elif direction == Direction.DOWN:
                position = Point(position.X, position.Y + 1)
            elif direction == Direction.RIGHT:
                position = Point(position.X + 1, position.Y)
            else:
                raise RuntimeError(f"Wrong direction: {direction}")
        except StopIteration:
            # The brain halted (opcode 99): painting is done.
            return panels
def print_panels(panels: dict):
    """Render painted panels as ASCII art: '#' for white (1), ' ' otherwise.

    Prints the bounding box first, then one text row per grid row.
    """
    min_x = min(panels, key=lambda panel: panel.X).X
    max_x = max(panels, key=lambda panel: panel.X).X
    min_y = min(panels, key=lambda panel: panel.Y).Y
    max_y = max(panels, key=lambda panel: panel.Y).Y
    print(f"{min_x} {max_x} {min_y} {max_y}")
    for y in range(min_y, max_y + 1):
        # Single dict lookup per cell (was: membership test + second lookup).
        row = [
            "#" if panels.get(Point(x, y)) == 1 else " "
            for x in range(min_x, max_x + 1)
        ]
        print(''.join(row))
# Read the input: one line of comma-separated intcode values, stored as an
# address -> value dict.
with open(INPUT_FILE) as f:
    input_dict = get_dict(list(map(int, f.read().strip().split(','))))
# Part 1 solution: count panels painted at least once (robot starts on black).
panels_count = len(run_robot(input_dict))
print(f"Part 1: {panels_count}")
# Part 2 solution: start on a white panel and render the resulting paint job.
panels = run_robot(input_dict, True)
print(f"Part 2:")
print_panels(panels)
| 29.647841
| 91
| 0.438256
| 1,019
| 8,924
| 3.776251
| 0.155054
| 0.077963
| 0.053015
| 0.074844
| 0.442827
| 0.423337
| 0.420218
| 0.337058
| 0.337058
| 0.337058
| 0
| 0.034712
| 0.444756
| 8,924
| 300
| 92
| 29.746667
| 0.741877
| 0.039892
| 0
| 0.435556
| 0
| 0
| 0.02171
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035556
| false
| 0
| 0.008889
| 0.013333
| 0.111111
| 0.026667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0c4cc4a632d487744596824e2338a9f0399ee17
| 814
|
py
|
Python
|
nicos_mlz/mira/setups/mezeiflip.py
|
jkrueger1/nicos
|
5f4ce66c312dedd78995f9d91e8a6e3c891b262b
|
[
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 12
|
2019-11-06T15:40:36.000Z
|
2022-01-01T16:23:00.000Z
|
nicos_mlz/mira/setups/mezeiflip.py
|
jkrueger1/nicos
|
5f4ce66c312dedd78995f9d91e8a6e3c891b262b
|
[
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 91
|
2020-08-18T09:20:26.000Z
|
2022-02-01T11:07:14.000Z
|
nicos_mlz/mira/setups/mezeiflip.py
|
jkrueger1/nicos
|
5f4ce66c312dedd78995f9d91e8a6e3c891b262b
|
[
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 6
|
2020-01-11T10:52:30.000Z
|
2022-02-25T12:35:23.000Z
|
# NICOS instrument setup: Mezei spin flipper driven by a two-channel TTI
# power supply exposed via Tango.
description = 'Mezei spin flipper using TTI power supply'
group = 'optional'
# Base address of the Tango server hosting the supply outputs.
tango_base = 'tango://miractrl.mira.frm2:10000/mira/'
devices = dict(
    # Channel 1 of the supply: the flipper current itself.
    dct1 = device('nicos.devices.entangle.PowerSupply',
        description = 'current in first channel of supply (flipper current)',
        tangodevice = tango_base + 'tti1/out1',
        timeout = 1,
        precision = 0.01,
    ),
    # Channel 2 of the supply: the compensation current.
    dct2 = device('nicos.devices.entangle.PowerSupply',
        description = 'current in second channel of supply (compensation current)',
        tangodevice = tango_base + 'tti1/out2',
        timeout = 1,
        precision = 0.01,
    ),
    # Logical flipper device combining both currents.
    flip = device('nicos.devices.polarized.MezeiFlipper',
        description = 'Mezei flipper before sample (in shielding table)',
        flip = 'dct1',
        corr = 'dct2',
    ),
)
| 32.56
| 83
| 0.63145
| 88
| 814
| 5.806818
| 0.545455
| 0.052838
| 0.105675
| 0.101761
| 0.422701
| 0.223092
| 0.223092
| 0.223092
| 0
| 0
| 0
| 0.035948
| 0.248157
| 814
| 24
| 84
| 33.916667
| 0.79902
| 0
| 0
| 0.318182
| 0
| 0
| 0.460688
| 0.174447
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0c51c1373dbb36d56025f69dde451b4d208bab8
| 16,817
|
py
|
Python
|
mars/learn/cluster/_k_means_init.py
|
hxri/mars
|
f7864f00911883b94800b63856f0e57648d3d9b4
|
[
"Apache-2.0"
] | 2,413
|
2018-12-06T09:37:11.000Z
|
2022-03-30T15:47:39.000Z
|
mars/learn/cluster/_k_means_init.py
|
hxri/mars
|
f7864f00911883b94800b63856f0e57648d3d9b4
|
[
"Apache-2.0"
] | 1,335
|
2018-12-07T03:06:18.000Z
|
2022-03-31T11:45:57.000Z
|
mars/learn/cluster/_k_means_init.py
|
hxri/mars
|
f7864f00911883b94800b63856f0e57648d3d9b4
|
[
"Apache-2.0"
] | 329
|
2018-12-07T03:12:41.000Z
|
2022-03-29T21:49:57.000Z
|
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from ... import opcodes
from ... import tensor as mt
from ...core import OutputType, recursive_tile
from ...core.operand import OperandStage
from ...serialization.serializables import KeyField, Int32Field
from ...tensor.array_utils import as_same_device, device
from ...tensor.core import TensorOrder
from ...tensor.random import RandomStateField
from ...utils import has_unknown_shape
from ..metrics import euclidean_distances
from ..operands import LearnOperand, LearnOperandMixin
def _kmeans_plus_plus_init(X,
                           x_squared_norms,
                           random_state,
                           n_clusters: int,
                           n_local_trials: int = None):
    """Build the k-means++ seeding computation on mars tensors.

    Parameters
    ----------
    X : tensor, shape (n_samples, n_features)
        Data to pick seeds from.
    x_squared_norms : tensor, shape (n_samples,)
        Precomputed squared row norms of X; must not be None.
    random_state : RandomState
        Source of randomness for center sampling.
    n_clusters : int
        Number of seeds to choose.
    n_local_trials : int, optional
        Candidate centers sampled per step; defaults to 2 + log(k).

    Returns
    -------
    Tensor of shape (n_clusters, n_features) holding the chosen centers.
    """
    n_samples, n_features = X.shape
    centers = mt.empty((n_clusters, n_features), dtype=X.dtype)
    assert x_squared_norms is not None, 'x_squared_norms None in _k_init'
    # Set the number of local seeding trials if none is given
    if n_local_trials is None:
        # This is what Arthur/Vassilvitskii tried, but did not report
        # specific results for other than mentioning in the conclusion
        # that it helped.
        n_local_trials = 2 + int(np.log(n_clusters))
    # Pick first center randomly
    center_id = random_state.randint(n_samples)
    if X.issparse():  # pragma: no cover
        centers[0] = X[center_id].todense()
    else:
        centers[0] = X[center_id]
    # Initialize list of closest distances and calculate current potential
    closest_dist_sq = euclidean_distances(
        centers[0, mt.newaxis], X, Y_norm_squared=x_squared_norms,
        squared=True)
    current_pot = closest_dist_sq.sum()
    # Pick the remaining n_clusters-1 points
    for c in range(1, n_clusters):
        # Choose center candidates by sampling with probability proportional
        # to the squared distance to the closest existing center
        rand_vals = random_state.random_sample(n_local_trials) * current_pot
        candidate_ids = mt.searchsorted(closest_dist_sq.cumsum(),
                                        rand_vals)
        # XXX: numerical imprecision can result in a candidate_id out of range
        candidate_ids = mt.clip(candidate_ids, None, closest_dist_sq.size - 1)
        # Compute distances to center candidates
        distance_to_candidates = euclidean_distances(
            X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True)
        # update closest distances squared and potential for each candidate
        distance_to_candidates = mt.minimum(closest_dist_sq, distance_to_candidates)
        candidates_pot = distance_to_candidates.sum(axis=1)
        # Decide which candidate is the best
        best_candidate = mt.argmin(candidates_pot)
        current_pot = candidates_pot[best_candidate]
        closest_dist_sq = distance_to_candidates[best_candidate]
        best_candidate = candidate_ids[best_candidate]
        # Permanently add best center candidate found in local tries
        if X.issparse():  # pragma: no cover
            c_center = X[best_candidate].todense()
        else:
            c_center = X[best_candidate]
        centers[c] = c_center
    return centers
class KMeansPlusPlusInit(LearnOperand, LearnOperandMixin):
    """Operand computing k-means++ seed centers.

    Single-chunk inputs execute directly via scikit-learn's k-means++;
    multi-chunk inputs are tiled into the mars tensor graph built by
    ``_kmeans_plus_plus_init``.
    """
    _op_type_ = opcodes.KMEANS_PLUS_PLUS_INIT
    _x = KeyField('x')
    _n_clusters = Int32Field('n_clusters')
    _x_squared_norms = KeyField('x_squared_norms')
    _state = RandomStateField('state')
    _n_local_trials = Int32Field('n_local_trials')
    def __init__(self, x=None, n_clusters=None, x_squared_norms=None,
                 state=None, n_local_trials=None, output_types=None, **kw):
        super().__init__(_x=x, _n_clusters=n_clusters, _x_squared_norms=x_squared_norms,
                         _state=state, _n_local_trials=n_local_trials,
                         _output_types=output_types, **kw)
        # The result is always a tensor of centers.
        if self._output_types is None:
            self._output_types = [OutputType.tensor]
    @property
    def x(self):
        return self._x
    @property
    def n_clusters(self):
        return self._n_clusters
    @property
    def x_squared_norms(self):
        return self._x_squared_norms
    @property
    def state(self):
        return self._state
    @property
    def n_local_trials(self):
        return self._n_local_trials
    def _set_inputs(self, inputs):
        # Keep field references in sync with the operand's actual inputs:
        # x is the first input, x_squared_norms the last.
        super()._set_inputs(inputs)
        self._x = self._inputs[0]
        self._x_squared_norms = self._inputs[-1]
    def __call__(self):
        """Create the output tileable of shape (n_clusters, n_features)."""
        inputs = [self._x, self._x_squared_norms]
        kw = {
            'shape': (self._n_clusters, self._x.shape[1]),
            'dtype': self._x.dtype,
            'order': TensorOrder.C_ORDER
        }
        return self.new_tileable(inputs, kws=[kw])
    @classmethod
    def _tile_one_chunk(cls, op: "KMeansPlusPlusInit"):
        # Fast path: both inputs fit in one chunk, so emit a single chunk
        # running the whole initialization in one execute call.
        out = op.outputs[0]
        chunk_op = op.copy().reset_key()
        chunk_kw = out.params.copy()
        chunk_kw['index'] = (0, 0)
        chunk_inputs = [op.x.chunks[0], op.x_squared_norms.chunks[0]]
        chunk = chunk_op.new_chunk(chunk_inputs, kws=[chunk_kw])
        kw = out.params
        kw['chunks'] = [chunk]
        kw['nsplits'] = tuple((s,) for s in out.shape)
        new_op = op.copy()
        return new_op.new_tileables(op.inputs, kws=[kw])
    @classmethod
    def tile(cls, op: "KMeansPlusPlusInit"):
        if len(op.x.chunks) == 1:
            assert len(op.x_squared_norms.chunks) == 1
            return cls._tile_one_chunk(op)
        else:
            return (yield from cls._tile_k_init(op))
    @classmethod
    def _tile_k_init(cls, op: "KMeansPlusPlusInit"):
        # Multi-chunk path: express k-means++ as a graph of tensor ops.
        X = op.x
        n_clusters = op.n_clusters
        x_squared_norms = op.x_squared_norms
        random_state = op.state
        n_local_trials = op.n_local_trials
        centers = _kmeans_plus_plus_init(X, x_squared_norms, random_state,
                                         n_clusters, n_local_trials)
        return (yield from recursive_tile(centers))
    @classmethod
    def execute(cls, ctx, op: "KMeansPlusPlusInit"):
        """Run scikit-learn's k-means++ on the materialized single chunk."""
        try:
            from sklearn.cluster._kmeans import _kmeans_plusplus
        except ImportError:  # pragma: no cover
            # Older scikit-learn: adapt _k_init to the newer
            # _kmeans_plusplus return convention (centers, indices).
            try:
                from sklearn.cluster._kmeans import _k_init
            except ImportError:
                from sklearn.cluster.k_means_ import _k_init
            def _kmeans_plusplus(*args, **kwargs):
                return _k_init(*args, **kwargs), None
        (x, x_squared_norms), device_id, _ = as_same_device(
            [ctx[inp.key] for inp in op.inputs], device=op.device, ret_extra=True)
        with device(device_id):
            ctx[op.outputs[0].key] = _kmeans_plusplus(
                x, op.n_clusters, x_squared_norms=x_squared_norms, random_state=op.state,
                n_local_trials=op.n_local_trials)[0]
###############################################################################
# Initialization heuristic
def _k_init(X, n_clusters, x_squared_norms, random_state, n_local_trials=None):
    """Seed *n_clusters* centers for k-means via the k-means++ scheme.

    Parameters
    ----------
    X : array or sparse matrix, shape (n_samples, n_features)
        The data to pick seeds for. To avoid memory copy, the input data
        should be double precision (dtype=np.float64).
    n_clusters : integer
        The number of seeds to choose
    x_squared_norms : array, shape (n_samples,)
        Squared Euclidean norm of each data point.
    random_state : int, RandomState instance
        The generator used to initialize the centers. Use an int to make
        the randomness deterministic.
        See :term:`Glossary <random_state>`.
    n_local_trials : integer, optional
        The number of seeding trials for each center (except the first);
        the trial reducing inertia the most is greedily chosen. None means
        2 + log(k) trials, the default.

    Notes
    -----
    Selects initial cluster centers in a smart way to speed up k-means
    convergence; see Arthur, D. and Vassilvitskii, S. "k-means++: the
    advantages of careful seeding", ACM-SIAM symposium on Discrete
    algorithms, 2007.  Ported from
    http://www.stanford.edu/~darthur/kMeansppTest.zip, the implementation
    used in that paper.
    """
    init_op = KMeansPlusPlusInit(x=X, n_clusters=n_clusters,
                                 x_squared_norms=x_squared_norms,
                                 state=random_state,
                                 n_local_trials=n_local_trials)
    return init_op()
class KMeansScalablePlusPlusInit(LearnOperand, LearnOperandMixin):
    """Operand computing scalable k-means++ (k-means||) seed centers.

    Oversamples candidate centers over ``init_iter`` rounds, weights each
    candidate by how many points it is closest to (map/combine stages),
    then reduces the weighted candidates to ``n_clusters`` centers with a
    local KMeans run.
    """
    _op_type_ = opcodes.KMEANS_SCALABLE_PLUS_PLUS_INIT
    _x = KeyField('x')
    _n_clusters = Int32Field('n_clusters')
    _x_squared_norms = KeyField('x_squared_norms')
    _state = RandomStateField('state')
    _init_iter = Int32Field('init_iter')
    _oversampling_factor = Int32Field('oversampling_factor')
    def __init__(self, x=None, n_clusters=None, x_squared_norms=None,
                 state=None, init_iter=None, oversampling_factor=None,
                 output_types=None, **kw):
        super().__init__(_x=x, _n_clusters=n_clusters, _x_squared_norms=x_squared_norms,
                         _state=state, _init_iter=init_iter,
                         _oversampling_factor=oversampling_factor,
                         _output_types=output_types, **kw)
        # The result is always a tensor of centers.
        if self._output_types is None:
            self._output_types = [OutputType.tensor]
    @property
    def x(self):
        return self._x
    @property
    def n_clusters(self):
        return self._n_clusters
    @property
    def x_squared_norms(self):
        return self._x_squared_norms
    @property
    def state(self):
        return self._state
    @property
    def init_iter(self):
        return self._init_iter
    @property
    def oversampling_factor(self):
        return self._oversampling_factor
    def _set_inputs(self, inputs):
        # Re-link field references; map/combine/reduce stage operands may
        # carry no x / x_squared_norms, hence the None guards.
        super()._set_inputs(inputs)
        if self._x is not None:
            self._x = self._inputs[0]
        if self._x_squared_norms is not None:
            self._x_squared_norms = self._inputs[-1]
    def __call__(self):
        """Create the output tileable of shape (n_clusters, n_features)."""
        inputs = [self._x, self._x_squared_norms]
        kw = {
            'shape': (self._n_clusters, self._x.shape[1]),
            'dtype': self._x.dtype,
            'order': TensorOrder.C_ORDER
        }
        return self.new_tileable(inputs, kws=[kw])
    @classmethod
    def tile(cls, op: "KMeansScalablePlusPlusInit"):
        """Build the oversample -> map -> combine -> reduce chunk graph."""
        if has_unknown_shape(*op.inputs):
            yield
        x = mt.tensor(op.x)
        x_squared_norms = mt.atleast_2d(op.x_squared_norms)
        out = op.outputs[0]
        random_state = op.state
        rs = mt.random.RandomState.from_numpy(random_state)
        n_samples, n_features = x.shape
        n_clusters = op.n_clusters
        # step 1, sample a centroid
        centers = x[random_state.randint(n_samples, size=1)]
        for _ in range(op.init_iter):
            distances = euclidean_distances(
                x, centers, X_norm_squared=x_squared_norms, squared=True)
            # calculate the cost of data with respect to current centers
            cost = mt.sum(mt.min(distances, axis=1))
            # calculate the distribution to sample new centers
            distribution = mt.full(len(distances), 1 / len(distances))
            mt.true_divide(mt.min(distances, axis=1), cost,
                           where=cost != 0, out=distribution)
            # pick new centers
            new_centers_size = op.oversampling_factor * n_clusters
            new_centers = x[rs.choice(n_samples, new_centers_size, p=distribution)]
            centers = mt.concatenate([centers, new_centers])
        # rechunk centers into one chunk
        centers = (yield from recursive_tile(centers)).rechunk(centers.shape)
        distances = yield from recursive_tile(euclidean_distances(
            x, centers, X_norm_squared=x_squared_norms, squared=True))
        map_index_to_chunks = {}
        # calculate weight for each chunk
        for c in distances.chunks:
            map_chunk_op = KMeansScalablePlusPlusInit(stage=OperandStage.map)
            map_chunk_kw = {
                'shape': (len(centers),),
                'dtype': np.dtype(np.int64),
                'order': TensorOrder.C_ORDER,
                'index': c.index
            }
            map_chunk = map_chunk_op.new_chunk([c], kws=[map_chunk_kw])
            map_index_to_chunks[c.index] = map_chunk
        combine_chunks = []
        for i in range(distances.chunk_shape[0]):
            map_chunks = [map_index_to_chunks[i, j]
                          for j in range(distances.chunk_shape[1])]
            combine_chunk_op = KMeansScalablePlusPlusInit(stage=OperandStage.combine)
            combine_chunk_kw = {
                'shape': (len(centers),),
                'dtype': np.dtype(np.int64),
                'order': TensorOrder.C_ORDER,
                'index': (i,)
            }
            combine_chunk = combine_chunk_op.new_chunk(
                map_chunks, kws=[combine_chunk_kw])
            combine_chunks.append(combine_chunk)
        reduce_chunk_op = KMeansScalablePlusPlusInit(n_clusters=op.n_clusters,
                                                     state=random_state,
                                                     stage=OperandStage.reduce)
        reduce_chunk_kw = out.params
        reduce_chunk_kw['index'] = (0, 0)
        reduce_chunk = reduce_chunk_op.new_chunk([centers.chunks[0]] + combine_chunks,
                                                 kws=[reduce_chunk_kw])
        new_op = op.copy()
        kw = out.params
        kw['chunks'] = [reduce_chunk]
        kw['nsplits'] = tuple((s,) for s in out.shape)
        return new_op.new_tileables(op.inputs, kws=[kw])
    @classmethod
    def _execute_map(cls, ctx, op: "KMeansScalablePlusPlusInit"):
        """Per distance chunk: nearest candidate id and distance per row."""
        distances = ctx[op.inputs[0].key]
        min_distance_ids = np.argmin(distances, axis=1)
        min_distances = distances[range(len(distances)), min_distance_ids]
        ctx[op.outputs[0].key] = (min_distances, min_distance_ids)
    @classmethod
    def _execute_combine(cls, ctx, op: "KMeansScalablePlusPlusInit"):
        """Per chunk row: count how many points pick each candidate center."""
        out = op.outputs[0]
        all_distances, all_min_distance_ids = tuple(zip(*(ctx[inp.key] for inp in op.inputs)))
        distances = np.stack(all_distances).T
        min_distance_ids = np.stack(all_min_distance_ids).T
        combined_min_distance_id = np.argmin(distances, axis=1)
        min_distance_ids = min_distance_ids[range(len(distances)), combined_min_distance_id]
        count = np.bincount(min_distance_ids)
        result = np.zeros(out.shape[0], dtype=np.int64)
        result[:len(count)] = count
        ctx[out.key] = result
    @classmethod
    def _execute_reduce(cls, ctx, op: "KMeansScalablePlusPlusInit"):
        """Cluster the weighted candidates down to n_clusters final centers."""
        from sklearn.cluster import KMeans
        inputs = [ctx[inp.key] for inp in op.inputs]
        # Sum per-chunk counts into one weight vector over the candidates.
        count = np.zeros(inputs[1].shape[0], dtype=np.int64)
        for inp in inputs[1:]:
            count += inp
        weight = count / count.sum()
        centers = inputs[0]
        kmeans = KMeans(n_clusters=op.n_clusters, n_init=1,
                        random_state=op.state)
        kmeans.fit(centers, sample_weight=weight)
        ctx[op.outputs[0].key] = kmeans.cluster_centers_
    @classmethod
    def execute(cls, ctx, op: "KMeansScalablePlusPlusInit"):
        # Dispatch on the pipeline stage this operand instance represents.
        if op.stage == OperandStage.map:
            return cls._execute_map(ctx, op)
        elif op.stage == OperandStage.combine:
            return cls._execute_combine(ctx, op)
        else:
            return cls._execute_reduce(ctx, op)
def _scalable_k_init(X, n_clusters, x_squared_norms, random_state,
                     oversampling_factor=2, init_iter=5):
    """Create a tileable computing scalable k-means++ (k-means||) seeds."""
    init_op = KMeansScalablePlusPlusInit(
        x=X,
        n_clusters=n_clusters,
        x_squared_norms=x_squared_norms,
        state=random_state,
        init_iter=init_iter,
        oversampling_factor=oversampling_factor,
    )
    return init_op()
| 37.288248
| 94
| 0.634477
| 2,115
| 16,817
| 4.770213
| 0.169267
| 0.034097
| 0.055407
| 0.01685
| 0.384577
| 0.316186
| 0.267916
| 0.256021
| 0.245019
| 0.21905
| 0
| 0.007272
| 0.272225
| 16,817
| 450
| 95
| 37.371111
| 0.81706
| 0.16846
| 0
| 0.364548
| 0
| 0
| 0.032209
| 0.009452
| 0
| 0
| 0
| 0
| 0.006689
| 1
| 0.100334
| false
| 0
| 0.060201
| 0.040134
| 0.294314
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0c54a43da9d2d5736bbbaf25b05dc7746829f11
| 2,157
|
py
|
Python
|
wikipedia_parser/infobox/wikitext_parser.py
|
ojones/wikipedia_parser
|
db548290fbc392299bba8adfda9fe18baa1e66fe
|
[
"MIT"
] | 9
|
2016-02-24T20:09:26.000Z
|
2019-03-10T11:33:34.000Z
|
wikipedia_parser/infobox/wikitext_parser.py
|
ojones/wikipedia_parser
|
db548290fbc392299bba8adfda9fe18baa1e66fe
|
[
"MIT"
] | 1
|
2019-02-13T17:38:50.000Z
|
2019-02-13T17:38:50.000Z
|
wikipedia_parser/infobox/wikitext_parser.py
|
ojones/wikipedia_parser
|
db548290fbc392299bba8adfda9fe18baa1e66fe
|
[
"MIT"
] | 1
|
2016-04-05T05:28:51.000Z
|
2016-04-05T05:28:51.000Z
|
import re
from wikipedia_parser.infobox import clean_text as clean_help
from wikipedia_parser.infobox import wikitext_helpers as wtext_help
from wikipedia_parser.third_party_adapters import parserfromhell_adapter as adapter
__author__ = 'oswaldjones'
def get_simple_text(wtext, key, clean=True):
    """Return the infobox value for *key*, or None if no candidate matches.

    *key* may be a single key or a list of candidate keys tried in order.
    Tries the parsed template parameters first, then falls back to scanning
    raw ``key = value`` lines.  When *clean* is true the value is passed
    through clean_help.clean_text before being returned.
    """
    text = None
    # isinstance (not `type(...) is list`) so list subclasses work too.
    keys = key if isinstance(key, list) else [key]
    template_dict = adapter.template_dict(wtext)
    wtext_lines = wtext_help.get_wtext_lines(wtext)
    for possible_key in keys:
        # try getting from parserfromhell
        if not text and template_dict:
            text = template_dict.get(possible_key)
        # final attempt if still no text
        if not text and wtext_lines:
            matched_line = wtext_help.find_key_val_line(wtext, possible_key)
            if matched_line:
                # maxsplit=1 keeps '=' characters inside the value intact.
                key_val = matched_line.strip(' \t\n\r').split("=", 1)
                if len(key_val) == 2:
                    text = key_val[1].strip()
    if text and clean:
        text = clean_help.clean_text(text)
    return text
def extract_page_links(wtext, key):
    """Collect wiki page link targets (``[[Target|label]]`` -> ``Target``)
    from the infobox value of *key*.

    *key* may be a single key or a list of candidate keys tried in order.
    Returns an empty list when no candidate key yields links.
    """
    links = []
    # isinstance (not `type(...) is list`) so list subclasses work too.
    keys = key if isinstance(key, list) else [key]
    template_dict = adapter.template_dict(wtext)
    wtext_lines = wtext_help.get_wtext_lines(wtext)
    # Raw string: "\[" is an invalid str escape and only works by accident.
    link_pattern = r"\[\[(.*?)\]\]"
    for possible_key in keys:
        # try parserfromhell
        if not links and template_dict:
            value = template_dict.get(possible_key)
            if value:
                matches = re.findall(link_pattern, value)
                links = [link.split("|", 1)[0] for link in matches]
        # final attempt if still no links
        if not links and wtext_lines:
            # NOTE(review): get_simple_text passes `wtext` here, not
            # `wtext_lines` -- confirm which one find_key_val_line expects.
            matched_line = wtext_help.find_key_val_line(wtext_lines, possible_key)
            if matched_line:
                # maxsplit=1 so values containing '=' are not silently
                # discarded (consistent with get_simple_text).
                key_val = matched_line.strip(' \t\n\r').split("=", 1)
                if len(key_val) == 2:
                    matches = re.findall(link_pattern, key_val[1].strip())
                    links = [link.split("|", 1)[0] for link in matches]
    return links
| 30.380282
| 90
| 0.592953
| 276
| 2,157
| 4.391304
| 0.23913
| 0.089109
| 0.049505
| 0.056931
| 0.615512
| 0.443894
| 0.443894
| 0.443894
| 0.443894
| 0.391089
| 0
| 0.006077
| 0.313398
| 2,157
| 70
| 91
| 30.814286
| 0.812289
| 0.052388
| 0
| 0.380952
| 0
| 0
| 0.027014
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.095238
| 0
| 0.190476
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0c98eb51566d8d4d1edda624372a00af1731e11
| 1,339
|
py
|
Python
|
src/video_transcoding/defaults.py
|
tumb1er/django-video-transcoding
|
54c85fb4a3b58b3f3b82e461b2f54f3c8dd5fcc6
|
[
"MIT"
] | 21
|
2020-02-07T17:40:16.000Z
|
2021-09-02T18:56:21.000Z
|
src/video_transcoding/defaults.py
|
just-work/django-video-transcoding
|
c88d88de8301cd65eda95db941d72028aac57aa9
|
[
"MIT"
] | 184
|
2020-02-09T10:46:17.000Z
|
2022-03-28T00:53:04.000Z
|
src/video_transcoding/defaults.py
|
just-work/django-video-transcoding
|
c88d88de8301cd65eda95db941d72028aac57aa9
|
[
"MIT"
] | 6
|
2020-02-07T13:58:33.000Z
|
2021-07-27T16:24:56.000Z
|
from os import getenv as e
from kombu import Queue
# Celery application name; reused as exchange name, queue name and routing key.
CELERY_APP_NAME = 'video_transcoding'
# Celery configuration, overridable via environment variables.
VIDEO_TRANSCODING_CELERY_CONF = {
    'broker_url': e('VIDEO_TRANSCODING_CELERY_BROKER_URL',
                    'amqp://guest:guest@rabbitmq:5672/'),
    'result_backend': e('VIDEO_TRANSCODING_CELERY_RESULT_BACKEND', None),
    'task_default_exchange': CELERY_APP_NAME,
    'task_default_exchange_type': 'topic',
    'task_default_queue': CELERY_APP_NAME,
    'worker_prefetch_multiplier': 1,
    'worker_concurrency': e('VIDEO_TRANSCODING_CELERY_CONCURRENCY'),
    'task_acks_late': True,
    'task_reject_on_worker_lost': True,
    'task_queues': [
        Queue(CELERY_APP_NAME, routing_key=CELERY_APP_NAME),
    ]
}
# Directory for large output files
VIDEO_TEMP_DIR = '/tmp'
# Download source before processing (any nonzero env value enables it)
VIDEO_DOWNLOAD_SOURCE = bool(int(e('VIDEO_DOWNLOAD_SOURCE', 0)))
# A list of WebDAV endpoints for storing video results (comma-separated env)
VIDEO_ORIGINS = e('VIDEO_ORIGINS',
                  'http://storage.localhost:8080/videos/').split(',')
# Video streamer public urls (comma-separated)
VIDEO_EDGES = e('VIDEO_EDGES', 'http://storage.localhost:8080/').split(',')
# Edge video manifest url template
# NOTE(review): the "(unknown)" segment looks like a corrupted placeholder
# (possibly a format field such as {filename}) -- confirm against upstream.
VIDEO_URL = '{edge}/hls/(unknown)1080p.mp4/index.m3u8'
# Output source files checksum (any nonzero env value enables it)
CHECKSUM_SOURCE = bool(int(e('CHECKSUM_SOURCE', 0)))
| 31.139535
| 75
| 0.726662
| 175
| 1,339
| 5.228571
| 0.497143
| 0.039344
| 0.071038
| 0.059016
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019366
| 0.151606
| 1,339
| 42
| 76
| 31.880952
| 0.786092
| 0.168783
| 0
| 0
| 0
| 0
| 0.472875
| 0.274864
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.08
| 0
| 0.08
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0c9c572e013959cc1791ab9408e2433e6b096c4
| 5,104
|
py
|
Python
|
wordSenseByContext.py
|
jmboettcher/fall2019_sentiment_in_alternative_words
|
d88fd0ed7d1396bb3755431d6aff85b880ffe149
|
[
"Apache-2.0"
] | null | null | null |
wordSenseByContext.py
|
jmboettcher/fall2019_sentiment_in_alternative_words
|
d88fd0ed7d1396bb3755431d6aff85b880ffe149
|
[
"Apache-2.0"
] | null | null | null |
wordSenseByContext.py
|
jmboettcher/fall2019_sentiment_in_alternative_words
|
d88fd0ed7d1396bb3755431d6aff85b880ffe149
|
[
"Apache-2.0"
] | null | null | null |
from collections import defaultdict
from nltk.tokenize import sent_tokenize
from nltk.corpus import wordnet as wn
from nltk.corpus import semcor as sc
from nltk.corpus import stopwords
import mywordtokenizer
class SenseContextWordDict:
    """Co-occurrence counts of context words for each SemCor-tagged synset.

    ``self.dictionary`` maps a WordNet Synset to a dict of
    ``context word -> count``.  The sentinel keys ``".total."`` and
    ``".totalNoStops."`` hold per-synset totals: every counted word
    contributes to ``".total."``, while ``".totalNoStops."`` only grows
    for non-stopword context words.
    """
    def __init__(self):
        # Built eagerly from the whole SemCor corpus (can take a while).
        self.dictionary = self._create_dictionary()
    def _create_dictionary(self):
        """Scan SemCor's sense-tagged sentences and build the count table."""
        dictionary = defaultdict(lambda: defaultdict(int))
        myStopWords = stopwords.words('english')
        for sentence in sc.tagged_sents(tag='sem'):
            plainWordSent = []
            taggedWordSent = []
            self._make_word_lists(plainWordSent, taggedWordSent, sentence)
            for taggedItemTuple in taggedWordSent:
                self._update_tagged_item_entry(myStopWords, dictionary, plainWordSent, taggedItemTuple[0],taggedItemTuple[1])
        return dictionary
    def _make_word_lists(self, plainWordSent, taggedWordSent, sentence):
        """Fill *plainWordSent* with one word per item and *taggedWordSent*
        with [tagged item, position] pairs for synset-labeled items."""
        for i in range(0,len(sentence)):
            item = sentence[i]
            if(type(item)) == list:
                plainWordSent.append(item[0])
            else:
                if type(item.label()) == str:
                    plainWordSent.append(item.leaves()[0])
                else:
                    plainWordSent.append(item.label().name())
                    taggedWordSent.append([item, i])
    def _update_tagged_item_entry(self, myStopWords,dictionary,plainWordSent,taggedItem,taggedItemPosition):
        """Count the co-occurring words of *taggedItem* into *dictionary*.

        Non-stopwords anywhere in the sentence are counted; stopwords are
        counted only when directly adjacent to the tagged item.
        """
        for j in range(0,len(plainWordSent)):
            word = plainWordSent[j]
            if taggedItem.label().name() != word:
                taggedSynset = taggedItem.label().synset()
                # Multi-word items are joined with '_'; count each part.
                splitUp = word.split("_")
                for thisword in splitUp:
                    wordTokened = mywordtokenizer.simple(thisword)
                    if len(wordTokened) > 0:
                        word = wordTokened[0]
                        if word not in myStopWords:
                            dictionary[taggedSynset][word]+=1
                            dictionary[taggedSynset][".total."]+=1
                            dictionary[taggedSynset][".totalNoStops."]+=1
                        elif abs(j - taggedItemPosition) == 1:
                            dictionary[taggedSynset][word]+=1
                            dictionary[taggedSynset][".total."]+=1
    def getMostLikelySynset(self, word, sentence):
        """Pick the synset of *word* best supported by *sentence*.

        Parameters
        ----------
        word : str
            The word to disambiguate.
        sentence : iterable of str
            The context words surrounding *word*.

        Returns
        -------
        The Synset with the highest context-word coverage, searching first
        with stopwords excluded and falling back to all context words;
        None if no synset has any coverage.
        """
        myStopWords = stopwords.words('english')
        highestCoverageSyn = self._synset_search(".totalNoStops.", myStopWords, word, sentence)
        if highestCoverageSyn is None:
            highestCoverageSyn = self._synset_search(".total.", [], word, sentence)
        return highestCoverageSyn
    def _synset_search(self, totalToUse, exclusionSet, word, sentence):
        """Score each candidate synset of *word* against *sentence*.

        Parameters
        ----------
        totalToUse : str
            Sentinel total key to normalize by (".total." or ".totalNoStops.").
        exclusionSet : container of str
            Context words to ignore while scoring.
        word : str
            The word to disambiguate.
        sentence : iterable of str
            The context words surrounding *word*.

        Returns
        -------
        The Synset with the highest coverage (matched context counts divided
        by the synset's total), or None when no candidate scores above 0.
        """
        myMap = self.dictionary
        highestCoverage = 0
        highestCoverageSyn = None
        for syn in wn.synsets(word):
            totalContextWordMatches = 0
            totalSet = myMap[syn][totalToUse]
            if totalSet > 0:
                for contextWord in sentence:
                    if contextWord != word and contextWord not in exclusionSet:
                        totalContextWordMatches += myMap[syn][contextWord]
                coverage = totalContextWordMatches / totalSet
                if coverage > highestCoverage:
                    highestCoverage = coverage
                    highestCoverageSyn = syn
        return highestCoverageSyn
    def listAlternatives(self, word, sentence):
        """Return the set of lemma names of the most likely synset of *word*
        in *sentence* (empty set when no synset matches)."""
        synonyms = set([])
        mostLikelySynset = self.getMostLikelySynset(word, sentence)
        if not mostLikelySynset is None:
            for synonym in mostLikelySynset.lemmas():
                synonyms.add(synonym.name())
        return synonyms
    def mostFrequentAlternative(self, word, sentence):
        """Return the highest-count lemma name of the most likely synset of
        *word* in *sentence*, or None when no lemma has a positive count."""
        mostLikelySynset = self.getMostLikelySynset(word, sentence)
        highestCount = 0
        mostFrequentAlternative = None
        if not mostLikelySynset is None:
            for synonym in mostLikelySynset.lemmas():
                count = synonym.count()
                if count > highestCount:
                    mostFrequentAlternative = synonym.name()
                    highestCount = count
        return mostFrequentAlternative
"""===================================================================
Place all function calls below the following conditional so that they
are called only if this module is called with
`python ling278_assign02.py`
No functions should execute if it is instead imported with
import ling278_assign02
in the interactive shell.
"""
if __name__ == '__main__':
pass
| 36.985507
| 125
| 0.587187
| 464
| 5,104
| 6.37069
| 0.303879
| 0.032476
| 0.031123
| 0.020298
| 0.194181
| 0.159675
| 0.159675
| 0.159675
| 0.122463
| 0.122463
| 0
| 0.008037
| 0.317398
| 5,104
| 137
| 126
| 37.255474
| 0.840413
| 0.063284
| 0
| 0.179775
| 0
| 0
| 0.017418
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.089888
| false
| 0.011236
| 0.067416
| 0
| 0.224719
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0c9fc5ceee51e40ba7758705226014b71dd06d7
| 3,138
|
py
|
Python
|
paymentmethods/stripejs/tests.py
|
tjwalch/django-restshop
|
569b57a5694e76a365556d7c4c9a97dd293d96c6
|
[
"MIT"
] | null | null | null |
paymentmethods/stripejs/tests.py
|
tjwalch/django-restshop
|
569b57a5694e76a365556d7c4c9a97dd293d96c6
|
[
"MIT"
] | null | null | null |
paymentmethods/stripejs/tests.py
|
tjwalch/django-restshop
|
569b57a5694e76a365556d7c4c9a97dd293d96c6
|
[
"MIT"
] | null | null | null |
import decimal
from unittest import mock
from django.conf import settings
from django.test import modify_settings
from rest_framework import test
from rest_framework.reverse import reverse
import stripe
from restshop import serializers
from restshop.models import Order
from paymentmethods.stripejs.models import StripeInvoice
import restshop.exceptions
from restshop.tests.test_product import products_and_price
@modify_settings(INSTALLED_APPS={
    'append': 'restshop.paymentmethods.stripejs'
})
class StripeTest(test.APITestCase):
    """Integration tests for the Stripe.js payment method.

    NOTE(review): ``get_token`` calls the real Stripe API via
    ``stripe.Token.create`` -- these tests need a valid STRIPE_API_KEY
    setting and network access to run.
    """

    def setUp(self):
        # One pending order: 3 items at price 1000 + vat 250 each
        # (total 3750, matched by the assertion in test_pay).
        stripe.api_key = settings.STRIPE_API_KEY
        self.order = Order.objects.create(
            email='tester@test.com',
        )
        self.order.items.create(
            description='test purchase',
            price='1000',
            vat='250',
            quantity=3,
            product=products_and_price(1000).skus.all()[0]
        )
        # Attach the order to the test client's session, as the view
        # resolves the current order from session['order_id'].
        session = self.client.session
        session['order_id'] = self.order.pk
        session.save()

    def get_token(self):
        # Stripe's standard test card number; returns a one-time token id.
        return stripe.Token.create(card={
            "number": '4242424242424242',
            "exp_month": 12,
            "exp_year": 2016,
            "cvc": '123'
        }).id

    def test_pay(self):
        # POST a token to the order-pay endpoint for the stripejs method.
        response = self.client.post(
            reverse(
                'order-pay',
                args=['stripejs.stripeinvoice']
            ),
            {
                'stripeToken': self.get_token(),
                'order': serializers.OrderSerializer(instance=self.order).data
            }
        )
        self.assertEqual(201, response.status_code, response.data)
        # Fully paid: owed and paid amounts cancel out.
        self.assertEqual(0,
                         decimal.Decimal(response.data['owed']) -
                         decimal.Decimal(response.data['paid']))
        order = Order.objects.get()
        self.assertEqual(
            Order.STATUS.completed,
            order.status
        )
        self.assertEqual(
            decimal.Decimal('3750.00'),
            order.invoices.all()[0].paid
        )

    @mock.patch('stripe.Charge.create')
    def test_card_error(self, create_mock):
        # Simulate Stripe declining the charge.
        create_mock.side_effect = stripe.CardError('fail!', '', '402')
        si = StripeInvoice.objects.create(
            order=self.order,
            owed=self.order.amount,
            stripeToken=self.get_token(),
        )
        try:
            si.authorize()
        except restshop.exceptions.PaymentFailed as e:
            self.assertEqual('fail!', e.detail)
        else:
            # Force a failure when no exception was raised: lambda: None
            # never raises, so assertRaises fails the test here.
            self.assertRaises(restshop.exceptions.PaymentFailed, lambda: None)

    def test_cancel_auth(self):
        si = StripeInvoice.objects.create(
            order=self.order,
            owed=self.order.amount,
            stripeToken=self.get_token(),
        )
        # Cancelling before authorization is an invalid state transition.
        self.assertRaises(
            restshop.exceptions.InvalidOperation,
            si.cancel_auth
        )
        self.assertTrue(si.authorize())
        self.assertTrue(si.cancel_auth())
        si.refresh_from_db()
        # Two events recorded: the authorization and the cancellation.
        self.assertEqual(2, si.events.all().count())
        self.assertEqual(StripeInvoice.STATUS.canceled, si.status)
| 31.069307
| 78
| 0.593372
| 321
| 3,138
| 5.697819
| 0.376947
| 0.039366
| 0.029524
| 0.037726
| 0.091853
| 0.091853
| 0.091853
| 0.091853
| 0.091853
| 0.091853
| 0
| 0.024102
| 0.299235
| 3,138
| 100
| 79
| 31.38
| 0.80764
| 0
| 0
| 0.10989
| 0
| 0
| 0.070427
| 0.017208
| 0
| 0
| 0
| 0
| 0.120879
| 1
| 0.054945
| false
| 0
| 0.131868
| 0.010989
| 0.208791
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0cbe510c57f6be47472391d90b71a872f267467
| 9,887
|
py
|
Python
|
qutip/graph.py
|
anubhavvardhan/qutip
|
daf384840efbb44b86e39d8bda64d907d9f6b47f
|
[
"BSD-3-Clause"
] | null | null | null |
qutip/graph.py
|
anubhavvardhan/qutip
|
daf384840efbb44b86e39d8bda64d907d9f6b47f
|
[
"BSD-3-Clause"
] | null | null | null |
qutip/graph.py
|
anubhavvardhan/qutip
|
daf384840efbb44b86e39d8bda64d907d9f6b47f
|
[
"BSD-3-Clause"
] | null | null | null |
# This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
"""
This module contains a collection of graph theory routines used mainly
to reorder matrices for iterative steady state solvers.
"""
__all__ = ['graph_degree', 'column_permutation', 'breadth_first_search',
'reverse_cuthill_mckee', 'maximum_bipartite_matching',
'weighted_bipartite_matching']
import numpy as np
import scipy.sparse as sp
from qutip.cy.graph_utils import (
_breadth_first_search, _node_degrees,
_reverse_cuthill_mckee, _maximum_bipartite_matching,
_weighted_bipartite_matching)
def graph_degree(A):
    """Compute the degree of every node (row) of a symmetric graph.

    Parameters
    ----------
    A : csr_matrix, csc_matrix
        Sparse matrix describing the graph structure.

    Returns
    -------
    degree : array
        Integer degree of each node (row).

    Raises
    ------
    TypeError
        If ``A`` is neither a CSC nor a CSR sparse matrix.
    """
    if not sp.isspmatrix_csc(A) and not sp.isspmatrix_csr(A):
        raise TypeError('Input must be CSC or CSR sparse matrix.')
    num_nodes = A.shape[0]
    return _node_degrees(A.indices, A.indptr, num_nodes)
def breadth_first_search(A, start):
    """Breadth-first traversal of a graph in CSC/CSR format.

    The matrix must have symmetric structure; use ``A + A.T`` first if it
    does not (or you are unsure).

    Parameters
    ----------
    A : csc_matrix, csr_matrix
        Input graph in CSC or CSR matrix format.
    start : int
        Starting node (row) for the traversal.

    Returns
    -------
    order : array
        Nodes in the order they are visited from ``start``.
    levels : array
        BFS level of each visited node.

    Raises
    ------
    TypeError
        If ``A`` is neither a CSC nor a CSR sparse matrix.
    """
    if not sp.isspmatrix_csc(A) and not sp.isspmatrix_csr(A):
        raise TypeError('Input must be CSC or CSR sparse matrix.')
    start_node = int(start)
    order, levels = _breadth_first_search(
        A.indices, A.indptr, A.shape[0], start_node)
    # Unreached nodes are marked -1 by the kernel; report visited only.
    return order[order != -1], levels[levels != -1]
def column_permutation(A):
    """Order columns of ``A`` by ascending number of nonzero entries.

    Useful for decreasing fill-in of a sparse LU factorization. The input
    is converted to CSC format when necessary.

    Parameters
    ----------
    A : csc_matrix
        Input sparse matrix (converted to CSC if in another format).

    Returns
    -------
    perm : array
        Column indices sorted by nonzero count, ascending.
    """
    if not sp.isspmatrix_csc(A):
        A = sp.csc_matrix(A)
    # In CSC format, successive indptr differences are per-column nnz.
    nnz_per_column = np.diff(A.indptr)
    return np.argsort(nnz_per_column)
def reverse_cuthill_mckee(A, sym=False):
    """Permutation ordering a sparse matrix in Reverse Cuthill-McKee order.

    The algorithm needs a structurally symmetric matrix, so unless
    ``sym=True`` the routine works on ``A + A.T``; forming the sum is
    cheaper than checking symmetry for a generic matrix.

    Parameters
    ----------
    A : csc_matrix, csr_matrix
        Input sparse matrix in CSC or CSR format.
    sym : bool {False, True}
        Set True only when the matrix structure is known to be symmetric
        (element values do not matter).

    Returns
    -------
    perm : array
        Permuted row and column indices.

    Raises
    ------
    TypeError
        If ``A`` is neither a CSC nor a CSR sparse matrix.

    References
    ----------
    E. Cuthill and J. McKee, "Reducing the Bandwidth of Sparse Symmetric
    Matrices", ACM '69 Proceedings of the 1969 24th national conference,
    (1969).
    """
    if not sp.isspmatrix_csc(A) and not sp.isspmatrix_csr(A):
        raise TypeError('Input must be CSC or CSR sparse matrix.')
    num_rows = A.shape[0]
    if not sym:
        # Structural symmetrization; the shape is unchanged.
        A = A + A.transpose()
    return _reverse_cuthill_mckee(A.indices, A.indptr, num_rows)
def maximum_bipartite_matching(A, perm_type='row'):
    """
    Returns an array of row or column permutations that removes nonzero
    elements from the diagonal of a nonsingular square CSC sparse matrix. Such
    a permutation is always possible provided that the matrix is nonsingular.
    This function looks at the structure of the matrix only.
    The input matrix will be converted to CSC matrix format if
    necessary.
    Parameters
    ----------
    A : sparse matrix
        Input matrix
    perm_type : str {'row', 'column'}
        Type of permutation to generate.
    Returns
    -------
    perm : array
        Array of row or column permutations.
    Raises
    ------
    ValueError
        If ``A`` is not square.
    TypeError
        If ``A`` is not in CSC, CSR, or COO format.
    Exception
        If no perfect matching exists (matrix possibly singular).
    Notes
    -----
    This function relies on a maximum cardinality bipartite matching algorithm
    based on a breadth-first search (BFS) of the underlying graph[1]_.
    References
    ----------
    I. S. Duff, K. Kaya, and B. Ucar, "Design, Implementation, and
    Analysis of Maximum Transversal Algorithms", ACM Trans. Math. Softw.
    38, no. 2, (2011).
    """
    nrows = A.shape[0]
    if A.shape[0] != A.shape[1]:
        raise ValueError(
            'Maximum bipartite matching requires a square matrix.')
    # The Cython kernel expects CSC; accept CSR/COO by converting.
    if sp.isspmatrix_csr(A) or sp.isspmatrix_coo(A):
        A = A.tocsc()
    elif not sp.isspmatrix_csc(A):
        raise TypeError("matrix must be in CSC, CSR, or COO format.")
    if perm_type == 'column':
        # A column permutation is a row permutation of the transpose.
        A = A.transpose().tocsc()
    perm = _maximum_bipartite_matching(A.indices, A.indptr, nrows)
    # The kernel marks unmatched rows with -1: no perfect matching.
    if np.any(perm == -1):
        raise Exception('Possibly singular input matrix.')
    return perm
def weighted_bipartite_matching(A, perm_type='row'):
    """Permutation that (greedily) maximizes the product of |diagonal|.

    Returns an array of row (or column) permutations that attempts to
    maximize the product of the absolute values of the diagonal elements
    of a nonsingular square sparse matrix; such a permutation always
    exists for a nonsingular matrix. Both structure and absolute values
    are considered.

    Parameters
    ----------
    A : csc_matrix
        Input matrix (CSR/COO inputs are converted to CSC).
    perm_type : str {'row', 'column'}
        Type of permutation to generate.

    Returns
    -------
    perm : array
        Array of row or column permutations.

    Notes
    -----
    Uses a weighted maximum-cardinality bipartite matching based on BFS:
    columns are weighted by the max-|value| element of their associated
    rows and traversed in descending weight order. Unlike exact
    methods[1]_, the diagonal product is not guaranteed maximal, but the
    runtime is substantially lower.

    References
    ----------
    I. S. Duff and J. Koster, "The design and use of algorithms for
    permuting large entries to the diagonal of sparse matrices", SIAM J.
    Matrix Anal. and Applics. 20, no. 4, 889 (1997).
    """
    nrows = A.shape[0]
    if A.shape[0] != A.shape[1]:
        raise ValueError('weighted_bfs_matching requires a square matrix.')
    if sp.isspmatrix_csr(A) or sp.isspmatrix_coo(A):
        A = A.tocsc()
    elif not sp.isspmatrix_csc(A):
        raise TypeError("matrix must be in CSC, CSR, or COO format.")
    if perm_type == 'column':
        # Column permutation == row permutation of the transpose.
        A = A.transpose().tocsc()
    weights = np.asarray(np.abs(A.data), dtype=float)
    perm = _weighted_bipartite_matching(
        weights, A.indices, A.indptr, nrows)
    # -1 marks an unmatched row: no perfect matching was found.
    if np.any(perm == -1):
        raise Exception('Possibly singular input matrix.')
    return perm
| 33.402027
| 79
| 0.670982
| 1,374
| 9,887
| 4.764192
| 0.27147
| 0.023831
| 0.019248
| 0.016499
| 0.346166
| 0.309349
| 0.303086
| 0.277116
| 0.277116
| 0.25634
| 0
| 0.006818
| 0.243451
| 9,887
| 295
| 80
| 33.515254
| 0.868316
| 0.645494
| 0
| 0.46875
| 0
| 0
| 0.179359
| 0.033808
| 0
| 0
| 0
| 0
| 0
| 1
| 0.09375
| false
| 0
| 0.046875
| 0
| 0.234375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0d0af5cc2acc44430f9c71988996b1fd3a8a91a
| 8,473
|
py
|
Python
|
src/train_vae.py
|
katnoria/world-models
|
6584f35fa9508c991050ddc9c17f5862a00008fe
|
[
"Apache-2.0"
] | null | null | null |
src/train_vae.py
|
katnoria/world-models
|
6584f35fa9508c991050ddc9c17f5862a00008fe
|
[
"Apache-2.0"
] | null | null | null |
src/train_vae.py
|
katnoria/world-models
|
6584f35fa9508c991050ddc9c17f5862a00008fe
|
[
"Apache-2.0"
] | null | null | null |
# class Encoder:
# pass
# class Decoder:
# pass
# class VariationAutoEncoder:
# pass
import os
os.environ['CUDA_VISIBLE_DEVICES'] = "0"
import pickle
import logging
from glob import glob
import numpy as np
from time import time
from datetime import datetime
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow import keras
if not os.path.exists("logs"):
os.makedirs("logs")
today = datetime.now().strftime('%Y%m%d')
logger = logging.getLogger('worldmodels')
logger.setLevel(logging.DEBUG)
# Create logger
logger = logging.getLogger("worldmodels")
formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
logger.setLevel(logging.DEBUG)
# Uncomment to enable console logger
streamhandler = logging.StreamHandler()
streamhandler.setFormatter(formatter)
streamhandler.setLevel(logging.DEBUG)
logger.addHandler(streamhandler)
filehandler = logging.FileHandler(filename='logs/dataset.{}.log'.format(today))
filehandler.setFormatter(formatter)
filehandler.setLevel(logging.DEBUG)
logger.addHandler(filehandler)
AUTOTUNE = tf.data.experimental.AUTOTUNE
def load_preprocess_image(fname, resize_to=(64, 64)):
    """Load an image file, resize it, and scale pixel values to [0, 1].

    Parameters
    ----------
    fname : str or string tensor
        Path of the image file to read.
    resize_to : sequence of two ints
        Target (height, width). Defaults to (64, 64).

    Returns
    -------
    Float image tensor of shape ``resize_to + (3,)`` with values in [0, 1].
    """
    # FIX: the default was the mutable list [64, 64]; mutable default
    # arguments are shared across calls, so a tuple is used instead
    # (tf.image.resize accepts either).
    image = tf.io.read_file(fname)
    # NOTE(review): decode_jpeg is used, but __main__ globs '*.png' files;
    # confirm the decoder matches the dataset's actual image format.
    image = tf.image.decode_jpeg(image, channels=3)
    image = tf.image.resize(image, resize_to)
    image /= 255.0
    return image
# Input is a 64x64 RGB image; the latent space has 32 dimensions.
INPUT_SHAPE = (64,64,3)
# INPUT_SHAPE = (128,128,3)
LATENT_DIM = 32

# Convolutional encoder: four stride-2 conv layers halve the spatial size
# each time, then two linear heads produce the Gaussian posterior
# parameters (mean, log-variance).
encoder_input = keras.Input(shape=(INPUT_SHAPE), name='encoder_input_image')
x = keras.layers.Conv2D(32, 4, strides=(2,2), activation='relu', name='conv-1')(encoder_input)
x = keras.layers.Conv2D(64, 4, strides=(2,2), activation='relu', name='conv-2')(x)
x = keras.layers.Conv2D(128, 4, strides=(2,2), activation='relu', name='conv-3')(x)
x = keras.layers.Conv2D(256, 4, strides=(2,2), activation='relu', name='conv-4')(x)
# x = keras.layers.Conv2D(512, 4, strides=(2,2), activation='relu', name='conv-5')(x)
# Shape after the last conv layer; the decoder mirrors this volume.
encoder_last_conv_shape = K.int_shape(x)[1:]
logger.info("encoder_last_conv_shape: {}".format(encoder_last_conv_shape))
x = keras.layers.Flatten()(x)
mu = keras.layers.Dense(LATENT_DIM, activation='linear', name="mean")(x)
logvar = keras.layers.Dense(LATENT_DIM, activation='linear', name="variance")(x)
encoder = keras.Model(encoder_input, [mu, logvar], name='encoder')
encoder.summary()
def sample(args):
    """Reparameterized draw from the posterior given ``(mean, logvar)``."""
    mean, logvar = args
    # reparameterizaton trick: allows gradients to pass through the sample
    # 1. sample from unit gaussian, then
    # 2. multiply it with standard deviation and add mean
    # NOTE(review): the scale used is exp(logvar), not exp(0.5 * logvar);
    # if `logvar` is a log-variance this multiplies by the variance rather
    # than the standard deviation — confirm the intended parameterization.
    e = tf.random.normal(shape=(K.shape(mean)[0], LATENT_DIM))
    return e * tf.math.exp(logvar) + mean
# Draw a latent sample so its static shape can size the decoder input.
sampled_latent_vector = keras.layers.Lambda(sample)([mu, logvar])
decoder_input = keras.layers.Input(shape=K.int_shape(sampled_latent_vector)[1:], name='decoder_input')
# Decoder: project the latent vector to the flattened conv volume, then
# upsample with stride-2 transposed convolutions. The final layer has no
# activation — it outputs logits (the loss uses sigmoid_cross_entropy).
x = keras.layers.Dense(np.prod(encoder_last_conv_shape))(decoder_input)
x = keras.layers.Reshape((1,1,np.prod(encoder_last_conv_shape)))(x)
x = keras.layers.Conv2DTranspose(128, kernel_size=5, strides=(2,2), activation='relu')(x)
x = keras.layers.Conv2DTranspose(64, kernel_size=5, strides=(2,2), activation='relu')(x)
x = keras.layers.Conv2DTranspose(32, kernel_size=6, strides=(2,2), activation='relu')(x)
# x = keras.layers.Conv2DTranspose(32, kernel_size=4, strides=(2,2), activation='relu')(x)
decoder_output = keras.layers.Conv2DTranspose(3, kernel_size=6, strides=(2,2))(x)
decoder = keras.Model(decoder_input, decoder_output, name='decoder')
decoder.summary()
# Taken from tensorflow VAE example
def log_normal_pdf(sample, mean, logvar):
    """Log-density of *sample* under a diagonal Gaussian, summed over axis 1."""
    log2pi = tf.math.log(2. * np.pi)
    squared_error = (sample - mean) ** 2.
    per_dim = -.5 * (squared_error * tf.exp(-logvar) + logvar + log2pi)
    return tf.reduce_sum(per_dim, axis=1)
@tf.function
def calculate_loss(mean, logvar, labels, decoded_logits):
    """Single-sample Monte-Carlo estimate of the negative ELBO.

    Reconstruction term: sigmoid cross-entropy between the decoder logits
    and the input images; KL term estimated as log p(z) - log q(z|x) at a
    fresh sample z from the posterior.
    """
    xent_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=decoded_logits)
    z = sample([mean, logvar])
    # Sum the pixel-wise loss over height, width, channels.
    logpx_z = -tf.reduce_sum(xent_loss, axis=[1,2,3])
    logpz = log_normal_pdf(z, 0., 0.)
    logqz_x = log_normal_pdf(z, mean, logvar)
    # Negate so that minimizing the loss maximizes the ELBO.
    loss = -tf.reduce_mean(logpx_z + logpz - logqz_x)
    return loss
class VAE(keras.Model):
    """Variational autoencoder combining the prebuilt encoder/decoder."""

    def __init__(self, encoder, decoder):
        super(VAE, self).__init__()
        self.encoder = encoder  # image -> (mean, logvar)
        self.decoder = decoder  # latent z -> image logits

    def train_vars(self):
        """All trainable variables of both sub-models."""
        return self.encoder.trainable_variables + self.decoder.trainable_variables

    def encode(self, x):
        """Return the posterior parameters (mean, logvar) for input x."""
        encoded = self.encoder(x)
        return encoded

    def decode(self, z, apply_sigmoid=False):
        """Decode latent z to logits, or to probabilities when requested."""
        logits = self.decoder(z)
        if apply_sigmoid:
            return tf.sigmoid(logits)
        return logits
@tf.function
def train_step(train_x, model, optimizer):
    """Run one optimization step on a batch and return the loss tensor."""
    with tf.GradientTape() as tape:
        # use training inputs to approximate the posterior
        mean, logvar = model.encode(train_x)
        # sample latent vector from the learned mean and variance
        latent_z = sample([mean, logvar])
        # decode z
        decoded_logits = model.decode(latent_z)
        # calculate loss
        loss = calculate_loss(mean, logvar, labels=train_x, decoded_logits=decoded_logits)
    # calculate gradients
    gradients = tape.gradient(loss, model.trainable_variables)
    # apply gradients
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    return loss
def train(fnames, output_dirname="output", epochs=600, save_every_pct=0.3, print_every_pct=0.05):
logger.info('Total files: {}'.format(len(fnames)))
path_ds = tf.data.Dataset.from_tensor_slices(fnames)
image_ds = path_ds.map(load_preprocess_image, num_parallel_calls=AUTOTUNE)
# Dataset
BATCH_SIZE = 64
SHUFFLE_BUFFER_SIZE = len(fnames)
train_dataset = image_ds \
.shuffle(SHUFFLE_BUFFER_SIZE) \
.repeat() \
.batch(BATCH_SIZE) \
.prefetch(buffer_size=AUTOTUNE)
if not os.path.exists(output_dirname):
os.makedirs('{}/ckpt'.format(output_dirname))
os.makedirs('{}/imgs'.format(output_dirname))
# Number of training epochs
# EPOCHS = 600
logger.info('Training epochs: {}'.format(epochs))
# Initialize the Variational Autoencoder model
model = VAE(encoder, decoder)
# Define optimizer
optimizer = keras.optimizers.Adam(1e-4)
# keep track of losses
losses = []
# How often to print the loss
print_every = max(int(print_every_pct * epochs), 1)
# Model Checkpoint
# Save model and optimizer
ckpt = tf.train.Checkpoint(optimizer=optimizer, model=model)
# Set save path and how many checkpoints to save
checkpoint_path = '{}/ckpt/'.format(output_dirname)
logger.info('Checkpoints will be stored at {}'.format(checkpoint_path))
manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=2)
# Load the latest checkpoint and restore
latest_ckpt = manager.latest_checkpoint
ckpt.restore(latest_ckpt)
if latest_ckpt:
logger.info('Restored from {}'.format(latest_ckpt))
else:
logger.info('Training from scratch')
# How often to save the checkpoint
save_every = max(int(save_every_pct * epochs), 1)
# We are now ready to start the training loop
elapsed_loop_time = time()
for epoch in range(0, epochs):
for train_x in train_dataset:
loss = train_step(train_x, model, optimizer)
losses.append(loss)
if epoch % print_every == 0:
now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
logger.info('{}:Epoch {}/{}: train loss {} in {} seconds'.format(epoch, epochs, losses[-1], time()-elapsed_loop_time))
elapsed_loop_time = time()
if epoch % save_every == 0:
save_path = manager.save()
logger.info('Saved checkpoint for step {}:{}'.format(epoch, save_path))
# Final Save
save_path = manager.save()
logger.info('Saved checkpoint for step {}'.format(save_path))
if __name__ == "__main__":
# Toons
# fnames = glob('{}/*.png'.format("/mnt/bigdrive/datasets/cartoonset/cartoonset10k/"))
# train(fnames, output_dirname="toons128")
# Car racing
fnames = glob('{}/*.png'.format("/mnt/bigdrive/projects/public_repos/world-models/src/imgs/"))
train(fnames, output_dirname="car_racing")
| 36.521552
| 130
| 0.690901
| 1,153
| 8,473
| 4.928881
| 0.248916
| 0.032905
| 0.025339
| 0.03009
| 0.208869
| 0.144818
| 0.099067
| 0.099067
| 0.055077
| 0.055077
| 0
| 0.020651
| 0.177033
| 8,473
| 231
| 131
| 36.679654
| 0.79435
| 0.148708
| 0
| 0.066667
| 0
| 0
| 0.085053
| 0.011433
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.086667
| 0.006667
| 0.22
| 0.02
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0d3c8c44e9a78dfdefd3d78c0e47b2746c32032
| 5,233
|
py
|
Python
|
multitidal/client_lib.py
|
xa4a/multitidal
|
26f757f12464e8f935c0389c6356b97cfaa9f03f
|
[
"MIT"
] | 2
|
2021-12-01T05:39:05.000Z
|
2021-12-07T07:26:16.000Z
|
multitidal/client_lib.py
|
xa4a/multitidal
|
26f757f12464e8f935c0389c6356b97cfaa9f03f
|
[
"MIT"
] | 1
|
2021-12-02T03:54:16.000Z
|
2021-12-02T03:54:16.000Z
|
multitidal/client_lib.py
|
parabolala/multitidal
|
26f757f12464e8f935c0389c6356b97cfaa9f03f
|
[
"MIT"
] | null | null | null |
import asyncio
import json
import os
import pty
import shutil
import sys
import tty
import termios
import time
import threading
import tornado.iostream
from tornado.ioloop import IOLoop
from tornado.websocket import websocket_connect
# Shared tornado IO loop used by module-level callbacks.
ioloop = tornado.ioloop.IOLoop.instance()

# Default credentials used for the remote ssh sessions.
SSH_LOGIN = "root"
SSH_PASSWORD = "algorave"
# "ls -l\r" + ESC O C (right arrow) wake the terminal, then ^A 0 asks GNU
# screen to switch to window 0.
SCREEN_TO_SCREEN_0_SEQ = b"ls -l\r\x1bOC" + b"\x010"  # ^A 0
async def send_stdin_to_ws_task(ws, on_finish_cb):
    """Forward raw stdin keystrokes to the websocket until EOF/^C/cancel.

    Puts the terminal into raw mode, reads stdin in chunks, and ships each
    chunk over *ws* as a JSON "keystrokes" client command. ``on_finish_cb``
    is scheduled on the IO loop when stdin closes or a ^C is seen. The
    terminal mode is always restored on exit.
    """
    print("mangling terminal")
    # FIX: terminal setup used to live inside the try block, so a failure in
    # dup()/tcgetattr() reached the finally clause with `inp`/`mode` unbound
    # (NameError masking the real error). Acquire everything first, then
    # only guard the read loop.
    fn = os.dup(sys.stdin.fileno())
    inp = tornado.iostream.PipeIOStream(fn)
    mode = termios.tcgetattr(sys.stdin.fileno())
    tty.setraw(fn)
    try:
        while True:
            try:
                print("reading stdin", end="\r\n")
                content = await inp.read_bytes(100, partial=True)
                print("read stdin", end="\r\n")
            except tornado.iostream.StreamClosedError:
                print("Stdin closed", end="\r\n")
                ioloop.add_callback(on_finish_cb)
                break
            print(f"stdin: {content}", end="\r\n")
            # FIX: test emptiness before indexing — the original evaluated
            # content[0] first, which would raise IndexError on b"".
            if not content or content[0] == 3:  # CTRL-C
                print("Got a ^C", end="\r\n")
                ioloop.add_callback(on_finish_cb)
                break
            ioloop.add_callback(
                ws.write_message,
                json.dumps(
                    {
                        "client_command": "keystrokes",
                        "keystrokes": [int(x) for x in content],
                    }
                ),
            )
            print("no exc", end="\r\n")
    except asyncio.CancelledError:
        print("stdin read task cancelled", end="\r\n")
    except Exception as e:  # pylint: disable=broad-except
        print(f"Exception: {e}")
    finally:
        # Restore the terminal settings and release the duplicated fd.
        inp.close()
        termios.tcsetattr(sys.stdin, termios.TCSADRAIN, mode)
        print("finally")
async def run_ssh(host, port, login=SSH_LOGIN, password=SSH_PASSWORD):
    """Attach the local terminal to ``login@host:port`` via sshpass + pty.

    The password is passed through the SSHPASS environment variable. The
    very first keystroke typed is prefixed with SCREEN_TO_SCREEN_0_SEQ so
    the remote GNU screen session jumps to window 0.

    NOTE(review): pty.spawn blocks; the caller runs it directly on the IO
    loop (see Client.run_ssh), intentionally pausing other callbacks.
    """
    os.environ["SSHPASS"] = password
    ssh_cmd = [
        "ssh",
        "-o",
        "PreferredAuthentications=password",
        "-o",
        "PubkeyAuthentication=no",
        "-o",
        "StrictHostKeyChecking=no",  # Skip fingerpint warning.
        f"{login}@{host}",
        "-p",
        str(port),
    ]
    sshpass_cmd = [shutil.which("sshpass"), "-e"] + ssh_cmd
    args = sshpass_cmd
    print(" ".join(args))

    # One-shot flag: inject the screen-switch sequence only on the first
    # stdin read. (threading.Event used as a simple boolean latch.)
    e = threading.Event()

    def stdin_read(fd):
        if not e.is_set():
            e.set()
            return SCREEN_TO_SCREEN_0_SEQ + os.read(fd, 1024)
        b = os.read(fd, 1024)
        return b

    def master_read(fd):
        # Pass-through of the ssh master side.
        b = os.read(fd, 1024)
        return b

    # Let Web UI connect to screen 0 first.
    time.sleep(3)
    res = pty.spawn(args, master_read=master_read, stdin_read=stdin_read)
    print(f"ssh returned {res}")
class Client:
    """Websocket client bridging the local terminal to the control server.

    After connecting, the client idles by forwarding raw keystrokes over
    the websocket; when the server sends a ``{"mode": "ssh", ...}``
    message it suspends idle forwarding, attaches an interactive ssh
    session, then reconnects and resumes idling.
    """

    # Current client mode; set to "idle" once connected.
    mode: str

    def __init__(self, url, timeout):
        self.url = url
        # NOTE(review): `timeout` is stored but never read in this class.
        self.timeout = timeout
        self.ioloop = IOLoop.instance()
        self.ws = None
        self.send_stdin_task = None

    async def connect(self):
        """Open the websocket; on success start the idle and receive loops."""
        print("trying to connect")
        try:
            self.ws = await websocket_connect(self.url)
        except Exception as e:  # pylint: disable=broad-except
            print(f"connection error: {str(e)}")
        else:
            print("connected")
            # await self.ws.write_message({'client': self.i})
            self.mode = "idle"
            self.ioloop.spawn_callback(self.run_idle)
            self.ioloop.spawn_callback(self.run)

    def finish_ws(self):
        # Close and forget the websocket (safe to call when already gone).
        if self.ws:
            self.ws.close()
            self.ws = None

    async def finish(self):
        """Tear everything down: idle task, websocket, then the IO loop."""
        if self.send_stdin_task:
            await self.stop_idle()
        self.finish_ws()
        self.ioloop.stop()

    async def run_idle(self):
        """Spawn the stdin-forwarding task; must not already be running."""
        assert not self.send_stdin_task
        print("running idle, spawning task")
        self.send_stdin_task = asyncio.create_task(
            send_stdin_to_ws_task(self.ws, self.finish)
        )

    async def stop_idle(self):
        """Cancel the stdin-forwarding task and wait for it to unwind."""
        assert self.send_stdin_task
        self.send_stdin_task.cancel()
        await self.send_stdin_task
        self.send_stdin_task = None

    @staticmethod
    async def run_ssh(host, port):
        # Blocks ioloop
        await run_ssh(host, port)

    async def run(self):
        """Receive loop: dispatch server messages until disconnect."""
        while True:
            msg = await self.ws.read_message()
            if msg is None:
                # read_message returns None when the server closes.
                print("server left, terminating", end="\r\n")
                self.ioloop.add_callback(self.finish)
                return
            msg = json.loads(msg)
            print(f"got msg: {msg}", end="\r\n")
            if "mode" not in msg:
                continue
            if msg["mode"] == "ssh":
                host, port = msg["ssh"]["host"], msg["ssh"]["port"]
                print(f"Connecting to ssh {host}:{port}...", end="\r\n")
                # Hand the terminal over to ssh, then rebuild the
                # websocket connection and let connect() restart loops.
                await self.stop_idle()
                await self.run_ssh(host, port)
                print("restarting idle task")
                self.finish_ws()
                await self.connect()
                break
| 28.911602
| 73
| 0.549016
| 634
| 5,233
| 4.399054
| 0.26183
| 0.03227
| 0.017928
| 0.048763
| 0.222302
| 0.17067
| 0.120473
| 0.058802
| 0.034421
| 0.034421
| 0
| 0.008331
| 0.334798
| 5,233
| 180
| 74
| 29.072222
| 0.792876
| 0.051978
| 0
| 0.182432
| 0
| 0
| 0.115556
| 0.016162
| 0
| 0
| 0
| 0
| 0.013514
| 1
| 0.027027
| false
| 0.040541
| 0.087838
| 0
| 0.155405
| 0.135135
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0d425874c7577ffb290b5d9bb87cc599dbdcb1a
| 2,790
|
py
|
Python
|
scrapy/contracts/default.py
|
zyuchuan/scrapy
|
ce24f53957b41877319a5ffc6cf26f0a18baaec2
|
[
"BSD-3-Clause"
] | null | null | null |
scrapy/contracts/default.py
|
zyuchuan/scrapy
|
ce24f53957b41877319a5ffc6cf26f0a18baaec2
|
[
"BSD-3-Clause"
] | null | null | null |
scrapy/contracts/default.py
|
zyuchuan/scrapy
|
ce24f53957b41877319a5ffc6cf26f0a18baaec2
|
[
"BSD-3-Clause"
] | null | null | null |
import json
from scrapy.item import BaseItem
from scrapy.http import Request
from scrapy.exceptions import ContractFail
from scrapy.contracts import Contract
# contracts
class UrlContract(Contract):
    """Contract that sets the url of the request (mandatory).

    Usage in a callback docstring::

        @url http://scrapy.org
    """
    name = 'url'

    def adjust_request_args(self, args):
        # First contract argument is the request url.
        args.update(url=self.args[0])
        return args
class CallbackKeywordArgumentsContract(Contract):
    """Contract that sets the keyword arguments for the request.

    The value must be a JSON-encoded dictionary, e.g.::

        @cb_kwargs {"arg1": "some value"}
    """
    name = 'cb_kwargs'

    def adjust_request_args(self, args):
        # The JSON payload may contain spaces, so it arrives split across
        # several contract arguments; rejoin before parsing.
        payload = ' '.join(self.args)
        args['cb_kwargs'] = json.loads(payload)
        return args
class ReturnsContract(Contract):
    """ Contract to check the output of a callback
    general form:
    @returns request(s)/item(s) [min=1 [max]]
    e.g.:
    @returns request
    @returns request 2
    @returns request 2 10
    @returns request 0 10
    """
    name = 'returns'

    # Maps the first contract argument to the type(s) counted in output.
    objects = {
        'request': Request,
        'requests': Request,
        'item': (BaseItem, dict),
        'items': (BaseItem, dict),
    }

    def __init__(self, *args, **kwargs):
        super(ReturnsContract, self).__init__(*args, **kwargs)
        assert len(self.args) in [1, 2, 3]
        self.obj_name = self.args[0] or None
        self.obj_type = self.objects[self.obj_name]
        # Optional bounds: a missing min defaults to 1, a missing max to
        # +infinity (IndexError means the argument was not supplied).
        try:
            self.min_bound = int(self.args[1])
        except IndexError:
            self.min_bound = 1
        try:
            self.max_bound = int(self.args[2])
        except IndexError:
            self.max_bound = float('inf')

    def post_process(self, output):
        # Count produced objects of the requested type, then check bounds.
        occurrences = 0
        for x in output:
            if isinstance(x, self.obj_type):
                occurrences += 1
        assertion = (self.min_bound <= occurrences <= self.max_bound)
        if not assertion:
            if self.min_bound == self.max_bound:
                expected = self.min_bound
            else:
                expected = '%s..%s' % (self.min_bound, self.max_bound)
            raise ContractFail("Returned %s %s, expected %s" % \
                (occurrences, self.obj_name, expected))
class ScrapesContract(Contract):
    """Contract that checks the presence of fields in scraped items, e.g.::

        @scrapes page_name page_body
    """
    name = 'scrapes'

    def post_process(self, output):
        # Only items (BaseItem or plain dict) are inspected; requests and
        # other objects in the callback output are skipped.
        for element in output:
            if not isinstance(element, (BaseItem, dict)):
                continue
            missing = [field for field in self.args if field not in element]
            if missing:
                raise ContractFail(
                    "Missing fields: %s" % ", ".join(missing))
| 26.074766
| 70
| 0.576344
| 330
| 2,790
| 4.766667
| 0.306061
| 0.050858
| 0.045772
| 0.026701
| 0.164018
| 0.102988
| 0.072473
| 0
| 0
| 0
| 0
| 0.009958
| 0.316129
| 2,790
| 106
| 71
| 26.320755
| 0.814465
| 0.178136
| 0
| 0.206897
| 0
| 0
| 0.055093
| 0
| 0
| 0
| 0
| 0
| 0.051724
| 1
| 0.086207
| false
| 0
| 0.086207
| 0
| 0.362069
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0d6966f0f4824c8705c24412698017423279002
| 2,193
|
py
|
Python
|
scrapy_template/scrapy_template/pipelines.py
|
kk0501/spider
|
404540a76922885f9dd12f9a513f5ec88b0d2072
|
[
"MIT"
] | null | null | null |
scrapy_template/scrapy_template/pipelines.py
|
kk0501/spider
|
404540a76922885f9dd12f9a513f5ec88b0d2072
|
[
"MIT"
] | null | null | null |
scrapy_template/scrapy_template/pipelines.py
|
kk0501/spider
|
404540a76922885f9dd12f9a513f5ec88b0d2072
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy.exceptions import DropItem
from hashlib import md5
from scrapy import log
from twisted.enterprise import adbapi
from scrapy_template.items import ScrapyTemplateItem
class ScrapyTemplatePipeline(object):
    """Stores scraped items in MySQL, dropping duplicate URLs.

    Database writes go through a Twisted adbapi connection pool so they do
    not block the reactor; duplicates are tracked in-memory per crawl.
    """

    def __init__(self, dbpool):
        # URLs already processed during this crawl (dedup guard).
        self.urls_seen = set()
        self.dbpool = dbpool

    @classmethod
    def from_settings(cls, settings):
        """Build the pipeline with a MySQL connection pool from settings."""
        dbargs = dict(
            host=settings['MYSQL_HOST'],
            db=settings['MYSQL_DBNAME'],
            user=settings['MYSQL_USER'],
            passwd=settings['MYSQL_PASSWD'],
            charset='utf8',
            use_unicode=True,
        )
        dbpool = adbapi.ConnectionPool('MySQLdb', **dbargs)
        return cls(dbpool)

    def process_item(self, item, spider):
        """Drop duplicate items; upsert new ones asynchronously.

        Returns the Deferred (resolving to the item) so later pipeline
        stages wait for the DB write; non-template items pass through.
        """
        if isinstance(item, ScrapyTemplateItem):
            if item['url'] in self.urls_seen:
                raise DropItem("Duplicate item found: %s" % item['url'])
            else:
                self.urls_seen.add(item['url'])
                d = self.dbpool.runInteraction(self._do_upsert, item, spider)
                d.addErrback(self._handle_error, item, spider)
                # Hand the item (not the query result) to the next stage.
                d.addBoth(lambda _: item)
                return d
        else:
            return item

    def _do_upsert(self, conn, item, spider):
        """Insert the item unless its guid already exists in the table."""
        guid = self._get_id(item)
        conn.execute("""SELECT EXISTS(
            SELECT 1 FROM example WHERE guid = %s
        )""", (guid, ))
        ret = conn.fetchone()[0]
        if not ret:
            conn.execute("""
        INSERT INTO example (category, name, color, images, price, url, guid)
        VALUES (%s, %s, %s, %s, %s, %s, %s)
        """, (item['category'], item['name'], item['color'],
              item['images'], item['price'], item['url'], guid))
            spider.log("Item stored in db: %s %r" % (guid, item))

    def _handle_error(self, failure, item, spider):
        """Log DB failures without crashing the crawl."""
        log.err(failure)

    def _get_id(self, item):
        """Stable guid for an item: md5 hexdigest of its URL."""
        # FIX: hashlib.md5 requires bytes; hashing the str url directly
        # raises TypeError on Python 3, so encode explicitly (a no-op
        # change for ASCII urls under Python 2).
        return md5(item['url'].encode('utf-8')).hexdigest()
| 34.265625
| 85
| 0.579115
| 257
| 2,193
| 4.828794
| 0.44358
| 0.00967
| 0.012087
| 0.012893
| 0.005641
| 0.005641
| 0
| 0
| 0
| 0
| 0
| 0.003876
| 0.294118
| 2,193
| 64
| 86
| 34.265625
| 0.797804
| 0.082535
| 0
| 0.04
| 0
| 0.02
| 0.184853
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.12
| false
| 0.02
| 0.1
| 0.02
| 0.32
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0d7c84ced6a300c631d0fec1f9dd425ca8e581c
| 2,726
|
py
|
Python
|
run_training_size_bootstrap.py
|
willferreira/multilabel-stance-detection
|
ddc0ed9caa26b63f40e89a377f1738e83fcb7724
|
[
"MIT"
] | null | null | null |
run_training_size_bootstrap.py
|
willferreira/multilabel-stance-detection
|
ddc0ed9caa26b63f40e89a377f1738e83fcb7724
|
[
"MIT"
] | null | null | null |
run_training_size_bootstrap.py
|
willferreira/multilabel-stance-detection
|
ddc0ed9caa26b63f40e89a377f1738e83fcb7724
|
[
"MIT"
] | null | null | null |
import click
import pickle
import numpy as np
from collections import defaultdict
from utils import reset_seeds, get_dataset, load_embeddings
from mlp_multilabel_wrapper import PowersetKerasWrapper, MultiOutputKerasWrapper
from mlp_utils import CrossLabelDependencyLoss
def get_random_sample(dataset_name='bbc', train_frac=0.25):
    """Draw a stratified random subsample of the training split.

    Sampling is grouped by each row's full label combination (labels joined
    into a string such as '101'), so every label-set stratum retains roughly
    *train_frac* of its rows.  The test split is returned unchanged.

    :param dataset_name: name understood by utils.get_dataset/load_embeddings
    :param train_frac: fraction of each label-combination group to keep
    :return: (X_train_sample, X_test, y_train_sample, y_test)
    """
    # get model runner specific dataset
    _, _, y_train, y_test = get_dataset(dataset_name)
    X_train, X_test = load_embeddings(dataset_name)
    # Key each row by its concatenated label vector (e.g. [1,0,1] -> '101'),
    # then sample train_frac of the rows within each such group.
    grps = y_train.apply(lambda v: ''.join(map(str, v)), axis=1).to_frame(0).groupby(0)[0]
    # Level 1 of the sampled MultiIndex carries the original row labels.
    train_idx = grps.apply(lambda g: g.sample(frac=train_frac)).index.get_level_values(1)
    X_train_sample = X_train.loc[train_idx, :]
    y_train_sample = y_train.loc[train_idx, :]
    return X_train_sample, X_test, y_train_sample, y_test
def _get_label_set(y):
return set(y.apply(lambda v: ''.join(map(str, v)), axis=1).values)
@click.command()
@click.option('--n-samples', default=10)
@click.option('--dataset-name', default='moral-dataset-MeToo')
def run(n_samples, dataset_name):
    """Bootstrap experiment: for a range of training-set fractions, train
    the powerset and cross-label-dependency MLP wrappers on random
    subsamples, then pickle the per-sample/per-fraction predictions.
    """
    # results[sample_index][train_fraction] -> list of prediction arrays
    mlp_cld_bootstrap_results = defaultdict(lambda: defaultdict(list))
    mlp_powerset_bootstrap_results = defaultdict(lambda: defaultdict(list))
    mlp_labels_bootstrap_results = defaultdict(lambda: defaultdict(list))
    reset_seeds()
    for i in range(n_samples):
        print('Running bootstrap sample: {}'.format(i + 1))
        # Train fractions 0.1, 0.2, ..., 1.0.
        for f in np.arange(0.1, 1.1, 0.1):
            X_train, X_test, y_train, y_test = get_random_sample(dataset_name, train_frac=f)
            print('Training set size: {}'.format(X_train.shape))
            print('Test set size: {}'.format(X_test.shape))
            # Label-powerset model.
            mlp_powerset_model = PowersetKerasWrapper(columns=y_train.columns)
            mlp_powerset_model.fit(X_train.values, y_train.values)
            y_pred_mlp = mlp_powerset_model.predict(X_test.values)
            mlp_powerset_bootstrap_results[i][f].append(y_pred_mlp)
            # Multi-output model trained with the cross-label dependency loss.
            cld_loss = CrossLabelDependencyLoss(alpha=0.2)
            mlp_cld_model = MultiOutputKerasWrapper(columns=y_train.columns, loss=cld_loss)
            mlp_cld_model.fit(X_train.values, y_train.values)
            y_pred_cld = mlp_cld_model.predict(X_test.values)
            mlp_cld_bootstrap_results[i][f].append(y_pred_cld)
            # Record which label combinations each split actually contained.
            mlp_labels_bootstrap_results[i][f].append((_get_label_set(y_train), _get_label_set(y_test)))
    # NOTE(review): `f` here shadows the train-fraction loop variable above
    # as the file handle — harmless after the loops, but worth renaming.
    with open('training_size_bootstrap_{}.pkl'.format(dataset_name), 'wb') as f:
        pickle.dump({'cld': dict(mlp_cld_bootstrap_results),
                     'powerset': dict(mlp_powerset_bootstrap_results),
                     'labels': dict(mlp_labels_bootstrap_results)}, f)


if __name__ == '__main__':
    run()
| 41.938462
| 104
| 0.705796
| 387
| 2,726
| 4.625323
| 0.260982
| 0.036872
| 0.026816
| 0.020112
| 0.274302
| 0.217877
| 0.162011
| 0.072626
| 0.072626
| 0.041341
| 0
| 0.008945
| 0.179751
| 2,726
| 64
| 105
| 42.59375
| 0.791592
| 0.012106
| 0
| 0
| 0
| 0
| 0.063174
| 0.011148
| 0
| 0
| 0
| 0
| 0
| 1
| 0.06383
| false
| 0
| 0.148936
| 0.021277
| 0.255319
| 0.06383
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0d8a4edfd7425e0db4ca0bd8268ff4b94c0916a
| 1,268
|
py
|
Python
|
code/evaluate.py
|
Shuailong/CCGSupertagging
|
891a6a477a4a05daeb847d4a4c33a1bc929d97b2
|
[
"MIT"
] | 3
|
2018-11-09T04:33:12.000Z
|
2021-06-04T04:23:07.000Z
|
code/evaluate.py
|
Shuailong/CCGSupertagging
|
891a6a477a4a05daeb847d4a4c33a1bc929d97b2
|
[
"MIT"
] | 2
|
2017-03-13T02:56:09.000Z
|
2019-07-27T02:47:29.000Z
|
code/evaluate.py
|
Shuailong/CCGSupertagging
|
891a6a477a4a05daeb847d4a4c33a1bc929d97b2
|
[
"MIT"
] | 1
|
2020-11-25T06:09:33.000Z
|
2020-11-25T06:09:33.000Z
|
#!/usr/bin/env python
# encoding: utf-8
"""
evaluate.py
Created by Shuailong on 2016-12-2.
Evaluate model accuracy on test set.
"""
from __future__ import print_function
from time import time
from keras.models import load_model
import os
from utils import true_accuracy
from dataset import get_data
from train import MODEL_FILE, MODEL_DIR
from train import data_generator
def main():
    """Load the saved supertagging model and report accuracy on the test set."""
    start_time = time()

    print('\nGetting data...')
    data = get_data(force=False)
    X_test = data['X_test']
    X_test_feats = data['X_test_feats']
    y_test = data['y_test']
    tag_size = len(data['tag_index'])

    print('\nLoading models...')
    # true_accuracy is a custom metric, so Keras must be told about it
    # explicitly when deserializing the model.
    model = load_model(os.path.join(MODEL_DIR, MODEL_FILE), custom_objects={'true_accuracy': true_accuracy})

    print('\nEvaluating...')
    _, true_acc = model.evaluate_generator(data_generator(X_test, X_test_feats, y_test, tag_size),
                                           val_samples=len(X_test))
    print('Test accuracy: {}.'.format(true_acc))

    seconds = time() - start_time
    minutes = seconds / 60
    print('[Finished in {} seconds ({} minutes)]'.format(str(round(seconds, 1)),
                                                         str(round(minutes, 1))))


if __name__ == '__main__':
    main()
| 25.877551
| 108
| 0.645899
| 170
| 1,268
| 4.523529
| 0.423529
| 0.045514
| 0.039012
| 0.026008
| 0.06502
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012308
| 0.231073
| 1,268
| 48
| 109
| 26.416667
| 0.77641
| 0.096215
| 0
| 0
| 0
| 0
| 0.140845
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.285714
| 0
| 0.321429
| 0.214286
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0d98d3fbe99c6483d07cbca24e2f2d19d6ccfe4
| 4,691
|
py
|
Python
|
solum/api/controllers/v1/assembly.py
|
devdattakulkarni/test-solum
|
4e9ddb82d217116aa2c30a6f2581080cbdfae325
|
[
"Apache-2.0"
] | null | null | null |
solum/api/controllers/v1/assembly.py
|
devdattakulkarni/test-solum
|
4e9ddb82d217116aa2c30a6f2581080cbdfae325
|
[
"Apache-2.0"
] | null | null | null |
solum/api/controllers/v1/assembly.py
|
devdattakulkarni/test-solum
|
4e9ddb82d217116aa2c30a6f2581080cbdfae325
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2013 - Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pecan
from pecan import rest
import wsme
import wsmeext.pecan as wsme_pecan
from solum.api.controllers.v1.datamodel import assembly
import solum.api.controllers.v1.userlog as userlog_controller
from solum.api.handlers import assembly_handler
from solum.common import exception
from solum.common import request
from solum import objects
from solum.openstack.common.gettextutils import _
class AssemblyController(rest.RestController):
    """Manages operations on a single assembly."""

    def __init__(self, assembly_id):
        super(AssemblyController, self).__init__()
        # ID of the assembly this controller instance operates on.
        self._id = assembly_id

    @pecan.expose()
    def _lookup(self, primary_key, *remainder):
        # Route /assemblies/<id>/logs/... to the userlogs sub-controller.
        if remainder and not remainder[-1]:
            remainder = remainder[:-1]  # drop empty trailing-slash component
        if primary_key == 'logs':
            logs = userlog_controller.UserlogsController(self._id)
            return logs, remainder

    @exception.wrap_wsme_pecan_controller_exception
    @wsme_pecan.wsexpose(assembly.Assembly)
    def get(self):
        """Return this assembly."""
        request.check_request_for_https()
        handler = assembly_handler.AssemblyHandler(
            pecan.request.security_context)
        return assembly.Assembly.from_db_model(handler.get(self._id),
                                               pecan.request.host_url)

    @exception.wrap_wsme_pecan_controller_exception
    @wsme_pecan.wsexpose(assembly.Assembly, body=assembly.Assembly)
    def put(self, data):
        """Modify this assembly."""
        handler = assembly_handler.AssemblyHandler(
            pecan.request.security_context)
        res = handler.update(self._id,
                             data.as_dict(objects.registry.Assembly))
        return assembly.Assembly.from_db_model(res, pecan.request.host_url)

    @exception.wrap_wsme_pecan_controller_exception
    @wsme_pecan.wsexpose(status_code=204)
    def delete(self):
        """Delete this assembly."""
        handler = assembly_handler.AssemblyHandler(
            pecan.request.security_context)
        return handler.delete(self._id)
class AssembliesController(rest.RestController):
    """Manages operations on the assemblies collection."""

    @pecan.expose()
    def _lookup(self, assembly_id, *remainder):
        # Route /assemblies/<id>/... to a per-assembly controller.
        if remainder and not remainder[-1]:
            remainder = remainder[:-1]  # drop empty trailing-slash component
        return AssemblyController(assembly_id), remainder

    @exception.wrap_wsme_pecan_controller_exception
    @wsme_pecan.wsexpose(assembly.Assembly, body=assembly.Assembly,
                         status_code=201)
    def post(self, data):
        """Create a new assembly."""
        js_data = data.as_dict(objects.registry.Assembly)
        if data.plan_uri is not wsme.Unset:
            plan_uri = data.plan_uri
            if plan_uri.startswith(pecan.request.host_url):
                # Plan is hosted by this API: resolve its UUID to a DB id.
                pl_uuid = plan_uri.split('/')[-1]
                pl = objects.registry.Plan.get_by_uuid(
                    pecan.request.security_context, pl_uuid)
                js_data['plan_id'] = pl.id
            else:
                # TODO(asalkeld) we are not hosting the plan so
                # download the plan and insert it into our db.
                raise exception.BadRequest(reason=_(
                    'The plan was not hosted in solum'))
        if js_data.get('plan_id') is None:
            raise exception.BadRequest(reason=_(
                'The plan was not given or could not be found'))
        handler = assembly_handler.AssemblyHandler(
            pecan.request.security_context)
        return assembly.Assembly.from_db_model(
            handler.create(js_data), pecan.request.host_url)

    @exception.wrap_wsme_pecan_controller_exception
    @wsme_pecan.wsexpose([assembly.Assembly])
    def get_all(self):
        """Return all assemblies, based on the query provided."""
        request.check_request_for_https()
        handler = assembly_handler.AssemblyHandler(
            pecan.request.security_context)
        return [assembly.Assembly.from_db_model(assm, pecan.request.host_url)
                for assm in handler.get_all()]
| 39.420168
| 77
| 0.675336
| 566
| 4,691
| 5.413428
| 0.305654
| 0.032311
| 0.039164
| 0.052872
| 0.461815
| 0.421997
| 0.389687
| 0.389687
| 0.361619
| 0.359661
| 0
| 0.005884
| 0.239181
| 4,691
| 118
| 78
| 39.754237
| 0.85262
| 0.186741
| 0
| 0.3125
| 0
| 0
| 0.025232
| 0
| 0
| 0
| 0
| 0.008475
| 0
| 1
| 0.1
| false
| 0
| 0.1375
| 0
| 0.35
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0dbf6e6b17f8d31b9acfaa5334ab33b086914a3
| 1,379
|
py
|
Python
|
src/agility/usc/settings.py
|
bobbyluig/6.A01
|
16dd8963951eca4a1312a15c216d0cc3c117d063
|
[
"MIT"
] | null | null | null |
src/agility/usc/settings.py
|
bobbyluig/6.A01
|
16dd8963951eca4a1312a15c216d0cc3c117d063
|
[
"MIT"
] | null | null | null |
src/agility/usc/settings.py
|
bobbyluig/6.A01
|
16dd8963951eca4a1312a15c216d0cc3c117d063
|
[
"MIT"
] | 1
|
2021-02-24T07:13:01.000Z
|
2021-02-24T07:13:01.000Z
|
from agility.usc.enumeration import uscSerialMode, ChannelMode, HomeMode
from agility.usc.reader import BytecodeReader
class UscSettings:
    """Mutable container for Maestro USC board settings and the compiled script."""

    def __init__(self):
        # Servo timing configuration.
        self.servosAvailable = 6
        self.servoPeriod = 156
        self.miniMaestroServoPeriod = 80000
        self.servoMultiplier = 1
        # Serial interface configuration.
        self.serialMode = uscSerialMode.SERIAL_MODE_UART_DETECT_BAUD_RATE
        self.fixedBaudRate = 9600
        self.enableCrc = False
        self.neverSuspend = False
        self.serialDeviceNumber = 12
        self.miniSscOffset = 0
        self.serialTimeout = 0
        # Script and per-channel state.
        self.scriptDone = True
        self.channelSettings = []
        self.enablePullups = True
        self.scriptInconsistent = False
        self.script = None
        self.bytecodeProgram = None

    def __len__(self):
        # Number of configured channels.
        return len(self.channelSettings)

    def setAndCompileScript(self, script):
        """Compile *script* to bytecode and store both the source and program."""
        self.script = None
        is_mini = len(self) != 6
        self.bytecodeProgram = BytecodeReader().read(script, is_mini)
        self.script = script
class ChannelSetting:
    """Per-channel configuration: mode, home behaviour and motion limits."""

    def __init__(self):
        # Default values for a fresh channel; applied via a single table so
        # the defaults are easy to scan and extend.
        defaults = {
            'name': '',
            'mode': ChannelMode.Servo,
            'homeMode': HomeMode.Off,
            'home': 6000,
            'minimum': 3968,
            'maximum': 8000,
            'neutral': 6000,
            'range': 1905,
            'speed': 0,
            'acceleration': 0,
        }
        for attr, value in defaults.items():
            setattr(self, attr, value)
| 29.340426
| 73
| 0.636693
| 138
| 1,379
| 6.23913
| 0.485507
| 0.046458
| 0.03252
| 0.034843
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.041837
| 0.28934
| 1,379
| 46
| 74
| 29.978261
| 0.836735
| 0
| 0
| 0.1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.05
| 0.025
| 0.225
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0e0334b9f18fc8e44cf7c368bc6aba17a751a2d
| 1,123
|
py
|
Python
|
app/app8_18mix/h_noSeqSearch.py
|
ameenetemady/DeepPep
|
121826309667f1290fa1121746a2992943d0927b
|
[
"Apache-2.0"
] | 1
|
2020-05-30T06:01:50.000Z
|
2020-05-30T06:01:50.000Z
|
app/app8_18mix/h_noSeqSearch.py
|
ameenetemady/DeepPep
|
121826309667f1290fa1121746a2992943d0927b
|
[
"Apache-2.0"
] | null | null | null |
app/app8_18mix/h_noSeqSearch.py
|
ameenetemady/DeepPep
|
121826309667f1290fa1121746a2992943d0927b
|
[
"Apache-2.0"
] | 1
|
2019-10-20T21:11:48.000Z
|
2019-10-20T21:11:48.000Z
|
# Compute protein-level probabilities from peptide identifications without a
# sequence search, then save the sorted predictions as CSV.
import sys
import csv
import os
sys.path.append('../../')
import h_lib
import h_lib_noSeqSearch

# Input files live under $HOME/data; output goes to a local sparseData dir.
in_strFastaFilename = '{!s}/data/protein/18mix/18mix_db_plus_contaminants_20081209.fasta'.format(os.environ.get('HOME'))
in_strPeptideFilename = '{!s}/data/protein/18mix/18_mixtures_peptide_identification.txt'.format(os.environ.get('HOME'))
out_strOutputBaseDir = './sparseData_h'
out_strFile = out_strOutputBaseDir + "/h_noSeqSearch.csv"

# Parse peptides from tab-separated columns 0 and 2 — presumably peptide id
# and probability; confirm against h_lib.getPeptides.
YInfo = h_lib.getPeptides(in_strPeptideFilename, "\t", 0, 2)
###assuming proteins are already broken to individual files under in_strProtRefsDir
#XMatchProb = h_lib.getYInfo(YInfo, in_strProtRefsDir, strXMatchProb_filename, True)
# Match info is taken directly from the identification file (columns 0 and 1)
# instead of searching protein sequences.
XMatchProb = h_lib_noSeqSearch.getXInfo(YInfo, in_strPeptideFilename, "\t", 0, 1)
YMatchProbCount = h_lib.getPeptideProteinMatches(YInfo, XMatchProb)
h_lib.updateXMatchingProbabilities(XMatchProb, YMatchProbCount)
XPred = h_lib.getAccumulatedXMatchingProbabilities(XMatchProb)
XPred.sort()

# Write "<key>,<probability>" rows, probability to 6 decimal places.
with open(out_strFile, "w") as bfFile:
    for row in XPred:
        bfFile.write('{!s},{:.6f}\n'.format(row[0], row[1]))
print("result saved in:" + out_strFile)
| 38.724138
| 120
| 0.782725
| 146
| 1,123
| 5.808219
| 0.513699
| 0.037736
| 0.049528
| 0.040094
| 0.051887
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022439
| 0.087266
| 1,123
| 28
| 121
| 40.107143
| 0.804878
| 0.145147
| 0
| 0
| 0
| 0
| 0.216527
| 0.132845
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0.05
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0e04165ecde0d603bc47e0b5c5deaa17a56ab3a
| 700
|
py
|
Python
|
normalizer.py
|
ashokn414/python_floating_conversions
|
7a132c703272e6651daf555816171f04ee5b5555
|
[
"Apache-2.0"
] | null | null | null |
normalizer.py
|
ashokn414/python_floating_conversions
|
7a132c703272e6651daf555816171f04ee5b5555
|
[
"Apache-2.0"
] | null | null | null |
normalizer.py
|
ashokn414/python_floating_conversions
|
7a132c703272e6651daf555816171f04ee5b5555
|
[
"Apache-2.0"
] | null | null | null |
# for normalization we need to have the maxima of x and y values with the help of which
# we can normalise the given values
import csv

# Coordinates must lie in [-NORM, NORM] to be considered valid.
NORM = 50


def normalize_pair(a, b, norm=NORM):
    """Scale the coordinate pair (a, b) into [-1, 1] by dividing by *norm*.

    :param a: x coordinate (number or numeric string)
    :param b: y coordinate (number or numeric string)
    :param norm: normalization bound (defaults to NORM)
    :return: (a/norm, b/norm) tuple, or None when either coordinate is
        outside the [-norm, norm] bound.
    """
    a = float(a)
    b = float(b)
    if a > norm or b > norm or a < -norm or b < -norm:
        return None
    return a / norm, b / norm


def main(filename="values.csv"):
    """Read coordinate rows from *filename* and print their normalized values.

    Bug fixes vs. the original script:
    - `b` was never assigned (its input() line was commented out), so the
      bounds check raised NameError.
    - `a = col[0]` took the first *character* of a CSV cell (a str), making
      `a > norm` a str/int TypeError on Python 3.  Rows are now parsed as
      two float columns.
    """
    with open(filename, 'r') as csvfile:
        reader = csv.reader(csvfile)
        next(reader)  # skip the header row (originally stored in `fields`)
        rows = list(reader)
    for row in rows:
        result = normalize_pair(row[0], row[1])
        if result is None:
            print("the value given is invalid/out of bound")
        else:
            a, b = result
            print("the normalized values are " + str(a) + "," + str(b))


if __name__ == "__main__":
    main()
| 26.923077
| 89
| 0.615714
| 115
| 700
| 3.747826
| 0.478261
| 0.034803
| 0.037123
| 0.083527
| 0.055684
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005758
| 0.255714
| 700
| 26
| 90
| 26.923077
| 0.821497
| 0.291429
| 0
| 0
| 0
| 0
| 0.164882
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.052632
| 0
| 0.052632
| 0.105263
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0e23eae8f30892f74e1745a5cf500f5f0c7d685
| 3,477
|
py
|
Python
|
pygdp/fwgs.py
|
jiwalker-usgs/pyGDP
|
dca4789fb0c53c889d6fa1b38ec867bc939a2d04
|
[
"CC0-1.0"
] | null | null | null |
pygdp/fwgs.py
|
jiwalker-usgs/pyGDP
|
dca4789fb0c53c889d6fa1b38ec867bc939a2d04
|
[
"CC0-1.0"
] | null | null | null |
pygdp/fwgs.py
|
jiwalker-usgs/pyGDP
|
dca4789fb0c53c889d6fa1b38ec867bc939a2d04
|
[
"CC0-1.0"
] | null | null | null |
from pygdp import _execute_request
from pygdp import _get_geotype
from owslib.util import log
def submitFeatureWeightedGridStatistics(geoType, dataSetURI, varID, startTime, endTime, attribute, value, gmlIDs,
                                        verbose, coverage, delim, stat, grpby, timeStep, summAttr, weighted, WFS_URL, outputfname, sleepSecs):
    """
    Makes a featureWeightedGridStatistics algorithm call.
    The web service interface implemented is summarized here:
    https://my.usgs.gov/confluence/display/GeoDataPortal/Generating+Area+Weighted+Statistics+Of+A+Gridded+Dataset+For+A+Set+Of+Vector+Polygon+Features
    Note that varID and stat can be a list of strings.
    """
    # Statistic names the service accepts; was duplicated inline three times.
    allowed_stats = ("MEAN", "MINIMUM", "MAXIMUM", "VARIANCE", "STD_DEV", "SUM", "COUNT")

    # test for dods:
    dataSetURI = _execute_request.dodsReplace(dataSetURI)
    log.info('Generating feature collection.')
    featureCollection = _get_geotype._getFeatureCollectionGeoType(geoType, attribute, value, gmlIDs, WFS_URL)
    if featureCollection is None:
        return

    # Unweighted requests use the plain (non-area-weighted) algorithm.
    if weighted:
        processid = 'gov.usgs.cida.gdp.wps.algorithm.FeatureWeightedGridStatisticsAlgorithm'
    else:
        processid = 'gov.usgs.cida.gdp.wps.algorithm.FeatureGridStatisticsAlgorithm'

    solo_inputs = [("FEATURE_ATTRIBUTE_NAME", attribute),
                   ("DATASET_URI", dataSetURI),
                   ("TIME_START", startTime),
                   ("TIME_END", endTime),
                   ("REQUIRE_FULL_COVERAGE", str(coverage).lower()),
                   ("DELIMITER", delim),
                   ("GROUP_BY", grpby),
                   ("SUMMARIZE_TIMESTEP", str(timeStep).lower()),
                   ("SUMMARIZE_FEATURE_ATTRIBUTE", str(summAttr).lower()),
                   ("FEATURE_COLLECTION", featureCollection)]

    # Normalize the stat and varID arguments to lists and validate the stats.
    stats = stat if isinstance(stat, list) else [stat]
    if len(stats) > 7:
        raise Exception('Too many statistics were submitted.')
    for stat_in in stats:
        if stat_in not in allowed_stats:
            raise Exception('The statistic %s is not in the allowed list: "MEAN", "MINIMUM", "MAXIMUM", "VARIANCE", "STD_DEV", "SUM", "COUNT"' % stat_in)
    var_ids = varID if isinstance(varID, list) else [varID]

    # Build the request payload: non-None solo inputs first (original order),
    # then one STATISTICS entry per stat, then one DATASET_ID per variable.
    # This replaces the original preallocate-then-delete bookkeeping.
    inputs = [pair for pair in solo_inputs if pair[1] is not None]
    inputs.extend(("STATISTICS", stat_in) for stat_in in stats)
    inputs.extend(("DATASET_ID", var) for var in var_ids)

    output = "OUTPUT"
    return _execute_request._executeRequest(processid, inputs, output, verbose, outputfname, sleepSecs)
| 39.965517
| 157
| 0.595628
| 370
| 3,477
| 5.454054
| 0.372973
| 0.023786
| 0.035679
| 0.051536
| 0.170466
| 0.170466
| 0.170466
| 0.135778
| 0.135778
| 0.135778
| 0
| 0.00608
| 0.29048
| 3,477
| 86
| 158
| 40.430233
| 0.811917
| 0.094334
| 0
| 0.112903
| 0
| 0.032258
| 0.225257
| 0.06491
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016129
| false
| 0
| 0.048387
| 0
| 0.096774
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0e3e3c9acda1bb91d2a503dbb2cdcf350023dcd
| 45,861
|
py
|
Python
|
wizbot.py
|
Wizard-Of-Chaos/WizardBot
|
75a2e482c7d7921e9a06dde4d210c68330c6fbe2
|
[
"MIT"
] | null | null | null |
wizbot.py
|
Wizard-Of-Chaos/WizardBot
|
75a2e482c7d7921e9a06dde4d210c68330c6fbe2
|
[
"MIT"
] | null | null | null |
wizbot.py
|
Wizard-Of-Chaos/WizardBot
|
75a2e482c7d7921e9a06dde4d210c68330c6fbe2
|
[
"MIT"
] | null | null | null |
#WIZARD BOT IS LIVE
import calendar
import discord as dc
from discord.ext.commands import Bot
from discord.ext import commands
from functools import partial
import asyncio as aio
import time
from random import randint
from datetime import datetime
from discord.ext import commands
from guildconfig import GuildConfig
from rolesaver import RoleSaver
#initializes bot, sets up command sign
bot = commands.Bot(command_prefix = '!')
bot.remove_command('help')
guild_config = GuildConfig(bot, 'config.pkl')
role_saver = RoleSaver(bot, 'roles.pkl')
#GAME STUFF
class Monster:
    """A combat entity with speed, damage, health and a damage type.

    dmg_type is 0 for physical damage, 1 for magical damage.  Note that
    make_attack() returns a string "<dmg_type> <dmg>" — the old comment
    claiming a tuple was inaccurate.
    """

    def __init__(self, speed, damage, health, dmg_type):
        self.spd = speed
        self.dmg = damage
        self.hp = health
        self.dmg_type = dmg_type
        self.is_alive = True

    # --- accessors ---------------------------------------------------
    def health(self):
        return self.hp

    def speed(self):
        return self.spd

    def damage(self):
        return self.dmg

    def life(self):
        return self.is_alive

    # --- mutators ----------------------------------------------------
    def take_hit(self, damage):
        """Subtract *damage* from health; the monster dies at 0 hp or below."""
        self.hp -= damage
        if self.hp <= 0:
            self.is_alive = False

    def make_attack(self):
        """Return this monster's attack encoded as "<dmg_type> <dmg>"."""
        return "%s %s" % (self.dmg_type, self.dmg)
class Player:
    """The player: 100 hp, 10 base damage, and optional shields.

    Shield states: 0 = none, 1 = physical, 2 = magical, 3 = both.
    """

    def __init__(self):
        self.hp = 100  # Classic!
        self.dmg = 10
        self.shield = 0
        self.s_dur = 0
        self.is_alive = True

    # --- accessors ---------------------------------------------------
    def damage(self):
        return self.dmg

    def life(self):
        return self.is_alive

    def shield_type(self):
        return self.shield

    def shield_dur(self):
        return self.s_dur

    def health(self):
        return self.hp

    # --- mutators ----------------------------------------------------
    def take_hit(self, damage):
        """Apply *damage*; the player dies at 0 hp or below."""
        self.hp -= damage
        if self.hp <= 0:
            self.is_alive = False

    def shield_hit(self):
        """Consume one point of shield durability; shield breaks at 0."""
        self.s_dur -= 1
        if self.s_dur == 0:
            self.shield = 0

    def heal(self, heal):
        self.hp += heal

    def dangerify(self, damage):
        self.dmg += damage

    def get_shield(self, shield):
        """Pick up a shield (0 = physical, 1 = magical).

        A fresh shield lasts 10 hits; combining with the opposite type
        yields the 'both' shield (3) with durability reset to 5.  Picking
        up a type already covered leaves the state unchanged.
        """
        if shield == 0:
            if self.shield == 0:
                self.shield, self.s_dur = 1, 10
            elif self.shield == 2:
                self.shield, self.s_dur = 3, 5
        elif shield == 1:
            if self.shield == 0:
                self.shield, self.s_dur = 2, 10
            elif self.shield == 1:
                self.shield, self.s_dur = 3, 5
#WIZBOT OLD STUFF ENDS HERE
#FUNCTIONS HERE
def get_token():
    """Read token.dat and decode its hex-pair contents into the bot token.

    Each pair of hex digits in the file becomes one character; a trailing
    unpaired digit is ignored (matching the original zip-based pairing).
    """
    with open('token.dat', 'r') as tokenfile:
        hexstr = tokenfile.read().strip()
    # Stop at len-1 so an odd trailing digit is dropped, not decoded alone.
    chars = [chr(int(hexstr[i:i + 2], 16)) for i in range(0, len(hexstr) - 1, 2)]
    return ''.join(chars)
def monthdelta(date, delta):
    """Return *date* shifted by *delta* months, clamping the day to the last
    valid day of the target month (e.g. Mar 31 minus one month -> Feb 28/29).
    """
    total = date.month + delta
    month = total % 12
    year = date.year + (total - 1) // 12
    if not month:
        month = 12  # wrap month 0 back to December
    last_day = calendar.monthrange(year, month)[1]
    return date.replace(day=min(date.day, last_day), month=month, year=year)
async def get_last_seen(member, pendant=None):
    """Return the datetime of *member*'s most recent message across all text
    channels of their guild, or None if no message was found.

    :param pendant: only consider messages after this datetime (passed to
        channel.history(after=...)); None means no lower bound.
    """
    lastseen = None
    for channel in member.guild.text_channels:
        # NOTE(review): matching by display_name rather than id — a nickname
        # collision could attribute another user's message to this member.
        lastmsg = await channel.history(limit=None, after=pendant).get(author__name=member.display_name)
        if lastmsg and (lastseen is None or lastseen < lastmsg.created_at):
            lastseen = lastmsg.created_at
    return lastseen
#START OF EVENTS
@bot.event
async def on_ready():
    # Connection handshake finished; report which bot account is live.
    print(f'{bot.user} has connected to Discord!')


@bot.event
async def on_message(message):
    # Easter egg, then hand the message to the normal command processor —
    # overriding on_message suppresses command handling unless this is called.
    if message.content == "EAT THAT HORSE!":
        await message.channel.send(":horse:")
    await bot.process_commands(message)
@bot.event
async def on_message_edit(bfr, aft):
    """Log message edits (before/after content) to the guild's msglog channel."""
    if bfr.author == bot.user:
        return  # ignore the bot's own edits
    if not hasattr(bfr.channel, 'guild'):
        return  # DMs have no guild, hence no log channel
    guild_id = bfr.channel.guild.id
    if guild_id in guild_config.mod_channels:
        embed = dc.Embed(color=dc.Color.gold(), timestamp=aft.created_at)
        embed.set_author(
            name=f'@{bfr.author} edited a message in #{bfr.channel}:',
            icon_url=bfr.author.avatar_url,
        )
        embed.add_field(name='**Before:**', value=bfr.content, inline=False)
        embed.add_field(name='**After:**', value=aft.content, inline=False)
        embed.add_field(name='**MESSAGE ID:**', value=f'`{aft.id}`')
        embed.add_field(name='**USER ID:**', value=f'`{bfr.author.id}`')
        await bot.get_channel(guild_config.mod_channels[guild_id]['msglog']).send(
            embed=embed
        )
@bot.event
async def on_message_delete(msg):
    """Log deleted messages (content plus IDs) to the guild's msglog channel."""
    if not hasattr(msg.channel, 'guild'):
        return  # DMs have no guild, hence no log channel
    guild_id = msg.channel.guild.id
    if guild_id in guild_config.mod_channels:
        embed = dc.Embed(
            color=dc.Color.darker_grey(),
            timestamp=msg.created_at,
            description=msg.content,
        )
        embed.set_author(
            name=f'@{msg.author} deleted a message in #{msg.channel}:',
            icon_url=msg.author.avatar_url,
        )
        embed.add_field(name='**MESSAGE ID:**', value=f'`{msg.id}`')
        embed.add_field(name='**USER ID:**', value=f'`{msg.author.id}`')
        await bot.get_channel(guild_config.mod_channels[guild_id]['msglog']).send(
            embed=embed
        )
@bot.event
async def on_member_join(member):
    """Restore saved roles for a (re)joining member and log the join."""
    guild = member.guild
    if guild.id in guild_config.mod_channels:
        # Re-apply any roles saved when this member previously left.
        await role_saver.load_roles(member)
        embed = dc.Embed(
            color=dc.Color.green(),
            timestamp=datetime.utcnow(),
            description=f':green_circle: **{member}** has joined **{guild}**!\n'
            f'The guild now has {len(guild.members)} members!\n'
            f'This account was created on `{member.created_at.strftime("%d/%m/%Y %H:%M:%S")}`'
        )
        embed.set_author(name=f'A user has joined the server!')
        embed.set_thumbnail(url=member.avatar_url)
        embed.add_field(name='**USER ID:**', value=f'`{member.id}`')
        await bot.get_channel(guild_config.mod_channels[guild.id]['usrlog']).send(
            embed=embed
        )
@bot.event
async def on_member_remove(member):
    """Save the leaver's roles and log the departure with last-seen info."""
    guild = member.guild
    if guild.id in guild_config.mod_channels:
        # Snapshot roles so they can be restored if the member rejoins.
        role_saver.save_roles(member)
        timestamp = datetime.utcnow()
        # Only search the last month of history for the member's last message.
        lastseen = await get_last_seen(member, monthdelta(timestamp, -1)) # Moved grabbing last seen to a function
        if lastseen is not None:
            lastseenmsg = f'This user was last seen on `{lastseen.strftime("%d/%m/%Y %H:%M:%S")}`'
        else:
            lastseenmsg = 'This user has not spoken for at least 1 month!'
        embed = dc.Embed(
            color=dc.Color.red(),
            timestamp=timestamp,
            description=f':red_circle: **{member}** has left **{guild}**!\n'
            f'The guild now has {len(guild.members)} members!\n{lastseenmsg}'
        )
        embed.set_author(name=f'A user left or got beaned!')
        embed.set_thumbnail(url=member.avatar_url)
        embed.add_field(
            name='**ROLES SNAGGED:**',
            value=(', '.join(
                f'`{guild.get_role(role).name}`'
                for role in role_saver.get_roles(member)
            )
            or None),
            inline=False)
        embed.add_field(name='**USER ID:**', value=f'`{member.id}`')
        await bot.get_channel(guild_config.mod_channels[guild.id]['usrlog']).send(
            embed=embed
        )
@bot.event
async def on_member_update(bfr, aft): # Log role and nickname changes
    """Log nickname and role changes to the guild's usrlog channel."""
    guild = bfr.guild
    if guild.id in guild_config.mod_channels:
        changetype = None
        if bfr.nick != aft.nick:
            changetype = 'Nickname Update:'
            changelog = f'**{bfr}** had their nickname changed to **{aft.nick}**'
        if bfr.roles != aft.roles:
            changetype = 'Role Update:'
            # Symmetric difference yields the single role that changed.
            diffrole = next(iter(set(aft.roles) ^ set(bfr.roles)))
            difftype = 'added' if len(bfr.roles) < len(aft.roles) else 'removed'
            changelog = f'**{aft}** had the following role {difftype}: `{diffrole.name}`'
        if changetype is not None:
            embed = dc.Embed(
                color=dc.Color.blue(),
                timestamp=datetime.utcnow(),
                description=changelog,
            )
            embed.set_author(name=changetype, icon_url=aft.avatar_url)
            embed.add_field(name='**USER ID:**', value=f'`{aft.id}`', inline=False)
            await bot.get_channel(guild_config.mod_channels[guild.id]['usrlog']).send(
                embed=embed
            )
@bot.event
async def on_user_update(bfr, aft): # Log avatar, name, discrim changes
    """Log username/discriminator/avatar changes to each guild the user shares
    with the bot (only the last detected change type is reported per event)."""
    for guild in bot.guilds:
        if guild.get_member(bfr.id) is not None:
            changetype = None
            if bfr.name != aft.name:
                changetype = 'Username Update:'
                changelog = f'@{bfr} has changed their username to {aft}'
            if bfr.discriminator != aft.discriminator:
                changetype = 'Discriminator Update:'
                changelog = (
                    f'@{bfr} had their discriminator changed from '
                    f'{bfr.discriminator} to {aft.discriminator}'
                )
            if bfr.avatar != aft.avatar:
                changetype = 'Avatar Update:'
                changelog = f'@{bfr} has changed their avatar to:'
            if changetype is not None:
                embed = dc.Embed(
                    color=dc.Color.purple(),
                    timestamp=datetime.utcnow(),
                    description=changelog,
                )
                embed.set_author(name=changetype, icon_url=bfr.avatar_url)
                if changetype.startswith('Avatar'):
                    # Show the new avatar image itself.
                    embed.set_thumbnail(url=f'{aft.avatar_url}')
                embed.add_field(name='**USER ID:**', value=f'`{aft.id}`', inline=False)
                await bot.get_channel(guild_config.mod_channels[guild.id]['usrlog']).send(
                    embed=embed
                )
#END OF EVENTS
@bot.command()
async def slap(ctx, arg):
    # {1} selects the second format argument; ctx is passed but unused.
    await ctx.send("You have slapped {1}!" .format(ctx, arg))


@bot.command()
async def hello(ctx):
    # Trivial liveness/greeting command.
    await ctx.send("Hello, World!")


@bot.command()
async def echo(ctx, arg):
    # Repeat the (single, possibly quoted) argument back to the channel.
    await ctx.send(arg)


@bot.command()
async def roll(ctx, arg):
    # Roll a die with `arg` sides: a uniform integer in [1, arg].
    value = randint(1, int(arg))
    await ctx.send("You have rolled a {1}!" .format(ctx, value))
@bot.command()
async def help(ctx):
    """Send the custom help embed (the default help command is removed at startup)."""
    embed = dc.Embed(
        color=ctx.author.color,
        timestamp=ctx.message.created_at,
        description=f'It seems you have asked about the Homestuck and Hiveswap Discord Utility Bot:tm:.'
        f'This is a bot designed to cater to the server\'s moderation, utility, and statistic '
        f'tracking needs. If the functions herein described are not performing to the degree '
        f'that is claimed, please direct your attention to Wizard of Chaos#2459.\n\n'
        f'**Command List:**',
    )
    embed.set_author(name='Help message', icon_url=bot.user.avatar_url)
    embed.add_field(name='`help`', value='Display this message.', inline=False)
    embed.add_field(
        name='`info [username]`',
        value='Grabs user information. Leave username empty to get your own info.',
        inline=False
    )
    embed.add_field(name='`ping`', value='Pong!', inline=False)
    embed.add_field(
        name='`config (msglog|usrlog)`',
        value='(Manage Server only) Sets the appropriate log channel.',
        inline=False
    )
    await ctx.send(embed=embed)
@bot.command()
async def info(ctx, member : str=None):
    """Show account information for *member* (matched by display name), or
    for the caller when no name is given."""
    if member is not None:
        # Resolve the display name to a Member object; for-else reports
        # failure if no guild member matched.
        for gmember in ctx.guild.members:
            if member == gmember.display_name:
                member = gmember
                break
        else:
            await ctx.send(
                'It seems that user can\'t be found. Please check your spelling. '
                'Alternatively, try adding double quotes ("") around the name.'
            )
            return
    else:
        member = ctx.author
    timestamp = datetime.utcnow()
    # Only search the last month of history for the member's last message.
    lastseen = await get_last_seen(member, monthdelta(timestamp, -1))
    if lastseen is not None:
        lastseenmsg = lastseen.strftime("%d/%m/%Y %H:%M:%S")
    else:
        lastseenmsg = 'This user has not spoken for at least 1 month!'
    embed = dc.Embed(color=member.color, timestamp=timestamp)
    embed.set_author(name=f'Information for {member}')
    embed.set_thumbnail(url=member.avatar_url)
    embed.add_field(name='User ID:', value=f'{member.id}')
    embed.add_field(name='Last Seen:', value=lastseenmsg, inline=False)
    embed.add_field(name='Account Created On:', value=member.created_at.strftime('%d/%m/%Y %H:%M:%S'))
    embed.add_field(name='Guild Joined On:', value=member.joined_at.strftime('%d/%m/%Y %H:%M:%S'))
    # member.roles[1:] skips the implicit @everyone role.
    embed.add_field(name='Roles:', value=', '.join(f'`{role.name}`' for role in member.roles[1:]), inline=False)
    if ctx.author != member:
        msg = 'It seems you\'re a bit of a stalker, aren\'t you?'
    else:
        msg = None
    await ctx.send(msg, embed=embed)
@bot.command()
async def ping(ctx):
    """Latency smoke test: mention the caller with a pong."""
    caller_id = ctx.message.author.id
    await ctx.send(f'Pong, <@!{caller_id}>!')
@bot.group()
async def config(ctx):
    """Parent group for the log-channel configuration subcommands.

    Runs only when no known subcommand was matched.
    """
    if ctx.invoked_subcommand is not None:
        return
    await ctx.send(
        'It seems that you have attempted to run a nonexistent command. '
        'Would you like to try again? Redos are free, you know.'
    )
@config.command()
async def usrlog(ctx):
    """(Manage Server only) Set the user-log channel via guild_config.

    Idiom fix: truthiness test instead of the `== True` comparison.
    """
    if ctx.author.guild_permissions.manage_guild:
        await ctx.send(guild_config.setlog(ctx, 'usrlog'))
    else:
        await ctx.send("It seems that you don't have the appropriate permissions for this command.")
@config.command()
async def msglog(ctx):
    """(Manage Server only) Set the message-log channel via guild_config.

    Idiom fix: truthiness test instead of the `== True` comparison.
    """
    if ctx.author.guild_permissions.manage_guild:
        await ctx.send(guild_config.setlog(ctx, 'msglog'))
    else:
        await ctx.send("It seems that you don't have the appropriate permissions for this command.")
#GAME EVENT
#ABANDON ALL HOPE YE WHO GO BELOW HERE
@bot.command()
async def rogue_game(ctx):
    """Single-player roguelike mini-game played through chat messages.

    Flow: the invoker picks a starting buff, then repeatedly fights
    randomly generated monsters (attack/defense exchanges with reaction
    timeouts) until they die or answer "No" to "Continue?". Relies on
    the Player and Monster classes defined elsewhere in this file.
    """
    await ctx.send("Game started! Choose a starting buff - 'Health' or 'Damage'.")
    # Accept only the exact buff keywords, and only from the game's invoker.
    def check(m):
        if m.author == ctx.author:
            return m.content == "Health" or m.content == "Damage" or m.content == "CMSC280 FREE PASS"
        else:
            return False
    gamer = Player() #Initializing player class
    msg = await bot.wait_for("message", check=check)
    if msg.content == "Health":
        await ctx.send("+25 HP!")
        gamer.heal(25)
    elif msg.content == "Damage":
        await ctx.send("+5 Damage!")
        gamer.dangerify(5)
    elif msg.content == "CMSC280 FREE PASS":
        await ctx.send("Free shield!")
        gamer.get_shield(1)
    # NOTE(review): this call runs unconditionally, even right after the free
    # shield granted above -- confirm get_shield(0) does not clobber it.
    gamer.get_shield(0)
    await ctx.send("OPTIONS: You can 'Block', 'Dodge' or 'Attack' a monster. Alternatively, you may 'Die'.")
    slain_enemies = 0
    def continue_check(m): #Check used several times
        if m.author == ctx.author:
            return m.content == "Yes" or m.content == "No"
        else:
            return False
    while gamer.life() == True:
        # NOTE(review): randint(1, 1) always yields 1, so only the monster
        # encounter below ever fires; branches 2-5 are currently unreachable.
        game_roll = randint(1, 1) #placeholder
        if game_roll == 1:
            #Monster speed is between 5 and 12.
            #Monster health is between 40 and 120.
            #Monster damage is between 5 and 20.
            #Monster damage type is random one or the other (physical or magical).
            m_speed = randint(5, 12)
            m_hp = randint(40, 120)
            m_dmg = randint(5, 20)
            m_type = randint(0, 1)
            danger = Monster(m_speed, m_dmg, m_hp, m_type) #Initializing monster class
            print(f"Monster generated.")
            await ctx.send("There is a beast, and you must tenderize it!")
            while danger.life() == True:
                await ctx.send("Monsters speed is {1}, damage {2}, health {3}." .format(ctx, danger.speed(), danger.damage(), danger.health()))
                # make_attack() encodes the attack as a space-separated string;
                # by the usage below, a "0"/"1" token is the damage type
                # (physical/magical) and m_attk[1] is the damage amount.
                m_attk_str = danger.make_attack()
                m_attk = m_attk_str.split(" ")
                if "0" in m_attk:
                    await ctx.send("The monster is about to bite you!")
                elif "1" in m_attk:
                    await ctx.send("The monster is about to breathe fire at you!")
                def game_response(m): #Player response
                    if m.author == ctx.author:
                        return m.content == "Block" or m.content == "Dodge" or m.content == "Attack" or m.content == "Die"
                    else:
                        return False
                #Reactions to the monster's attack
                try:
                    # The monster's speed doubles as the reaction window (seconds).
                    g_msg = await bot.wait_for("message",timeout=m_speed, check=game_response)
                    if g_msg.content == "Block":
                        if "0" in m_attk:
                            # Shield types 1 and 3 stop physical attacks.
                            if gamer.shield_type() == 1 or gamer.shield_type() == 3:
                                gamer.shield_hit()
                                await ctx.send("You block the attack!")
                                if gamer.shield_type() == 0:
                                    await ctx.send("Your shield shatters from the force of the blow.")
                            else:
                                await ctx.send("You try to block it, but your shield isn't rated for this kind of damage!")
                                bp_damage = int(m_attk[1])
                                gamer.take_hit(bp_damage)
                                curhp = gamer.health()
                                await ctx.send("Your health is {1}." .format(ctx, curhp))
                        if "1" in m_attk:
                            # Shield types 2 and 3 stop magical attacks.
                            if gamer.shield_type() == 2 or gamer.shield_type() == 3:
                                gamer.shield_hit()
                                await ctx.send("You block the attack!")
                                if gamer.shield_type() == 0:
                                    await ctx.send("Your shield falls to pieces in a burst of multicolored light.")
                            else:
                                await ctx.send("The magical assault burns right through your shield!")
                                bm_damage = int(m_attk[1])
                                gamer.take_hit(bm_damage)
                                curhp = gamer.health()
                                await ctx.send("Your health is {1}." .format(ctx, curhp))
                    if g_msg.content == "Dodge":
                        # Dodging subtracts a random 5-18 from the incoming damage
                        # (which can over-subtract into a negative "hit").
                        await ctx.send("You roll to one side, avoiding some of the damage!")
                        d_damage = int(m_attk[1])
                        hit = d_damage - randint(5, 18)
                        gamer.take_hit(hit)
                        await ctx.send("Your health is {1}." .format(ctx, gamer.health()))
                    if g_msg.content == "Attack":
                        # Counter-attacking trades extra incoming damage for a hit
                        # on the monster.
                        await ctx.send("You strike at the monster, but in doing so, expose yourself to the blow!") #Heh. Expose yourself. Good one, me.
                        a_damage = int(m_attk[1])
                        hit = a_damage + randint(5, 10)
                        gamer.take_hit(hit)
                        danger.take_hit(gamer.damage())
                        await ctx.send("Your health is {1}." .format(ctx, gamer.health()))
                    if g_msg.content == "Die":
                        # Self-destruct: deal the player's full health to themselves.
                        await ctx.send("You die before the blow hits, confusing the monster.")
                        gamer.take_hit(gamer.health())
                except asyncio.TimeoutError:
                    # No reaction in time: the attack lands at full damage.
                    await ctx.send("You didn't move fast enough! The attack lands!")
                    t_damage = int(m_attk[1])
                    gamer.take_hit(t_damage)
                    await ctx.send("Your health is {1}." .format(ctx, gamer.health()))
                if gamer.life() == False:
                    break
                # The player's free attack window after the monster's turn.
                await ctx.send("The monster rears back! Quickly, hit the thing!")
                def attack_response(m):
                    if m.author == ctx.author:
                        return m.content == "Attack"
                    else:
                        return False
                try:
                    a_msg = await bot.wait_for("message", timeout=m_speed, check=attack_response)
                    if a_msg.content == "Attack":
                        await ctx.send("You hit the monster!")
                        danger.take_hit(gamer.damage())
                except asyncio.TimeoutError:
                    await ctx.send("You didn't move fast enough!")
                #Right, by this point, the monster has attacked, and the player has attacked.
                #Need to check if the player is dead or not.
                if gamer.life() == False:
                    break
            #Only other option now is that the monster is still alive, requiring another turn, or it's dead, in which case...
            #We should end up here, outside the loop.
            if gamer.life() == True: #Necessary. Can break above loop without being alive, due to 'Die'.
                await ctx.send("The monster has been defeated.")
                slain_enemies = slain_enemies + 1
                lootroll = randint(0, 4)
                #Five cases. 0 - nothing. 1 - Physical shield. 2 - Magic shield. 3 - Health. 4 - Damage.
                if lootroll == 0:
                    await ctx.send("The monster dropped nothing.")
                if lootroll == 1:
                    await ctx.send("In the monster's digestive tract, you find a metal shield!")
                    gamer.get_shield(0)
                if lootroll == 2:
                    await ctx.send("In the monster's spleen, you find a runic shield, glowing with spellcraft!")
                    gamer.get_shield(1)
                if lootroll == 3:
                    healthroll = randint(5, 30)
                    await ctx.send("The monster's blood is a powerful restorative! You heal for {1}." .format(ctx, healthroll))
                    gamer.heal(healthroll)
                if lootroll == 4:
                    dmgroll = randint(3, 12)
                    await ctx.send("You monster's bones make an excellent weapon! Your damage increases by {1}." .format(ctx, dmgroll))
                    gamer.dangerify(dmgroll)
                #Loot handled. Looping again after describing player stats.
                await ctx.send("Your health is {1} and your damage is {2}." .format(ctx, gamer.health(), gamer.damage()))
                if gamer.shield_type() == 0:
                    await ctx.send("You have no shield.")
                elif gamer.shield_type() == 1:
                    await ctx.send("You have a sturdy metal shield. It can take {1} more hits." .format(ctx, gamer.shield_dur()))
                elif gamer.shield_type() == 2:
                    await ctx.send("You have a rune-inscribed shield. It can take {1} more hits." .format(ctx, gamer.shield_dur()))
                elif gamer.shield_type() == 3:
                    await ctx.send("You have an inscribed metal shield. Powerful! It can take {1} more hits." .format(ctx, gamer.shield_dur()))
                await ctx.send("Continue?")
                con_msg = await bot.wait_for("message", check=continue_check)
                if con_msg.content == "No":
                    break
        #End of combat loop. Player is dead.
        if game_roll == 2:
            await ctx.send("You encounter a great and terrible wizard.")
            await ctx.send("Continue?")
            con_msg = await bot.wait_for("message", check=continue_check)
        if game_roll == 3:
            await ctx.send("You stumble into a trap!")
            await ctx.send("Continue?")
            con_msg = await bot.wait_for("message", check=continue_check)
        if game_roll == 4:
            await ctx.send("Rocks fall, everyone dies.")
            await ctx.send("Continue?")
            con_msg = await bot.wait_for("message", check=continue_check)
        if game_roll == 5:
            await ctx.send("A man just walks up and punches you. What a jerk.")
            await ctx.send("Continue?")
            con_msg = await bot.wait_for("message", check=continue_check)
        #Placeholder maneuvers. Plan to expand game later with more events.
        #Get duel working for demo
    await ctx.send("You have died. Nice try, though! You killed {1} monsters." .format(ctx, slain_enemies))
@bot.command()
#Shoutout to my friend Janine for helping me cut this beast of a function in half.
async def duel(ctx, *, member):
    """Turn-based PvP duel between the invoker and *member*.

    Each round one player declares an attack (optionally with a left/right
    direction; "top" means no direction) and the other answers with a
    defense; damage is resolved from the attack/defense/direction combo.
    The first player whose Player() stats run out of health loses.

    Bug fixes relative to the original:
      * wrong-direction block of a left critical strike now deals the same
        20 damage the mirrored right-hand branch always dealt;
      * dodging right against a left critical strike now succeeds (the
        branch tested `defense_token` instead of `defense_dir` and was
        unreachable);
      * the indecision branch now reports the defender's own health
        (was hard-coded to player2 regardless of whose turn it was);
      * dodging a directionless power attack now works (`if defense_dir:`
        was always truthy because the default is "top");
      * blocked power attacks now report the defender's health like every
        other damage branch.
    """
    await ctx.send("You have challenged {1} to a duel! How do you respond {1}?".format(ctx, member))
    # NOTE(review): `member` arrives as a plain string (no Member converter),
    # so the `m.author == duelee` checks below compare a Member against a
    # str -- confirm this matches real authors as intended.
    duelee = member # Discord member, shown as 'Wizard of Chaos#2459' or similar
    player1 = Player()
    dueler = ctx.author # ditto
    player2 = Player()
    def filter_tokens(msg, tokens):
        """Returns a list of tokens from the sequence that appear in the message."""
        text = msg.content.strip().lower()
        return [t for t in tokens if t in text]
    def check(m): # Check if duel is accepted
        return m.author == duelee and bool(filter_tokens(m, ('accept', 'decline')))
    try:
        msg = await bot.wait_for("message", timeout=20, check=check)
        tokens = filter_tokens(msg, ('accept', 'decline'))
        if len(tokens) > 1:
            await ctx.send("Your indecision has weirded out your opponent. Good job.")
            return
        if 'decline' == tokens[0]:
            await ctx.send("You have declined the challenge, everyone judges you.") #Coward.
            return
        if 'accept' == tokens[0]:
            await ctx.send("You have accepted the duel!")
    except asyncio.TimeoutError:
        await ctx.send("{1} appears to be absent. Coward.".format(ctx, duelee))
        return
    await ctx.send(
        "The duel has begun. The three attacks are 'critical strike', 'power attack', and 'flurry'. "
        "You can hit someone from the 'left' or the 'right', or just not pick a direction. "
        "You can also 'die'."
    )
    await ctx.send(
        "Critical strikes cannot be parried. "
        "Power attacks cannot be parried or blocked. "
        "Flurries cannot be blocked or dodged effectively."
    )
    #Slightly more in-depth explanation:
    #Critical strikes are blocked from the same direction they came in.
    #Attempting to roll in any direction other than the opposite of the incoming attack results in a hit.
    #Critical strikes cannot be parried, like, at all.
    #Flurries must be parried from the same direction. They can be dodged for reduced damage. They cannot be blocked.
    #Power attacks cannot be blocked or parried and MUST be dodged, to the opposite of the incoming direction.
    #Dodges have to go in the opposite direction or they fail.
    #Attack / defense checks based on incoming messages
    def attack_check(m, a):
        return m.author == a and bool(filter_tokens(m, attacks))
    def defense_check(m, a):
        return m.author == a and bool(filter_tokens(m, defenses))
    atk_time = 5 # Reaction time for players in seconds, set to 10 for demo, 5 during actual play
    attacks = ("critical strike", "flurry", "power attack", "die")
    defenses = ("parry", "dodge", "block", "die")
    dirs = ("left", "right")
    while True: # External infinite loop.
        for actor1, actor2, stats1, stats2 in ((duelee, dueler, player1, player2), (dueler, duelee, player2, player1)): # Turn order loop.
            if not(player2.life() and player1.life()): # Check if either player died during any turn.
                await ctx.send("{1} wins!".format(ctx, duelee if player1.life() else dueler))
                return
            await ctx.send("It's {1}'s turn to attack.".format(ctx, actor1))
            try:
                a1_msg = await bot.wait_for("message", timeout=20, check=lambda m: attack_check(m, actor1))
            except asyncio.TimeoutError:
                await ctx.send("{1} does nothing.".format(ctx, actor1))
                continue
            attack_tokens = filter_tokens(a1_msg, attacks)
            attack_dirs = filter_tokens(a1_msg, dirs)
            if len(attack_tokens) > 1 or len(attack_dirs) > 1:
                await ctx.send("{1} has wasted too much time on indecisive action and got confused!".format(ctx, actor1))
                continue
            attack_token = attack_tokens[0]
            attack_dir = attack_dirs[0] if attack_dirs else "top"
            if "die" == attack_token:
                await ctx.send("{1} screams that {2} will never understand their pain, then slits their wrists!".format(ctx, actor1, actor2))
                stats1.take_hit(100) # It's no surprise the emo movement failed, no surprise at all.
                continue
            await ctx.send("{1} throws out a {2} from the {3}!".format(ctx, actor1, attack_token, attack_dir))
            try:
                a2_msg = await bot.wait_for("message", timeout=atk_time, check=lambda m: defense_check(m, actor2))
            except asyncio.TimeoutError:
                # No defense in time: full damage for the attack type
                # (crit 20, flurry 15, power 10).
                await ctx.send("{1} doesn't move fast enough, and gets hit!".format(ctx, actor2))
                stats2.take_hit((20, 15, 10)[attacks.index(attack_token)])
                await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
                continue
            defense_tokens = filter_tokens(a2_msg, defenses)
            defense_dirs = filter_tokens(a2_msg, dirs)
            if len(defense_tokens) > 1 or len(defense_dirs) > 1:
                await ctx.send("{1} doesn't get their act together fast enough and gets hit!".format(ctx, actor2))
                stats2.take_hit((20, 15, 10)[attacks.index(attack_token)])
                # BUG FIX: was player2.health(), which reported the wrong
                # player's HP on alternating turns.
                await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
                continue
            defense_token = defense_tokens[0]
            defense_dir = defense_dirs[0] if defense_dirs else "top"
            if "die" == defense_token:
                await ctx.send("{1} accepts their fate and allows the blow to crush their skull!".format(ctx, actor2))
                stats2.take_hit(100)
                continue
            # A whole bunch of if/elif/else chains. Asyncio REALLY does not like when you try to call outside functions.
            # CRITICAL STRIKE:
            if "critical strike" == attack_token:
                if "left" == attack_dir:
                    if "block" == defense_token:
                        if "left" == defense_dir:
                            await ctx.send("{1} blocks the strike.".format(ctx, actor2))
                        else:
                            await ctx.send("{1} tries to block, but misses the direction of the blow!".format(ctx, actor2))
                            # BUG FIX: the mirrored right-hand branch applies 20
                            # damage here; the left-hand branch applied none.
                            stats2.take_hit(20)
                            await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
                    elif "parry" == defense_token:
                        await ctx.send("{1} attempts to parry, but the blow is too precisely aimed!".format(ctx, actor2))
                        stats2.take_hit(20)
                        await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
                    elif "dodge" == defense_token:
                        if "left" == defense_dir:
                            await ctx.send("{1} tries to roll out of the way, but rolls straight into the blow!".format(ctx, actor2))
                            stats2.take_hit(40)
                            await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
                        elif "right" == defense_dir: # BUG FIX: was defense_token, so this branch was unreachable
                            await ctx.send("{1} dodges the blow.".format(ctx, actor2))
                        else:
                            await ctx.send("{1} misses the dodge.".format(ctx, actor2))
                            stats2.take_hit(20)
                            await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
                elif "right" == attack_dir:
                    if "block" == defense_token:
                        if "right" == defense_dir:
                            await ctx.send("{1} blocks the strike.".format(ctx, actor2))
                        else:
                            await ctx.send("{1} tries to block, but misses the direction of the blow!".format(ctx, actor2))
                            stats2.take_hit(20)
                            await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
                    elif "parry" == defense_token:
                        await ctx.send("{1} attempts to parry, but the blow is too precisely aimed!".format(ctx, actor2))
                        stats2.take_hit(20)
                        await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
                    elif "dodge" == defense_token:
                        if "right" == defense_dir:
                            await ctx.send("{1} tries to roll out of the way, but rolls straight into the blow!".format(ctx, actor2))
                            stats2.take_hit(40)
                            await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
                        elif "left" == defense_dir:
                            await ctx.send("{1} dodges the blow.".format(ctx, actor2))
                        else:
                            await ctx.send("{1} misses the dodge.".format(ctx, actor2))
                            stats2.take_hit(20)
                            await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
                else:
                    # Directionless ("top") critical strike.
                    if "block" == defense_token:
                        if defense_dir != "top":
                            await ctx.send("{1} fails to block the central strike!".format(ctx, actor2))
                            stats2.take_hit(20)
                            await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
                        else:
                            await ctx.send("{1} blocks the strike.".format(ctx, actor2))
                    elif "parry" == defense_token:
                        await ctx.send("{1} attempts to parry, but the blow is too precisely aimed!".format(ctx, actor2))
                        stats2.take_hit(20)
                        await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
                    elif "dodge" == defense_token:
                        if defense_dir != "top":
                            await ctx.send("{1} tries to roll, but gets slapped anyway!".format(ctx, actor2))
                            stats2.take_hit(20)
                            await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
                        else:
                            await ctx.send("{1} dodges the blow.".format(ctx, actor2))
            #All critical strike maneuvers handled.
            #FLURRY:
            if "flurry" == attack_token:
                if "left" == attack_dir:
                    if "block" == defense_token:
                        await ctx.send("{1} attempts to block the blows, but there's just too many!".format(ctx, actor2))
                        stats2.take_hit(10)
                        await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
                    elif "parry" == defense_token:
                        if "left" == defense_dir:
                            await ctx.send("{1} easily parries the attacks, redirecting them onto {2}!".format(ctx, actor2, actor1))
                            stats1.take_hit(10)
                            await ctx.send("{1} 's health is {2}.".format(ctx, actor1, stats1.health()))
                        else:
                            await ctx.send("{1} tries to parry, but misjudges the direction and gets hit!".format(ctx, actor2))
                            stats2.take_hit(15)
                            await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
                    elif "dodge" == defense_token:
                        if "left" == defense_dir:
                            await ctx.send("{1} tries to roll out of the way, but rolls straight into the blows!".format(ctx, actor2))
                            stats2.take_hit(20)
                            await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
                        elif "right" == defense_dir:
                            await ctx.send("{1} dodges most of the blows, but takes one across the back!".format(ctx, actor2))
                            stats2.take_hit(5)
                            await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
                        else:
                            await ctx.send("{1} misses the dodge.".format(ctx, actor2))
                            stats2.take_hit(10)
                            await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
                elif "right" == attack_dir:
                    if "block" == defense_token:
                        await ctx.send("{1} attempts to block the blows, but there's just too many!".format(ctx, actor2))
                        stats2.take_hit(10)
                        await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
                    elif "parry" == defense_token:
                        if "right" == defense_dir:
                            await ctx.send("{1} easily parries the attacks, redirecting them onto {2}!".format(ctx, actor2, actor1))
                            stats1.take_hit(10)
                            await ctx.send("{1} 's health is {2}.".format(ctx, actor1, stats1.health()))
                        else:
                            await ctx.send("{1} tries to parry, but misjudges the direction and gets hit!".format(ctx, actor2))
                            stats2.take_hit(15)
                            await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
                    elif "dodge" == defense_token:
                        if "right" == defense_dir:
                            await ctx.send("{1} tries to roll out of the way, but rolls straight into the blows!".format(ctx, actor2))
                            stats2.take_hit(20)
                            await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
                        elif "left" == defense_dir:
                            await ctx.send("{1} dodges most of the blows, but takes one across the back!".format(ctx, actor2))
                            stats2.take_hit(5)
                            await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
                        else:
                            await ctx.send("{1} misses the dodge.".format(ctx, actor2))
                            stats2.take_hit(10)
                            await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
                else:
                    # Directionless ("top") flurry.
                    if "block" == defense_token:
                        await ctx.send("{1} attempts to block the blows, but there's just too many!".format(ctx, actor2))
                        stats2.take_hit(10)
                        await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
                    elif "parry" == defense_token:
                        if defense_dir != "top":
                            await ctx.send("{1} tries to parry, but misjudges the direction and gets hit!".format(ctx, actor2))
                            stats2.take_hit(5)
                            await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
                        else:
                            await ctx.send("{1} easily parries the attacks, redirecting them onto {2}!".format(ctx, actor2, actor1))
                            stats1.take_hit(10)
                            await ctx.send("{1} 's health is {2}.".format(ctx, actor1, stats1.health()))
                    elif "dodge" == defense_token:
                        if defense_dir != "top":
                            await ctx.send("{1} tries to roll, but gets slapped anyway!".format(ctx, actor2))
                            stats2.take_hit(15)
                            await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
                        else:
                            await ctx.send("{1} dodges most of the blows, but takes one hit anyway!".format(ctx, actor2))
                            stats2.take_hit(5)
                            await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
            #Flurry maneuvers handled.
            #POWER ATTACK:
            if "power attack" == attack_token:
                if "left" == attack_dir:
                    if "block" == defense_token:
                        await ctx.send("{1} tries to block, but the blow is too much!".format(ctx, actor2))
                        stats2.take_hit(10)
                        # Consistency fix: report health after the hit like every other branch.
                        await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
                    elif "parry" == defense_token:
                        await ctx.send("{1} attempts to parry, but the blow is too much!".format(ctx, actor2))
                        stats2.take_hit(10)
                        await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
                    elif "dodge" == defense_token:
                        if "left" == defense_dir:
                            await ctx.send("{1} tries to roll out of the way, but rolls straight into the blow!".format(ctx, actor2))
                            stats2.take_hit(20)
                            await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
                        elif "right" == defense_dir:
                            await ctx.send("{1} dodges the blow.".format(ctx, actor2))
                        else:
                            await ctx.send("{1} misses the dodge.".format(ctx, actor2))
                            stats2.take_hit(10)
                            await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
                elif "right" == attack_dir:
                    if "block" == defense_token:
                        await ctx.send("{1} tries to block, but the blow is too much!".format(ctx, actor2))
                        stats2.take_hit(10)
                        # Consistency fix: report health after the hit like every other branch.
                        await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
                    elif "parry" == defense_token:
                        await ctx.send("{1} attempts to parry, but the blow is too much!".format(ctx, actor2))
                        stats2.take_hit(10)
                        await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
                    elif "dodge" == defense_token:
                        if "right" == defense_dir:
                            await ctx.send("{1} tries to roll out of the way, but rolls straight into the blow!".format(ctx, actor2))
                            stats2.take_hit(20)
                            await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
                        elif "left" == defense_dir:
                            await ctx.send("{1} dodges the blow.".format(ctx, actor2))
                        else:
                            await ctx.send("{1} misses the dodge.".format(ctx, actor2))
                            stats2.take_hit(10)
                            await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
                else:
                    # Directionless ("top") power attack.
                    if "block" == defense_token:
                        await ctx.send("{1} tries to block, but the blow is too much!".format(ctx, actor2))
                        stats2.take_hit(10)
                        await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
                    elif "parry" == defense_token:
                        await ctx.send("{1} attempts to parry, but the blow is too much!".format(ctx, actor2))
                        stats2.take_hit(10)
                        await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
                    elif "dodge" == defense_token:
                        if defense_dir != "top": # BUG FIX: bare `if defense_dir:` was always truthy ("top" default)
                            await ctx.send("{1} tries to roll, but gets slapped anyway!".format(ctx, actor2))
                            stats2.take_hit(10)
                            await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
                        else:
                            await ctx.send("{1} dodges the blow.".format(ctx, actor2))
            # Power attacks handled.
            # All attacks handled. Next player's attack.
#END DUEL
#END DUEL
if __name__ == '__main__':
bot.run(get_token())
| 48.788298
| 151
| 0.536556
| 5,673
| 45,861
| 4.26159
| 0.121805
| 0.053938
| 0.080907
| 0.051084
| 0.553069
| 0.517704
| 0.48362
| 0.452308
| 0.442422
| 0.432081
| 0
| 0.021031
| 0.349927
| 45,861
| 940
| 152
| 48.788298
| 0.78989
| 0.070256
| 0
| 0.496871
| 0
| 0.006258
| 0.209737
| 0.003454
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035044
| false
| 0.002503
| 0.015019
| 0.015019
| 0.093867
| 0.002503
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0e4e551502750847910375c9545b7c251613085
| 2,775
|
py
|
Python
|
ProyectoDAI/settings.py
|
javiergarridomellado/proyectodai
|
64944d10f543c3094630056906b5f101a73bdd7b
|
[
"Apache-2.0"
] | 1
|
2019-08-21T17:21:13.000Z
|
2019-08-21T17:21:13.000Z
|
ProyectoDAI/settings.py
|
javiergarridomellado/proyectodai
|
64944d10f543c3094630056906b5f101a73bdd7b
|
[
"Apache-2.0"
] | null | null | null |
ProyectoDAI/settings.py
|
javiergarridomellado/proyectodai
|
64944d10f543c3094630056906b5f101a73bdd7b
|
[
"Apache-2.0"
] | null | null | null |
"""
Django settings for TusPachangas project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
import django
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
TEMPLATE_PATH = os.path.join(BASE_DIR, 'templates')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '26*swq94+rg+-2tc2es6j&d#&(g4@@xe7vh1hu1)6*z^v@pd2q'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'registration', #add in the registration package
'rest_framework',
'restaurante',
'easy_maps',
)
if django.VERSION < (1, 7):
INSTALLED_APPS += (
'south',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'ProyectoDAI.urls'
WSGI_APPLICATION = 'ProyectoDAI.wsgi.application'
TEMPLATE_DIRS = (TEMPLATE_PATH,)
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
ON_HEROKU = os.environ.get('PORT')
if ON_HEROKU:
DATABASE_URL='postgres://kytzveedsclzaf:eIJAAuElYvSxPK-vmSdXG9Hjv8@ec2-107-21-219-235.compute-1.amazonaws.com:5432/df9sfr7a9b8vjf'
DATABASES = {'default': dj_database_url.config(default=DATABASE_URL)}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_PATH = os.path.join(BASE_DIR,'static')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
STATIC_PATH,
)
#Media
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
| 25.458716
| 131
| 0.735856
| 353
| 2,775
| 5.665722
| 0.453258
| 0.065
| 0.066
| 0.075
| 0.16
| 0.1345
| 0.1135
| 0.1135
| 0.04
| 0
| 0
| 0.023977
| 0.128288
| 2,775
| 108
| 132
| 25.694444
| 0.802811
| 0.312793
| 0
| 0
| 0
| 0.033898
| 0.45778
| 0.358471
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.050847
| 0
| 0.050847
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0e63ae87cd65917f81764b65113935c80fb3646
| 1,150
|
py
|
Python
|
util.py
|
monokim/CheesyBullets
|
eeb5a79a69936701ff7962b846e6310f7df91cb0
|
[
"BSD-3-Clause"
] | 1
|
2021-09-28T01:02:31.000Z
|
2021-09-28T01:02:31.000Z
|
util.py
|
monokim/CheesyBullets
|
eeb5a79a69936701ff7962b846e6310f7df91cb0
|
[
"BSD-3-Clause"
] | null | null | null |
util.py
|
monokim/CheesyBullets
|
eeb5a79a69936701ff7962b846e6310f7df91cb0
|
[
"BSD-3-Clause"
] | 1
|
2021-09-28T01:02:32.000Z
|
2021-09-28T01:02:32.000Z
|
import time
import pyautogui
import win32gui
def get_screen_rect(caption='CheesyBullets'):
    """Return the rectangle of the window titled *caption* as
    (left, top, width, height).

    BUG FIX: the original computed this width/height tuple into
    `screen_rect` but then returned the raw (left, top, right, bottom)
    rect, leaving the computation dead. If any caller relied on the raw
    rect, adapt it to the documented shape.
    """
    hwnd = win32gui.FindWindow(None, caption)
    left, top, right, bottom = win32gui.GetWindowRect(hwnd)
    return (left, top, right - left, bottom - top)
class Timer():
    """Registry of named wall-clock timers.

    Entries in `times` are mutable [start_time, name] pairs (kept as lists
    for backward compatibility with code that inspects `times` directly).
    """
    def __init__(self):
        self.times = []  # list of [start_time, name] pairs
        self.cnt = 0     # kept for compatibility; not used by these methods
    def set_timer(self, name="timer"):
        """Start (or restart) the timer called *name*."""
        # Early return replaces the original's `flag == False` bookkeeping.
        for entry in self.times:
            if entry[1] == name:
                entry[0] = time.time()
                return
        self.times.append([time.time(), name])
    def print_time(self, name="timer"):
        """Print the seconds elapsed on *name*; raise if it was never set."""
        for start, timer_name in self.times:
            if timer_name == name:
                print(name + " takes (%.5f)s" % (time.time() - start))
                return
        raise Exception("There is no timer")
    def delete_timer(self, name = None):
        """Remove the timer called *name*; silently no-op when absent."""
        for i, entry in enumerate(self.times):
            if entry[1] == name:
                self.times.pop(i)
                return
| 26.744186
| 74
| 0.508696
| 144
| 1,150
| 3.993056
| 0.340278
| 0.093913
| 0.026087
| 0.036522
| 0.276522
| 0.276522
| 0.276522
| 0.276522
| 0.276522
| 0.276522
| 0
| 0.025921
| 0.362609
| 1,150
| 42
| 75
| 27.380952
| 0.758527
| 0
| 0
| 0.428571
| 0
| 0
| 0.046957
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.085714
| 0
| 0.285714
| 0.057143
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0e7c60f7d5d1f6c613e5dbae742091e0b76d703
| 2,301
|
py
|
Python
|
Gelatin/parser/Parser.py
|
Etherbay/Gelatin
|
d2afa85a48034d6ee34580e49e16542f31ad208e
|
[
"MIT"
] | 107
|
2015-01-26T21:37:57.000Z
|
2022-02-25T16:28:44.000Z
|
Gelatin/parser/Parser.py
|
Etherbay/Gelatin
|
d2afa85a48034d6ee34580e49e16542f31ad208e
|
[
"MIT"
] | 20
|
2015-11-23T14:09:37.000Z
|
2021-02-11T17:57:24.000Z
|
Gelatin/parser/Parser.py
|
Etherbay/Gelatin
|
d2afa85a48034d6ee34580e49e16542f31ad208e
|
[
"MIT"
] | 34
|
2015-01-05T18:47:34.000Z
|
2020-12-13T06:47:26.000Z
|
# Copyright (c) 2010-2017 Samuel Abels
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import codecs
from simpleparse import parser
from .Newline import Newline
from .Indent import Indent
from .Dedent import Dedent
from .util import error
# Locate the EBNF grammar shipped alongside this module and read it once at
# import time; the grammar text is handed to the simpleparse Parser below.
_ebnf_file = os.path.join(os.path.dirname(__file__), 'syntax.ebnf')
with open(_ebnf_file) as _thefile:
    _ebnf = _thefile.read()
class Parser(parser.Parser):
    """EBNF-driven parser with offside-rule (indentation) support.

    Wires prebuilt NEWLINE/INDENT/DEDENT tables into the simpleparse
    grammar loaded from ``syntax.ebnf``.
    """

    def __init__(self):
        # Current indentation depth, shared with the offside-rule helpers.
        self.indent = 0
        prebuilt_tables = (
            ("NEWLINE", Newline(self).table()),
            ("INDENT", Indent(self).table()),
            ("DEDENT", Dedent(self).table()),
        )
        parser.Parser.__init__(self, _ebnf, 'root', prebuilts=prebuilt_tables)

    def parse_string(self, input, compiler):
        """Parse the given text, feeding match events into *compiler*.

        Reports an error if the text is not fully consumed or if the
        mandatory "input" grammar was never defined; otherwise returns
        the compiler's context object.
        """
        compiler.reset()
        _, _, consumed = parser.Parser.parse(self, input, processor=compiler)
        if consumed < len(input):
            error(input, consumed)
        if 'input' not in compiler.context.grammars:
            error(input, consumed, 'Required grammar "input" not found.')
        return compiler.context

    def parse(self, filename, compiler, encoding='utf8'):
        """Read *filename* using *encoding* and delegate to :meth:`parse_string`."""
        with codecs.open(filename, 'r', encoding=encoding) as handle:
            text = handle.read()
        return self.parse_string(text, compiler)
| 40.368421
| 80
| 0.704911
| 313
| 2,301
| 5.102236
| 0.460064
| 0.055103
| 0.016281
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005488
| 0.20817
| 2,301
| 56
| 81
| 41.089286
| 0.871021
| 0.459365
| 0
| 0
| 0
| 0
| 0.064542
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.096774
| false
| 0
| 0.225806
| 0
| 0.419355
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0e93ce753097ffb23fb7c437281488fb715e819
| 308
|
py
|
Python
|
C03-Unit-Testing/21-C03V15/utils.py
|
dirchev/Python-101-Forever-1
|
13c3bb182747aae244ae6f9fd6f79c8223f3e9a6
|
[
"MIT"
] | 59
|
2021-02-05T10:40:08.000Z
|
2022-01-26T08:30:43.000Z
|
C03-Unit-Testing/21-C03V15/utils.py
|
dirchev/Python-101-Forever-1
|
13c3bb182747aae244ae6f9fd6f79c8223f3e9a6
|
[
"MIT"
] | null | null | null |
C03-Unit-Testing/21-C03V15/utils.py
|
dirchev/Python-101-Forever-1
|
13c3bb182747aae244ae6f9fd6f79c8223f3e9a6
|
[
"MIT"
] | 10
|
2021-02-13T16:50:26.000Z
|
2022-03-20T12:17:00.000Z
|
BIG_CONSTANT = "YES"


def group_by(xs, grouper):
    """Group the items of *xs* by the key computed by *grouper*.

    Args:
        xs: iterable of items to group.
        grouper: callable mapping an item to its (hashable) group key.

    Returns:
        dict mapping each key to the list of its items, in input order.
    """
    groups = {}
    for x in xs:
        # setdefault creates the bucket on first sight of a key, replacing
        # the manual "if key not in dict" membership test.
        groups.setdefault(grouper(x), []).append(x)
    return groups


print(group_by([1, 2, 3, 4, 5, 6], lambda x: "even" if x % 2 == 0 else "odd"))
| 16.210526
| 78
| 0.529221
| 47
| 308
| 3.404255
| 0.595745
| 0.0875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.038462
| 0.324675
| 308
| 18
| 79
| 17.111111
| 0.730769
| 0
| 0
| 0
| 0
| 0
| 0.032468
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0
| 0
| 0.2
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0eb718b5df49f7654c2a9064eafc5186c980c9e
| 3,721
|
py
|
Python
|
pipeline/test_sftp_to_s3.py
|
streamsets/datacollector-tests-external
|
6f255b5e7496deeef333b57a5e9df4911ba3ef00
|
[
"Apache-2.0"
] | 1
|
2020-04-14T03:01:51.000Z
|
2020-04-14T03:01:51.000Z
|
pipeline/test_sftp_to_s3.py
|
streamsets/datacollector-tests-external
|
6f255b5e7496deeef333b57a5e9df4911ba3ef00
|
[
"Apache-2.0"
] | null | null | null |
pipeline/test_sftp_to_s3.py
|
streamsets/datacollector-tests-external
|
6f255b5e7496deeef333b57a5e9df4911ba3ef00
|
[
"Apache-2.0"
] | 1
|
2019-09-14T08:30:23.000Z
|
2019-09-14T08:30:23.000Z
|
# Copyright 2019 StreamSets Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import json
import logging
import os
import string
import time
from streamsets.sdk.models import Configuration
from streamsets.testframework.markers import aws, sftp, sdc_min_version
from streamsets.testframework.utils import get_random_string
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# Sandbox prefix for S3 bucket
S3_BUCKET_PREFIX = 'sftp_upload'
@sdc_min_version('3.8.2')
@sftp
@aws('s3')
def test_sftp_origin_whole_file_to_s3(sdc_builder, sdc_executor, sftp, aws):
    """
    This is a test for SDC-11273. First, it creates a large (~6MB) file and puts it on the SFTP server.
    Then, it creates a pipeline with SFTP origin and S3 destination, with whole file format, and runs
    until the single record (file) is complete. Then, it asserts the S3 bucket contents are correct.
    It passes only if the new option ("Disable Read Ahead Stream") is enabled.
    """
    # Generate a ~6MB random payload and upload it to the SFTP server.
    sftp_file_name = get_random_string(string.ascii_letters, 10) + '.txt'
    raw_text_data = get_random_string(string.printable, 6000000)
    sftp.put_string(os.path.join(sftp.path, sftp_file_name), raw_text_data)
    s3_bucket = aws.s3_bucket_name
    s3_key = f'{S3_BUCKET_PREFIX}/{sftp_file_name}'
    # Build the pipeline: SFTP origin (whole-file format) -> S3 destination.
    builder = sdc_builder.get_pipeline_builder()
    sftp_ftp_client = builder.add_stage(name='com_streamsets_pipeline_stage_origin_remote_RemoteDownloadDSource')
    sftp_ftp_client.file_name_pattern = sftp_file_name
    sftp_ftp_client.data_format = 'WHOLE_FILE'
    # This is the option under test (SDC-11273); the test only passes with it enabled.
    sftp_ftp_client.set_attributes(disable_read_ahead_stream=True)
    s3_destination = builder.add_stage('Amazon S3', type='destination')
    s3_destination.file_name_expression = "${record:value('/fileInfo/filename')}"
    s3_destination.set_attributes(bucket=s3_bucket, data_format='WHOLE_FILE', partition_prefix=s3_key)
    sftp_ftp_client >> s3_destination
    sftp_to_s3_pipeline = builder.build(title='SFTP to S3 Whole File').configure_for_environment(aws).configure_for_environment(sftp)
    sdc_executor.add_pipeline(sftp_to_s3_pipeline)
    client = aws.s3
    try:
        # start pipeline and run for one record (the file)
        sdc_executor.start_pipeline(sftp_to_s3_pipeline).wait_for_pipeline_output_records_count(1)
        sdc_executor.stop_pipeline(sftp_to_s3_pipeline)
        # assert record count to S3 the size of the objects put
        list_s3_objs = client.list_objects_v2(Bucket=s3_bucket, Prefix=s3_key)
        assert len(list_s3_objs['Contents']) == 1
        # read data from S3 to assert contents
        s3_contents = [client.get_object(Bucket=s3_bucket, Key=s3_content['Key'])['Body'].read().decode().strip()
                       for s3_content in list_s3_objs['Contents']]
        # compare the S3 bucket contents against the original whole file contents
        assert s3_contents[0] == raw_text_data
    finally:
        # Always clean the uploaded objects out of the bucket, even on failure.
        delete_keys = {'Objects': [{'Key': k['Key']}
                                   for k in client.list_objects_v2(Bucket=s3_bucket, Prefix=s3_key)['Contents']]}
        client.delete_objects(Bucket=s3_bucket, Delete=delete_keys)
| 42.770115
| 133
| 0.742274
| 546
| 3,721
| 4.804029
| 0.360806
| 0.036599
| 0.032024
| 0.0244
| 0.060999
| 0.033549
| 0.033549
| 0.033549
| 0.033549
| 0.033549
| 0
| 0.022772
| 0.173878
| 3,721
| 86
| 134
| 43.267442
| 0.830514
| 0.318463
| 0
| 0
| 0
| 0
| 0.106753
| 0.055398
| 0
| 0
| 0
| 0
| 0.044444
| 1
| 0.022222
| false
| 0
| 0.2
| 0
| 0.222222
| 0.022222
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0ed1ce1a37b62c8113cea099de0a407123519d8
| 950
|
py
|
Python
|
terra/tests/__init__.py
|
NoahRJohnson/terra
|
131954ee42fb5905ceff35101e34d89c5eb6de6c
|
[
"MIT"
] | null | null | null |
terra/tests/__init__.py
|
NoahRJohnson/terra
|
131954ee42fb5905ceff35101e34d89c5eb6de6c
|
[
"MIT"
] | null | null | null |
terra/tests/__init__.py
|
NoahRJohnson/terra
|
131954ee42fb5905ceff35101e34d89c5eb6de6c
|
[
"MIT"
] | null | null | null |
import os
# Use this as a package level setup
def load_tests(loader, standard_tests, pattern):
    """Package-level ``load_tests`` protocol hook for unittest discovery.

    Discovers the package's tests twice: once with the default ``test``
    method prefix, then again with the ``last`` prefix so that those
    methods are appended after all other tests.
    """
    # TERRA_UNITTEST=1 is expected to be set when running the suite; warn
    # (but continue) if it is missing.
    if os.environ.get('TERRA_UNITTEST', None) != "1":
        print('WARNING: Running terra tests without setting TERRA_UNITTEST will '
              'result in side effects such as extraneouse log files being '
              'generated')
    this_dir = os.path.dirname(__file__)
    package_tests = loader.discover(start_dir=this_dir, pattern=pattern)
    standard_tests.addTests(package_tests)
    # Run this test last, to make sure none of the other tests degraded the
    # integrity of terra. A configured terra can cause unittests to interfere
    # with each other.
    loader.testMethodPrefix = 'last'
    package_tests = loader.discover(start_dir=this_dir, pattern=pattern)
    standard_tests.addTests(package_tests)
    # This does not check THIS file for 'last', I can't figure that out, cause
    # it is "discovered" before load_tests is ever called
    return standard_tests
| 38
| 77
| 0.749474
| 139
| 950
| 4.971223
| 0.57554
| 0.075253
| 0.052098
| 0.075253
| 0.254703
| 0.254703
| 0.254703
| 0.254703
| 0.254703
| 0.254703
| 0
| 0.001279
| 0.176842
| 950
| 24
| 78
| 39.583333
| 0.882353
| 0.332632
| 0
| 0.307692
| 0
| 0
| 0.242424
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.076923
| 0
| 0.230769
| 0.076923
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0edc031d7ad458b3382ce23bb1ea18d6941bcf3
| 392
|
py
|
Python
|
icons/svg2png.py
|
benburrill/formiko
|
86630506c537f9517666d9b0d5b2a905e7385b01
|
[
"BSD-3-Clause"
] | 116
|
2016-07-13T00:35:35.000Z
|
2022-02-22T15:46:44.000Z
|
icons/svg2png.py
|
benburrill/formiko
|
86630506c537f9517666d9b0d5b2a905e7385b01
|
[
"BSD-3-Clause"
] | 32
|
2018-01-23T13:50:27.000Z
|
2022-03-30T05:34:56.000Z
|
icons/svg2png.py
|
benburrill/formiko
|
86630506c537f9517666d9b0d5b2a905e7385b01
|
[
"BSD-3-Clause"
] | 8
|
2018-12-21T13:45:36.000Z
|
2021-11-07T22:40:05.000Z
|
# -*- coding: utf-8 -*-
from gi.repository.GdkPixbuf import Pixbuf
from os import makedirs
def main():
    """Rasterise formiko.svg into PNG icons at every standard icon size."""
    for side in (16, 22, 24, 32, 48, 64, 128, 256, 512):
        # Scale the SVG to a square bitmap of the requested edge length.
        pixbuf = Pixbuf.new_from_file_at_scale("formiko.svg", side, side, True)
        makedirs("%dx%d" % (side, side))
        pixbuf.savev("%dx%d/formiko.png" % (side, side), "png", [], [])


if __name__ == "__main__":
    main()
| 24.5
| 77
| 0.591837
| 57
| 392
| 3.859649
| 0.684211
| 0.109091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071895
| 0.219388
| 392
| 15
| 78
| 26.133333
| 0.647059
| 0.053571
| 0
| 0
| 0
| 0
| 0.119241
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.222222
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0ef4cd1fe213247f6cb053cb5a43dff995c8928
| 5,778
|
py
|
Python
|
etna/transforms/decomposition/trend.py
|
tinkoff-ai/etna-ts
|
ded5161ed49f5c2697778825f899842ee30c6c61
|
[
"Apache-2.0"
] | 96
|
2021-09-05T06:29:34.000Z
|
2021-11-07T15:22:54.000Z
|
etna/transforms/decomposition/trend.py
|
geopars/etna
|
ded5161ed49f5c2697778825f899842ee30c6c61
|
[
"Apache-2.0"
] | 188
|
2021-09-06T15:59:58.000Z
|
2021-11-17T09:34:16.000Z
|
etna/transforms/decomposition/trend.py
|
geopars/etna
|
ded5161ed49f5c2697778825f899842ee30c6c61
|
[
"Apache-2.0"
] | 8
|
2021-09-06T09:18:35.000Z
|
2021-11-11T21:18:39.000Z
|
from typing import Optional
import pandas as pd
from ruptures import Binseg
from ruptures.base import BaseCost
from sklearn.linear_model import LinearRegression
from etna.transforms.base import PerSegmentWrapper
from etna.transforms.decomposition.change_points_trend import BaseEstimator
from etna.transforms.decomposition.change_points_trend import TDetrendModel
from etna.transforms.decomposition.change_points_trend import _OneSegmentChangePointsTrendTransform
class _OneSegmentTrendTransform(_OneSegmentChangePointsTrendTransform):
    """Single-segment transform that appends the estimated trend as a feature column."""

    def __init__(
        self,
        in_column: str,
        out_column: str,
        change_point_model: BaseEstimator,
        detrend_model: TDetrendModel,
        **change_point_model_predict_params,
    ):
        """Init _OneSegmentTrendTransform.

        Parameters
        ----------
        in_column:
            name of column to apply transform to
        out_column:
            name of added column
        change_point_model:
            model to get trend change points
        detrend_model:
            model to get trend from data
        change_point_model_predict_params:
            params for change_point_model predict method
        """
        self.out_column = out_column
        super().__init__(
            in_column=in_column,
            change_point_model=change_point_model,
            detrend_model=detrend_model,
            **change_point_model_predict_params,
        )

    def transform(self, df: pd.DataFrame) -> pd.DataFrame:
        """Attach the per-interval trend estimate as a new column of ``df``.

        Parameters
        ----------
        df:
            data to get trend from

        Returns
        -------
        pd.DataFrame:
            df with trend column
        """
        # Clear pandas' copy-tracking flag so the in-place column assignment
        # below does not trigger a SettingWithCopy warning.
        df._is_copy = False
        df[self.out_column] = self._predict_per_interval_model(series=df[self.in_column])
        return df

    def inverse_transform(self, df: pd.DataFrame) -> pd.DataFrame:
        """Inverse transform is a no-op: the added trend column is kept as-is.

        Parameters
        ----------
        df:
            one segment dataframe

        Returns
        -------
        pd.DataFrame:
            given dataframe
        """
        return df
class _TrendTransform(PerSegmentWrapper):
    """_TrendTransform adds trend as a feature. Creates column 'regressor_<in_column>_trend'."""

    def __init__(
        self,
        in_column: str,
        out_column: str,
        change_point_model: BaseEstimator,
        detrend_model: TDetrendModel,
        **change_point_model_predict_params,
    ):
        """Init _TrendTransform.

        Parameters
        ----------
        in_column:
            name of column to apply transform to
        out_column:
            name of added column
        change_point_model:
            model to get trend change points
        detrend_model:
            model to get trend in data
        change_point_model_predict_params:
            params for change_point_model predict method
        """
        # Build the single-segment worker and hand it to the per-segment wrapper.
        segment_transform = _OneSegmentTrendTransform(
            in_column=in_column,
            out_column=out_column,
            change_point_model=change_point_model,
            detrend_model=detrend_model,
            **change_point_model_predict_params,
        )
        super().__init__(transform=segment_transform)
class TrendTransform(_TrendTransform):
    """TrendTransform adds trend as a feature.

    TrendTransform uses Binseg model as a change point detection model in _TrendTransform.
    """

    def __init__(
        self,
        in_column: str,
        out_column: Optional[str] = None,
        detrend_model: Optional[TDetrendModel] = None,
        model: str = "ar",
        custom_cost: Optional[BaseCost] = None,
        min_size: int = 2,
        jump: int = 1,
        n_bkps: int = 5,
        pen: Optional[float] = None,
        epsilon: Optional[float] = None,
    ):
        """Init TrendTransform.

        Parameters
        ----------
        in_column:
            name of column to apply transform to
        out_column:
            name of added column. Don't forget to add regressor prefix if necessary.
            If not given, use 'regressor_{self.__repr__()}'
        detrend_model:
            model to get trend in data; if None, a fresh ``LinearRegression()`` is created
        model:
            binseg segment model, ["l1", "l2", "rbf",...]. Not used if 'custom_cost' is not None.
        custom_cost:
            binseg custom cost function
        min_size:
            minimum segment length necessary to decide it is a stable trend segment
        jump:
            jump value can speed up computations: if jump==k, the algo will use every k-th value for change points search.
        n_bkps:
            number of change points to find
        pen:
            penalty value (>0)
        epsilon:
            reconstruction budget (>0)
        """
        self.in_column = in_column
        self.out_column = out_column
        # Bug fix: the previous signature used `detrend_model=LinearRegression()`,
        # a mutable default evaluated once at import time and therefore shared by
        # every TrendTransform built without an explicit model. Instantiate a
        # fresh default per instance instead (backward-compatible behavior).
        self.detrend_model = LinearRegression() if detrend_model is None else detrend_model
        self.model = model
        self.custom_cost = custom_cost
        self.min_size = min_size
        self.jump = jump
        self.n_bkps = n_bkps
        self.pen = pen
        self.epsilon = epsilon
        super().__init__(
            in_column=self.in_column,
            out_column=self.out_column if self.out_column is not None else f"regressor_{self.__repr__()}",
            change_point_model=Binseg(
                model=self.model, custom_cost=self.custom_cost, min_size=self.min_size, jump=self.jump
            ),
            detrend_model=self.detrend_model,
            n_bkps=self.n_bkps,
            pen=self.pen,
            epsilon=self.epsilon,
        )
| 31.232432
| 122
| 0.606265
| 630
| 5,778
| 5.28254
| 0.206349
| 0.059495
| 0.081731
| 0.055288
| 0.417969
| 0.399038
| 0.379207
| 0.356971
| 0.288462
| 0.288462
| 0
| 0.001782
| 0.320007
| 5,778
| 184
| 123
| 31.402174
| 0.845253
| 0.338525
| 0
| 0.406977
| 0
| 0
| 0.008998
| 0.008377
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05814
| false
| 0
| 0.104651
| 0
| 0.22093
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0f004be552defdc3e85fe514fc0037369c084b9
| 4,027
|
py
|
Python
|
argopy/tests/test_fetchers_facade_index.py
|
schwehr/argopy
|
1b35d5cfb87b2f9ccd2ca45b9987a614edd30700
|
[
"Apache-2.0"
] | null | null | null |
argopy/tests/test_fetchers_facade_index.py
|
schwehr/argopy
|
1b35d5cfb87b2f9ccd2ca45b9987a614edd30700
|
[
"Apache-2.0"
] | null | null | null |
argopy/tests/test_fetchers_facade_index.py
|
schwehr/argopy
|
1b35d5cfb87b2f9ccd2ca45b9987a614edd30700
|
[
"Apache-2.0"
] | null | null | null |
import xarray as xr
import pytest
import warnings
import argopy
from argopy import IndexFetcher as ArgoIndexFetcher
from argopy.errors import InvalidFetcherAccessPoint, InvalidFetcher, ErddapServerError, DataNotFound
from . import (
AVAILABLE_INDEX_SOURCES,
requires_fetcher_index,
requires_connected_erddap_index,
requires_localftp_index,
requires_connection,
safe_to_server_errors
)
class Test_Facade:
    # Exercise the facade against the first registered index source.
    src = list(AVAILABLE_INDEX_SOURCES.keys())[0]

    def test_invalid_fetcher(self):
        """An unknown source name must raise InvalidFetcher on data access."""
        with pytest.raises(InvalidFetcher):
            ArgoIndexFetcher(src="invalid_fetcher").to_xarray()

    @requires_fetcher_index
    def test_invalid_accesspoint(self):
        """Fetching before an access point is selected must fail."""
        # Unknown access-point attribute:
        with pytest.raises(InvalidFetcherAccessPoint):
            ArgoIndexFetcher(src=self.src).invalid_accesspoint.to_xarray()
        # No access point selected at all:
        with pytest.raises(InvalidFetcherAccessPoint):
            ArgoIndexFetcher(src=self.src).to_xarray()

    @requires_fetcher_index
    def test_invalid_dataset(self):
        """An unsupported 'ds' keyword must raise ValueError at construction."""
        with pytest.raises(ValueError):
            ArgoIndexFetcher(src=self.src, ds='dummy_ds')
@requires_connection
@requires_fetcher_index
class Test_AllBackends:
    """ Test main API facade for all available index fetching backends """

    # Local path to the dummy GDAC ftp dataset bundled with the argopy tutorial.
    local_ftp = argopy.tutorial.open_dataset("localftp")[0]

    # todo Determine the list of output format to test
    # what else beyond .to_xarray() ?
    # Extra keyword options forwarded to every fetcher construction.
    fetcher_opts = {}

    # Define API entry point options to tests:
    # These should be available online and with the argopy-data dummy gdac ftp
    args = {}
    args["float"] = [[2901623], [6901929, 2901623]]
    args["region"] = [
        [-60, -40, 40.0, 60.0],
        [-60, -40, 40.0, 60.0, "2007-08-01", "2007-09-01"],
    ]
    args["profile"] = [[2901623, 2], [6901929, [5, 45]]]

    # Name-mangled helpers (double underscore) so unittest/pytest discovery
    # does not pick them up as tests directly.
    def __test_float(self, bk, **ftc_opts):
        """ Test float index fetching for a given backend """
        for arg in self.args["float"]:
            options = {**self.fetcher_opts, **ftc_opts}
            f = ArgoIndexFetcher(src=bk, **options).float(arg)
            assert isinstance(f.to_xarray(), xr.Dataset)

    def __test_profile(self, bk, **ftc_opts):
        """ Test profile index fetching for a given backend """
        for arg in self.args["profile"]:
            options = {**self.fetcher_opts, **ftc_opts}
            f = ArgoIndexFetcher(src=bk, **options).profile(*arg)
            assert isinstance(f.to_xarray(), xr.Dataset)

    def __test_region(self, bk, **ftc_opts):
        """ Test float index fetching for a given backend """
        for arg in self.args["region"]:
            options = {**self.fetcher_opts, **ftc_opts}
            f = ArgoIndexFetcher(src=bk, **options).region(arg)
            assert isinstance(f.to_xarray(), xr.Dataset)

    @pytest.mark.skip(reason="Waiting for https://github.com/euroargodev/argopy/issues/16")
    @requires_connected_erddap_index
    @safe_to_server_errors
    def test_float_erddap(self):
        self.__test_float("erddap")

    @requires_localftp_index
    def test_float_localftp(self):
        with argopy.set_options(local_ftp=self.local_ftp):
            self.__test_float("localftp", index_file="ar_index_global_prof.txt")

    @requires_localftp_index
    def test_profile_localftp(self):
        with argopy.set_options(local_ftp=self.local_ftp):
            self.__test_profile("localftp", index_file="ar_index_global_prof.txt")

    @pytest.mark.skip(reason="Waiting for https://github.com/euroargodev/argopy/issues/16")
    @requires_connected_erddap_index
    def test_region_erddap(self):
        self.__test_region("erddap")

    @requires_localftp_index
    def test_region_localftp(self):
        with argopy.set_options(local_ftp=self.local_ftp):
            self.__test_region("localftp", index_file="ar_index_global_prof.txt")
| 35.955357
| 100
| 0.674696
| 503
| 4,027
| 5.159046
| 0.256461
| 0.029672
| 0.027746
| 0.03237
| 0.536416
| 0.519075
| 0.45896
| 0.45896
| 0.350289
| 0.350289
| 0
| 0.025722
| 0.218028
| 4,027
| 111
| 101
| 36.279279
| 0.798349
| 0.132108
| 0
| 0.308642
| 0
| 0
| 0.090358
| 0.020785
| 0
| 0
| 0
| 0.009009
| 0.037037
| 1
| 0.135802
| false
| 0
| 0.08642
| 0
| 0.296296
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0f00c9b59cb978159364c5072c66c216bf67f98
| 968
|
py
|
Python
|
custom_components/acthor/config_flow.py
|
jatty/hass-acthor
|
9d5aaed3f01e9288fef031b47b0808e6e80c22d3
|
[
"MIT"
] | null | null | null |
custom_components/acthor/config_flow.py
|
jatty/hass-acthor
|
9d5aaed3f01e9288fef031b47b0808e6e80c22d3
|
[
"MIT"
] | null | null | null |
custom_components/acthor/config_flow.py
|
jatty/hass-acthor
|
9d5aaed3f01e9288fef031b47b0808e6e80c22d3
|
[
"MIT"
] | null | null | null |
import voluptuous as vol
from homeassistant.config_entries import ConfigFlow
from homeassistant.const import CONF_HOST, CONF_NAME
from .acthor import test_connection
from .const import DEVICE_NAME, DOMAIN
class ACThorConfigFlow(ConfigFlow, domain=DOMAIN):
    """Config flow for an AC-THOR device: ask for name + host, verify the host."""

    async def async_step_user(self, user_input: dict = None) -> dict:
        """Single user step. Re-shows the form with an error while the host is unreachable."""
        errors = {}
        if user_input is not None:
            reachable = await test_connection(user_input[CONF_HOST], timeout=5)
            if not reachable:
                errors["base"] = "connection_failed"
            else:
                return self.async_create_entry(
                    title=user_input[CONF_NAME],
                    data=user_input,
                )
        schema = vol.Schema({
            vol.Required(CONF_NAME, default=DEVICE_NAME): str,
            vol.Required(CONF_HOST): str,
        })
        return self.async_show_form(
            step_id="user",
            data_schema=schema,
            errors=errors,
        )
| 32.266667
| 72
| 0.597107
| 109
| 968
| 5.073395
| 0.46789
| 0.081374
| 0.047016
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001529
| 0.32438
| 968
| 29
| 73
| 33.37931
| 0.844037
| 0
| 0
| 0
| 0
| 0
| 0.025826
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.32
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0f0a9e1fa344475b21cf62a27dc93bb2296049d
| 356
|
py
|
Python
|
doajtest/fixtures/common.py
|
glauberm/doaj
|
dc24dfcbf4a9f02ce5c9b09b611a5766ea5742f7
|
[
"Apache-2.0"
] | null | null | null |
doajtest/fixtures/common.py
|
glauberm/doaj
|
dc24dfcbf4a9f02ce5c9b09b611a5766ea5742f7
|
[
"Apache-2.0"
] | null | null | null |
doajtest/fixtures/common.py
|
glauberm/doaj
|
dc24dfcbf4a9f02ce5c9b09b611a5766ea5742f7
|
[
"Apache-2.0"
] | null | null | null |
# Shared record fixtures for the DOAJ test suite.

# Two notes, newest first.
NOTES = {
    'notes': [
        {'date': '2014-05-22T00:00:00Z', 'note': 'Second Note'},
        {'date': '2014-05-21T14:02:45Z', 'note': 'First Note'}
    ]
}

# Subject classification codes.
SUBJECT = {
    "subject": ['HB1-3840', 'H']
}

# Record owner account name.
OWNER = {
    "owner": "Owner"
}

# Editorial assignment: editor group and the associate editor handling it.
EDITORIAL = {
    "editor_group": "editorgroup",
    "editor": "associate"
}

# DOAJ Seal flag.
SEAL = {
    "doaj_seal": True,
}
| 14.833333
| 64
| 0.5
| 38
| 356
| 4.631579
| 0.684211
| 0.090909
| 0.113636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 0.258427
| 356
| 23
| 65
| 15.478261
| 0.541667
| 0
| 0
| 0
| 0
| 0
| 0.435393
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0f1a06f3be393877c548f9dcd340350faeb7ed3
| 46,397
|
py
|
Python
|
docnado/docnado.py
|
HEInventions/docnado
|
8817d8a9856b4babd9a2f81678a9ef0b8a75d4bc
|
[
"MIT"
] | 78
|
2018-10-09T16:28:26.000Z
|
2022-02-24T15:25:26.000Z
|
docnado/docnado.py
|
HEInventions/docnado
|
8817d8a9856b4babd9a2f81678a9ef0b8a75d4bc
|
[
"MIT"
] | 27
|
2018-11-01T16:30:50.000Z
|
2022-02-22T14:36:11.000Z
|
docnado/docnado.py
|
HEInventions/docnado
|
8817d8a9856b4babd9a2f81678a9ef0b8a75d4bc
|
[
"MIT"
] | 9
|
2018-11-06T18:50:51.000Z
|
2020-10-24T00:56:16.000Z
|
""" docnado.py
A rapid documentation tool that will blow you away.
"""
import os
import re
import sys
import csv
import glob
import time
import signal
import shutil
import urllib
import base64
import hashlib
import argparse
import tempfile
import datetime
import threading
import traceback
import subprocess
import platform
import requests
from bs4 import BeautifulSoup
from multiprocessing import Pool
from urllib.parse import urlparse
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
from xml.etree import ElementTree
from flask import Flask, url_for, abort, send_from_directory, \
render_template, Markup, make_response, render_template_string
import markdown
import markdown.util
from markdown.extensions import Extension
from markdown.postprocessors import Postprocessor
from markdown.inlinepatterns import LinkPattern, IMAGE_LINK_RE, dequote, handleAttributes
from markdown.blockprocessors import HashHeaderProcessor
from http.client import responses
if __package__:
from .navtree import NavItem, parse_nav_string
else:
from navtree import NavItem, parse_nav_string
class MultiPurposeLinkPattern(LinkPattern):
""" Embed image, video, youtube, csv or file download links
by extending the typical image tag pattern.
#  or 
If the link has "DOWNLOAD" in the alt text, treat it as a download.
Otherwise, see if its a YouTube video. Otherwise, see if its a
csv that can be turned into a table, otherwise if the link cannot be parsed
as a video, it will always be treated as an image.
"""
SUPPORTED_VIDEO = ('ogv', 'ogg', 'avi', 'mp4', 'webm', )
SUPPORTED_TABLES = ('csv', )
SUPPORTED_PDF = ('pdf', )
def get_src(self, m):
""" Get the source and parts from the matched groups: src, parts """
src_parts = m.group(9).split()
if src_parts:
src = src_parts[0]
if src[0] == "<" and src[-1] == ">":
src = src[1:-1]
return self.sanitize_url(self.unescape(src)), src_parts
else:
return '', src_parts
@staticmethod
def youtube_url_validation(url):
""" Given a YouTube URL, return the ID component.
https://stackoverflow.com/questions/4705996
"""
youtube_regex = (r'(https?://)?(www\.)?'
r'(youtube|youtu|youtube-nocookie)\.(com|be)/'
r'(watch\?v=|embed/|v/|.+\?v=)?([^&=%\?]{11})')
youtube_regex_match = re.match(youtube_regex, url)
return youtube_regex_match.group(6) if youtube_regex_match else None
@staticmethod
def as_youtube(m, video_id):
""" Return a DOM element that embeds a YouTube video. """
el = ElementTree.Element('iframe')
el.set('class', 'video')
el.set('src', f'https://www.youtube.com/embed/{video_id}?rel=0')
el.set('frameborder', '0')
el.set('allow', 'autoplay; encrypted-media')
el.set('allowfullscreen', '1')
return el
def as_pdf(self, m):
""" Return a DOM element that embeds a PDF document using an embed. """
src, parts = self.get_src(m)
wrapper = ElementTree.Element('aside')
wrapper.set('class', 'pdf-embed-wrapper')
el = ElementTree.SubElement(wrapper, 'embed')
el.set('class', 'pdf-embed')
el.set('src', src)
el.set('width', '100%')
el.set('type', 'application/pdf')
el.set('height', '100%') # width * 1.4142 (aspect ratio of a4)
el.set('pluginspage', 'http://www.adobe.com/products/acrobat/readstep2.html')
if len(parts) > 1:
el.set('alt', dequote(self.unescape(" ".join(parts[1:]))))
return wrapper
def as_video(self, m):
""" Return a video element """
src, parts = self.get_src(m)
el = ElementTree.Element('video')
el.set('src', src)
el.set("controls", "true")
handleAttributes(m.group(2), el)
return el
def as_image(self, m):
""" Return an image element """
el = ElementTree.Element('img')
src, parts = self.get_src(m)
el.set('src', src)
# Set the title if present.
if len(parts) > 1:
el.set('title', dequote(self.unescape(" ".join(parts[1:]))))
# Set the attributes on the element, if enabled.
# Set the 'alt' attribute with whatever is left from `handleAttributes`.
attrs = self.markdown.enable_attributes
alt_text = handleAttributes(m.group(2), el) if attrs else m.group(2)
el.set('alt', self.unescape(alt_text))
return el
def as_csv(self, m):
src, parts = self.get_src(m)
root = ElementTree.Element('table')
root.set('source', src)
root.set('class', 'csv-table table thead-light table-hover')
file_path = os.path.join(self.markdown.page_root, src)
with open(file_path, 'r', encoding='utf-8') as f:
reader = csv.reader(f)
headers = next(reader)
rows = [r for r in reader]
thead = ElementTree.SubElement(root, 'thead')
for col in headers:
ElementTree.SubElement(thead, 'th').text = col
for row in rows:
tr = ElementTree.SubElement(root, 'tr')
for col in row:
ElementTree.SubElement(tr, 'td').text = col
return root
def as_download(self, m):
""" Create card layers used to make a download button. """
src, parts = self.get_src(m)
# Returns a human readable string representation of bytes
def _human_size(byte_number, units=(' bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB')):
return str(byte_number) + units[0] if byte_number < 1024 else _human_size(byte_number >> 10, units[1:])
# Get information required for card.
split_src = os.path.split(src)
file_path = os.path.join(self.markdown.page_root, *split_src)
file_size = os.path.getsize(file_path)
file_basename = os.path.basename(file_path)
card_text = dequote(self.unescape(" ".join(parts[1:]))) if len(parts) > 1 else ''
# If its a pptx, extract the thumbnail previews.
# NOTE: This works, but is is removed until we support other
# file types, which for now is not a priority.
# preview_uri = None
# import zipfile
# if (file_path.endswith('pptx')):
# with zipfile.ZipFile(file_path) as zipper:
# with zipper.open('docProps/thumbnail.jpeg', 'r') as fp:
# mime = 'image/jpeg'
# data64 = base64.b64encode(fp.read()).decode('utf-8')
# preview_uri = u'data:%s;base64,%s' % (mime, data64)
# Card and structure.
card = ElementTree.Element("div")
card.set('class', 'card download-card')
header = ElementTree.SubElement(card, 'div')
header.set('class', 'download-card-header')
body = ElementTree.SubElement(card, 'div')
body.set('class', 'download-card-body')
# Add preview image.
# if preview_uri:
# img = ET.SubElement(header, 'img')
# img.set('src', preview_uri)
# Filename link heading.
heading = ElementTree.SubElement(body, 'a')
heading.set('class', 'download-card-title')
heading.set('href', src)
download_icon = ElementTree.SubElement(heading, 'i')
download_icon.set('class', 'fa fa-download')
download_text = ElementTree.SubElement(heading, 'span')
download_text.text = file_basename
# Title element from the "quote marks" part.
body_desc = ElementTree.SubElement(body, 'span')
body_desc.text = card_text
# File size span at the bottom.
body_size = ElementTree.SubElement(body, 'span')
body_size.set('class', 'small text-muted')
body_size.text = f'{_human_size(file_size)}'
return card
@staticmethod
def _is_inject(m):
""" Determine if the ALT text [] part of the link says 'INJECT'. """
alt = m.group(2)
return alt.lower() == 'inject'
def as_raw(self, m):
""" Load the HTML document specified in the link, parse it to HTML elements and return it.
"""
src, parts = self.get_src(m)
# Find the path to the HTML document, relative to the current markdown page.
file_path = os.path.join(self.markdown.page_root, src)
raw_html_string = read_html_for_injection(file_path)
if len(parts) < 2:
parts.append("nothing_one=1||nothing_two=2")
# Helper function.
def _argify(args):
if '=' not in args:
raise ValueError('injection template requires named arguments split by ||')
left, right = args.split('=')
return left.strip(), right.strip()
# Split arg string on double pipe. Joins them to undo automattic splitting from the markdown.
arg_strings = " ".join(parts[1:]).strip('\"').split("||")
# Parse into dictionary of key-value pairs based on the '=' notation.
try:
named_args = dict([_argify(args) for args in arg_strings])
except Exception as e:
raise Exception(f"Error parsing ![INJECT] arguments in {self.markdown.page_file} {repr(e)}")
# Take the template renderer and give it our string, and named args.
# Capture the output as a string.
try:
injectable_templated_str = render_template_string(raw_html_string, **named_args)
except Exception as e:
raise Exception(f"Error rendering ![INJECT] template for file {file_path} {repr(e)}")
# Feed that string to the XML parser.
try:
return ElementTree.fromstring(injectable_templated_str)
except Exception as e:
raise Exception(f"Error parsing ![INJECT] template for file {file_path} {repr(e)}")
@staticmethod
def _is_download(m):
""" Determine if the ALT text [] part of the link says 'DOWNLOAD'. """
alt = m.group(2)
return alt.lower() == 'download'
def handleMatch(self, m):
    """ Choose a renderer for the link from its ALT text or URL extension. """
    src, _ = self.get_src(m)
    # Explicit ALT-text keywords take priority over extension sniffing.
    if self._is_download(m):
        return self.as_download(m)
    if self._is_inject(m):
        return self.as_raw(m)
    # YouTube URLs become embedded players.
    youtube = self.youtube_url_validation(src)
    if youtube:
        return self.as_youtube(m, youtube)
    # Otherwise dispatch on the file extension, defaulting to a plain image.
    lowered = src.lower()
    if lowered.endswith(self.SUPPORTED_TABLES):
        return self.as_csv(m)
    if lowered.endswith(self.SUPPORTED_PDF):
        return self.as_pdf(m)
    if lowered.endswith(self.SUPPORTED_VIDEO):
        return self.as_video(m)
    return self.as_image(m)
class OffsetHashHeaderProcessor(HashHeaderProcessor):
    """ Hash-header block processor that shifts every heading level down by a
    fixed offset, controlling which DOM heading element (h1, h2, ...) is
    generated. """

    HEADING_LEVEL_OFFSET = 1

    def run(self, parent, blocks):
        """ Consume the next block; emit an offset heading for any hash-header
        found, parsing leading text first and re-queueing trailing text. """
        block = blocks.pop(0)
        match = self.RE.search(block)
        if not match:
            return
        leading = block[:match.start()]
        trailing = block[match.end():]
        # Text before the heading is parsed as its own block first.
        if leading:
            self.parser.parseBlocks(parent, [leading])
        level = len(match.group('level')) + self.HEADING_LEVEL_OFFSET
        heading = ElementTree.SubElement(parent, 'h%d' % level)
        heading.text = match.group('header').strip()
        # Push the remainder back so it is processed on the next pass.
        if trailing:
            blocks.insert(0, trailing)
class ChecklistPostprocessor(Postprocessor):
    """
    Turn `[ ]` / `[x]` / `[X]` list items into disabled HTML checkboxes and
    tag the enclosing list with a `checklist` class.
    Adapted from: `markdown_checklist.extension`
    """

    pattern = re.compile(r'<li>\[([ Xx])\]')

    def run(self, html):
        """ Rewrite checkbox markers, then add the class to the parent <ul>. """
        html = re.sub(self.pattern, self._convert_checkbox, html)
        plain = '<ul>\n<li><input type="checkbox"'
        classed = plain.replace('<ul>', '<ul class="checklist">')
        return html.replace(plain, classed)

    @staticmethod
    def _convert_checkbox(match):
        """ Render one checkbox; anything other than a space means checked. """
        checked_attr = '' if match.group(1) == ' ' else ' checked'
        return '<li><input type="checkbox" disabled%s>' % checked_attr
# Override python-markdown's default set of block-level elements, dropping
# `video`, `iframe`, `aside`, and `table` from the list — presumably so that
# output from our custom inline patterns is not swallowed as raw block HTML
# (TODO confirm against the markdown version in use).
markdown.util.BLOCK_LEVEL_ELEMENTS = re.compile(
    r"^(p|div|h[1-6]|blockquote|pre|dl|ol|ul"
    r"|script|noscript|form|fieldset|math"
    r"|hr|hr/|style|li|dt|dd|thead|tbody"
    r"|tr|th|td|section|footer|header|group|figure"
    r"|figcaption|article|canvas|output"
    r"|progress|nav|main)$",
    re.IGNORECASE
)
class MultiExtension(Extension):
    """ Markdown `Extension` that registers our custom components and removes
    the stock behaviours they replace. """

    def extendMarkdown(self, md, md_globals):
        """ Reconfigure the markdown pipeline: add checklist post-processing,
        swap in the multi-purpose link pattern, drop setext headers, and use
        the offset hash-header processor. """
        # Checklist processing based on 'markdown_checklist.extension'.
        md.postprocessors.add('checklist', ChecklistPostprocessor(md), '>raw_html')
        # Replace the stock image/link inline pattern with our own.
        del md.inlinePatterns['image_link']
        md.inlinePatterns['multi_purpose_pattern'] = MultiPurposeLinkPattern(IMAGE_LINK_RE, md)
        # Remove underlined (setext) headers entirely.
        del md.parser.blockprocessors['setextheader']
        # Hash headers go through the processor that can shift the DOM h-level.
        md.parser.blockprocessors['hashheader'] = OffsetHashHeaderProcessor(md.parser)
# Extension pipeline used for every Markdown render in this module.
# https://python-markdown.github.io/extensions/
mdextensions = [MultiExtension(),
                'markdown.extensions.tables',
                'markdown.extensions.meta',
                'markdown.extensions.def_list',
                'markdown.extensions.headerid',
                'markdown.extensions.fenced_code',
                'markdown.extensions.attr_list']
def build_meta_cache(root):
    """ Recursively find Markdown files under `root` and build a mapping of
    relative path -> `Meta` data parsed from each document's metadata header.

    :param root: str: The path to search for files from.
    """
    def _read_meta(path):
        # Render the document purely for the side effect of populating `Meta`.
        with open(path, 'r', encoding='utf-8') as handle:
            md = markdown.Markdown(extensions=mdextensions)
            md.page_root = os.path.dirname(path)
            Markup(md.convert(handle.read()))
        return getattr(md, 'Meta', None)

    meta_by_path = {}
    for path in glob.iglob(root + '/**/*.md', recursive=True):
        meta = _read_meta(path)
        # Documents without metadata are excluded from the cache.
        if meta is not None:
            meta_by_path[os.path.relpath(path, start=root)] = meta

    # If a nav filter is set, exclude relevant documents. This takes the comma
    # separated string supplied to `nav_limit` and excludes documents whose
    # nav strings do NOT start with one of the filters.
    global CMD_ARGS
    if CMD_ARGS.nav_limit:
        filters = [token.strip().lower() for token in CMD_ARGS.nav_limit.split(',')]
        filters = [token for token in filters if token]

        def _should_include(doc_meta):
            nav_strings = [nav.lower() for nav in doc_meta.get('nav', [])]
            return any(nav.startswith(token) for token in filters for nav in nav_strings)

        meta_by_path = {path: meta for path, meta in meta_by_path.items()
                        if _should_include(meta)}
    return meta_by_path
def build_nav_menu(meta_cache):
    """ Given a cache of Markdown `Meta` data, compile the tree structure used
    to generate the NAV menu.
    This uses the `nav: Assembly>Bench>Part` variable at the top of the Markdown file.
    """
    root = NavItem('root', 0)

    # Pre-sort alphabetically by the last element of each page's nav string.
    # `arrange()` overrides this ordering later, but pre-sorting keeps
    # un-arranged items from shuffling between refreshes due to unordered dicts.
    def _sort_key(entry):
        return entry[1].get('nav', [''])[0].split('>')[-1]

    for path, meta in sorted(meta_cache.items(), key=_sort_key):
        nav_str = meta.get('nav', [None])[0]
        node = root
        # Walk/extend the tree along the parsed nav chunks.
        for name, weight in parse_nav_string(nav_str):
            node = node.add(NavItem(name, weight))
        node.bind(meta=meta, link=path)
    root.arrange()
    return root
def build_reload_files_list(extra_dirs):
    """ Given a list of directories, return that list extended with every file
    found (recursively) inside them, for modification watching and server
    reload. """
    watched = list(extra_dirs)
    for directory in extra_dirs:
        for dirpath, _dirnames, filenames in os.walk(directory):
            for name in filenames:
                full_path = os.path.join(dirpath, name)
                if os.path.isfile(full_path):
                    watched.append(full_path)
    return watched
def read_html_for_injection(path):
    """ Return the contents of the HTML file at `path` as a string.
    Propagates the underlying OSError if the file does not exist.
    """
    # TODO: Consider caching here in the future. However, beware of the
    # page reload / refresh UX implications of serving stale content.
    with open(path) as handle:
        return handle.read()
def _render_markdown(file_path, **kwargs):
    """ Render the Markdown document at `file_path` and return the result of
    `render_template`, using the template named in the document metadata
    (falling back to 'document'). """
    global NAV_MENU, PROJECT_LOGO, PDF_GENERATION_ENABLED
    with open(file_path, 'r', encoding='utf-8') as handle:
        md = markdown.Markdown(extensions=mdextensions)
        md.page_root = os.path.dirname(file_path)
        md.page_file = file_path
        markup = Markup(md.convert(handle.read()))
    # Template name comes from metadata; default to the document view.
    template_meta = md.Meta.get('template', None)
    template = template_meta[0] if template_meta else 'document'
    if not template:
        raise Exception('no template found for document')
    # Load any HTML fragments listed in the `inject` metadata key, resolved
    # relative to the page's directory.
    inject_paths = [os.path.join(md.page_root, name) for name in md.Meta.get('inject', [])]
    injections = [read_html_for_injection(name) for name in inject_paths]
    # Render the page with all the prepared data.
    return render_template(f'{template}.html',
                           content=markup,
                           nav_menu=NAV_MENU,
                           project_logo=PROJECT_LOGO,
                           pdf_enabled=PDF_GENERATION_ENABLED,
                           injections=injections,
                           **md.Meta,
                           **kwargs)
def configure_flask(app, root_dir):
    """ Attach all routes, template filters, and cache hooks to the flask
    application.

    :param app: The Flask application instance.
    :param root_dir: Directory containing the markdown documents to serve.
    """
    @app.before_first_request
    def build_navigation_cache():
        """ Build an in-memory cache of document meta-data.
        NOTE: The design choice is made to crash the application if any
        of the markdown files cannot be opened and parsed. In the
        future when it becomes more stable, this will probably change.
        """
        # This is called each time the server restarts.
        global NAV_MENU
        meta_cache = build_meta_cache(root_dir)
        # Build the nav menu data-structure.
        NAV_MENU = build_nav_menu(meta_cache)

    # Store the reference to the function that rebuilds the navigation cache.
    app.build_navigation_cache = build_navigation_cache

    @app.template_filter('gravatar')
    def gravatar(email, size=100, rating='g', default='retro', use_ssl=False):
        """ Return a gravatar link for a given email address. """
        url = "https://secure.gravatar.com/avatar/" if use_ssl else "http://www.gravatar.com/avatar/"
        email = email.strip().lower().encode('utf-8')
        hash_email = hashlib.md5(email).hexdigest()
        return f'{url}{hash_email}?s={size}&r={rating}&d={default}'

    @app.template_filter()
    def url_unquote(url):
        """ Removes encoding around a URL. """
        return urllib.parse.unquote(url)

    @app.route('/favicon.ico')
    def favicon():
        return send_from_directory(os.path.join(app.root_path, 'static'),
                                   'favicon.ico', mimetype='image/vnd.microsoft.icon')

    @app.route("/print_header")
    def print_header():
        """ Render the template for the header used when printing with WKPDFTOHTML. """
        global PROJECT_LOGO
        return render_template('print_header.html', project_logo=PROJECT_LOGO)

    @app.route("/print_footer")
    def print_footer():
        """ Render the template for the footer used when printing with WKPDFTOHTML. """
        global PROJECT_LOGO
        return render_template('print_footer.html', project_logo=PROJECT_LOGO)

    @app.errorhandler(404)
    def page_not_found(e):
        global NAV_MENU, PROJECT_LOGO
        return render_template('404.html', nav_menu=NAV_MENU, project_logo=PROJECT_LOGO), 404

    @app.route("/w/<path:page>")
    def wiki(page):
        """ Render a markdown page, or serve any other file type directly. """
        file_path = os.path.abspath(os.path.join(root_dir, page))
        if not os.path.isfile(file_path):
            abort(404)
        if '.md' in [ext.lower() for ext in os.path.splitext(file_path)]:
            return _render_markdown(file_path, current_page=page)
        return send_from_directory(os.path.dirname(file_path), os.path.basename(file_path))

    @app.route("/")
    @app.route("/w/")
    def homepage():
        return wiki('home.md')

    @app.route("/pdf/<path:page>")
    def wiki_pdf(page):
        """ Render a markdown page as a PDF with WkHTMLtoPDF and return it inline. """
        file_path = os.path.abspath(os.path.join(root_dir, page))
        if not os.path.isfile(file_path):
            abort(404)
        if '.md' not in [ext.lower() for ext in os.path.splitext(file_path)]:
            return send_from_directory(os.path.dirname(file_path), os.path.basename(file_path))
        # Configure the different paths. Use mkstemp (not the deprecated,
        # race-prone mktemp) for the temporary output file.
        fd, pdf_temp = tempfile.mkstemp(suffix='.pdf')
        os.close(fd)
        input_url = url_for('wiki', page=page, _external=True)
        header_url = url_for('print_header', _external=True)
        footer_url = url_for('print_footer', _external=True)
        # Invoke WkHTMLtoPDF with an argument list and shell=False so that no
        # part of the URL or path can be interpreted by a shell (the previous
        # f-string + shell=True form was an injection hazard).
        args = [WKHTMLTOPDF_BINARY,
                '--header-html', header_url,
                '--footer-html', footer_url,
                '--print-media-type',
                '--header-spacing', '2',
                input_url, pdf_temp]
        subprocess.check_output(args)
        # Write the newly generated temp pdf into a response, always deleting
        # the temp file even when the read fails.
        try:
            with open(pdf_temp, 'rb') as f:
                binary_pdf = f.read()
        finally:
            os.remove(pdf_temp)
        target_file_name = page.replace("/", "_").replace("\\", "_")
        response = make_response(binary_pdf)
        response.headers['Content-Type'] = 'application/pdf'
        # response.headers['Content-Disposition'] = f'attachment; filename={target_file_name}.pdf'
        response.headers['Content-Disposition'] = f'inline; filename={target_file_name}.pdf'
        return response
def generate_static_pdf(app, root_dir, output_dir, nav_filter=None):
    """ Generate a static PDF directory for the documentation in `root_dir`
    into `output_dir`.
    """
    global PORT_NUMBER
    # Every markdown document in the meta cache gets a PDF endpoint URL.
    documents = build_meta_cache(root_dir)
    markdown_docs_urls = ['pdf/' + doc.replace('\\', '/') for doc in documents.keys()]
    # Fetch each server URL and save it to the matching output file path.
    for url in markdown_docs_urls:
        source = f'http://localhost:{PORT_NUMBER}/{url}'
        target = f'{os.path.join(output_dir, *os.path.split(url))}.pdf'
        os.makedirs(os.path.dirname(target), exist_ok=True)
        print(f'Source: {source} \n Target: {target}')
        urllib.request.urlretrieve(source, target)
# Helper used to classify links by whether they carry a network location.
def is_absolute(url):
    """ Returns True if the passed url string is an absolute path.
    False if not
    """
    return bool(urlparse(url).netloc)
def generate_static_html(app, root_dir, output_dir):
    """ Generate a static HTML site for the documentation in `root_dir`
    into `output_dir`.
    """
    from flask_frozen import Freezer, MissingURLGeneratorWarning
    import warnings
    warnings.filterwarnings("ignore", category=MissingURLGeneratorWarning)
    # Update the flask config.
    app.config['FREEZER_RELATIVE_URLS'] = True
    app.config['FREEZER_IGNORE_MIMETYPE_WARNINGS'] = True
    app.config['FREEZER_DESTINATION'] = output_dir
    # Create the freezer app. Make it use specific URLs.
    freezer = Freezer(app, with_no_argument_rules=False, log_url_for=False)
    # Register a generator that passes ALL files in the docs directory into the
    # `wiki` flask route. NOTE(review): the generator deliberately shares the
    # route's name 'wiki' — presumably flask-frozen matches generators to
    # endpoints by name; confirm against the flask-frozen docs.
    @freezer.register_generator
    def wiki():
        all_docs = [file.replace(f'{root_dir}', '/w').replace(f'{os.path.sep}', '/')
                    for file in glob.iglob(f'{root_dir}/**/*', recursive=True)
                    if os.path.isfile(file)]
        for doc in all_docs:
            yield doc
    # Save all the URLs using the correct extension and MIME type.
    freezer.freeze()
    # For each `.md` file in the output directory:
    for markdown_file in glob.iglob(f'{output_dir}/**/*.md', recursive=True):
        # Rewrite all relative links to other `.md` files to `.html.`
        output = ''
        with open(markdown_file, 'r', encoding="utf-8") as f:
            html = f.read()

            def _href_replace(m):
                # Absolute (external) links stay untouched; only relative
                # `.md` links are retargeted at the generated `.html` files.
                href = m.group()
                if is_absolute(href[6:-1]):
                    return href
                return href.replace('.md', '.html')

            # NOTE(review): 'href="(.*md)"' is a greedy match — presumably
            # fine for one link per line, but verify behaviour on lines
            # containing several href attributes.
            output = re.sub('href="(.*md)"', _href_replace, html)
        # Rename the file from `.md` to HTML.
        with open(markdown_file[:-3] + '.html', 'w', encoding="utf-8") as f:
            f.write(output)
        # Delete the Markdown file.
        os.remove(markdown_file)
def load_project_logo(logo_file=None):
    """ Attempt to load the project logo from the specified path and return it
    as a data-URI string; return None when no path is given or the file is
    missing. """
    if not logo_file or not os.path.exists(logo_file):
        return None
    # NOTE: the MIME type is always reported as image/png regardless of the
    # file's actual format.
    with open(logo_file, 'rb') as handle:
        encoded = base64.b64encode(handle.read()).decode('utf-8')
    return u'data:%s;base64,%s' % ('image/png', encoded)
def check_pdf_generation_cap():
    """ Return True when the WkHTMLtoPDF binary can be invoked successfully
    (probed by asking it for its version). """
    global WKHTMLTOPDF_BINARY
    retcode = subprocess.call(f'{WKHTMLTOPDF_BINARY} --version',
                              shell=True,
                              stdout=subprocess.DEVNULL,
                              stderr=subprocess.DEVNULL)
    return retcode == 0
def copy_local_project(force=False):
    """ Copy the sample docs, style, and logo into the current working
    directory.
    Note: With `force=True` this overwrites anything currently in those
    locations; otherwise existing targets are left alone with a warning.

    :param force: When True, delete any existing target before copying.
    """
    source_root = os.path.dirname(__file__)
    target_root = os.getcwd()
    targets = ['docs', 'style', 'logo.png']

    def _copy_one(source, target):
        # Directories and plain files need different copy/delete primitives.
        is_dir = os.path.isdir(source)
        copy = shutil.copytree if is_dir else shutil.copyfile
        remove = shutil.rmtree if is_dir else os.remove
        if os.path.exists(target):
            if force:
                # BUGFIX: the message previously read "replacing it with
                # {target}" — it now names the source being copied in.
                print(f'Deleting existing {target} and replacing it with {source}')
                remove(target)
                copy(source, target)
            else:
                print(f'Warning: {target} already exists.')
        else:
            print(f'Copying: {source} -> {target}')
            copy(source, target)

    for path in targets:
        _copy_one(os.path.join(source_root, path), os.path.join(target_root, path))
def find_references(document_path):
    """ Render the markdown at `document_path` and return the set of
    referenced asset paths, made relative to the directory containing
    `document_path`. """
    with open(document_path, 'r', encoding='utf-8') as handle:
        markdown_raw_data = handle.read()
    # Render as HTML with our own renderer so custom output tags appear.
    md = markdown.Markdown(extensions=mdextensions)
    document_dir = os.path.dirname(document_path)
    md.page_root = document_dir
    # Interpret with the BeautifulSoup HTML scraping library.
    soup = BeautifulSoup(md.convert(markdown_raw_data), 'html.parser')
    # Tag name -> attribute holding the reference.
    tags_to_search = {
        'img': 'src',
        'a': 'href',
        'video': 'src',
        'table': 'source',
        'embed': 'src',
    }
    references = set()
    for tag_name, attribute in tags_to_search.items():
        for tag in soup.find_all(tag_name):
            value = tag.get(attribute)
            if value:
                references.add(value)
    # Normalise web paths into disk paths rooted at the document directory;
    # the set comprehension also keeps the result unique.
    return {os.path.join(document_dir, urllib.request.url2pathname(ref))
            for ref in references}
def has_nav(markdown_text):
    """ Return True when the passed markdown text contains navbar metadata
    (a `nav:` line); False otherwise. """
    pattern = re.compile(r'(?=\n|)nav:\s+\w+(?=\n |)')
    return bool(pattern.search(markdown_text))
def find_orphans(files):
    """ Return the subset of `files` (recursively gathered assets) that no
    markdown document references. """
    # Collect every reference made by each markdown document.
    references_by_page = {path: find_references(path)
                          for path in files if path.endswith('.md')}
    # Markdown documents carrying navbar metadata are reachable via the nav
    # menu, so they can never be orphans; drop them from the candidates.
    md_with_nav = []
    for path in files:
        if path.endswith('.md'):
            with open(path, encoding='utf-8') as handle:
                if has_nav(handle.read().lower()):
                    md_with_nav.append(path)
    candidates = [path for path in files if path not in md_with_nav]
    # Flatten all references and report any unreferenced candidate.
    all_references = []
    for refs in references_by_page.values():
        all_references += [ref for ref in refs]
    return [path for path in candidates if path not in all_references]
class DocumentLinks:
    """ A helper class to process the `<a href.../>` links (and other asset
    references) from a single markdown document that is rendered using our
    own renderer.
    """

    def __init__(self, md_file):
        """ Open a Markdown document and collect every reference found in
        `img`, `a`, `video`, `table`, and `embed` tags.
        """
        # Store important information about this document.
        self.md_file = md_file
        self.md_dir = os.path.dirname(md_file)
        # Read in Markdown and generate HTML with our parser.
        with open(md_file, 'r', encoding='utf-8') as f:
            markdown_raw_data = f.read()
        md = markdown.Markdown(extensions=mdextensions)
        md.page_root = self.md_dir
        html = md.convert(markdown_raw_data)
        # Interpret with the BeautifulSoup HTML scraping library.
        soup = BeautifulSoup(html, 'html.parser')
        tags_to_search = {
            'img': 'src',
            'a': 'href',
            'video': 'src',
            'table': 'source',
            'embed': 'src',
        }
        # NOTE: a previous version branched on whether the tag's href held
        # http/https, but both branches performed the identical add; the
        # conditional was dead weight and has been collapsed.
        self.references = set()
        for tag_name, attribute in tags_to_search.items():
            for tag in soup.find_all(tag_name):
                val = tag.get(attribute)
                if val:
                    self.references.add(val)

    @property
    def web_links(self):
        """ Generate a list of web links from our cached links.
        """
        return [link for link in self.references if is_absolute(link)]

    @property
    def relative_links(self):
        """ Generate a list of relative file system links from our cached links.
        This converts from a web path to a path on disk then normalises the path to the current directory.
        """
        def _norm(path):
            return os.path.join(self.md_dir, urllib.request.url2pathname(path))
        return [_norm(link) for link in self.references if not is_absolute(link)]

    @staticmethod
    def validate_url(address):
        """ HEAD-request `address` and return `(status_code, address)`, or
        `(False, address)` when the request raises.
        """
        try:
            request = requests.head(address)
            return request.status_code, address
        except requests.exceptions.RequestException:
            return False, address

    def detect_broken_links(self, process_pool):
        """ Go through all the `web_links` and the `relative_links` and yield
        those that are broken (i.e. do not resolve to HTTP200OK or a file on disk).
        """
        result = process_pool.map(self.validate_url, self.web_links)
        for response, url in result:
            if not response == 200:
                # BUGFIX: this previously tested `response is int`, which is
                # never true, so every failure reported "Exception". Look the
                # status code up in the reason-phrase table instead, falling
                # back to "Exception" for request failures / unknown codes.
                reason = responses.get(response, "Exception")
                yield url + ' Status: ' + reason
        for file in self.relative_links:
            if not os.path.exists(file):
                yield file
def generate_metadata(path):
    """ Add relevant metadata to the top of the markdown file at the passed path.
    Title is drawn from the filename, Date from the last modified timestamp, Version defaults at 1.0.0,
    Nav is generated from the filepath, and Authors are generated from the git contributors (if applicable) and
    are otherwise left blank.
    Warning: Does not check if there is existing metadata.
    """
    # Collect author emails from `git log` output: lines look like
    # "Author: Name <email>". If git is unavailable or the file is not
    # tracked, no line contains 'Author:' and the set stays empty.
    s = subprocess.getoutput(f"git log -p {path}")
    lines = s.split(os.linesep)
    authors = set([re.search(r'<(.*)>', line).group(1)for line in lines if 'Author:' in line])
    file_status = os.stat(path)
    # Drop the first path component so the nav string begins below the
    # documents root directory.
    nav_path = os.path.sep.join(path.split(os.path.sep)[1:])
    metadata = {
        # Filename (sans extension), separators turned to spaces, title-cased.
        'title': ' '.join(
            path
            .split('.')[0]
            .split(os.path.sep)[-1]
            .replace('_', ' ')
            .replace('-', ' ')
            .title()
            .split()
        ),
        'desc': '',
        # Last-modified timestamp of the file, formatted YYYY/MM/DD (UTC).
        'date': datetime.datetime.utcfromtimestamp(file_status.st_mtime).strftime('%Y/%m/%d'),
        'version': '1.0.0',
        'template': '',
        # Nav string like `Dir>Subdir>Name`, derived from the file path.
        'nav': nav_path.replace(os.path.sep, '>').title().split('.')[0],
        'percent': '100',
        'authors': ' '.join(authors),
    }
    result = ""
    # Align values with one or two tabs depending on the key length so the
    # emitted metadata block lines up visually.
    for key in metadata.keys():
        result += ('{}:{}{}\n'.format(key, '\t' if len(key) > 6 else '\t\t', metadata[key]))
    # Prepend the metadata block to the existing file content in place.
    with open(path, 'r+', encoding='utf-8') as f:
        content = f.read()
        f.seek(0, 0)
        f.write(result)
        f.write(content)
class ReloadHandler(PatternMatchingEventHandler):
    """ Rebuild the document metadata / navigation cache whenever a markdown
    file changes inside the documents directory. """

    def __init__(self, app):
        super(ReloadHandler, self).__init__(patterns=['*.md'],
                                            ignore_directories=False,
                                            case_sensitive=False)
        self.flask_app = app

    def on_any_event(self, event):
        # Any create/modify/delete/move of a *.md file invalidates the cache.
        self.flask_app.build_navigation_cache()
# Module-level state shared between the CLI entrypoint and the flask routes.
global CMD_ARGS, NAV_MENU, PROJECT_LOGO, WKHTMLTOPDF_BINARY, PDF_GENERATION_ENABLED, PORT_NUMBER
CMD_ARGS = None                 # Parsed argparse namespace (set in main()).
NAV_MENU = {}                   # Navigation tree built from document metadata.
PROJECT_LOGO = None             # Project logo as a data-URI string, or None.
WKHTMLTOPDF_BINARY = None       # Name of the wkhtmltopdf executable (set in main()).
PDF_GENERATION_ENABLED = False  # True when the wkhtmltopdf binary is invocable.
def main():
    """ Application entrypoint.

    Parses command line arguments, loads configuration from the environment,
    runs any requested one-shot command (--new, --new-force, --generate-meta,
    --find-orphans, --find-broken-links, --dirs, --html, --pdf), and otherwise
    starts the development server.
    """
    global PORT_NUMBER
    PORT_NUMBER = 5000

    # Parse the command line arguments. (Help/description whitespace is
    # re-flowed by argparse, so implicit string concatenation is safe here.)
    parser = argparse.ArgumentParser(
        description='docnado: Lightweight tool for rendering '
                    'Markdown documentation with different templates.')
    parser.add_argument('--html', action='store', dest='html_output_dir',
                        help='Generate a static site from the server and output to the '
                             'specified directory.')
    parser.add_argument('--pdf', action='store', dest='pdf_output_dir',
                        help='Generate static PDFs from the server and output to the '
                             'specified directory.')
    parser.add_argument('--nav-limit', action='store', dest='nav_limit',
                        default=None,
                        help='Include certain document trees only based on a comma separated '
                             'list of nav strings. e.g. Tooling,Document')
    parser.add_argument('--new', action="store_true", dest='new_project',
                        default=False,
                        help='Copy the `docs` and `styles` folder into the working directory '
                             'and output a config file that addresses them. Does not overwrite existing files.')
    parser.add_argument('--new-force', action="store_true", dest='new_project_force',
                        default=False,
                        help='Copy the `docs` and `styles` folder into the working directory '
                             'and output a config file that addresses them. Force deletion of existing files.')
    parser.add_argument('--dirs', action="store_true", dest='show_dirs',
                        default=False,
                        help='Display the different directories the software is using '
                             'to search for documentation and styles.')
    parser.add_argument('--generate-meta', action="store", dest='generate_meta',
                        default=False,
                        help='Generate metadata for markdown files in the specified directory.')
    parser.add_argument('--find-orphans', action="store_true", dest='find_orphans',
                        default=False,
                        help='Identify unused media assets (orphans)')
    parser.add_argument('--find-broken-links', action="store_true", dest='find_broken_links',
                        default=False,
                        help='Identify broken external links.')
    parser.add_argument('--port', action="store", dest='new_port_number',
                        default=False,
                        help='Specify a port for the docnado server')
    parser.add_argument('--host', action="store", dest='set_host',
                        default=False,
                        help='Set the docnado development server to listen on IP addresses.')

    # Import the command line args and make them application global.
    global CMD_ARGS
    args = parser.parse_args()
    CMD_ARGS = args

    # Load config from the environment and validate it.
    global PROJECT_LOGO, PDF_GENERATION_ENABLED, NAV_MENU, WKHTMLTOPDF_BINARY
    TRUE = 'TRUE'
    FALSE = 'FALSE'
    flask_debug = os.environ.get('DN_FLASK_DEBUG', FALSE) == TRUE
    watch_changes = os.environ.get('DN_RELOAD_ON_CHANGES', TRUE) == TRUE
    WKHTMLTOPDF_BINARY = ('wkhtmltopdf_0.12.5.exe' if platform.system() == 'Windows' else 'wkhtmltopdf')
    PDF_GENERATION_ENABLED = check_pdf_generation_cap()
    dir_documents = os.environ.get('DN_DOCS_DIR', os.path.join(os.getcwd(), 'docs'))
    dir_style = os.environ.get('DN_STYLE_DIR', os.path.join(os.getcwd(), 'style'))
    logo_location = os.environ.get('DN_PROJECT_LOGO', os.path.join(os.getcwd(), 'logo.png'))

    # If `style` folder does not exist, use the one in site-packages.
    if not os.path.exists(dir_style) and not os.path.isdir(dir_style):
        dir_style = os.path.join(os.path.dirname(__file__), 'style')

    # Attempt to load the project logo into a base64 data uri.
    PROJECT_LOGO = load_project_logo(logo_location)

    # Compute the static and template directories.
    dir_static = os.path.join(dir_style, 'static')
    dir_templates = os.path.join(dir_style, 'templates')

    # If the user is asking to create a new project.
    if args.new_project:
        copy_local_project()
        sys.exit()
    if args.new_project_force:
        copy_local_project(force=True)
        return 0
    if args.new_port_number:
        PORT_NUMBER = int(args.new_port_number)
    if args.generate_meta:
        doc_files = glob.iglob(args.generate_meta + '/**/*.md', recursive=True)
        for doc_file in doc_files:
            generate_metadata(doc_file)
        return 0
    if args.find_orphans:
        # Find all the assets in the directory/subdirectories recursively and
        # report those that no markdown document references.
        files = glob.glob((dir_documents + '/**/*.*'), recursive=True)
        files = [f for f in files if not os.path.isdir(f)]
        orphans = find_orphans(files)
        if orphans:
            print(f'{len(orphans)} Unused assets (orphans):\n\t' + '\n\t'.join(orphans))
            return -1
        return 0
    if args.find_broken_links:
        process_pool = Pool(processes=10)
        md_files = glob.glob((dir_documents + '/**/*.md'), recursive=True)
        md_reports = tuple((md, list(DocumentLinks(md).detect_broken_links(process_pool))) for md in md_files)
        num_broken = 0
        for file, report in md_reports:
            if report:
                num_broken += len(report)
                print(f'{file}\n\t' + '\n\t'.join(report))
        return -1 if num_broken else 0
    if args.show_dirs:
        print('The following directories are being used: ')
        print('\t', f'Documents -> {dir_documents}')
        print('\t', f'Logo -> {logo_location}')
        print('\t', f'Style -> {dir_style}')
        print('\t', f' Static -> {dir_static}')
        print('\t', f' Templates -> {dir_templates}')
        sys.exit()

    # Validate that the required directories exist before starting the server.
    if not os.path.exists(dir_documents) and not os.path.isdir(dir_documents):
        print(f'Error: Documents directory "{dir_documents}" does not exist. \
Create one called `docs` and fill it with your documentation.', file=sys.stderr)
        sys.exit(-1)
    if not os.path.exists(dir_static) and not os.path.isdir(dir_static):
        print(f'Error: Static directory "{dir_static}" does not exist.', file=sys.stderr)
        sys.exit(-1)
    if not os.path.exists(dir_templates) and not os.path.isdir(dir_templates):
        print(f'Error: Templates directory "{dir_templates}" does not exist.', file=sys.stderr)
        sys.exit(-1)

    # Create the server.
    app = Flask(__name__,
                static_url_path='',
                template_folder=dir_templates,
                static_folder=dir_static)

    # Attach routes and filters.
    configure_flask(app, dir_documents)

    # Output PDF files.
    if args.pdf_output_dir:
        if not check_pdf_generation_cap():
            print('Error: PDF generation requires WkHTMLtoPDF.', file=sys.stderr)
            sys.exit(-1)

        def gen_pdfs():
            # Give the server a moment to come up, fetch every PDF, then
            # terminate the process so the CLI invocation exits.
            time.sleep(2)
            generate_static_pdf(
                app, dir_documents, os.path.join(os.getcwd(), args.pdf_output_dir)
            )
            time.sleep(5)
            os.kill(os.getpid(), signal.SIGTERM)

        t1 = threading.Thread(target=gen_pdfs)
        t1.start()
        app.run(debug=flask_debug, threaded=True, port=PORT_NUMBER)
        sys.exit()

    # Output a static site.
    if args.html_output_dir:
        PDF_GENERATION_ENABLED = False
        try:
            generate_static_html(app, dir_documents, os.path.join(os.getcwd(), args.html_output_dir))
            index_html = """ <!DOCTYPE html>
<html>
<head>
<meta http-equiv="refresh" content="0; url=./w/">
</head>
<body>
</body>
</html>"""
            with open(os.path.join(os.getcwd(), args.html_output_dir, 'index.html'), 'w') as f:
                f.write(index_html)
        except Exception:
            traceback.print_exc(file=sys.stderr)
            sys.exit(-1)
        sys.exit()

    # Watch for any changes in the docs or style directories.
    dn_watch_files = []
    observer = None
    if watch_changes:
        observer = Observer()
        observer.schedule(ReloadHandler(app), path=dir_documents, recursive=True)
        observer.start()
        dn_watch_files = build_reload_files_list([__name__, dir_style])

    def _serve(**server_kwargs):
        # Shared run / error-report / watcher-teardown logic for both the
        # public-host and local server cases (previously duplicated inline).
        try:
            app.run(debug=flask_debug, port=PORT_NUMBER, extra_files=dn_watch_files, **server_kwargs)
        except OSError as e:
            print(e)
            print('Error initialising server.')
        except KeyboardInterrupt:
            pass
        finally:
            if observer:
                observer.stop()
                observer.join()

    # Run the server.
    if args.set_host:
        # BUGFIX: corrected the "sevelopment" typo in this user-facing message.
        print('Attempting to set development server to listen on public IP address: ' + args.set_host)
        print('WARNING: The Docnado development environment is intended to be used as a development tool ONLY, '
              'and is not recommended for use in a production environment.')
        _serve(host=args.set_host)
    else:
        _serve()
# When executed directly as a script (rather than imported), boot the app.
if __name__ == "__main__":
    main()
| 37.782573
| 128
| 0.613078
| 5,932
| 46,397
| 4.665374
| 0.15408
| 0.014092
| 0.007949
| 0.004553
| 0.221102
| 0.171635
| 0.137416
| 0.12383
| 0.11364
| 0.099801
| 0
| 0.005587
| 0.274802
| 46,397
| 1,227
| 129
| 37.813366
| 0.816923
| 0.202125
| 0
| 0.199749
| 0
| 0
| 0.131407
| 0.022241
| 0
| 0
| 0
| 0.000815
| 0
| 1
| 0.072864
| false
| 0.003769
| 0.046482
| 0.005025
| 0.209799
| 0.04397
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0f7703f7d61c2e287ab471ebd07742e1540f442
| 15,577
|
py
|
Python
|
mkt/search/tests/test_filters.py
|
clouserw/zamboni
|
c4a568b69c1613f27da41d46328b2975cbdc1c07
|
[
"BSD-3-Clause"
] | null | null | null |
mkt/search/tests/test_filters.py
|
clouserw/zamboni
|
c4a568b69c1613f27da41d46328b2975cbdc1c07
|
[
"BSD-3-Clause"
] | null | null | null |
mkt/search/tests/test_filters.py
|
clouserw/zamboni
|
c4a568b69c1613f27da41d46328b2975cbdc1c07
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import json
from nose.tools import eq_, ok_
from rest_framework.exceptions import ParseError
from django.contrib.auth.models import AnonymousUser
from django.test.client import RequestFactory
from django.test.utils import override_settings
import mkt
from mkt.constants.applications import DEVICE_CHOICES_IDS
from mkt.constants.features import FeatureProfile
from mkt.search.filters import (DeviceTypeFilter, ProfileFilter,
PublicAppsFilter, PublicSearchFormFilter,
RegionFilter, SearchQueryFilter, SortingFilter,
ValidAppsFilter)
from mkt.search.forms import TARAKO_CATEGORIES_MAPPING
from mkt.search.views import SearchView
from mkt.site.tests import TestCase
from mkt.webapps.indexers import WebappIndexer
class FilterTestsBase(TestCase):
    """ Shared scaffolding for filter tests: builds an anonymous GET request
    and pushes the indexer queryset through every filter class under test. """

    def setUp(self):
        super(FilterTestsBase, self).setUp()
        self.req = RequestFactory().get('/')
        self.req.user = AnonymousUser()
        self.view_class = SearchView

    def _filter(self, req=None, data=None):
        """ Apply each filter in `self.filter_classes` in order and return
        the resulting elasticsearch query as a dict. """
        request = req or RequestFactory().get('/', data=data or {})
        request.user = AnonymousUser()
        queryset = WebappIndexer.search()
        for filter_class in self.filter_classes:
            queryset = filter_class().filter_queryset(request, queryset,
                                                      self.view_class)
        return queryset.to_dict()
class TestQueryFilter(FilterTestsBase):
    """ Tests for SearchQueryFilter: checks the elasticsearch query clauses
    generated for free-text searches. """

    filter_classes = [SearchQueryFilter]

    def test_q(self):
        qs = self._filter(data={'q': 'search terms'})
        # Spot check a few queries.
        should = (qs['query']['function_score']['query']['bool']['should'])
        ok_({'match': {'name': {'query': 'search terms', 'boost': 4,
                                'slop': 1, 'type': 'phrase'}}}
            in should)
        ok_({'prefix': {'name': {'boost': 1.5, 'value': 'search terms'}}}
            in should)
        ok_({'match': {'name_english': {'query': 'search terms',
                                        'boost': 2.5}}}
            in should)
        ok_({'match': {'description_english': {'query': 'search terms',
                                               'boost': 0.6,
                                               'analyzer': 'english_analyzer',
                                               'type': 'phrase'}}}
            in should)

    def test_fuzzy_single_word(self):
        # A single-word query should produce a fuzzy clause on tags.
        qs = self._filter(data={'q': 'term'})
        should = (qs['query']['function_score']['query']['bool']['should'])
        ok_({'fuzzy': {'tags': {'prefix_length': 1, 'value': 'term'}}}
            in should)

    def test_no_fuzzy_multi_word(self):
        # Multi-word queries must not trigger fuzzy matching anywhere.
        qs = self._filter(data={'q': 'search terms'})
        qs_str = json.dumps(qs)
        ok_('fuzzy' not in qs_str)

    @override_settings(ES_USE_PLUGINS=True)
    def test_polish_analyzer(self):
        """
        Test that the polish analyzer is included correctly since it is an
        exception to the rest b/c it is a plugin.
        """
        with self.activate(locale='pl'):
            qs = self._filter(data={'q': u'próba'})
            should = (qs['query']['function_score']['query']['bool']['should'])
            ok_({'match': {'name_polish': {'query': u'pr\xf3ba',
                                           'boost': 2.5}}}
                in should)
            ok_({'match': {'description_polish': {'query': u'pr\xf3ba',
                                                  'boost': 0.6,
                                                  'analyzer': 'polish',
                                                  'type': 'phrase'}}}
                in should)
class TestFormFilter(FilterTestsBase):
    # Exercises PublicSearchFormFilter: the public search form parameters.
    filter_classes = [PublicSearchFormFilter]

    def test_category(self):
        qs = self._filter(data={'cat': 'games'})
        ok_({'terms': {'category': ['games']}}
            in qs['query']['filtered']['filter']['bool']['must'])

    def test_tag(self):
        qs = self._filter(data={'tag': 'tarako'})
        ok_({'term': {'tags': 'tarako'}}
            in qs['query']['filtered']['filter']['bool']['must'])

    def test_tarako_categories(self):
        # Tarako pseudo-categories expand to lists of regular categories
        # via TARAKO_CATEGORIES_MAPPING.
        qs = self._filter(data={'cat': 'tarako-lifestyle'})
        ok_({'terms':
             {'category': TARAKO_CATEGORIES_MAPPING['tarako-lifestyle']}}
            in qs['query']['filtered']['filter']['bool']['must'])

        qs = self._filter(data={'cat': 'tarako-games'})
        ok_({'terms': {'category': TARAKO_CATEGORIES_MAPPING['tarako-games']}}
            in qs['query']['filtered']['filter']['bool']['must'])

        qs = self._filter(data={'cat': 'tarako-tools'})
        ok_({'terms': {'category': TARAKO_CATEGORIES_MAPPING['tarako-tools']}}
            in qs['query']['filtered']['filter']['bool']['must'])

    def test_app_type(self):
        qs = self._filter(data={'app_type': ['hosted']})
        ok_({'terms': {'app_type': [1]}}
            in qs['query']['filtered']['filter']['bool']['must'])

    def test_app_type_packaged(self):
        """Test packaged also includes privileged."""
        qs = self._filter(data={'app_type': ['packaged']})
        ok_({'terms': {'app_type': [2, 3]}}
            in qs['query']['filtered']['filter']['bool']['must'])

    def test_manifest_url(self):
        url = 'http://hy.fr/manifest.webapp'
        qs = self._filter(data={'manifest_url': url})
        ok_({'term': {'manifest_url': url}}
            in qs['query']['filtered']['filter']['bool']['must'])

    def test_offline(self):
        """Ensure we are filtering by offline-capable apps."""
        qs = self._filter(data={'offline': 'True'})
        ok_({'term': {'is_offline': True}}
            in qs['query']['filtered']['filter']['bool']['must'])

    def test_online(self):
        """Ensure we are filtering by apps that require online access."""
        qs = self._filter(data={'offline': 'False'})
        ok_({'term': {'is_offline': False}}
            in qs['query']['filtered']['filter']['bool']['must'])

    def test_offline_and_online(self):
        """Ensure we are not filtering by offline/online by default."""
        # Pass any form values other than 'offline' to create the dict.
        qs = self._filter(data={'cat': 'games'})
        ok_({'term': {'is_offline': True}}
            not in qs['query']['filtered']['filter']['bool']['must'])
        ok_({'term': {'is_offline': False}}
            not in qs['query']['filtered']['filter']['bool']['must'])

    def test_languages(self):
        qs = self._filter(data={'languages': 'fr'})
        ok_({'terms': {'supported_locales': ['fr']}}
            in qs['query']['filtered']['filter']['bool']['must'])

        # Comma-separated values become a list in the terms filter.
        qs = self._filter(data={'languages': 'ar,en-US'})
        ok_({'terms': {'supported_locales': ['ar', 'en-US']}}
            in qs['query']['filtered']['filter']['bool']['must'])

    def test_author(self):
        # The author value is lowercased before being matched on author.raw.
        qs = self._filter(data={'author': 'Mozilla LABS'})
        ok_({'term': {'author.raw': u'mozilla labs'}}
            in qs['query']['filtered']['filter']['bool']['must'])

    def test_installs_allowed_from(self):
        qs = self._filter(data={'installs_allowed_from': '*'})
        ok_({'term': {'installs_allowed_from': u'*'}}
            in qs['query']['filtered']['filter']['bool']['must'])
        # Test that we don't filter by this field if not provided.
        qs = self._filter()
        ok_('installs_allowed_from' not in json.dumps(qs),
            "Unexpected 'installs_allowed_from' in query")

    def test_premium_types(self):
        def ptype(p):
            # Translate API slug ('free', 'free-inapp', ...) to its id.
            return mkt.ADDON_PREMIUM_API_LOOKUP.get(p)

        # Test a single premium type.
        qs = self._filter(data={'premium_types': ['free']})
        ok_({'terms': {'premium_type': [ptype('free')]}}
            in qs['query']['filtered']['filter']['bool']['must'])
        # Test many premium types.
        qs = self._filter(data={'premium_types': ['free', 'free-inapp']})
        ok_({'terms': {'premium_type': [ptype('free'), ptype('free-inapp')]}}
            in qs['query']['filtered']['filter']['bool']['must'])
        # Test a non-existent premium type.
        with self.assertRaises(ParseError):
            self._filter(data={'premium_types': ['free', 'platinum']})

    def test_device(self):
        qs = self._filter(data={'dev': 'desktop'})
        ok_({'term': {'device': DEVICE_CHOICES_IDS['desktop']}}
            in qs['query']['filtered']['filter']['bool']['must'])

    def test_no_device_with_device_type(self):
        """Test that providing a device type w/o device doesn't filter."""
        qs = self._filter(data={'dev': '', 'device': 'firefoxos'})
        ok_('filtered' not in qs['query'].keys())
class TestPublicAppsFilter(FilterTestsBase):
    """PublicAppsFilter must restrict results to public, enabled apps."""

    filter_classes = [PublicAppsFilter]

    def test_status(self):
        qs = self._filter(self.req)
        must_clauses = qs['query']['filtered']['filter']['bool']['must']
        ok_({'term': {'status': mkt.STATUS_PUBLIC}} in must_clauses)
        ok_({'term': {'is_disabled': False}} in must_clauses)
class TestValidAppsFilter(FilterTestsBase):
    """ValidAppsFilter must allow apps with any valid status, enabled only."""

    filter_classes = [ValidAppsFilter]

    def test_status(self):
        qs = self._filter(self.req)
        must_clauses = qs['query']['filtered']['filter']['bool']['must']
        ok_({'terms': {'status': mkt.VALID_STATUSES}} in must_clauses)
        ok_({'term': {'is_disabled': False}} in must_clauses)
class TestDeviceTypeFilter(FilterTestsBase):
    # Exercises DeviceTypeFilter: request device flags plus 'dev'/'device'
    # querystring parameters.
    filter_classes = [DeviceTypeFilter]

    def test_no_filters(self):
        # No device information at all: no filter clause is added.
        qs = self._filter(self.req)
        ok_('filtered' not in qs['query'].keys())

    def test_mobile(self):
        # Mobile requests exclude flash-using apps.
        self.req.MOBILE = True
        qs = self._filter(self.req)
        ok_({'term': {'uses_flash': False}}
            in qs['query']['filtered']['filter']['bool']['must'])

    def test_gaia(self):
        # Gaia (Firefox OS) requests also exclude flash-using apps.
        self.req.GAIA = True
        qs = self._filter(self.req)
        ok_({'term': {'uses_flash': False}}
            in qs['query']['filtered']['filter']['bool']['must'])

    def test_tablet(self):
        # The TABLET flag alone adds no filter clause.
        self.req.TABLET = True
        qs = self._filter(self.req)
        ok_('filtered' not in qs['query'].keys())

    def test_device_in_querystring(self):
        # 'dev' (optionally refined by 'device') maps to the numeric
        # device ids: 1=desktop, 2=mobile, 3=tablet, 4=firefoxos.
        qs = self._filter(data={'dev': 'desktop'})
        ok_({'term': {'device': 1}}
            in qs['query']['filtered']['filter']['bool']['must'])
        qs = self._filter(data={'dev': 'android', 'device': 'mobile'})
        ok_({'term': {'device': 2}}
            in qs['query']['filtered']['filter']['bool']['must'])
        qs = self._filter(data={'dev': 'android', 'device': 'tablet'})
        ok_({'term': {'device': 3}}
            in qs['query']['filtered']['filter']['bool']['must'])
        qs = self._filter(data={'dev': 'firefoxos'})
        ok_({'term': {'device': 4}}
            in qs['query']['filtered']['filter']['bool']['must'])
class TestRegionFilter(FilterTestsBase):
    """RegionFilter must exclude apps whose exclusions list the request region."""

    filter_classes = [RegionFilter]

    def test_no_region_default(self):
        # Without an explicit region on the request, RESTOFWORLD is used.
        qs = self._filter(self.req)
        must_not = qs['query']['filtered']['filter']['bool']['must_not']
        ok_({'term': {'region_exclusions': mkt.regions.RESTOFWORLD.id}}
            in must_not)

    def test_region(self):
        self.req.REGION = mkt.regions.BRA
        qs = self._filter(self.req)
        must_not = qs['query']['filtered']['filter']['bool']['must_not']
        ok_({'term': {'region_exclusions': mkt.regions.BRA.id}} in must_not)
class TestProfileFilter(FilterTestsBase):
    # Exercises ProfileFilter: the 'pro' feature-profile signature parameter.
    filter_classes = [ProfileFilter]

    def profile_qs(self, disabled_features=None):
        """Build querystring data for a profile with every feature enabled
        except the ones named in ``disabled_features``."""
        if disabled_features is None:
            disabled_features = {}
        # Start from a profile with all known features turned on.
        profile = FeatureProfile().fromkeys(FeatureProfile(), True)
        for feature in disabled_features:
            profile[feature] = False
        return {'pro': profile.to_signature(), 'dev': 'firefoxos'}

    def test_filter_all_features_present(self):
        # Nothing disabled: no filter clause should be generated.
        qs = self._filter(data=self.profile_qs())
        ok_('filtered' not in qs['query'].keys())

    def test_filter_one_feature_present(self):
        # Apps requiring a disabled feature must be excluded (must_not).
        qs = self._filter(data=self.profile_qs(disabled_features=['sms']))
        ok_({'term': {'features.has_sms': True}}
            in qs['query']['filtered']['filter']['bool']['must_not'])

    def test_filter_one_feature_present_desktop(self):
        # On desktop the profile is ignored entirely.
        data = self.profile_qs(disabled_features=['sms'])
        data['dev'] = 'desktop'
        qs = self._filter(data=data)
        ok_('filtered' not in qs['query'].keys())

    def test_filter_multiple_features_present(self):
        qs = self._filter(
            data=self.profile_qs(disabled_features=['sms', 'apps']))
        ok_({'term': {'features.has_sms': True}}
            in qs['query']['filtered']['filter']['bool']['must_not'])
        ok_({'term': {'features.has_apps': True}}
            in qs['query']['filtered']['filter']['bool']['must_not'])
class TestSortingFilter(FilterTestsBase):
    # Exercises SortingFilter: the 'sort' querystring parameter.
    filter_classes = [SortingFilter]

    def test_sort(self):
        # Every API sort key maps to its ES field; a '-' prefix means
        # descending order.
        for api_sort, es_sort in SortingFilter.DEFAULT_SORTING.items():
            qs = self._filter(data={'sort': [api_sort]})
            if es_sort.startswith('-'):
                ok_({es_sort[1:]: {'order': 'desc'}} in qs['sort'], qs)
            else:
                eq_([es_sort], qs['sort'], qs)

    def test_sort_multiple(self):
        qs = self._filter(data={'sort': ['rating', 'created']})
        ok_({'bayesian_rating': {'order': 'desc'}} in qs['sort'])
        ok_({'created': {'order': 'desc'}} in qs['sort'])

    def test_sort_regional(self):
        """Popularity and trending use regional sorting for mature regions."""
        req = RequestFactory().get('/')
        req.REGION = mkt.regions.BRA
        # Default empty query searches use popularity.
        qs = self._filter(req)
        ok_({'popularity_%s'
             % mkt.regions.BRA.id: {'order': 'desc'}} in qs['sort'])
        # Popularity.
        req = RequestFactory().get('/', data={'sort': ['popularity']})
        req.REGION = mkt.regions.BRA
        qs = self._filter(req)
        ok_({'popularity_%s'
             % mkt.regions.BRA.id: {'order': 'desc'}} in qs['sort'])
        # Trending.
        req = RequestFactory().get('/', data={'sort': ['trending']})
        req.REGION = mkt.regions.BRA
        qs = self._filter(req)
        ok_({'trending_%s' % mkt.regions.BRA.id: {'order': 'desc'}}
            in qs['sort'])
class TestCombinedFilter(FilterTestsBase):
    """
    Basic test to ensure that when filters are combined they result in the
    expected query structure.
    """
    filter_classes = [SearchQueryFilter, PublicSearchFormFilter,
                      PublicAppsFilter, SortingFilter]

    def test_combined(self):
        qs = self._filter(data={'q': 'test', 'cat': 'games',
                                'sort': 'trending'})
        # The text query must end up wrapped inside the filtered query.
        ok_(qs['query']['filtered']['query']['function_score'])
        ok_(qs['query']['filtered']['filter'])

        must = qs['query']['filtered']['filter']['bool']['must']
        ok_({'terms': {'category': ['games']}} in must)
        # 4: presumably mkt.STATUS_PUBLIC (cf. TestPublicAppsFilter) — confirm.
        ok_({'term': {'status': 4}} in must)
        ok_({'term': {'is_disabled': False}} in must)

        ok_({'trending': {'order': 'desc'}} in qs['sort'])

        query = qs['query']['filtered']['query']
        ok_({'field_value_factor': {'field': 'boost'}}
            in query['function_score']['functions'])
        ok_({'match': {'name_english': {'boost': 2.5, 'query': u'test'}}}
            in query['function_score']['query']['bool']['should'])
| 39.737245
| 79
| 0.560506
| 1,748
| 15,577
| 4.81865
| 0.16476
| 0.056987
| 0.065535
| 0.089754
| 0.480708
| 0.416123
| 0.375163
| 0.327081
| 0.288496
| 0.237089
| 0
| 0.002323
| 0.253964
| 15,577
| 391
| 80
| 39.838875
| 0.722485
| 0.05521
| 0
| 0.314879
| 0
| 0
| 0.2099
| 0.00588
| 0
| 0
| 0
| 0
| 0.00346
| 1
| 0.138408
| false
| 0
| 0.048443
| 0.00346
| 0.262976
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0f8b6be8671efa3ab8fb691c490862ecc07081d
| 668
|
py
|
Python
|
noxfile.py
|
sethmlarson/workplace-search-python
|
0680ce7144fc0608d3d8c336315ffaf7ddc3ca2d
|
[
"Apache-2.0"
] | 5
|
2020-03-05T16:37:35.000Z
|
2021-02-26T03:44:09.000Z
|
noxfile.py
|
sethmlarson/workplace-search-python
|
0680ce7144fc0608d3d8c336315ffaf7ddc3ca2d
|
[
"Apache-2.0"
] | 1
|
2019-01-08T20:10:16.000Z
|
2019-01-08T20:10:16.000Z
|
noxfile.py
|
sethmlarson/workplace-search-python
|
0680ce7144fc0608d3d8c336315ffaf7ddc3ca2d
|
[
"Apache-2.0"
] | 1
|
2020-04-22T18:20:26.000Z
|
2020-04-22T18:20:26.000Z
|
import nox
# Paths formatted by the `blacken` session and checked by the `lint` session.
SOURCE_FILES = (
    "setup.py",
    "noxfile.py",
    "elastic_workplace_search/",
    "tests/",
)
@nox.session(python=["2.7", "3.4", "3.5", "3.6", "3.7", "3.8"])
def test(session):
    """Run the test suite on each supported Python version."""
    session.install(".")
    session.install("-r", "dev-requirements.txt")
    # NOTE(review): --record-mode=none presumably replays pre-recorded HTTP
    # cassettes only (pytest-recording/vcr) — confirm against dev-requirements.
    session.run("pytest", "--record-mode=none", "tests/")
@nox.session()
def blacken(session):
    """Reformat the code base with black, then re-run the linter."""
    session.install("black")
    session.run("black", *SOURCE_FILES)
    # Lint immediately so any remaining issue surfaces in the same run.
    lint(session)
@nox.session()
def lint(session):
    """Verify black formatting and run flake8 static checks."""
    session.install("flake8", "black")
    session.run("black", "--check", *SOURCE_FILES)
    # E/W = pycodestyle errors/warnings, F = pyflakes; 88 matches black's
    # default line length.
    session.run("flake8", "--select=E,W,F", "--max-line-length=88", *SOURCE_FILES)
| 20.242424
| 82
| 0.609281
| 88
| 668
| 4.556818
| 0.511364
| 0.109726
| 0.157107
| 0.099751
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028419
| 0.157186
| 668
| 32
| 83
| 20.875
| 0.683837
| 0
| 0
| 0.090909
| 0
| 0
| 0.288922
| 0.037425
| 0
| 0
| 0
| 0
| 0
| 1
| 0.136364
| false
| 0
| 0.045455
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0fb958c05e67ba3756a0924303bc1ac81028564
| 644
|
py
|
Python
|
challenges/python-solutions/day-25.py
|
elifloresch/thirty-days-challenge
|
d3d41f5ce8cc4155ebf9cf52c1ece43c15a1e2af
|
[
"MIT"
] | null | null | null |
challenges/python-solutions/day-25.py
|
elifloresch/thirty-days-challenge
|
d3d41f5ce8cc4155ebf9cf52c1ece43c15a1e2af
|
[
"MIT"
] | null | null | null |
challenges/python-solutions/day-25.py
|
elifloresch/thirty-days-challenge
|
d3d41f5ce8cc4155ebf9cf52c1ece43c15a1e2af
|
[
"MIT"
] | null | null | null |
import math
def is_prime_number(number):
    """Return True if ``number`` is prime, False otherwise.

    Trial division: reject multiples of 2 and 3, then test candidate
    divisors of the form 6k - 1 and 6k + 1 up to the square root.

    Bug fix: the original iterated ``range(6, int(math.sqrt(number)) + 1, 6)``
    and tested ``d - 1`` / ``d + 1``; that upper bound excluded divisor pairs
    at the square root, so composites such as 25, 35 and 121 were reported
    prime. Using ``i * i <= number`` covers the full range and avoids float
    sqrt rounding altogether.
    """
    if number < 2:
        return False
    if number == 2 or number == 3:
        return True
    if number % 2 == 0 or number % 3 == 0:
        return False
    # i walks 5, 11, 17, ... ; i and i + 2 are the 6k -/+ 1 candidates.
    i = 5
    while i * i <= number:
        if number % i == 0 or number % (i + 2) == 0:
            return False
        i += 6
    return True
# Read the number of test cases, then one integer per line; after all input
# is consumed, report primality of each value in order.
test_cases = int(input())
numbers = [int(input()) for _ in range(test_cases)]
for n in numbers:
    print('Prime' if is_prime_number(n) else 'Not prime')
| 19.515152
| 58
| 0.57764
| 96
| 644
| 3.739583
| 0.333333
| 0.089136
| 0.075209
| 0.050139
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.03139
| 0.307453
| 644
| 32
| 59
| 20.125
| 0.773543
| 0
| 0
| 0.217391
| 0
| 0
| 0.021739
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0.043478
| 0
| 0.304348
| 0.086957
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0fbf814aa561afb0f3e8aefc0b444cab5d08bda
| 1,265
|
py
|
Python
|
examples/path_config.py
|
rnixx/garden.cefpython
|
91d5f69e9983a28ce1971637d7d2f0051c456882
|
[
"MIT"
] | 13
|
2017-02-10T12:07:29.000Z
|
2021-12-15T02:07:07.000Z
|
examples/path_config.py
|
Informatic/garden.cefpython
|
b7a03d31fd18a32a44ae293d4101b4cf7608795b
|
[
"MIT"
] | 22
|
2015-02-13T09:58:30.000Z
|
2015-06-12T08:55:20.000Z
|
examples/path_config.py
|
Informatic/garden.cefpython
|
b7a03d31fd18a32a44ae293d4101b4cf7608795b
|
[
"MIT"
] | 12
|
2017-05-03T01:18:31.000Z
|
2021-10-01T06:57:41.000Z
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Minimal example of the CEFBrowser widget use. Here you don't have any controls
(back / forth / reload) or whatsoever. Just a kivy app displaying the
chromium-webview.
In this example we demonstrate how the cache path of CEF can be set.
"""
import os
from kivy.app import App
from kivy.garden.cefpython import CEFBrowser
from kivy.logger import Logger
if __name__ == '__main__':
    class SimpleBrowserApp(App):
        """Minimal kivy app showing one CEFBrowser widget with custom paths."""

        def build(self):
            # Set runtime data paths
            CEFBrowser.set_data_path(os.path.realpath("./cef_data"))
            # Optional per-category overrides; presumably they default to
            # locations derived from the data path — confirm in CEFBrowser.
            # CEFBrowser.set_caches_path(os.path.realpath("./cef_caches"))
            # CEFBrowser.set_cookies_path(os.path.realpath("./cef_cookies"))
            # CEFBrowser.set_logs_path(os.path.realpath("./cef_logs"))
            Logger.info("Example: The CEF pathes have been set to")
            Logger.info("- Cache %s", CEFBrowser._caches_path)
            Logger.info("- Cookies %s", CEFBrowser._cookies_path)
            Logger.info("- Logs %s", CEFBrowser._logs_path)
            # Create CEFBrowser instance. Go to test-site.
            cb = CEFBrowser(url="http://jegger.ch/datapool/app/test.html")
            return cb

    SimpleBrowserApp().run()
| 34.189189
| 78
| 0.656917
| 167
| 1,265
| 4.820359
| 0.502994
| 0.064596
| 0.049689
| 0.089441
| 0.104348
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00102
| 0.225296
| 1,265
| 36
| 79
| 35.138889
| 0.820408
| 0.416601
| 0
| 0
| 0
| 0
| 0.176796
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.266667
| 0
| 0.466667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0fc122c2f5c222700dce3588b9faccba2d8800b
| 312
|
py
|
Python
|
simple-systems/and_xor_shift.py
|
laserbat/random-projects
|
925f94f80299df6f16e91975e89f5fff7df20005
|
[
"WTFPL"
] | 3
|
2019-04-14T12:29:10.000Z
|
2020-02-26T22:27:04.000Z
|
simple-systems/and_xor_shift.py
|
laserbat/random-projects
|
925f94f80299df6f16e91975e89f5fff7df20005
|
[
"WTFPL"
] | null | null | null |
simple-systems/and_xor_shift.py
|
laserbat/random-projects
|
925f94f80299df6f16e91975e89f5fff7df20005
|
[
"WTFPL"
] | 1
|
2020-06-08T22:12:16.000Z
|
2020-06-08T22:12:16.000Z
|
#!/usr/bin/python3
# If F(a) is any function that can be defined as a composition of bitwise
# XORs, ANDs and left shifts, then the dynamical system x_(n+1) = F(x_n) is
# Turing complete.
# Proof by simulation (rule110): each iteration applies one step of the
# Rule 110 cellular automaton to the bit string of `a`.
a = 1  # initial state: a single live cell
# NOTE(review): `a` only ever grows here, so this loop never terminates;
# it prints each successive state forever.
while a:
    print(bin(a))
    a = a ^ (a << 1) ^ (a & (a << 1)) ^ (a & (a << 1) & (a << 2))
| 26
| 98
| 0.589744
| 58
| 312
| 3.137931
| 0.655172
| 0.054945
| 0.049451
| 0.065934
| 0.054945
| 0.054945
| 0.054945
| 0
| 0
| 0
| 0
| 0.042373
| 0.24359
| 312
| 11
| 99
| 28.363636
| 0.728814
| 0.647436
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.25
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0fc7fc48d25fb30b40c2e42b598b6eff6d50954
| 5,543
|
py
|
Python
|
trinity/protocol/common/peer_pool_event_bus.py
|
Gauddel/trinity
|
0b12943ac36f4090abc22fc965e9e9a4f42c6f35
|
[
"MIT"
] | null | null | null |
trinity/protocol/common/peer_pool_event_bus.py
|
Gauddel/trinity
|
0b12943ac36f4090abc22fc965e9e9a4f42c6f35
|
[
"MIT"
] | null | null | null |
trinity/protocol/common/peer_pool_event_bus.py
|
Gauddel/trinity
|
0b12943ac36f4090abc22fc965e9e9a4f42c6f35
|
[
"MIT"
] | null | null | null |
from abc import (
abstractmethod,
)
from typing import (
Any,
Callable,
cast,
FrozenSet,
Generic,
Type,
TypeVar,
)
from cancel_token import (
CancelToken,
)
from p2p.exceptions import (
PeerConnectionLost,
)
from p2p.kademlia import Node
from p2p.peer import (
BasePeer,
PeerSubscriber,
)
from p2p.peer_pool import (
BasePeerPool,
)
from p2p.protocol import (
Command,
PayloadType,
)
from p2p.service import (
BaseService,
)
from trinity.endpoint import (
TrinityEventBusEndpoint,
)
from .events import (
ConnectToNodeCommand,
DisconnectPeerEvent,
HasRemoteEvent,
PeerCountRequest,
PeerCountResponse,
)
# Peer type handled by a concrete event server; bound so generic code can
# rely on the BasePeer API.
TPeer = TypeVar('TPeer', bound=BasePeer)
# Event types streamed off the bus; HasRemoteEvent guarantees a `.remote`
# attribute (see get_peer usage below).
TStreamEvent = TypeVar('TStreamEvent', bound=HasRemoteEvent)
class PeerPoolEventServer(BaseService, PeerSubscriber, Generic[TPeer]):
    """
    Base class to create a bridge between the ``PeerPool`` and the event bus so that peer
    messages become available to external processes (e.g. isolated plugins). In the opposite
    direction, other processes can also retrieve information or execute actions on the peer pool by
    sending specific events through the event bus that the ``PeerPoolEventServer`` answers.

    This class bridges all common APIs but protocol specific communication can be enabled through
    subclasses that add more handlers.
    """

    msg_queue_maxsize: int = 2000
    # Peer message types to subscribe to; subclasses override with a
    # non-empty set to receive native peer messages.
    subscription_msg_types: FrozenSet[Type[Command]] = frozenset({})

    def __init__(self,
                 event_bus: TrinityEventBusEndpoint,
                 peer_pool: BasePeerPool,
                 token: CancelToken = None) -> None:
        super().__init__(token)
        self.peer_pool = peer_pool
        self.event_bus = event_bus

    async def _run(self) -> None:
        """Start all daemon handlers, then block until the service is cancelled."""
        self.logger.debug("Running %s", self.__class__.__name__)

        self.run_daemon_event(
            DisconnectPeerEvent,
            lambda peer, event: peer.disconnect_nowait(event.reason)
        )

        self.run_daemon_task(self.handle_peer_count_requests())
        self.run_daemon_task(self.handle_connect_to_node_requests())
        self.run_daemon_task(self.handle_native_peer_messages())

        await self.cancellation()

    def run_daemon_event(self,
                         event_type: Type[TStreamEvent],
                         event_handler_fn: Callable[[TPeer, TStreamEvent], Any]) -> None:
        """
        Register a handler to be run every time that an event of type ``event_type`` appears.
        """
        self.run_daemon_task(self.handle_stream(event_type, event_handler_fn))

    @abstractmethod
    async def handle_native_peer_message(self,
                                         remote: Node,
                                         cmd: Command,
                                         msg: PayloadType) -> None:
        """
        Process every native peer message. Subclasses should overwrite this to forward specific
        peer messages on the event bus. The handler is called for every message that is defined in
        ``self.subscription_msg_types``.
        """
        pass

    def get_peer(self, remote: Node) -> TPeer:
        """
        Look up and return a peer from the ``PeerPool`` that matches the given node.
        Raise ``PeerConnectionLost`` if the peer is no longer in the pool or is winding down.
        """
        try:
            peer = self.peer_pool.connected_nodes[remote]
        except KeyError:
            self.logger.debug("Peer with remote %s does not exist in the pool anymore", remote)
            raise PeerConnectionLost()
        else:
            if not peer.is_operational:
                self.logger.debug("Peer %s is not operational when selecting from pool", peer)
                raise PeerConnectionLost()
            else:
                return cast(TPeer, peer)

    async def handle_connect_to_node_requests(self) -> None:
        """Connect the pool to nodes requested via ``ConnectToNodeCommand``."""
        async for command in self.wait_iter(self.event_bus.stream(ConnectToNodeCommand)):
            self.logger.debug('Received request to connect to %s', command.remote)
            self.run_task(self.peer_pool.connect_to_node(command.remote))

    async def handle_peer_count_requests(self) -> None:
        """Answer ``PeerCountRequest`` events with the current pool size."""
        async for req in self.wait_iter(self.event_bus.stream(PeerCountRequest)):
            await self.event_bus.broadcast(
                PeerCountResponse(len(self.peer_pool)),
                req.broadcast_config()
            )

    async def handle_stream(self,
                            event_type: Type[TStreamEvent],
                            event_handler_fn: Callable[[TPeer, TStreamEvent], Any]) -> None:
        """Run ``event_handler_fn`` for each streamed event whose peer is
        still in the pool; events for vanished peers are silently dropped."""
        async for event in self.wait_iter(self.event_bus.stream(event_type)):
            try:
                peer = self.get_peer(event.remote)
            except PeerConnectionLost:
                # Peer disappeared between event emission and handling.
                pass
            else:
                event_handler_fn(peer, event)

    async def handle_native_peer_messages(self) -> None:
        """Drain the subscriber message queue, dispatching each message to
        ``handle_native_peer_message``."""
        with self.subscribe(self.peer_pool):
            while self.is_operational:
                peer, cmd, msg = await self.wait(self.msg_queue.get())
                await self.handle_native_peer_message(peer.remote, cmd, msg)
class DefaultPeerPoolEventServer(PeerPoolEventServer[BasePeer]):
    """Concrete event server that forwards no protocol specific peer messages."""

    async def handle_native_peer_message(self,
                                         remote: Node,
                                         cmd: Command,
                                         msg: PayloadType) -> None:
        # subscription_msg_types is empty on the base class, so nothing to
        # forward here.
        pass
| 33.391566
| 99
| 0.625293
| 612
| 5,543
| 5.485294
| 0.303922
| 0.023831
| 0.021448
| 0.020256
| 0.191838
| 0.166816
| 0.137623
| 0.116771
| 0.088174
| 0.088174
| 0
| 0.002569
| 0.297853
| 5,543
| 165
| 100
| 33.593939
| 0.859969
| 0.13296
| 0
| 0.20339
| 0
| 0
| 0.036954
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025424
| false
| 0.025424
| 0.09322
| 0
| 0.161017
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0fe475b8134a31f2b77a708e5769cd268cfc749
| 18,488
|
py
|
Python
|
tests/e2e/performance/csi_tests/test_pvc_creation_deletion_performance.py
|
annagitel/ocs-ci
|
284fe04aeb6e3d6cb70c99e65fec8ff1b1ea1dd5
|
[
"MIT"
] | 1
|
2019-09-17T08:38:05.000Z
|
2019-09-17T08:38:05.000Z
|
tests/e2e/performance/csi_tests/test_pvc_creation_deletion_performance.py
|
annagitel/ocs-ci
|
284fe04aeb6e3d6cb70c99e65fec8ff1b1ea1dd5
|
[
"MIT"
] | 1
|
2021-08-30T20:06:00.000Z
|
2021-09-30T20:05:46.000Z
|
tests/e2e/performance/csi_tests/test_pvc_creation_deletion_performance.py
|
annagitel/ocs-ci
|
284fe04aeb6e3d6cb70c99e65fec8ff1b1ea1dd5
|
[
"MIT"
] | 2
|
2019-09-17T10:04:14.000Z
|
2022-02-07T16:36:49.000Z
|
"""
Test to verify performance of PVC creation and deletion
for RBD, CephFS and RBD-Thick interfaces
"""
import time
import logging
import datetime
import pytest
import ocs_ci.ocs.exceptions as ex
import threading
import statistics
from concurrent.futures import ThreadPoolExecutor
from uuid import uuid4
from ocs_ci.framework.testlib import performance
from ocs_ci.ocs.perftests import PASTest
from ocs_ci.helpers import helpers, performance_lib
from ocs_ci.ocs import constants
from ocs_ci.helpers.helpers import get_full_test_logs_path
from ocs_ci.ocs.perfresult import PerfResult
from ocs_ci.framework import config
log = logging.getLogger(__name__)
class ResultsAnalyse(PerfResult):
    """
    This class generates results for all tests as one unit
    and saves them to an elastic search server on the cluster
    """

    def __init__(self, uuid, crd, full_log_path):
        """
        Initialize the object by reading some of the data from the CRD file and
        by connecting to the ES server and read all results from it.

        Args:
            uuid (str): the unique uid of the test
            crd (dict): dictionary with test parameters - the test yaml file
                that modify it in the test itself.
            full_log_path (str): the path of the results files to be found
        """
        super(ResultsAnalyse, self).__init__(uuid, crd)
        # Elasticsearch index that the PVC create/delete results are written to.
        self.new_index = "pvc_create_delete_fullres"
        self.full_log_path = full_log_path
        # make sure we have connection to the elastic search server
        self.es_connect()
@performance
class TestPVCCreationDeletionPerformance(PASTest):
"""
Test to verify performance of PVC creation and deletion
"""
def setup(self):
"""
Setting up test parameters
"""
log.info("Starting the test setup")
super(TestPVCCreationDeletionPerformance, self).setup()
self.benchmark_name = "PVC_Creation-Deletion"
self.uuid = uuid4().hex
self.crd_data = {
"spec": {
"test_user": "Homer simpson",
"clustername": "test_cluster",
"elasticsearch": {
"server": config.PERF.get("production_es_server"),
"port": config.PERF.get("production_es_port"),
"url": f"http://{config.PERF.get('production_es_server')}:{config.PERF.get('production_es_port')}",
},
}
}
if self.dev_mode:
self.crd_data["spec"]["elasticsearch"] = {
"server": config.PERF.get("dev_es_server"),
"port": config.PERF.get("dev_es_port"),
"url": f"http://{config.PERF.get('dev_es_server')}:{config.PERF.get('dev_es_port')}",
}
@pytest.fixture()
def base_setup(self, interface_type, storageclass_factory, pod_factory):
"""
A setup phase for the test
Args:
interface_type: A fixture to iterate over ceph interfaces
storageclass_factory: A fixture to create everything needed for a
storageclass
pod_factory: A fixture to create new pod
"""
self.interface = interface_type
if self.interface == constants.CEPHBLOCKPOOL_THICK:
self.sc_obj = storageclass_factory(
interface=constants.CEPHBLOCKPOOL,
new_rbd_pool=True,
rbd_thick_provision=True,
)
else:
self.sc_obj = storageclass_factory(self.interface)
self.pod_factory = pod_factory
@pytest.fixture()
def namespace(self, project_factory):
"""
Create a new project
"""
proj_obj = project_factory()
self.namespace = proj_obj.namespace
def init_full_results(self, full_results):
"""
Initialize the full results object which will send to the ES server
Args:
full_results (obj): an empty FIOResultsAnalyse object
Returns:
FIOResultsAnalyse (obj): the input object fill with data
"""
for key in self.environment:
full_results.add_key(key, self.environment[key])
full_results.add_key("storageclass", self.sc)
full_results.add_key("index", full_results.new_index)
return full_results
@pytest.mark.parametrize(
argnames=["interface_type", "pvc_size"],
argvalues=[
pytest.param(
*[constants.CEPHBLOCKPOOL, "5Gi"],
marks=[pytest.mark.performance],
),
pytest.param(
*[constants.CEPHBLOCKPOOL, "15Gi"],
marks=[pytest.mark.performance],
),
pytest.param(
*[constants.CEPHBLOCKPOOL, "25Gi"],
marks=[pytest.mark.performance],
),
pytest.param(
*[constants.CEPHFILESYSTEM, "5Gi"],
marks=[pytest.mark.performance],
),
pytest.param(
*[constants.CEPHFILESYSTEM, "15Gi"],
marks=[pytest.mark.performance],
),
pytest.param(
*[constants.CEPHFILESYSTEM, "25Gi"],
marks=[pytest.mark.performance],
),
pytest.param(
*[constants.CEPHBLOCKPOOL_THICK, "5Gi"],
marks=[pytest.mark.performance_extended],
),
pytest.param(
*[constants.CEPHBLOCKPOOL_THICK, "15Gi"],
marks=[pytest.mark.performance_extended],
),
pytest.param(
*[constants.CEPHBLOCKPOOL_THICK, "25Gi"],
marks=[pytest.mark.performance_extended],
),
],
)
@pytest.mark.usefixtures(base_setup.__name__)
def test_pvc_creation_deletion_measurement_performance(
self, teardown_factory, pvc_size
):
"""
Measuring PVC creation and deletion times for pvc samples
Verifying that those times are within the required limits
"""
# Getting the full path for the test logs
self.full_log_path = get_full_test_logs_path(cname=self)
if self.interface == constants.CEPHBLOCKPOOL:
self.sc = "RBD"
elif self.interface == constants.CEPHFILESYSTEM:
self.sc = "CephFS"
elif self.interface == constants.CEPHBLOCKPOOL_THICK:
self.sc = "RBD-Thick"
self.full_log_path += f"-{self.sc}-{pvc_size}"
log.info(f"Logs file path name is : {self.full_log_path}")
self.start_time = time.strftime("%Y-%m-%dT%H:%M:%SGMT", time.gmtime())
self.get_env_info()
# Initialize the results doc file.
self.full_results = self.init_full_results(
ResultsAnalyse(self.uuid, self.crd_data, self.full_log_path)
)
self.full_results.add_key("pvc_size", pvc_size)
num_of_samples = 5
accepted_creation_time = (
600 if self.interface == constants.CEPHBLOCKPOOL_THICK else 1
)
# accepted deletion time for RBD is 1 sec, for CephFS is 2 secs and for RBD Thick is 5 secs
if self.interface == constants.CEPHFILESYSTEM:
accepted_deletion_time = 2
elif self.interface == constants.CEPHBLOCKPOOL:
accepted_deletion_time = 1
else:
accepted_deletion_time = 5
self.full_results.add_key("samples", num_of_samples)
accepted_creation_deviation_percent = 50
accepted_deletion_deviation_percent = 50
creation_time_measures = []
deletion_time_measures = []
msg_prefix = f"Interface: {self.interface}, PVC size: {pvc_size}."
for i in range(num_of_samples):
logging.info(f"{msg_prefix} Start creating PVC number {i + 1}.")
start_time = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
pvc_obj = helpers.create_pvc(sc_name=self.sc_obj.name, size=pvc_size)
timeout = 600 if self.interface == constants.CEPHBLOCKPOOL_THICK else 60
helpers.wait_for_resource_state(
pvc_obj, constants.STATUS_BOUND, timeout=timeout
)
pvc_obj.reload()
creation_time = performance_lib.measure_pvc_creation_time(
self.interface, pvc_obj.name, start_time
)
logging.info(
f"{msg_prefix} PVC number {i + 1} was created in {creation_time} seconds."
)
if creation_time > accepted_creation_time:
raise ex.PerformanceException(
f"{msg_prefix} PVC creation time is {creation_time} and is greater than "
f"{accepted_creation_time} seconds."
)
creation_time_measures.append(creation_time)
pv_name = pvc_obj.backed_pv
pvc_reclaim_policy = pvc_obj.reclaim_policy
pod_obj = self.write_file_on_pvc(pvc_obj)
pod_obj.delete(wait=True)
teardown_factory(pvc_obj)
logging.info(f"{msg_prefix} Start deleting PVC number {i + 1}")
if pvc_reclaim_policy == constants.RECLAIM_POLICY_DELETE:
pvc_obj.delete()
pvc_obj.ocp.wait_for_delete(pvc_obj.name)
helpers.validate_pv_delete(pvc_obj.backed_pv)
deletion_time = helpers.measure_pvc_deletion_time(
self.interface, pv_name
)
logging.info(
f"{msg_prefix} PVC number {i + 1} was deleted in {deletion_time} seconds."
)
if deletion_time > accepted_deletion_time:
raise ex.PerformanceException(
f"{msg_prefix} PVC deletion time is {deletion_time} and is greater than "
f"{accepted_deletion_time} seconds."
)
deletion_time_measures.append(deletion_time)
else:
logging.info(
f"Reclaim policy of the PVC {pvc_obj.name} is not Delete;"
f" therefore not measuring deletion time for this PVC."
)
creation_average = self.process_time_measurements(
"creation",
creation_time_measures,
accepted_creation_deviation_percent,
msg_prefix,
)
self.full_results.add_key("creation-time", creation_average)
deletion_average = self.process_time_measurements(
"deletion",
deletion_time_measures,
accepted_deletion_deviation_percent,
msg_prefix,
)
self.full_results.add_key("deletion-time", deletion_average)
self.full_results.all_results["creation"] = creation_time_measures
self.full_results.all_results["deletion"] = deletion_time_measures
self.end_time = time.strftime("%Y-%m-%dT%H:%M:%SGMT", time.gmtime())
self.full_results.add_key(
"test_time", {"start": self.start_time, "end": self.end_time}
)
self.full_results.es_write()
log.info(f"The Result can be found at : {self.full_results.results_link()}")
def process_time_measurements(
    self, action_name, time_measures, accepted_deviation_percent, msg_prefix
):
    """
    Analyse the given time measurements and report their spread.

    Computes the mean of the samples and, for thick-provisioned RBD only,
    the standard deviation as a percentage of the mean. Note that an
    excessive deviation is only logged as an error here -- the test is NOT
    failed by this method.

    Args:
        action_name (str): Name of the action for which these measurements
            were collected; used for the logging
        time_measures (list of floats): A list of time measurements
        accepted_deviation_percent (int): Accepted deviation percent to which
            computed standard deviation may be compared
        msg_prefix (str): A string for comprehensive logging

    Returns:
        (float) The average value of the provided time measurements
    """
    average = statistics.mean(time_measures)
    log.info(
        f"{msg_prefix} The average {action_name} time for the sampled {len(time_measures)} "
        f"PVCs is {average} seconds."
    )
    # Deviation analysis is only performed for thick-provisioned RBD PVCs.
    if self.interface == constants.CEPHBLOCKPOOL_THICK:
        st_deviation = statistics.stdev(time_measures)
        # NOTE(review): divides by the mean -- raises ZeroDivisionError if all
        # samples are 0; confirm measured times are always positive.
        st_deviation_percent = st_deviation / average * 100.0
        if st_deviation_percent > accepted_deviation_percent:
            log.error(
                f"{msg_prefix} The standard deviation percent for {action_name} of {len(time_measures)} sampled "
                f"PVCs is {st_deviation_percent}% which is bigger than accepted {accepted_deviation_percent}."
            )
        else:
            log.info(
                f"{msg_prefix} The standard deviation percent for {action_name} of {len(time_measures)} sampled "
                f"PVCs is {st_deviation_percent}% and is within the accepted range."
            )
        # Persist the deviation next to the averages in the results document.
        self.full_results.add_key(
            f"{action_name}_deviation_pct", st_deviation_percent
        )
    return average
def write_file_on_pvc(self, pvc_obj, filesize=1):
    """
    Writes a file on given PVC by running fio inside a newly created pod.

    Args:
        pvc_obj: PVC object to write a file on
        filesize (int or float): size of file to write, in GB
            (default is 1 GB)

    Returns:
        Pod on this pvc on which the file was written

    Raises:
        AssertionError: if fio reports a non-zero error count
    """
    pod_obj = self.pod_factory(
        interface=self.interface, pvc=pvc_obj, status=constants.STATUS_RUNNING
    )
    # Convert the GB argument into the "<n>M" size string fio expects.
    file_size = f"{int(filesize * 1024)}M"
    log.info(f"Starting IO on the POD {pod_obj.name}")
    # Going to run only write IO
    pod_obj.fillup_fs(size=file_size, fio_filename=f"{pod_obj.name}_file")
    # Wait for the fio to finish
    fio_result = pod_obj.get_fio_results()
    err_count = fio_result.get("jobs")[0].get("error")
    assert (
        err_count == 0
    ), f"IO error on pod {pod_obj.name}. FIO result: {fio_result}"
    log.info("IO on the PVC has finished")
    return pod_obj
@pytest.mark.parametrize(
    argnames=["interface_type"],
    argvalues=[
        pytest.param(
            *[constants.CEPHBLOCKPOOL],
            marks=[pytest.mark.performance],
        ),
        pytest.param(
            *[constants.CEPHFILESYSTEM],
            marks=[pytest.mark.performance],
        ),
        pytest.param(
            *[constants.CEPHBLOCKPOOL_THICK],
            marks=[pytest.mark.performance_extended],
        ),
    ],
)
@pytest.mark.usefixtures(base_setup.__name__)
@pytest.mark.usefixtures(namespace.__name__)
@pytest.mark.polarion_id("OCS-2618")
def test_multiple_pvc_deletion_measurement_performance(self, teardown_factory):
    """
    Measure the bulk deletion time of 120 PVCs.

    Creates 120 PVCs in one burst, writes data on each, deletes them all,
    and fails if any single PVC deletion exceeds the accepted threshold of
    ``number_of_pvcs * 2`` seconds.

    Args:
        teardown_factory: A fixture used when we want a new resource that
            was created during the tests to be removed in the teardown phase.
    """
    number_of_pvcs = 120
    pvc_size = "1Gi"
    msg_prefix = f"Interface: {self.interface}, PVC size: {pvc_size}."
    log.info(f"{msg_prefix} Start creating new 120 PVCs")
    pvc_objs, _ = helpers.create_multiple_pvcs(
        sc_name=self.sc_obj.name,
        namespace=self.namespace,
        number_of_pvc=number_of_pvcs,
        size=pvc_size,
        burst=True,
    )
    # Register every PVC for teardown and refresh its state.
    for pvc_obj in pvc_objs:
        pvc_obj.reload()
        teardown_factory(pvc_obj)
    # Thick-provisioned RBD volumes take much longer to reach Bound.
    timeout = 600 if self.interface == constants.CEPHBLOCKPOOL_THICK else 60
    with ThreadPoolExecutor(max_workers=5) as executor:
        for pvc_obj in pvc_objs:
            executor.submit(
                helpers.wait_for_resource_state,
                pvc_obj,
                constants.STATUS_BOUND,
                timeout=timeout,
            )
            executor.submit(pvc_obj.reload)
    # Write some data on each PVC so that deletion is not a no-op.
    pod_objs = []
    for pvc_obj in pvc_objs:
        pod_obj = self.write_file_on_pvc(pvc_obj, 0.3)
        pod_objs.append(pod_obj)
    # Refresh all PVC objects in parallel so name/backed_pv are current,
    # then collect the names required to fetch deletion time data from logs.
    threads = []
    for pvc_obj in pvc_objs:
        refresher = threading.Thread(target=pvc_obj.reload)
        refresher.start()
        threads.append(refresher)
    for refresher in threads:
        refresher.join()
    # BUGFIX: the original wrapped these appends in threading.Thread with
    # target=list.append(...) -- that *calls* append immediately and hands
    # the Thread a None target, so the extra threads did nothing. Plain
    # comprehensions are equivalent and honest.
    pvc_name_list = [pvc_obj.name for pvc_obj in pvc_objs]
    pv_name_list = [pvc_obj.backed_pv for pvc_obj in pvc_objs]
    log.info(f"{msg_prefix} Preparing to delete 120 PVC")
    # Delete the pods first (they pin the PVCs), then the PVCs themselves.
    for pvc_obj, pod_obj in zip(pvc_objs, pod_objs):
        pod_obj.delete(wait=True)
        pvc_obj.delete()
        pvc_obj.ocp.wait_for_delete(pvc_obj.name)
    # Get PVC deletion time
    pvc_deletion_time = helpers.measure_pv_deletion_time_bulk(
        interface=self.interface, pv_name_list=pv_name_list
    )
    log.info(
        f"{msg_prefix} {number_of_pvcs} bulk deletion time is {pvc_deletion_time}"
    )
    # accepted deletion time is 2 secs for each PVC
    accepted_pvc_deletion_time = number_of_pvcs * 2
    for del_time in pvc_deletion_time.values():
        if del_time > accepted_pvc_deletion_time:
            # BUGFIX: report the offending measurement instead of the
            # dict_values(...) repr the original interpolated.
            raise ex.PerformanceException(
                f"{msg_prefix} {number_of_pvcs} PVCs deletion time is {del_time} and is "
                f"greater than {accepted_pvc_deletion_time} seconds"
            )
    log.info(f"{msg_prefix} {number_of_pvcs} PVCs deletion times are:")
    for name, a_time in pvc_deletion_time.items():
        log.info(f"{name} deletion time is: {a_time} seconds")
| 38.119588
| 119
| 0.59855
| 2,140
| 18,488
| 4.938318
| 0.164019
| 0.039743
| 0.013248
| 0.029523
| 0.370553
| 0.293906
| 0.222937
| 0.205905
| 0.135503
| 0.100114
| 0
| 0.007208
| 0.317179
| 18,488
| 484
| 120
| 38.198347
| 0.829927
| 0.146906
| 0
| 0.274854
| 0
| 0
| 0.16204
| 0.020148
| 0
| 0
| 0
| 0
| 0.002924
| 1
| 0.026316
| false
| 0
| 0.046784
| 0
| 0.087719
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0fe959730942d4fbe3c43eb35ca77c0cc852bbc
| 1,233
|
py
|
Python
|
templates/t/searchresult_withnone.py
|
MikeBirdsall/food-log
|
5edc1fa515d5e2721e96afb7d2b437296903a31d
|
[
"MIT"
] | null | null | null |
templates/t/searchresult_withnone.py
|
MikeBirdsall/food-log
|
5edc1fa515d5e2721e96afb7d2b437296903a31d
|
[
"MIT"
] | 27
|
2017-07-01T19:20:48.000Z
|
2019-03-07T06:04:22.000Z
|
templates/t/searchresult_withnone.py
|
MikeBirdsall/food-log
|
5edc1fa515d5e2721e96afb7d2b437296903a31d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
from jinja2 import Environment, FileSystemLoader
def spacenone(value):
    """Render *value* as a string, mapping None to the empty string."""
    if value is None:
        return ""
    return str(value)
# Sample search-result rows used to exercise the template; None values
# stand in for nutrients that were not recorded for an entry.
results = [
    dict(
        description="Noodles and Company steak Stromboli",
        comment="",
        size="small",
        cals=530,
        carbs=50,
        fat=25,
        protein=27,
        score=30),
    dict(
        description="Steak sandwich",
        comment="",
        size="4 oz and bun",
        cals=480,
        carbs=44,
        fat=20,
        protein=27,
        score=30),
    dict(
        description="chipotle tacos",
        comment="Steak, no beans, gu...",
        size="",
        cals=285,
        carbs=None,
        fat=16,
        protein=None,
        score=30),
    dict(
        description="Steak Sandwich",
        comment="",
        size="",
        cals=380,
        carbs=45,
        fat=3.5,
        protein=34,
        score=30),
]
# Context handed to the template.
input_ = dict(
    title="Search for Courses",
    h1="Full Text Search: steak NOT shake",
    results=results,
)
# Templates live one directory up from this script; register the
# spacenone filter so None renders as blank instead of "None".
env = Environment(loader=FileSystemLoader(".."))
env.filters['spacenone'] = spacenone
template = env.get_template("searchresult.html")
output = template.render(input_)
print(output)
| 19.887097
| 58
| 0.544201
| 132
| 1,233
| 5.060606
| 0.583333
| 0.08982
| 0.049401
| 0.098802
| 0.197605
| 0.197605
| 0.137725
| 0.137725
| 0
| 0
| 0
| 0.052885
| 0.325223
| 1,233
| 61
| 59
| 20.213115
| 0.75
| 0.013788
| 0
| 0.294118
| 0
| 0
| 0.160626
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019608
| false
| 0
| 0.019608
| 0.019608
| 0.058824
| 0.019608
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0ff8f36b2a500b9d978be307fa6e00f7161603f
| 2,146
|
py
|
Python
|
payments/views.py
|
aman-roy/pune.pycon.org
|
f56cc948bd56767110d337c694ecbf5540bdf4b9
|
[
"MIT"
] | null | null | null |
payments/views.py
|
aman-roy/pune.pycon.org
|
f56cc948bd56767110d337c694ecbf5540bdf4b9
|
[
"MIT"
] | null | null | null |
payments/views.py
|
aman-roy/pune.pycon.org
|
f56cc948bd56767110d337c694ecbf5540bdf4b9
|
[
"MIT"
] | null | null | null |
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from payments.models import Invoice, RazorpayKeys
from payments.razorpay.razorpay_payments import RazorpayPayments
from payments.models import Payment, Order
import json
@csrf_exempt
def webhook(request):
    """
    Handle Razorpay invoice webhooks.

    Expects a POST whose JSON body carries ``payload.invoice.entity``,
    ``payload.order.entity`` and ``payload.payment.entity``. Updates the
    matching local Invoice status and stores the payment and order.

    NOTE(review): the webhook signature is not verified -- anyone who knows
    the URL can post a forged payload. Razorpay sends an
    ``X-Razorpay-Signature`` header that should be checked here.

    Returns:
        JsonResponse with a "message" key describing the outcome.
    """
    if request.method != 'POST':
        return JsonResponse({"message": "Method Not Allowed"})
    keys = RazorpayKeys.objects.first()
    payment = RazorpayPayments(keys.api_key, keys.api_secret)
    try:
        data = json.loads(request.body)
    except ValueError:
        # BUGFIX: malformed JSON used to escape as an unhandled
        # exception (HTTP 500); reject it like other invalid data.
        return JsonResponse({"message": "Invalid Data"})
    payload = data.get('payload') if isinstance(data, dict) else None
    # BUGFIX: the original only validated 'invoice' but then indexed
    # 'order' and 'payment' unchecked, so a partial payload raised KeyError.
    if not isinstance(payload, dict) or not all(
        key in payload and 'entity' in payload[key]
        for key in ('invoice', 'order', 'payment')
    ):
        return JsonResponse({"message": "Invalid Data"})
    invoice_entity = payload['invoice']['entity']
    order_entity = payload['order']['entity']
    payment_entity = payload['payment']['entity']
    invoice = Invoice.objects.get(invoice_id=invoice_entity['id'])
    invoice.status = invoice_entity['status']
    invoice.save()
    payment.save_payment(payment_entity)
    payment.save_order(order_entity)
    return JsonResponse({"message": "Success"})
def sync(request):
    """
    Reconcile local Invoice/Order/Payment rows with Razorpay.

    For every stored invoice, refresh its status from the API; for paid
    invoices, fetch and store the matching order and payment records if
    they are not present locally yet.
    """
    keys = RazorpayKeys.objects.first()
    client = RazorpayPayments(keys.api_key, keys.api_secret)
    for invoice in Invoice.objects.all():
        remote = client.fetch_invoices(invoice.invoice_id)
        invoice.status = remote['status']
        invoice.save()
        # Orders/payments only exist for paid invoices.
        if invoice.status != 'paid':
            continue
        order_id = remote['order_id']
        if len(Order.objects.filter(order_id=order_id)) == 0:
            client.save_order(client.fetch_orders(order_id))
        payment_id = remote['payment_id']
        if payment_id and len(Payment.objects.filter(payment_id=payment_id)) == 0:
            client.save_payment(client.fetch_payment(payment_id))
    return JsonResponse({"message": "synced"})
| 40.490566
| 91
| 0.66356
| 237
| 2,146
| 5.835443
| 0.248945
| 0.07086
| 0.072307
| 0.049892
| 0.107014
| 0.107014
| 0.107014
| 0.107014
| 0.107014
| 0.107014
| 0
| 0.0012
| 0.223672
| 2,146
| 53
| 92
| 40.490566
| 0.828932
| 0
| 0
| 0.136364
| 0
| 0
| 0.101537
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.136364
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0ff97e8d61ff585dcd9a0102ba24b2e2528bca2
| 6,541
|
py
|
Python
|
src/convnet/image_classifier.py
|
danschef/gear-detector
|
153d1031778f183ac38edf0532d2f266029c5ea7
|
[
"MIT"
] | 1
|
2020-07-15T20:12:55.000Z
|
2020-07-15T20:12:55.000Z
|
src/convnet/image_classifier.py
|
danschef/gear-detector
|
153d1031778f183ac38edf0532d2f266029c5ea7
|
[
"MIT"
] | null | null | null |
src/convnet/image_classifier.py
|
danschef/gear-detector
|
153d1031778f183ac38edf0532d2f266029c5ea7
|
[
"MIT"
] | null | null | null |
import configparser
import os
import sys
from time import localtime, strftime, mktime
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from net import Net
from geo_helper import store_image_bounds
from image_helper import CLASSES
from image_helper import save_image
from image_helper import test_set_loader
from image_helper import train_set_loader
from image_helper import validation_set_loader
CONFIG = configparser.ConfigParser()
CONFIG.read('./src/config.ini')
###########################################
# Training Stage
###########################################
def train(net, epochs=50, learning_rate=0.001):
    """
    Train *net* with SGD and cross-entropy loss on the training set loader.

    Args:
        net: torch module to optimize (modified in place).
        epochs (int): number of passes over the training set.
        learning_rate (float): SGD learning rate (momentum fixed at 0.9).

    Side effects:
        Appends validation accuracies to a timestamped log file under
        CONFIG['CNN Paths']['accuracy_log_path'].
    """
    start_time = strftime('%H:%M:%S', localtime())
    print(f"Started training at: {start_time}")
    datetime = strftime("%Y%m%d_%H%M", localtime())
    logfile = f"{CONFIG['CNN Paths']['accuracy_log_path']}/{datetime}.log"
    ###########################################
    # Loss Function
    ###########################################
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9)
    for epoch in range(epochs):  # loop over the dataset multiple times
        running_loss = 0.0
        for i, (images, labels) in enumerate(train_set_loader(), 0):
            # Wrap images and labels into Variables
            images, labels = Variable(images), Variable(labels)
            # Clear all accumulated gradients
            optimizer.zero_grad()
            # Predict classes using images from the test set
            outputs = net(images)
            # Compute the loss based on the predictions and actual labels
            loss = criterion(outputs, labels)
            # Backpropagate the loss
            loss.backward()
            # Adjust parameters according to the computed gradients
            optimizer.step()
            # print statistics
            running_loss += loss.item()
            if i % 100 == 99:  # print every 100 mini-batches
                # NOTE(review): validate() runs over the whole validation set
                # here, which is expensive inside the training loop -- confirm
                # this cadence is intentional.
                print('[%d, %5d] loss: %.3f, accuracy: %.3f' %
                      (epoch + 1, i + 1, running_loss / 100, validate(logfile, net)))
                running_loss = 0.0
    end_time = strftime('%H:%M:%S', localtime())
    print(f"Finished Training: {end_time}")
#####################################
# Validation stage
#####################################
def validate(logfile, net):
    """
    Run *net* over the validation set and return its per-batch accuracy.

    A batch counts as a hit when every label matches the first prediction
    of the batch. The accuracy is also appended to *logfile*.
    """
    hits = 0.0
    for batch_index, (images, labels) in enumerate(iter(validation_set_loader())):
        batch_outputs = net(Variable(images))
        _, predicted = torch.max(batch_outputs.data, 1)
        hits += 1 if (labels == predicted[0]).all() else 0
    accuracy = hits / (batch_index + 1)
    log_accuracy(logfile, accuracy)
    return accuracy
def log_accuracy(filename, accuracy):
    """Append *accuracy* as its own line to *filename*."""
    with open(filename, "a") as accuracy_log:
        accuracy_log.write(f"{accuracy}\n")
#####################################
# Prediction stage
#####################################
def predict(net):
    """
    Run *net* over the test set and tally predictions per class.

    Images classified as 'nets' are copied to a timestamped folder and
    their geo bounds are appended to a JSON log for later inspection.

    Args:
        net: a trained torch module.
    """
    print(f"Prediction started at: {strftime('%H:%M:%S', localtime())}")
    dataiter = iter(test_set_loader())
    # Per-class prediction counters.
    prediction_cnt = {
        'cloud': 0,
        'edge': 0,
        'land': 0,
        'nets': 0,
        'rock': 0,
        'vessel': 0,
        'water': 0
    }
    datetime = strftime("%Y%m%d_%H%M", localtime())
    prediction_log = f"{CONFIG['CNN Paths']['predicted_geodata_path']}/{datetime}.json"
    prediction_img_folder = f"{CONFIG['CNN Paths']['predicted_imagery_path']}/{datetime}"
    for idx, item in enumerate(dataiter):
        # Stop after the configured number of images.
        if idx > int(CONFIG['CNN Prediction']['batch_size']):
            break
        # Progress indicator: one dot per 100 images.
        if idx % 100 == 0:
            print('.', end='', flush=True)
        images, _labels = item
        ##########################################################
        # Feed the images into the CNN and check what it predicts
        ##########################################################
        outputs = net(Variable(images))
        _, predicted = torch.max(outputs.data, 1)
        # Save images from prediction for visual check
        if CLASSES[predicted[0]] == 'nets':
            # NOTE(review): _dataset is a private attribute of the loader
            # iterator -- may break with newer torch/torchvision versions.
            image_path = dataiter._dataset.imgs[idx][0]
            save_image(image_path, prediction_img_folder)
            store_image_bounds(image_path, prediction_log)
        prediction_cnt[CLASSES[predicted[0]]] += 1
    print(f"\nPrediction ended at: {strftime('%H:%M:%S', localtime())}")
    print(f"\nPredicted: {prediction_cnt}")
def model_full_path(path, checkpoint):
    """Build the on-disk model filename: "<path>_<checkpoint>.pt"."""
    return "{}_{}.pt".format(path, checkpoint)
################################################################
# Train network or use existing one for prediction
################################################################
def main(mode=''):
    """
    Train a network, resume training, or run prediction.

    The mode is taken from ``sys.argv[1]`` when present, otherwise from the
    ``mode`` argument. Epochs, learning rate, paths and checkpoints all come
    from the CONFIG ini file.

    Args:
        mode (str): one of 'predict', 'train' or 'resume'.
    """
    image_bands = int(CONFIG['CNN Training']['image_bands'])
    training_epochs = int(CONFIG['CNN Training']['epochs'])
    resume_epochs = int(CONFIG['CNN Resume Training']['epochs'])
    learning_rate = float(CONFIG['CNN Training']['learning_rate'])
    batch_size = CONFIG['CNN Prediction']['batch_size']
    # Command line overrides the default mode argument.
    if len(sys.argv) > 1:
        mode = sys.argv[1]
    net = Net(in_channels=image_bands)
    model_path = CONFIG['CNN Paths']['model_path']
    checkpoint = CONFIG['CNN Prediction']['checkpoint']
    # Use network for prediction
    if mode == 'predict' and os.path.exists(model_full_path(model_path, checkpoint)):
        print(f"Use trained network {checkpoint} for prediction of max {batch_size} images")
        # Load existing model
        model = torch.load(model_full_path(model_path, checkpoint))
        net.load_state_dict(model)
        predict(net)
    # Start training
    elif mode == 'train':
        print(f"Start network training for {training_epochs} epochs")
        train(net, training_epochs, learning_rate)
        # Save model after training, stamped with the current time
        checkpoint = strftime("%Y%m%d_%H%M", localtime())
        torch.save(net.state_dict(), model_full_path(model_path, checkpoint))
    # Resume training
    elif mode == 'resume':
        checkpoint = CONFIG['CNN Resume Training']['checkpoint']
        print(f"Resume training on Model {checkpoint} for {resume_epochs} epochs")
        # Load existing model and resume training
        model = torch.load(model_full_path(model_path, checkpoint))
        net.load_state_dict(model)
        train(net, resume_epochs, learning_rate)
        torch.save(net.state_dict(), model_full_path(model_path, checkpoint))
    else:
        print('No mode provided.')


# BUGFIX: guard the entry point so importing this module no longer
# triggers a training/prediction run as an import side effect.
if __name__ == "__main__":
    main()
| 31.599034
| 92
| 0.581257
| 748
| 6,541
| 4.941176
| 0.262032
| 0.029221
| 0.021104
| 0.028409
| 0.214286
| 0.186147
| 0.139069
| 0.126082
| 0.093615
| 0.093615
| 0
| 0.010439
| 0.223819
| 6,541
| 206
| 93
| 31.752427
| 0.71755
| 0.105947
| 0
| 0.135593
| 0
| 0
| 0.186847
| 0.038586
| 0
| 0
| 0
| 0
| 0
| 1
| 0.050847
| false
| 0
| 0.127119
| 0.008475
| 0.194915
| 0.09322
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
46004af1bf9a4f4788952ff849b76ab958f79e1c
| 3,035
|
py
|
Python
|
src/modules/AlphabetPlotter.py
|
aaanh/duplicated_accelcamp
|
7d4b60ace023bede907f8ed367ba492731a1951d
|
[
"FTL",
"CNRI-Python",
"RSA-MD"
] | null | null | null |
src/modules/AlphabetPlotter.py
|
aaanh/duplicated_accelcamp
|
7d4b60ace023bede907f8ed367ba492731a1951d
|
[
"FTL",
"CNRI-Python",
"RSA-MD"
] | 2
|
2021-05-21T16:31:41.000Z
|
2021-08-25T16:05:48.000Z
|
src/modules/AlphabetPlotter.py
|
aaanh/duplicated_accelcamp
|
7d4b60ace023bede907f8ed367ba492731a1951d
|
[
"FTL",
"CNRI-Python",
"RSA-MD"
] | null | null | null |
import tkinter as tk
from tkinter import filedialog
import csv
import matplotlib.pyplot as plt
# Hide the root Tk window; only the file-open dialog is wanted.
root = tk.Tk(screenName=':0.0')
root.withdraw()
file_path = filedialog.askopenfilename()
lastIndex = len(file_path.split('/')) - 1
# Initial velocity and position per axis (x, y, z).
v0 = [0, 0, 0]
x0 = [0, 0, 0]
# Conversion factor from file units to m/s^2 (9.81 when the data is in g).
fToA = 1
# Accelerations below this magnitude are treated as sensor noise.
error = 0.28
# Vertical-velocity threshold used later to zero x/y positions.
errorZ = 3
t = []
time = []
m = [[] for i in range(3)]
magnitude = [[] for i in range(3)]
# shift_x: first data column; shift_y: number of header rows to skip.
shift_x = 0
shift_y = 0
# The source app is encoded in the second dot-separated token of the file
# name; each app writes its CSV with a different layout.
# For when the data starts at (2,1)
if file_path.split('/')[lastIndex].split('.')[2] == "pocket":
    shift_x = 2
    shift_y = 1
    error = 0.3
    fToA = 1
# For when the data starts at (0,0)
elif file_path.split('/')[lastIndex].split('.')[2] == "pocket_mobile":
    shift_x = 0
    shift_y = 0
    error = 0.3
    fToA = 1
# For when the data starts at (1,0)
elif file_path.split('/')[lastIndex].split('.')[2] == "android":
    # NOTE(review): the comment above says (1,0) but shift_x=0/shift_y=1
    # skips one header row instead -- confirm which is intended.
    shift_x = 0
    shift_y = 1
    error = 0.02
    fToA = 9.81
    errorZ = 100
shift = 0
uselessboolean = True  # unused
# Read time and 3-axis acceleration columns, zeroing values below the
# noise threshold.
with open(file_path, 'r+') as csvfile:
    readCSV = csv.reader(csvfile, delimiter=',')
    for row in readCSV:
        if shift < shift_y:
            shift += 1
        else:
            t = row[shift_x]
            m[0] = row[1 + shift_x]
            m[1] = row[2 + shift_x]
            m[2] = row[3 + shift_x]
            time.append(float(t))
            for i in range(0, 3):
                magnitude[i].append(float(m[i]) if abs(float(m[i])) > error else 0)
# Convert to m/s^2 and subtract gravity from the z axis.
acceleration = [[(j * fToA) for j in i] for i in magnitude]
acceleration[2] = [i - 9.805 for i in acceleration[2]]
# Translates Data into Position
# Explicit Euler integration: acceleration -> velocity -> position.
velocity = [[0 for i in time] for i in range(3)]
position = [[0 for i in time] for i in range(3)]
for j in range(3):
    velocity[j][0] = v0[j]
    for i in range(1, len(time)):
        velocity[j][i] = velocity[j][i - 1] + acceleration[j][i - 1] * (time[i] - time[i - 1])
for j in range(3):
    position[j][0] = x0[j]
    for i in range(1, len(time)):
        position[j][i] = position[j][i - 1] + velocity[j][i - 1] * (time[i] - time[i - 1])
# Zero out x/y position samples while vertical velocity is large
# (presumably the pen lifted between strokes -- TODO confirm).
for i in range(len(acceleration[2])):
    if abs(velocity[2][i]) > errorZ:
        position[0][i] = 0
        position[1][i] = 0
fig, axs = plt.subplots(2)
axs[0].plot(time, acceleration[0])
axs[0].set_xlabel('Time (s)')
axs[0].set_ylabel('AccelerationX (m/s^2)')
axs[1].plot(time, acceleration[1])
axs[1].set_xlabel('Time (s)')
axs[1].set_ylabel('AccelerationY (m/s^2)')
# The triple-quoted string below is commented-out plotting code kept
# verbatim for reference.
'''
axs[2].scatter(time, acceleration[2])
axs[2].set_xlabel('Time (s)')
axs[2].set_ylabel('AccelerationZ (m/s^2)')
axs[3].scatter(time, velocity[2])
axs[3].set_xlabel('Time (s)')
axs[3].set_ylabel('VelocityZ (m/s)')
axs[4].scatter(time, position[2])
axs[4].set_xlabel('Time (s)')
axs[4].set_ylabel('PositionZ (m)')
axs.scatter(position[0], position[1], marker = "_", linewidth = 70)
axs.set_xlabel('PositionX')
axs.set_ylabel('PositionY')
plt.plot(position[0], position[1], marker = '_', markersize = 30, linewidth = 3, markeredgewidth = 10)'''
plt.show()
| 29.182692
| 106
| 0.577595
| 494
| 3,035
| 3.479757
| 0.202429
| 0.027923
| 0.041885
| 0.051193
| 0.319372
| 0.200116
| 0.17103
| 0.14776
| 0.086097
| 0.066318
| 0
| 0.054545
| 0.23888
| 3,035
| 104
| 107
| 29.182692
| 0.68961
| 0.043163
| 0
| 0.216216
| 0
| 0
| 0.043517
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.054054
| 0
| 0.054054
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4604dc5f65cd5f7e83502d4f9fd70d81c2c12903
| 4,178
|
py
|
Python
|
cohesity_management_sdk/models/health_tile.py
|
nick6655/management-sdk-python
|
88e792cb83e5c24a22af495b220c145d0c45841d
|
[
"Apache-2.0"
] | 18
|
2019-09-24T17:35:53.000Z
|
2022-03-25T08:08:47.000Z
|
cohesity_management_sdk/models/health_tile.py
|
nick6655/management-sdk-python
|
88e792cb83e5c24a22af495b220c145d0c45841d
|
[
"Apache-2.0"
] | 18
|
2019-03-29T19:32:29.000Z
|
2022-01-03T23:16:45.000Z
|
cohesity_management_sdk/models/health_tile.py
|
nick6655/management-sdk-python
|
88e792cb83e5c24a22af495b220c145d0c45841d
|
[
"Apache-2.0"
] | 16
|
2019-02-27T06:54:12.000Z
|
2021-11-16T18:10:24.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2021 Cohesity Inc.
import cohesity_management_sdk.models.alert
class HealthTile(object):

    """Implementation of the 'HealthTile' model.

    Health for Dashboard.

    Attributes:
        capacity_bytes (long|int): Raw Cluster Capacity in Bytes. This is not
            usable capacity and does not take replication factor into
            account.
        cluster_cloud_usage_bytes (long|int): Usage in Bytes on the cloud.
        last_day_alerts (list of Alert): Alerts in last 24 hours.
        last_day_num_criticals (long|int): Number of Critical Alerts.
        last_day_num_warnings (long|int): Number of Warning Alerts.
        num_nodes (int): Number of nodes in the cluster.
        num_nodes_with_issues (int): Number of nodes in the cluster that are
            unhealthy.
        percent_full (float): Percent the cluster is full.
        raw_used_bytes (long|int): Raw Bytes used in the cluster.

    """

    # Create a mapping from Model property names to API property names
    _names = {
        "capacity_bytes": 'capacityBytes',
        "cluster_cloud_usage_bytes": 'clusterCloudUsageBytes',
        "last_day_alerts": 'lastDayAlerts',
        "last_day_num_criticals": 'lastDayNumCriticals',
        "last_day_num_warnings": 'lastDayNumWarnings',
        "num_nodes": 'numNodes',
        "num_nodes_with_issues": 'numNodesWithIssues',
        "percent_full": 'percentFull',
        "raw_used_bytes": 'rawUsedBytes'
    }

    def __init__(self,
                 capacity_bytes=None,
                 cluster_cloud_usage_bytes=None,
                 last_day_alerts=None,
                 last_day_num_criticals=None,
                 last_day_num_warnings=None,
                 num_nodes=None,
                 num_nodes_with_issues=None,
                 percent_full=None,
                 raw_used_bytes=None):
        """Constructor for the HealthTile class"""

        # Initialize members of the class
        self.capacity_bytes = capacity_bytes
        self.cluster_cloud_usage_bytes = cluster_cloud_usage_bytes
        self.last_day_alerts = last_day_alerts
        self.last_day_num_criticals = last_day_num_criticals
        self.last_day_num_warnings = last_day_num_warnings
        self.num_nodes = num_nodes
        self.num_nodes_with_issues = num_nodes_with_issues
        self.percent_full = percent_full
        self.raw_used_bytes = raw_used_bytes

    @classmethod
    def from_dictionary(cls,
                        dictionary):
        """Creates an instance of this model from a dictionary

        Args:
            dictionary (dictionary): A dictionary representation of the object as
            obtained from the deserialization of the server's response. The keys
            MUST match property names in the API description.

        Returns:
            object: An instance of this structure class.

        """
        if dictionary is None:
            return None

        # Extract variables from the dictionary
        capacity_bytes = dictionary.get('capacityBytes')
        cluster_cloud_usage_bytes = dictionary.get('clusterCloudUsageBytes')
        last_day_alerts = None
        # BUGFIX (idiom): compare to None with `is not`, not `!=`.
        if dictionary.get('lastDayAlerts') is not None:
            last_day_alerts = list()
            for structure in dictionary.get('lastDayAlerts'):
                last_day_alerts.append(
                    cohesity_management_sdk.models.alert.Alert.from_dictionary(structure))
        last_day_num_criticals = dictionary.get('lastDayNumCriticals')
        last_day_num_warnings = dictionary.get('lastDayNumWarnings')
        num_nodes = dictionary.get('numNodes')
        num_nodes_with_issues = dictionary.get('numNodesWithIssues')
        percent_full = dictionary.get('percentFull')
        raw_used_bytes = dictionary.get('rawUsedBytes')

        # Return an object of this model
        return cls(capacity_bytes,
                   cluster_cloud_usage_bytes,
                   last_day_alerts,
                   last_day_num_criticals,
                   last_day_num_warnings,
                   num_nodes,
                   num_nodes_with_issues,
                   percent_full,
                   raw_used_bytes)
| 38.330275
| 109
| 0.646003
| 476
| 4,178
| 5.359244
| 0.243697
| 0.063113
| 0.05488
| 0.060368
| 0.180713
| 0.044688
| 0.044688
| 0
| 0
| 0
| 0
| 0.002349
| 0.28674
| 4,178
| 108
| 110
| 38.685185
| 0.853691
| 0.319052
| 0
| 0
| 0
| 0
| 0.160622
| 0.049223
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033898
| false
| 0
| 0.016949
| 0
| 0.118644
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4606c41942e35425a62e84ea16612cc308900a33
| 10,472
|
py
|
Python
|
tests/test_exploration.py
|
lionelkusch/neurolib
|
714eef48616af0ebdb62decc84826221472398f9
|
[
"MIT"
] | null | null | null |
tests/test_exploration.py
|
lionelkusch/neurolib
|
714eef48616af0ebdb62decc84826221472398f9
|
[
"MIT"
] | null | null | null |
tests/test_exploration.py
|
lionelkusch/neurolib
|
714eef48616af0ebdb62decc84826221472398f9
|
[
"MIT"
] | null | null | null |
import logging
import os
import random
import string
import time
import unittest
import neurolib.utils.paths as paths
import neurolib.utils.pypetUtils as pu
import numpy as np
import pytest
import xarray as xr
from neurolib.models.aln import ALNModel
from neurolib.models.fhn import FHNModel
from neurolib.models.multimodel import MultiModel
from neurolib.models.multimodel.builder.fitzhugh_nagumo import FitzHughNagumoNetwork
from neurolib.optimize.exploration import BoxSearch
from neurolib.utils.loadData import Dataset
from neurolib.utils.parameterSpace import ParameterSpace
def randomString(stringLength=10):
    """Generate a random string of fixed length"""
    return "".join(random.choices(string.ascii_lowercase, k=stringLength))
class TestBoxSearch(unittest.TestCase):
    """
    Basic tests.
    """

    def test_assertions(self):
        # Constructing BoxSearch without a model (and without a valid
        # evalFunction) must raise an AssertionError.
        parameters = ParameterSpace(
            {"mue_ext_mean": np.linspace(0, 3, 2), "mui_ext_mean": np.linspace(0, 3, 2)}, kind="sequence"
        )
        with pytest.raises(AssertionError):
            _ = BoxSearch(model=None, parameterSpace=parameters)
        with pytest.raises(AssertionError):
            _ = BoxSearch(model=None, parameterSpace=None)
        with pytest.raises(AssertionError):
            _ = BoxSearch(model=None, parameterSpace=parameters, evalFunction=None)

    def test_fillin_default_parameters_for_sequential(self):
        # None entries in a sequential parameter dict should be filled in
        # with the model's default parameter values.
        in_dict = {"a": [None, None, 1, 2], "b": [4, 5, None, None]}
        SHOULD_BE = {"a": [0, 0, 1, 2], "b": [4, 5, 12, 12]}
        model_params = {"a": 0, "b": 12}
        parameters = ParameterSpace({"mue_ext_mean": [1.0, 2.0]})
        search = BoxSearch(model=ALNModel(), parameterSpace=parameters)
        out_dict = search._fillin_default_parameters_for_sequential(in_dict, model_params)
        self.assertDictEqual(out_dict, SHOULD_BE)
class TestExplorationSingleNode(unittest.TestCase):
    """
    ALN single node exploration.
    """

    def test_single_node(self):
        start = time.time()
        model = ALNModel()
        # 2x2 grid over the external input means.
        parameters = ParameterSpace({"mue_ext_mean": np.linspace(0, 3, 2), "mui_ext_mean": np.linspace(0, 3, 2)})
        search = BoxSearch(model, parameters, filename="test_single_nodes.hdf")
        search.run()
        search.loadResults()
        dataarray = search.xr()
        self.assertTrue(isinstance(dataarray, xr.DataArray))
        # Grid (non-star) parameters produce no attrs on the DataArray.
        self.assertFalse(dataarray.attrs)
        # Post-process: maximum excitatory rate over (apparently) the last
        # 1000 ms of each run -- slice length is 1000/dt samples.
        for i in search.dfResults.index:
            search.dfResults.loc[i, "max_r"] = np.max(
                search.results[i]["rates_exc"][:, -int(1000 / model.params["dt"]) :]
            )
        end = time.time()
        logging.info("\t > Done in {:.2f} s".format(end - start))
class TestExplorationBrainNetwork(unittest.TestCase):
    """
    FHN brain network simulation with BOLD simulation.
    """

    def test_fhn_brain_network_exploration(self):
        # Whole-brain FHN model on the HCP structural connectome.
        ds = Dataset("hcp")
        model = FHNModel(Cmat=ds.Cmat, Dmat=ds.Dmat)
        model.params.duration = 10 * 1000  # ms
        model.params.dt = 0.2
        model.params.bold = True
        # 2 x 2 x 2 grid over per-node external input, global coupling
        # strength and coupling type.
        parameters = ParameterSpace(
            {
                "x_ext": [np.ones((model.params["N"],)) * a for a in np.linspace(0, 2, 2)],
                "K_gl": np.linspace(0, 2, 2),
                "coupling": ["additive", "diffusive"],
            },
            kind="grid",
        )
        search = BoxSearch(model=model, parameterSpace=parameters, filename="test_fhn_brain_network_exploration.hdf")
        search.run(chunkwise=True, bold=True)
        # Exercise the HDF helper and both short/long run-name lookups.
        pu.getTrajectorynamesInFile(os.path.join(paths.HDF_DIR, "test_fhn_brain_network_exploration.hdf"))
        search.loadDfResults()
        search.getRun(0, pypetShortNames=True)
        search.getRun(0, pypetShortNames=False)
        search.loadResults()
        # firing rate xr
        dataarray = search.xr()
        self.assertTrue(isinstance(dataarray, xr.DataArray))
        self.assertFalse(dataarray.attrs)
        # bold xr
        dataarray = search.xr(bold=True)
        self.assertTrue(isinstance(dataarray, xr.DataArray))
        self.assertFalse(dataarray.attrs)
        search.info()
class TestExplorationBrainNetworkPostprocessing(unittest.TestCase):
    """
    ALN brain network simulation with custom evaluation function.
    """

    @classmethod
    def setUpClass(cls):
        # Runs the (expensive) exploration once for all tests in this class.
        # def test_brain_network_postprocessing(self):
        ds = Dataset("hcp")
        model = ALNModel(Cmat=ds.Cmat, Dmat=ds.Dmat)
        # Resting state fits
        model.params["mue_ext_mean"] = 1.57
        model.params["mui_ext_mean"] = 1.6
        model.params["sigma_ou"] = 0.09
        model.params["b"] = 5.0
        model.params["signalV"] = 2
        model.params["dt"] = 0.2
        model.params["duration"] = 0.2 * 60 * 1000

        # multi stage evaluation function
        def evaluateSimulation(traj):
            # NOTE(review): closes over `search`, which is only assigned
            # below -- safe because pypet calls this after BoxSearch exists.
            model = search.getModelFromTraj(traj)
            model.randomICs()
            model.params["dt"] = 0.2
            model.params["duration"] = 4 * 1000.0
            model.run(bold=True)
            result_dict = {"outputs": model.outputs}
            search.saveToPypet(result_dict, traj)

        # define and run exploration
        parameters = ParameterSpace({"mue_ext_mean": np.linspace(0, 3, 2), "mui_ext_mean": np.linspace(0, 3, 2)})
        search = BoxSearch(
            evalFunction=evaluateSimulation,
            model=model,
            parameterSpace=parameters,
            # random suffix avoids HDF filename clashes between repeated runs
            filename=f"test_brain_postprocessing_{randomString(20)}.hdf",
        )
        search.run()
        cls.model = model
        cls.search = search
        cls.ds = ds

    def test_getRun(self):
        self.search.getRun(0)

    def test_loadResults(self):
        self.search.loadResults()

    def test_loadResults_all_False(self):
        self.search.loadResults(all=False)
class TestCustomParameterExploration(unittest.TestCase):
    """Exploration with custom function"""

    def test_circle_exploration(self):
        def explore_me(traj):
            pars = search.getParametersFromTraj(traj)
            # let's calculate the distance to a circle
            computation_result = abs((pars["x"] ** 2 + pars["y"] ** 2) - 1)
            # Store scalar, list and ndarray results to exercise all the
            # result-loading code paths below.
            result_dict = {"scalar_result": computation_result, "list_result": [1, 2, 3, 4], "array_result": np.ones(3)}
            search.saveToPypet(result_dict, traj)

        parameters = ParameterSpace({"x": np.linspace(-2, 2, 2), "y": np.linspace(-2, 2, 2)})
        search = BoxSearch(evalFunction=explore_me, parameterSpace=parameters, filename="test_circle_exploration.hdf")
        search.run()
        search.loadResults(pypetShortNames=False)
        # call the result dataframe
        search.dfResults
        # test integrity of dataframe: every stored result must round-trip
        # into the results dataframe unchanged.
        for i in search.dfResults.index:
            self.assertEqual(search.dfResults.loc[i, "scalar_result"], search.results[i]["scalar_result"])
            self.assertListEqual(search.dfResults.loc[i, "list_result"], search.results[i]["list_result"])
            np.testing.assert_array_equal(search.dfResults.loc[i, "array_result"], search.results[i]["array_result"])
class TestExplorationMultiModel(unittest.TestCase):
    """
    MultiModel exploration test - uses FHN network.
    """

    def test_multimodel_explore(self):
        t_start = time.time()
        delay = 13.0
        coupling = np.random.rand(2, 2)
        delay_matrix = np.array([[0.0, delay], [delay, 0.0]])
        model = MultiModel(FitzHughNagumoNetwork(coupling, delay_matrix))
        parameters = ParameterSpace({"*input*sigma": [0.0, 0.05], "*epsilon*": [0.5, 0.6]}, allow_star_notation=True)
        search = BoxSearch(model, parameters, filename="test_multimodel.hdf")
        search.run()
        search.loadResults()
        arr = search.xr()
        # the exploration result must come back as a DataArray whose attrs
        # mirror the explored parameter names (star/dot/pipe sanitized to "_")
        self.assertIsInstance(arr, xr.DataArray)
        self.assertIsInstance(arr.attrs, dict)
        expected_keys = []
        for key in parameters.dict().keys():
            expected_keys.append(key.replace("*", "_").replace(".", "_").replace("|", "_"))
        self.assertListEqual(list(arr.attrs.keys()), expected_keys)
        t_end = time.time()
        logging.info("\t > Done in {:.2f} s".format(t_end - t_start))
class TestExplorationMultiModelSequential(unittest.TestCase):
    """
    MultiModel exploration test with sequential exploration - uses FHN network.
    """

    def test_multimodel_explore(self):
        t_start = time.time()
        delay = 13.0
        coupling = np.random.rand(2, 2)
        delay_matrix = np.array([[0.0, delay], [delay, 0.0]])
        model = MultiModel(FitzHughNagumoNetwork(coupling, delay_matrix))
        parameters = ParameterSpace(
            {"*input*sigma": [0.0, 0.05], "*epsilon*": [0.5, 0.6, 0.7]}, allow_star_notation=True, kind="sequence"
        )
        search = BoxSearch(model, parameters, filename="test_multimodel.hdf")
        search.run()
        search.loadResults()
        arr = search.xr()
        self.assertIsInstance(arr, xr.DataArray)
        # sequential exploration stacks runs along a "run_no" dimension:
        # 2 sigma values + 3 epsilon values -> 5 runs in total
        self.assertIn("run_no", arr.dims)
        self.assertEqual(len(arr["run_no"]), 5)
        self.assertIsInstance(arr.attrs, dict)
        expected_keys = []
        for key in parameters.dict().keys():
            expected_keys.append(key.replace("*", "_").replace(".", "_").replace("|", "_"))
        self.assertListEqual(list(arr.attrs.keys()), expected_keys)
        t_end = time.time()
        logging.info("\t > Done in {:.2f} s".format(t_end - t_start))
class TestExplorationSingleNodeSequential(unittest.TestCase):
    """
    ALN single node test with sequential exploration.
    """

    def test_single_node(self):
        start = time.time()
        model = ALNModel()
        # kind="sequence" runs the listed values one after another rather than
        # as a grid: 3 + 2 parameter values -> 5 runs (checked below).
        parameters = ParameterSpace({"mue_ext_mean": [0.0, 1.5, 3.0], "mui_ext_mean": [1.5, 3.0]}, kind="sequence")
        search = BoxSearch(model, parameters, filename="test_single_nodes.hdf")
        search.run()
        search.loadResults()
        dataarray = search.xr()
        self.assertTrue(isinstance(dataarray, xr.DataArray))
        self.assertTrue("run_no" in dataarray.dims)
        self.assertEqual(len(dataarray["run_no"]), 5)
        # sequential results carry no parameter attrs on the DataArray
        self.assertFalse(dataarray.attrs)
        for i in search.dfResults.index:
            # peak excitatory rate over the final second of simulated activity
            search.dfResults.loc[i, "max_r"] = np.max(
                search.results[i]["rates_exc"][:, -int(1000 / model.params["dt"]) :]
            )
        end = time.time()
        logging.info("\t > Done in {:.2f} s".format(end - start))
# Allow running this test module directly (python thisfile.py).
if __name__ == "__main__":
    unittest.main()
| 36.110345
| 120
| 0.629297
| 1,202
| 10,472
| 5.366057
| 0.197171
| 0.028992
| 0.013643
| 0.04093
| 0.52155
| 0.435504
| 0.425271
| 0.409147
| 0.385271
| 0.362946
| 0
| 0.020682
| 0.238159
| 10,472
| 289
| 121
| 36.235294
| 0.787791
| 0.061784
| 0
| 0.430693
| 0
| 0
| 0.081959
| 0.019897
| 0
| 0
| 0
| 0
| 0.128713
| 1
| 0.074257
| false
| 0
| 0.089109
| 0
| 0.207921
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
46077178da4bf46135d1fc5fee2cb11b113e0b42
| 3,568
|
py
|
Python
|
irc3/tags.py
|
belst/irc3
|
c89303cf5937a4dc7cf1eda8e662dc702b5e0ad9
|
[
"MIT"
] | null | null | null |
irc3/tags.py
|
belst/irc3
|
c89303cf5937a4dc7cf1eda8e662dc702b5e0ad9
|
[
"MIT"
] | null | null | null |
irc3/tags.py
|
belst/irc3
|
c89303cf5937a4dc7cf1eda8e662dc702b5e0ad9
|
[
"MIT"
] | 1
|
2018-07-22T18:40:37.000Z
|
2018-07-22T18:40:37.000Z
|
# -*- coding: utf-8 -*-
'''
Module offering 2 functions, encode() and decode(), to transcode between
IRCv3.2 tags and python dictionaries.
'''
import re
import random
import string
_escapes = (
("\\", "\\\\"),
(";", r"\:"),
(" ", r"\s"),
("\r", r"\r"),
("\n", r"\n"),
)
# make the possibility of the substitute actually appearing in the text
# negligible. Even for targeted attacks
_substitute = (";TEMP:%s;" %
''.join(random.choice(string.ascii_letters) for i in range(20)))
_unescapes = (
("\\\\", _substitute),
(r"\:", ";"),
(r"\s", " "),
(r"\r", "\r"),
(r"\n", "\n"),
(_substitute, "\\"),
)
# valid tag-keys must contain of alphanumerics and hyphens only.
# for vendor-tagnames: TLD with slash appended
_valid_key = re.compile("^([\w.-]+/)?[\w-]+$")
# valid escaped tag-values must not contain
# NUL, CR, LF, semicolons or spaces
_valid_escaped_value = re.compile("^[^ ;\n\r\0]*$")
def _unescape(string):
for a, b in _unescapes:
string = string.replace(a, b)
return string
def _escape(string):
for a, b in _escapes:
string = string.replace(a, b)
return string
def encode(tags):
    """Serialize a dictionary of tags into an IRCv3.2 tag-string.

    Keys are validated against the tag-key grammar and values are escaped.
    A key whose value is falsy (None or '') is emitted without '='.
    See IRC Message Tags: http://ircv3.net/specs/core/message-tags-3.2.html

    >>> encode({'key': 'value'})
    'key=value'
    >>> from collections import OrderedDict
    >>> d = {'aaa': 'bbb', 'ccc': None, 'example.com/ddd': 'eee'}
    >>> encode(OrderedDict(sorted(d.items(), key=lambda t: t[0])))
    'aaa=bbb;ccc;example.com/ddd=eee'
    """
    parts = []
    for key, value in tags.items():
        if _valid_key.match(key) is None:
            raise ValueError("dictionary key is invalid as tag key: " + key)
        # a falsy value means the tag is emitted as a bare key
        parts.append(key + "=" + _escape(value) if value else key)
    return ";".join(parts)
def decode(tagstring):
    """Parse an IRCv3.2 tag-string from an IRC message into a dictionary.

    A bare key (no '=') maps to None, while 'key=' maps to the empty string.
    Escaped values are validated and then unescaped.
    See IRC Message Tags: http://ircv3.net/specs/core/message-tags-3.2.html

    >>> decode('key=value')
    {'key': 'value'}
    """
    if not tagstring:
        # None or empty string means: no tags at all
        return {}
    result = {}
    for item in tagstring.split(";"):
        key, sep, raw = item.partition("=")
        if not _valid_key.match(key):
            raise ValueError("invalid tag key: " + key)
        if not sep:
            # no '=' present -> value is None
            result[key] = None
        elif raw:
            if not _valid_escaped_value.match(raw):
                raise ValueError("invalid escaped tag value: " + raw)
            result[key] = _unescape(raw)
        else:
            # 'key=' with nothing after the '=' -> empty string
            result[key] = raw
    return result
| 28.31746
| 79
| 0.580998
| 457
| 3,568
| 4.474836
| 0.310722
| 0.046944
| 0.02934
| 0.031296
| 0.353056
| 0.334963
| 0.329095
| 0.283619
| 0.099756
| 0.099756
| 0
| 0.007653
| 0.230942
| 3,568
| 125
| 80
| 28.544
| 0.737609
| 0.504765
| 0
| 0.148148
| 0
| 0
| 0.104869
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0.055556
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
46085291ee66b159174d6179bad5ab2c5199a92f
| 28,019
|
py
|
Python
|
src/fedavg_trainer.py
|
MrZhang1994/mobile-federated-learning
|
6e088a91266d889869af5a1eb0bad83ca635a4a5
|
[
"Apache-2.0"
] | null | null | null |
src/fedavg_trainer.py
|
MrZhang1994/mobile-federated-learning
|
6e088a91266d889869af5a1eb0bad83ca635a4a5
|
[
"Apache-2.0"
] | null | null | null |
src/fedavg_trainer.py
|
MrZhang1994/mobile-federated-learning
|
6e088a91266d889869af5a1eb0bad83ca635a4a5
|
[
"Apache-2.0"
] | 1
|
2021-07-06T04:53:06.000Z
|
2021-07-06T04:53:06.000Z
|
# newly added libraries
import copy
import wandb
import time
import math
import csv
import shutil
from tqdm import tqdm
import torch
import numpy as np
import pandas as pd
from client import Client
from config import *
import scheduler as sch
class FedAvgTrainer(object):
    """Federated-averaging trainer with channel-aware client scheduling.

    Relies on module-level names pulled in by ``from config import *``:
    ``logger``, ``channel_data``, ``client_num_per_round``,
    ``client_num_in_total``, ``scheduler_csv``/``trainer_csv``/``FPF_csv``,
    ``THRESHOLD_WEIGHT_SIZE``/``THRESHOLD_RHO``/``THRESHOLD_BETA``,
    ``G1``/``G2``, ``RES_WEIGHT``/``RES_RATIO``, ``LOCAL_TRAINING_TIME``,
    ``TIME_COMPRESSION_RATIO``, ``time_cnt_max``, ``RESTART_DAYS``,
    ``DATE_LENGTH``, ``CLASS_NUM``, ``EVAL_ON_TRAIN`` — TODO confirm the full
    list against config.py.
    """

    def __init__(self, dataset, model, device, args):
        """Unpack the dataset tuple, optionally re-partition it non-i.i.d.,
        build the client pool, and select the scheduling strategy."""
        self.device = device
        self.args = args
        [client_num, _, _, train_data_global, _, train_data_local_num_dict, train_data_local_dict, test_data_local_dict, class_num] = dataset
        # record the client number of the dataset
        self.client_num = client_num
        self.class_num = class_num
        # setup dataset
        self.data_shape = list(train_data_global[0][0].size())
        self.train_data_local_num_dict = train_data_local_num_dict
        self.test_data_local_dict = test_data_local_dict
        self.train_data_local_dict = train_data_local_dict
        if args.partition_method == "noniid":
            logger.info("-----------non-i.i.d transform----------")
            # generate the non i.i.d dataset
            self.gene_non_iid_dataset(train_data_global, "tmp")
            # read the non i.i.d dataset
            self.read_non_iid_dataset("tmp")
            # rm the tmp directory
            shutil.rmtree(os.path.join('.', 'tmp'))
        self.client_list = []
        self.setup_clients(train_data_local_num_dict, train_data_local_dict, test_data_local_dict)
        # initialize the recorder of invalid dataset
        self.invalid_datasets = dict()
        # time counter starts from the first line
        self.time_counter = channel_data['Time'][0]
        # initialize the cycle_num here
        self.cycle_num = 0
        # initialize the scheduler function
        # NOTE(review): for the PN schedulers we rebuild up to 100 times until
        # a test draw yields more than 5 clients — presumably to avoid a
        # degenerate initial scheduler state; confirm magic numbers (1, 2002, 5).
        if self.args.method == "sch_pn_method_1" or self.args.method == "sch_pn_method_1_empty":
            for _ in range(100):
                self.scheduler = sch.Scheduler_PN_method_1()
                client_indexes, _ = self.scheduler.sch_pn_test(1, 2002)
                if len(client_indexes) > 5:
                    break
        elif self.args.method == "sch_pn_method_2" or self.args.method == "sch_pn_method_2_empty":
            for _ in range(100):
                self.scheduler = sch.Scheduler_PN_method_2()
                client_indexes, _ = self.scheduler.sch_pn_test(1, 2002)
                if len(client_indexes) > 5:
                    break
        elif self.args.method == "sch_pn_method_3" or self.args.method == "sch_pn_method_3_empty":
            for _ in range(100):
                self.scheduler = sch.Scheduler_PN_method_3()
                client_indexes, _ = self.scheduler.sch_pn_test(1, 2002)
                if len(client_indexes) > 5:
                    break
        elif self.args.method == "sch_random":
            self.scheduler = sch.sch_random
        elif self.args.method == "sch_channel":
            self.scheduler = sch.sch_channel
        elif self.args.method == "sch_rrobin":
            self.scheduler = sch.sch_rrobin
        elif self.args.method == "sch_loss":
            self.scheduler = sch.sch_loss
        else:
            # unknown method name falls back to random scheduling
            self.scheduler = sch.sch_random
        self.model = model
        self.model_global = model(self.args, model_name=self.args.model, output_dim=self.class_num)
        self.model_global.train()

    def setup_clients(self, train_data_local_num_dict, train_data_local_dict, test_data_local_dict):
        """Create one reusable Client object per scheduling slot
        (client_num_per_round); their datasets are swapped in during train()."""
        logger.debug("############setup_clients (START)#############")
        for client_idx in range(client_num_per_round):
            c = Client(client_idx, train_data_local_dict[client_idx], test_data_local_dict[client_idx],
                       train_data_local_num_dict[client_idx], self.args, self.device)
            self.client_list.append(c)
        logger.debug("############setup_clients (END)#############")

    def train(self):
        """Run the federated training loop for args.comm_round rounds:
        schedule clients, train locally, aggregate, log CSV/wandb metrics,
        and maintain the FPF fairness index and (delta, rho, beta) estimates.
        """
        # ---- global initialized values ----
        # maintain a lst for local losses
        local_loss_lst = np.zeros((1, client_num_in_total))
        # maintain a lst for local acc
        _, dataset_acc_lst = self.local_test_on_all_clients(self.model_global, 0, True, False)
        local_acc_lst = dataset_acc_lst[np.arange(client_num_in_total) % self.client_num]
        # counting days
        counting_days, reward = 0, 0
        # initialize values for calculating iteration num
        delta, rho, beta, rho_flag, beta_flag = np.random.rand(1)[0], np.random.rand(1)[0], np.random.rand(1)[0], True, True
        # Initialize values for calculating FPF2 index
        local_itr_lst = torch.zeros(self.args.comm_round, int(client_num_in_total)).to(self.device)  # historical local iterations.
        G_mat = torch.zeros(int(client_num_in_total)).to(self.device)  # initial the value of G with zero
        # if weight size is larger than THRESHOLD_WEIGHT_SIZE we will use a simpler method to calculate FPF
        weight_size = sum([self.model_global.cpu().state_dict()[para].numpy().ravel().shape[0] for para in self.model_global.state_dict().keys()])
        if weight_size < THRESHOLD_WEIGHT_SIZE:
            A_mat = torch.ones(weight_size).to(self.device)  # initial the value of A with ones.
            local_w_diffs = torch.zeros((int(client_num_in_total), weight_size)).to(self.device)
        else:
            logger.warning("The weight size of the model {} is too large. Thus, we turn to use a more simple method to calculate FPF.".format(self.args.model))
            LRU_itr_lst = torch.zeros(int(client_num_in_total)).to(self.device)  # store the iteration gap for each client.
        # show weight size for the model.
        logger.debug("weight size: {}".format(weight_size))
        """
        starts training, entering the loop of command round.
        """
        Inform = {}
        traffic = 0
        for round_idx in range(self.args.comm_round):
            logger.info("################Communication round : {}".format(round_idx))
            # set the time_counter: advance to the next channel-table row
            self.time_counter = np.array(channel_data['Time'][channel_data['Time'] >= self.time_counter])[0]
            logger.info("time_counter: {}".format(self.time_counter))
            self.model_global.train()
            # get client_indexes from scheduler
            reward, loss_a, loss_c = 0, 0, 0
            if (self.args.method)[:6] == "sch_pn":
                if self.args.method[-5:] == "empty" or round_idx == 0:
                    client_indexes, local_itr = self.scheduler.sch_pn_empty(round_idx, self.time_counter)
                else:
                    client_indexes, local_itr, (reward, loss_a, loss_c) = self.scheduler.sch_pn(round_idx, self.time_counter, loss_locals, FPF2_idx_lst, local_loss_lst, )
            else:
                if self.args.method == "sch_loss":
                    if round_idx == 0:
                        loss_locals = []
                    client_indexes, local_itr = self.scheduler(round_idx, self.time_counter, loss_locals)
                else:
                    client_indexes, local_itr = self.scheduler(round_idx, self.time_counter)
            # write to the scheduler csv
            with open(scheduler_csv, mode = "a+", encoding='utf-8', newline='') as file:
                csv_writer = csv.writer(file)
                if round_idx == 0:
                    csv_writer.writerow(['time counter', 'client index', 'iteration'])
                csv_writer.writerow([self.time_counter, str(client_indexes), local_itr])
                file.flush()
            logger.info("client_indexes = " + str(client_indexes))
            traffic += len(client_indexes)
            # write one line to trainer_csv
            trainer_csv_line = [round_idx, self.time_counter, str(client_indexes), traffic]
            # contribute to time counter
            self.tx_time(list(client_indexes))  # transmit time
            # store the last model's training parameters.
            last_w = copy.deepcopy(self.model_global.cpu().state_dict())
            # local Initialization
            w_locals, loss_locals, beta_locals, rho_locals, cycle_locals = [], [], [], [], []
            """
            for scalability: following the original FedAvg algorithm, we uniformly sample a fraction of clients in each round.
            Instead of changing the 'Client' instances, our implementation keeps the 'Client' instances and then updates their local dataset
            """
            for idx in range(len(client_indexes)):
                # update dataset
                client = self.client_list[idx]
                client_idx = client_indexes[idx]
                dataset_idx = client_idx % self.client_num
                if dataset_idx in self.invalid_datasets.keys():
                    current_idx = self.invalid_datasets[dataset_idx]
                else:
                    current_idx = dataset_idx
                # retry with random datasets until local training returns
                # usable values; remember the substitution in invalid_datasets
                while True:
                    client.update_local_dataset(current_idx, self.train_data_local_dict[current_idx],
                                                self.test_data_local_dict[current_idx],
                                                self.train_data_local_num_dict[current_idx])
                    # train on new dataset
                    # add a new parameter "local_itr" to the funciton "client.train()"
                    # add a new return value "time_interval" which is the time consumed for training model in client.
                    w, loss, local_beta, local_rho, local_acc, local_cycle = client.train(net=copy.deepcopy(self.model_global).to(self.device), local_iteration = local_itr)
                    if loss != None and local_beta != None and local_rho != None and local_acc != None:
                        if dataset_idx != current_idx:
                            self.invalid_datasets[dataset_idx] = current_idx
                        break
                    current_idx = np.random.randint(self.class_num)
                    logger.warning("changing dataset for {} to {}".format(client_idx, current_idx))
                # record current cycle
                cycle_locals.append([client.get_sample_number(), local_cycle])
                # record current w into w_locals
                w_locals.append((client.get_sample_number(), copy.deepcopy(w)))
                # record current loss into loss_locals
                loss_locals.append(loss)
                # record local beta into beta_locals
                beta_locals.append(local_beta)
                # record local beta into rho_locals
                rho_locals.append(local_rho)
                # update the local_loss_lst
                local_loss_lst[0, client_idx] = loss
                # update local_w_diffs
                if weight_size < THRESHOLD_WEIGHT_SIZE:
                    local_w_diffs[client_idx, :] = torch.cat([w[para].reshape((-1, )) - last_w[para].reshape((-1, )) for para in self.model_global.state_dict().keys()]).to(self.device)
                # update local_acc_lst
                local_acc_lst[client_idx] = local_acc
                # loss
                logger.info('Client {:3d}, loss {:.3f}'.format(client_idx, loss))
            # update global weights
            w_glob = self.aggregate(w_locals)
            # copy weight to net_glob
            self.model_global.load_state_dict(w_glob)
            # update the time counter
            if list(client_indexes):
                self.time_counter += math.ceil(LOCAL_TRAINING_TIME)
            logger.debug("time_counter after training: {}".format(self.time_counter))
            trainer_csv_line += [self.time_counter-trainer_csv_line[1], np.var(local_loss_lst), str(loss_locals), np.var(loss_locals), np.var(local_acc_lst)]
            # print loss
            if not loss_locals:
                logger.info('Round {:3d}, Average loss None'.format(round_idx))
                trainer_csv_line.append('None')
            else:
                loss_avg = sum(loss_locals) / len(loss_locals)
                logger.info('Round {:3d}, Average loss {:.3f}'.format(round_idx, loss_avg))
                trainer_csv_line.append(loss_avg)
            if cycle_locals:
                # sample-weighted average of the per-client cycle counts
                cycle_locals = np.asarray(cycle_locals)
                logger.info('Elapsed cycles {:.3f}'.format(np.sum(cycle_locals[:, 0] * cycle_locals[:, 1]) / np.sum(cycle_locals[:, 0])))
            # local test on all client.
            if round_idx % self.args.frequency_of_the_test == 0 or round_idx == self.args.comm_round - 1:
                test_acc, _ = self.local_test_on_all_clients(self.model_global, round_idx, EVAL_ON_TRAIN, True)
                trainer_csv_line.append(test_acc)
            # write headers for csv
            with open(trainer_csv, mode = "a+", encoding='utf-8', newline='') as file:
                csv_writer = csv.writer(file)
                if round_idx == 0:
                    csv_writer.writerow(['round index', 'time counter', 'client index', 'traffic', 'train time', 'fairness',
                                         'local loss', "local loss var", "local acc var", 'global loss', 'test accuracy'])
                csv_writer.writerow(trainer_csv_line)
                file.flush()
            # log on wandb
            Inform["reward"] = reward
            wandb.log(Inform)
            Inform = {
                "reward": reward, "loss_a": loss_a,
                "loss_c": loss_c, "round": round_idx,
                "traffic": traffic,
                "beta": beta, "rho": rho, "delta": delta,
                # NOTE(review): 59361 looks like the length of one channel-table
                # cycle in time units — confirm against channel_data.
                "cum_time": trainer_csv_line[1]+self.cycle_num*59361,
                "local_itr": local_itr,
                "client_num": len(client_indexes),
                "C3": (rho*delta)/beta,
                "local_loss_var": np.var(loss_locals),
                "local_acc_var": np.var(local_acc_lst)
            }
            # update FPF index list
            if weight_size < THRESHOLD_WEIGHT_SIZE:
                FPF2_idx_lst = torch.norm(local_w_diffs * A_mat, dim = 1) / G_mat
            else:
                FPF2_idx_lst = LRU_itr_lst / G_mat
            FPF2_idx_lst = FPF2_idx_lst.cpu().numpy()
            # zero out NaN/Inf entries (division by a zero G_mat entry)
            FPF2_idx_lst[np.bitwise_or(np.isnan(FPF2_idx_lst), np.isinf(FPF2_idx_lst))] = 0
            # FPF2_idx_lst = FPF2_idx_lst / max(FPF2_idx_lst)
            FPF2_idx_lst[np.bitwise_or(np.isnan(FPF2_idx_lst), np.isinf(FPF2_idx_lst))] = 0
            # write FPF index list to csv
            with open(FPF_csv, mode = "a+", encoding='utf-8', newline='') as file:
                csv_writer = csv.writer(file)
                if round_idx == 0:
                    csv_writer.writerow(['time counter'] + ["car_"+str(i) for i in range(client_num_in_total)])
                csv_writer.writerow([trainer_csv_line[1]]+FPF2_idx_lst.tolist())
                file.flush()
            # update beta & delta & rho
            if w_locals and loss_locals:
                sample_nums = np.array([sample_num for sample_num, _ in w_locals])
                local_w_diff_norms = np.array([torch.norm(torch.cat([w[para].reshape((-1, )) - w_glob[para].reshape((-1, )) for para in self.model_global.state_dict().keys()])).item() for _, w in w_locals])
                # calculate delta
                delta_tmp = np.sum(sample_nums * local_w_diff_norms) / np.sum(sample_nums) / self.args.lr
                if (not np.isnan(delta_tmp) and not np.isinf(delta_tmp)):
                    delta = delta_tmp
                # update rho
                rho_tmp = np.sum(sample_nums * np.array(rho_locals)) / np.sum(sample_nums)
                if rho_tmp > rho or rho_flag:
                    if (not np.isnan(rho_tmp) and not np.isinf(rho_tmp)) and rho_tmp < THRESHOLD_RHO:
                        rho, rho_flag = rho_tmp, False
                # update beta
                beta_tmp = np.sum(sample_nums * np.array(beta_locals)) / np.sum(sample_nums)
                if beta_tmp > beta or beta_flag:
                    if (not np.isnan(beta_tmp) and not np.isinf(beta_tmp)) and beta_tmp < THRESHOLD_BETA:
                        beta, beta_flag = beta_tmp, False
                # let the PN scheduler recompute its iteration budget
                if self.args.method == "sch_pn_method_1" or self.args.method == "sch_pn_method_1_empty":
                    self.scheduler.calculate_itr_method_1(delta)
                elif self.args.method == "sch_pn_method_2" or self.args.method == "sch_pn_method_2_empty":
                    self.scheduler.calculate_itr_method_2(rho, beta, delta)
                elif self.args.method == "sch_pn_method_3" or self.args.method == "sch_pn_method_3_empty":
                    self.scheduler.calculate_itr_method_3(rho, beta, delta)
                if weight_size < THRESHOLD_WEIGHT_SIZE:
                    # update local_w_diffs for the clients NOT selected this round
                    global_w_diff = torch.cat([w_glob[para].reshape((-1, )) - last_w[para].reshape((-1, )) for para in self.model_global.state_dict().keys()]).to(self.device)
                    local_w_diffs[list(set(list(range(client_num_in_total))) - set(list(client_indexes))), :] -= global_w_diff
                    # update A_mat (exponential moving average with horizon G2)
                    A_mat = A_mat * (1 - 1/G2) + (global_w_diff) / G2 / global_w_diff.mean()
            # Update local_itr_lst
            if list(client_indexes) and local_itr > 0:  # only if client_idx is not empty and local_iter > 0, then I will update following values
                local_itr_lst[round_idx, list(client_indexes)] = float(local_itr)
                if weight_size >= THRESHOLD_WEIGHT_SIZE:
                    LRU_itr_lst += float(local_itr)
                    LRU_itr_lst[list(client_indexes)] = 0
            # update G_mat (exponential moving average with horizon G1)
            G_mat = G_mat * (1 - 1 / G1) + local_itr_lst[round_idx, :] / G1
            # if current time_counter has exceed the channel table, I will simply stop early
            if self.time_counter >= time_cnt_max[counting_days]:
                counting_days += 1
                if counting_days % RESTART_DAYS == 0:
                    if self.args.method == "find_constant" and loss_locals:
                        # compare the federated model against a centrally
                        # trained optimum, then stop
                        w_optimal, loss_optimal = self.central_train()
                        w = torch.cat([param.view(-1) for param in self.model_global.parameters()])
                        w_diff_optimal = torch.norm(w.cpu() - w_optimal.cpu())
                        logger.info("The norm of difference between w_optmal & w: {}".format(w_diff_optimal.item()))
                        logger.info("The norm of difference between loss & loss_optimal: {}".format(loss_avg - loss_optimal))
                        break
                    logger.info("################reinitialize model")
                    self.model_global = self.model(self.args, model_name=self.args.model, output_dim=self.class_num)
                    delta, rho, beta, rho_flag, beta_flag = np.random.rand(1)[0], np.random.rand(1)[0], np.random.rand(1)[0], True, True
                    traffic = 0
                if counting_days >= DATE_LENGTH:
                    # wrap around to the start of the channel table
                    logger.info("################training restarts")
                    counting_days = 0
                    self.time_counter = 0
                    self.cycle_num = self.cycle_num+1

    def central_train(self):
        """Train a fresh model centrally over all clients' data and return
        (flattened weight vector, final loss) as the reference optimum."""
        logger.info("################global optimal weights calculation")
        model = self.model(self.args, model_name=self.args.model, output_dim=self.class_num)
        criterion = torch.nn.CrossEntropyLoss().to(self.device)
        model.to(self.device)
        if self.args.client_optimizer == "sgd":
            optimizer = torch.optim.SGD(model.parameters(), lr=self.args.lr)
        else:
            optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=self.args.lr,
                                         weight_decay=self.args.wd, amsgrad=True)
        for _ in tqdm(range(self.args.central_round)):
            for client_idx in range(self.client_num):
                # one batch per client per central round
                x, labels = next(iter(self.train_data_local_dict[client_idx]))
                x, labels = x.to(self.device), labels.to(self.device)
                model.train()
                model.zero_grad()
                log_probs = model(x)
                loss = criterion(log_probs, labels)
                loss.backward()
                loss = loss.item()
                optimizer.step()
            wandb.log({"central_training/loss": loss})
        w_optimal = torch.cat([param.view(-1) for param in model.parameters()])
        loss_optimal = loss
        return w_optimal, loss_optimal

    def gene_non_iid_dataset(self, train_global, directory):
        """
        changing self.train_data_local_dict to non-i.i.d. dataset.
        And change self.train_data_local_num_dict correspondingly.
        Writes per-client .npy files into `directory` for read_non_iid_dataset().
        """
        data, labels = train_global[0][0], train_global[0][1]  # read the tensor from train_global.
        # transform shape
        data = data.view(data.shape[0], -1)
        labels = labels.view(labels.shape[0], -1)
        # get full_df (shuffled deterministically by args.seed)
        full_df = pd.DataFrame(np.concatenate((data.numpy(), labels.numpy()), axis=1)).sample(frac=1, random_state=self.args.seed)
        # temporary store the data in dir
        save_dir = os.path.join(".", directory)
        if not os.path.exists(save_dir):
            os.mkdir(save_dir)
        for client_idx in tqdm(range(self.client_num)):
            # get selected classes (falls back to all classes if fewer than CLASS_NUM remain)
            try:
                selected_classes = set(list(np.random.choice(list(set(full_df.iloc[:, -1])), CLASS_NUM)))
            except:
                selected_classes = set(full_df.iloc[:, -1])
            # got valid data
            valid_data = full_df[full_df.iloc[:, -1].isin(selected_classes)]
            # get number of data on the local client
            local_num = self.train_data_local_num_dict[client_idx]
            # got selected data # remember to shuffle the data
            try:
                selected_data = valid_data[0:local_num]
            except:
                selected_data = valid_data
            # NOTE(review): this stores a *length* into train_data_local_dict —
            # looks like it should target train_data_local_num_dict instead
            # (read_non_iid_dataset later overwrites train_data_local_dict);
            # confirm.
            self.train_data_local_dict[client_idx] = len(selected_data)
            # update the local client data
            np.save(os.path.join(save_dir, "client_{}_data.npy".format(client_idx)), selected_data.iloc[:, 0:-1].values)
            np.save(os.path.join(save_dir, "client_{}_labels.npy".format(client_idx)), selected_data.iloc[:, -1].values)
            # remove the data from the full_df
            full_df = full_df.drop(index=selected_data.index)

    def read_non_iid_dataset(self, directory):
        """Load the per-client .npy files written by gene_non_iid_dataset back
        into self.train_data_local_dict as single-batch (data, labels) tensors."""
        for client_idx in tqdm(range(self.client_num)):
            data_shape = [self.train_data_local_num_dict[client_idx]] + self.data_shape[1:]
            data_path = os.path.join(".", directory, "client_{}_data.npy".format(client_idx))
            labels_path = os.path.join(".", directory, "client_{}_labels.npy".format(client_idx))
            self.train_data_local_dict[client_idx] = [(torch.from_numpy(np.load(data_path)).view(tuple(data_shape)).float(), torch.from_numpy(np.load(labels_path)).long())]

    def tx_time(self, client_indexes):
        """Advance self.time_counter by the (modeled) transmission time for the
        scheduled clients; a minimal tick of 1 if no client was scheduled."""
        if not client_indexes:
            self.time_counter += 1
            return
        # read the channel condition for corresponding cars.
        channel_res = np.reshape(np.array(channel_data[channel_data['Time'] == self.time_counter * channel_data['Car'].isin(client_indexes)]["Distance to BS(4982,905)"]), (1, -1))
        logger.debug("channel_res: {}".format(channel_res))
        # linearly resolve the optimazation problem
        tmp_t = 1
        if self.args.radio_alloc == "optimal":
            while np.sum(RES_WEIGHT * channel_res * RES_RATIO / tmp_t) > 1:
                tmp_t += 1
        elif self.args.radio_alloc == "uniform":
            while np.max(channel_res) * RES_WEIGHT * RES_RATIO * len(channel_res) / tmp_t > 1:
                tmp_t += 1
        self.time_counter += math.ceil(TIME_COMPRESSION_RATIO*tmp_t)
        logger.debug("time_counter after tx_time: {}".format(self.time_counter))

    def aggregate(self, w_locals):
        """Sample-weighted FedAvg of the collected client weights.

        Returns the current global weights unchanged when w_locals is empty.
        NOTE(review): mutates the state dict of w_locals[0] in place while
        averaging — callers must not reuse that entry afterwards.
        """
        if not w_locals:
            return copy.deepcopy(self.model_global.cpu().state_dict())
        training_num = 0
        for idx in range(len(w_locals)):
            (sample_num, averaged_params) = w_locals[idx]
            training_num += sample_num
        (sample_num, averaged_params) = w_locals[0]
        for k in averaged_params.keys():
            for i in range(0, len(w_locals)):
                local_sample_number, local_model_params = w_locals[i]
                w = local_sample_number / training_num
                if i == 0:
                    averaged_params[k] = local_model_params[k] * w
                else:
                    averaged_params[k] += local_model_params[k] * w
        return averaged_params

    def local_test_on_all_clients(self, model_global, round_idx, eval_on_train=False, if_log=True):
        """Evaluate model_global on every client's test set (and optionally its
        train set).

        Returns (test_acc, per-client train accuracy array) when eval_on_train
        is True, otherwise (test_acc, None).
        """
        logger.info("################local_test_on_all_clients : {}".format(round_idx))
        train_metrics = {
            'num_samples': [],
            'num_correct': [],
            'losses': []
        }
        test_metrics = {
            'num_samples': [],
            'num_correct': [],
            'losses': []
        }
        client = self.client_list[0]
        for client_idx in tqdm(range(min(int(client_num_in_total), self.client_num))):
            """
            Note: for datasets like "fed_CIFAR100" and "fed_shakespheare",
            the training client number is larger than the testing client number
            """
            if self.test_data_local_dict[client_idx] is None or client_idx in self.invalid_datasets.keys():
                continue
            client.update_local_dataset(client_idx, self.train_data_local_dict[client_idx],
                                        self.test_data_local_dict[client_idx],
                                        self.train_data_local_num_dict[client_idx])
            # test data
            test_local_metrics = client.local_test(model_global, True)
            test_metrics['num_samples'].append(copy.deepcopy(test_local_metrics['test_total']))
            test_metrics['num_correct'].append(copy.deepcopy(test_local_metrics['test_correct']))
            test_metrics['losses'].append(copy.deepcopy(test_local_metrics['test_loss']))
            # train data
            if eval_on_train:
                train_local_metrics = client.local_test(model_global, False)
                train_metrics['num_samples'].append(copy.deepcopy(train_local_metrics['test_total']))
                train_metrics['num_correct'].append(copy.deepcopy(train_local_metrics['test_correct']))
                train_metrics['losses'].append(copy.deepcopy(train_local_metrics['test_loss']))
        # test on test dataset
        test_acc = sum(test_metrics['num_correct']) / sum(test_metrics['num_samples'])
        test_loss = sum(test_metrics['losses']) / sum(test_metrics['num_samples'])
        stats = {
            "Test/Acc": test_acc,
            "Test/Loss": test_loss,
            "round": round_idx,
            "cum_time": self.time_counter+self.cycle_num*59361,
        }
        # test on training dataset
        if eval_on_train:
            train_acc = sum(train_metrics['num_correct']) / sum(train_metrics['num_samples'])
            train_loss = sum(train_metrics['losses']) / sum(train_metrics['num_samples'])
            stats.update({
                'Train/Acc': train_acc,
                'Train/Loss': train_loss,
                "round": round_idx,
                "cum_time": self.time_counter+self.cycle_num*59361,
            })
            if if_log:
                logger.info(stats)
                wandb.log(stats)
            return test_acc, np.array(train_metrics['num_correct']) / np.array(train_metrics['num_samples'])
        if if_log:
            logger.info(stats)
            wandb.log(stats)
        return test_acc, None
| 52.965974
| 206
| 0.5891
| 3,562
| 28,019
| 4.353734
| 0.113139
| 0.021666
| 0.020763
| 0.018636
| 0.434421
| 0.350335
| 0.263735
| 0.201122
| 0.167913
| 0.150116
| 0
| 0.010086
| 0.302866
| 28,019
| 529
| 207
| 52.965974
| 0.783853
| 0.089582
| 0
| 0.205128
| 0
| 0.002564
| 0.076412
| 0.013728
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023077
| false
| 0
| 0.033333
| 0
| 0.074359
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
46087abee7bffbddb94f7edf7ace7481d6b4e5e7
| 15,539
|
py
|
Python
|
src/test.py
|
jfparentledartech/DEFT
|
6e7e98664cd635509bdff69533a24a7c4e4e3ea3
|
[
"MIT"
] | null | null | null |
src/test.py
|
jfparentledartech/DEFT
|
6e7e98664cd635509bdff69533a24a7c4e4e3ea3
|
[
"MIT"
] | null | null | null |
src/test.py
|
jfparentledartech/DEFT
|
6e7e98664cd635509bdff69533a24a7c4e4e3ea3
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import cv2
import matplotlib.pyplot as plt
import numpy as np
from progress.bar import Bar
import torch
import pickle
import motmetrics as mm
from lib.opts import opts
from lib.logger import Logger
from lib.utils.utils import AverageMeter
from lib.dataset.dataset_factory import dataset_factory
from lib.utils.pixset_metrics import compute_metrics
# Category names used by the PixSet dataset.
pixset_categories = [
    'car',
    'truck',
    'bus',
    'pedestrian',
    'motorcyclist',
    'cyclist',
    'van'
]

opt = opts().parse()

# Persist the parsed test options so other tools can reload the exact
# configuration used for this run.
filename = '../options/test_opt_pixset.txt'

with open(filename, 'wb') as f:
    pickle.dump(opt, f)
# # print('dataset -> ', opt.dataset)
# print('lstm -> ', opt.lstm)
# print(f'saved (unknown)')
# with open(filename, 'rb') as f:
#     opt = pickle.load(f)
# print('use pixell ->', opt.use_pixell)

# NOTE(review): these imports are deliberately placed after option parsing —
# presumably lib.detector reads global option state at import time; confirm
# before hoisting them to the top of the file.
from lib.detector import Detector
from lib.utils.image import plot_tracking, plot_tracking_ddd
import json

# Tracks whose bounding-box area falls below this are discarded downstream.
min_box_area = 20

# Category groupings used when mapping detections to attributes.
_vehicles = ["car", "truck", "bus", "van"]
_cycles = ["motorcyclist", "cyclist"]
_pedestrians = ["pedestrian"]

# nuScenes-style attribute label <-> integer id (0 = no attribute).
attribute_to_id = {
    "": 0,
    "cycle.with_rider": 1,
    "cycle.without_rider": 2,
    "pedestrian.moving": 3,
    "pedestrian.standing": 4,
    "pedestrian.sitting_lying_down": 5,
    "vehicle.moving": 6,
    "vehicle.parked": 7,
    "vehicle.stopped": 8,
}
id_to_attribute = {v: k for k, v in attribute_to_id.items()}
# Accumulator for the 8 nuScenes attribute scores.
nuscenes_att = np.zeros(8, np.float32)
class PrefetchDataset(torch.utils.data.Dataset):
    """Dataset wrapper that pairs each image with its pre-processed inputs.

    For every image id it loads the raw image and its annotations, runs the
    detector's pre-processing at each configured test scale, and returns
    everything the downstream tracking loop needs (raw image, per-scale
    tensors, meta with calibration, frame/video bookkeeping).
    """

    def __init__(self, opt, dataset, pre_process_func):
        self.images = dataset.images
        # Sort the image ids once here instead of on every __getitem__ call
        # (the per-item sort was marked "TODO remove" and cost O(n log n)
        # per access while producing the same order every time).
        self.images.sort()
        self.load_image_func = dataset.coco.loadImgs
        self.get_ann_ids = dataset.coco.getAnnIds
        self.load_annotations = dataset.coco.loadAnns
        self.img_dir = dataset.img_dir
        self.pre_process_func = pre_process_func
        self.get_default_calib = dataset.get_default_calib
        self.opt = opt

    def __getitem__(self, index):
        """Return (img_id, ret, img_info) for the index-th (sorted) image."""
        img_id = self.images[index]
        img_info = self.load_image_func(ids=[img_id])[0]
        img_path = os.path.join(self.img_dir, img_info["file_name"])
        image = cv2.imread(img_path)
        annotation_ids = self.get_ann_ids(imgIds=[img_id])
        annotations = self.load_annotations(ids=annotation_ids)
        images, meta = {}, {}
        # Fix: iterate the options passed to __init__ (self.opt) rather than
        # the module-level `opt` global, so the wrapper honors whatever opt
        # object it was constructed with.
        for scale in self.opt.test_scales:
            input_meta = {}
            # fall back to a default calibration when the image has none
            calib = (
                img_info["calib"]
                if "calib" in img_info
                else self.get_default_calib(image.shape[1], image.shape[0])
            )
            input_meta["calib"] = calib
            images[scale], meta[scale] = self.pre_process_func(image, scale, input_meta)
        ret = {
            "images": images,
            "image": image,
            "meta": meta,
            "frame_id": img_info["frame_id"],
            "annotations": annotations
        }
        # First frame of a video: signal the tracker to reset its state.
        if "frame_id" in img_info and img_info["frame_id"] == 1:
            ret["is_first_frame"] = 1
            ret["video_id"] = img_info["video_id"]
        return img_id, ret, img_info

    def __len__(self):
        return len(self.images)
def prefetch_test(opt):
    """Run detection + tracking over the test split, write videos/results.

    For nuscenes/pixset: accumulates per-class motmetrics and a submission
    dict; for other datasets: writes per-video MOT/KITTI text files.
    Side effects: creates ``<dataset>_videos/`` and ``<dataset>_results/``
    directories, one .avi per video, and result/summary files.
    """
    start_time = time.time()
    show_image = True
    if not opt.not_set_cuda_env:
        os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpus_str
    Dataset = dataset_factory[opt.test_dataset]
    # NOTE: rebinds a *local* opt; the module-level `opt` stays un-updated.
    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
    # split = "val" if not opt.trainval else "test"
    split = "test"
    # split = "val"
    dataset = Dataset(opt, split)
    detector = Detector(opt)
    # Optionally preload external detections keyed by image id.
    if opt.load_results != "":
        load_results = json.load(open(opt.load_results, "r"))
        for img_id in load_results:
            for k in range(len(load_results[img_id])):
                if load_results[img_id][k]["class"] - 1 in opt.ignore_loaded_cats:
                    load_results[img_id][k]["score"] = -1
    else:
        load_results = {}
    data_loader = torch.utils.data.DataLoader(
        PrefetchDataset(opt, dataset, detector.pre_process),
        batch_size=1,
        shuffle=False,
        num_workers=0,
        pin_memory=True,
    )
    results = {}
    num_iters = len(data_loader) if opt.num_iters < 0 else opt.num_iters
    bar = Bar("{}".format(opt.exp_id), max=num_iters)
    time_stats = ["tot", "load", "pre", "net", "dec", "post", "merge", "track"]
    avg_time_stats = {t: AverageMeter() for t in time_stats}
    if opt.use_loaded_results:
        for img_id in data_loader.dataset.images:
            results[img_id] = load_results["{}".format(img_id)]
        num_iters = 0
    final_results = []
    out_path = ""
    # nuscenes/pixset: submission skeleton + one accumulator per category.
    # NOTE(review): `ret`/`accumulators` are only bound in this branch but are
    # used unconditionally below — other datasets would raise NameError at the
    # per-class metrics loop. Confirm intended usage is nuscenes/pixset only.
    if opt.dataset in ["nuscenes", "pixset"]:
        ret = {
            "meta": {
                "use_camera": True,
                "use_lidar": False,
                "use_radar": False,
                "use_map": False,
                "use_external": False,
            },
            "results": {},
        }
        accumulators = [mm.MOTAccumulator(auto_id=True) for _ in pixset_categories]
    for ind, (img_id, pre_processed_images, img_info) in enumerate(data_loader):
        bar.next()
        if ind >= num_iters:
            break
        if opt.dataset == "nuscenes":
            sample_token = img_info["sample_token"][0]
            sensor_id = img_info["sensor_id"].numpy().tolist()[0]
        if opt.dataset == "pixset":
            sample_token = img_info["sample_token"][0]
            sensor_id = img_info["sensor_id"].numpy().tolist()[0]
        # Per-video (re)initialization on the first frame of each sequence.
        if opt.tracking and ("is_first_frame" in pre_processed_images):
            if "{}".format(int(img_id.numpy().astype(np.int32)[0])) in load_results:
                pre_processed_images["meta"]["pre_dets"] = load_results[
                    "{}".format(int(img_id.numpy().astype(np.int32)[0]))
                ]
            else:
                print(
                    "No pre_dets for",
                    int(img_id.numpy().astype(np.int32)[0]),
                    ". Use empty initialization.",
                )
                pre_processed_images["meta"]["pre_dets"] = []
            # Flush results of the previous video (MOT/KITTI style only).
            if final_results and opt.dataset not in ["nuscenes", "pixset"]:
                write_results(out_path, final_results, opt.dataset)
                final_results = []
            img0 = pre_processed_images["image"][0].numpy()
            h, w, _ = img0.shape
            detector.img_height = h
            detector.img_width = w
            if opt.dataset in ["nuscenes", "pixset"]:
                save_video_name = os.path.join(
                    opt.dataset + "_videos/",
                    "MOT"
                    + str(int(pre_processed_images["video_id"]))
                    + "_"
                    + str(int(img_info["sensor_id"]))
                    + str(int(img_info["video_id"]))
                    + ".avi",
                )
            elif opt.dataset == "kitti_tracking":
                save_video_name = os.path.join(
                    opt.dataset + "_videos/",
                    "KITTI_" + str(int(pre_processed_images["video_id"])) + ".avi",
                )
            else:
                save_video_name = os.path.join(
                    opt.dataset + "_videos/",
                    "MOT" + str(int(pre_processed_images["video_id"])) + ".avi",
                )
            results_dir = opt.dataset + "_results"
            if not os.path.exists(opt.dataset + "_videos/"):
                os.mkdir(opt.dataset + "_videos/")
            if not os.path.exists(results_dir):
                os.mkdir(results_dir)
            # Resolve the per-video output text file from the video table.
            for video in dataset.coco.dataset["videos"]:
                video_id = video["id"]
                file_name = video["file_name"]
                if pre_processed_images[
                    "video_id"
                ] == video_id and opt.dataset not in ["nuscenes", "pixset"]:
                    out_path = os.path.join(results_dir, "{}.txt".format(file_name))
                    break
            detector.reset_tracking(opt)
            # NOTE(review): `vw` is only (re)created on first frames; if the
            # first frame of a sequence is missing, vw.write below would
            # reference an unbound/previous writer.
            vw = cv2.VideoWriter(
                save_video_name, cv2.VideoWriter_fourcc("M", "J", "P", "G"), 10, (w, h)
            )
            print("Start tracking video", int(pre_processed_images["video_id"]))
        if opt.public_det:
            if "{}".format(int(img_id.numpy().astype(np.int32)[0])) in load_results:
                pre_processed_images["meta"]["cur_dets"] = load_results[
                    "{}".format(int(img_id.numpy().astype(np.int32)[0]))
                ]
            else:
                print("No cur_dets for", int(img_id.numpy().astype(np.int32)[0]))
                pre_processed_images["meta"]["cur_dets"] = []
        # Run the detector + tracker on this frame.
        online_targets = detector.run(pre_processed_images, image_info=img_info)
        online_tlwhs = []
        online_ids = []
        online_ddd_boxes = []
        sample_results = []
        classes = []
        image = pre_processed_images["image"][0].numpy()
        # Update per-class MOT accumulators against ground-truth annotations.
        for acc_i in range(len(accumulators)):
            gt_list, hyp_list, distances = compute_metrics(pre_processed_images['annotations'],
                                                           online_targets, eval_type='distance',
                                                           im=image, category=pixset_categories[acc_i])
            accumulators[acc_i].update(gt_list, hyp_list, distances)
        # Debug printout of running metrics for the first category only.
        idx = 0
        print(ind)
        print(accumulators[idx].mot_events.loc[ind])
        mh = mm.metrics.create()
        summary = mh.compute(accumulators[idx], metrics=['num_frames', 'mota', 'precision', 'recall'], name=f'acc {pixset_categories[idx]}')
        print(summary)
        print('-----------------------------------------')
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            # Keep only sufficiently large boxes.
            if tlwh[2] * tlwh[3] > min_box_area:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
                classes.append(t.classe)
                if opt.dataset in ["nuscenes", "pixset"]:
                    online_ddd_boxes.append(t.org_ddd_box)
                    class_name = t.classe
                    # NOTE(review): `att` stays unbound if class_name matches
                    # no group; harmless now since "attribute_name" is None.
                    if class_name in _cycles:
                        att = id_to_attribute[np.argmax(nuscenes_att[0:2]) + 1]
                    elif class_name in _pedestrians:
                        att = id_to_attribute[np.argmax(nuscenes_att[2:5]) + 3]
                    elif class_name in _vehicles:
                        att = id_to_attribute[np.argmax(nuscenes_att[5:8]) + 6]
                    ddd_box = t.ddd_bbox.copy()
                    # Submission format: [x, y, z, w, l, h, rotation...].
                    ddd_box_submission = t.ddd_submission.tolist()
                    translation, size, rotation = (
                        ddd_box_submission[:3],
                        ddd_box_submission[3:6],
                        ddd_box_submission[6:],
                    )
                    result = {
                        "sample_token": sample_token,
                        "translation": translation,
                        "size": size,
                        "rotation": rotation,
                        "velocity": [0, 0],
                        "detection_name": t.classe,
                        # "attribute_name": att,
                        "attribute_name": None,
                        "detection_score": t.score,
                        "tracking_name": t.classe,
                        "tracking_score": t.score,
                        "tracking_id": tid,
                        "sensor_id": sensor_id,
                        "det_id": -1,
                    }
                    sample_results.append(result.copy())
        # Merge this frame's detections into the submission dict.
        if opt.dataset in ["nuscenes", "pixset"]:
            if sample_token in ret["results"]:
                ret["results"][sample_token] = (
                    ret["results"][sample_token] + sample_results
                )
            else:
                ret["results"][sample_token] = sample_results
        final_results.append(
            (pre_processed_images["frame_id"].cpu().item(), online_tlwhs, online_ids)
        )
        # Render the frame with overlaid tracks into the per-video .avi.
        if show_image:
            img0 = pre_processed_images["image"][0].numpy()
            if opt.dataset in ["nuscenes", "pixset"]:
                online_im = plot_tracking_ddd(
                    img0,
                    online_tlwhs,
                    online_ddd_boxes,
                    online_ids,
                    frame_id=pre_processed_images["frame_id"],
                    calib=img_info["calib"],
                    trans_matrix=img_info["trans_matrix"],
                    camera_matrix=img_info["camera_matrix"],
                    distortion_coeffs=img_info["distortion_coefficients"],
                    classes=classes,
                )
            else:
                online_im = plot_tracking(
                    img0,
                    online_tlwhs,
                    online_ids,
                    frame_id=pre_processed_images["frame_id"],
                )
            vw.write(online_im)
    # Flush results for the last video (MOT/KITTI style only).
    if opt.dataset not in ["nuscenes", "pixset"] and final_results:
        write_results(out_path, final_results, opt.dataset)
        final_results = []
    if opt.dataset in ["nuscenes", "pixset"]:
        # Keep at most the 500 highest-scoring detections per sample.
        for sample_token in ret["results"].keys():
            confs = sorted(
                [
                    (-d["detection_score"], ind)
                    for ind, d in enumerate(ret["results"][sample_token])
                ]
            )
            ret["results"][sample_token] = [
                ret["results"][sample_token][ind]
                for _, ind in confs[: min(500, len(confs))]
            ]
        # Final per-class + overall MOT metrics.
        mh = mm.metrics.create()
        metrics = ['num_frames', 'mota', 'motp', 'precision', 'recall']
        summary = mh.compute_many(
            accumulators, names=pixset_categories, metrics=metrics, generate_overall=True
        )
        print(summary)
        save_summary(summary, 'overall')
    print('total test time', time.time() - start_time)
def save_summary(summary, acc_name):
    """Persist a motmetrics summary table as text under ../pixset_results/test/."""
    destination = f"../pixset_results/test/{acc_name}.txt"
    with open(destination, "w") as out_file:
        out_file.write(summary.to_string())
def _to_list(results):
for img_id in results:
for t in range(len(results[img_id])):
for k in results[img_id][t]:
if isinstance(results[img_id][t][k], (np.ndarray, np.float32)):
results[img_id][t][k] = results[img_id][t][k].tolist()
return results
def write_results(filename, results, data_type):
    """Write tracking results to *filename* in MOT or KITTI text format.

    :param results: iterable of ``(frame_id, tlwhs, track_ids)`` tuples.
    :param data_type: ``"mot"`` or ``"kitti_tracking"``.
    :raises ValueError: on any other *data_type*.
    """
    templates = {
        "mot": "{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\n",
        "kitti_tracking": "{frame} {id} Car 0 0 -10 {x1} {y1} {x2} {y2} -10 -10 -10 -1000 -1000 -1000 -10\n",
    }
    if data_type not in templates:
        raise ValueError(data_type)
    template = templates[data_type]
    # KITTI frames are 0-based; MOT frames are written as given.
    frame_offset = 1 if data_type == "kitti_tracking" else 0
    with open(filename, "w") as out:
        for frame_id, tlwhs, track_ids in results:
            frame_id -= frame_offset
            for box, tid in zip(tlwhs, track_ids):
                if tid < 0:
                    continue  # negative ids mark invalid tracks
                left, top, width, height = box
                right, bottom = left + width, top + height
                out.write(template.format(
                    frame=frame_id, id=tid, x1=left, y1=top,
                    x2=right, y2=bottom, w=width, h=height,
                ))
if __name__ == "__main__":
    # opt = opts().parse()
    # `opt` was parsed (and pickled) at module import time, near the top of
    # this file — the commented line above is the pre-snapshot behavior.
    prefetch_test(opt)
| 36.137209
| 140
| 0.533882
| 1,783
| 15,539
| 4.394279
| 0.18396
| 0.030632
| 0.04365
| 0.010721
| 0.24314
| 0.203318
| 0.16388
| 0.138609
| 0.107084
| 0.09164
| 0
| 0.0132
| 0.34185
| 15,539
| 429
| 141
| 36.221445
| 0.752909
| 0.019821
| 0
| 0.138889
| 0
| 0.005556
| 0.110103
| 0.014921
| 0
| 0
| 0
| 0.002331
| 0
| 1
| 0.019444
| false
| 0
| 0.055556
| 0.002778
| 0.086111
| 0.027778
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
460899f19cdb6ea7310d1622b1d7bbb727078007
| 8,796
|
py
|
Python
|
compiler_gym/envs/gcc/datasets/csmith.py
|
AkillesAILimited/CompilerGym
|
34c0933ba26b385ebd2cd67f5d8edbb046c6bf02
|
[
"MIT"
] | null | null | null |
compiler_gym/envs/gcc/datasets/csmith.py
|
AkillesAILimited/CompilerGym
|
34c0933ba26b385ebd2cd67f5d8edbb046c6bf02
|
[
"MIT"
] | null | null | null |
compiler_gym/envs/gcc/datasets/csmith.py
|
AkillesAILimited/CompilerGym
|
34c0933ba26b385ebd2cd67f5d8edbb046c6bf02
|
[
"MIT"
] | 1
|
2021-10-01T05:52:34.000Z
|
2021-10-01T05:52:34.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import shutil
import subprocess
import tempfile
from pathlib import Path
from threading import Lock
from typing import Iterable, Optional, Union
import numpy as np
from fasteners import InterProcessLock
from compiler_gym.datasets import Benchmark, BenchmarkSource, Dataset
from compiler_gym.datasets.benchmark import BenchmarkWithSource
from compiler_gym.envs.gcc.gcc import Gcc
from compiler_gym.util.decorators import memoized_property
from compiler_gym.util.runfiles_path import runfiles_path
from compiler_gym.util.shell_format import plural
from compiler_gym.util.truncate import truncate
# The maximum value for the --seed argument to csmith.
UINT_MAX = (2 ** 32) - 1
# Locations of the Csmith binary and headers bundled with CompilerGym.
_CSMITH_BIN = runfiles_path("compiler_gym/third_party/csmith/csmith/bin/csmith")
_CSMITH_INCLUDES = runfiles_path(
    "compiler_gym/third_party/csmith/csmith/include/csmith-2.3.0"
)
# Serializes install() within this process; an InterProcessLock guards
# against concurrent installs from other processes (see install()).
_CSMITH_INSTALL_LOCK = Lock()
# TODO(github.com/facebookresearch/CompilerGym/issues/325): This can be merged
# with the LLVM implementation.
class CsmithBenchmark(BenchmarkWithSource):
    """A benchmark whose bitcode was produced by the Csmith program generator."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._src = None  # raw C source bytes, set by create()

    @classmethod
    def create(cls, uri: str, bitcode: bytes, src: bytes) -> Benchmark:
        """Build a benchmark from in-memory bitcode and its C source."""
        bench = cls.from_file_contents(uri, bitcode)
        bench._src = src  # pylint: disable=protected-access
        return bench

    @memoized_property
    def sources(self) -> Iterable[BenchmarkSource]:
        """Expose the generated program as a single `source.c` entry."""
        return [
            BenchmarkSource(filename="source.c", contents=self._src),
        ]

    @property
    def source(self) -> str:
        """The single C source file, decoded as UTF-8 text."""
        return self._src.decode("utf-8")
class CsmithDataset(Dataset):
    """A dataset which uses Csmith to generate programs.
    Csmith is a tool that can generate random conformant C99 programs. It is
    described in the publication:
        Yang, Xuejun, Yang Chen, Eric Eide, and John Regehr. "Finding and
        understanding bugs in C compilers." In Proceedings of the 32nd ACM
        SIGPLAN conference on Programming Language Design and Implementation
        (PLDI), pp. 283-294. 2011.
    For up-to-date information about Csmith, see:
        https://embed.cs.utah.edu/csmith/
    Note that Csmith is a tool that is used to find errors in compilers. As
    such, there is a higher likelihood that the benchmark cannot be used for an
    environment and that :meth:`env.reset()
    <compiler_gym.envs.CompilerEnv.reset>` will raise :class:`BenchmarkInitError
    <compiler_gym.datasets.BenchmarkInitError>`.
    """

    def __init__(
        self,
        gcc_bin: Union[Path, str],
        site_data_base: Path,
        sort_order: int = 0,
        csmith_bin: Optional[Path] = None,
        csmith_includes: Optional[Path] = None,
    ):
        """Constructor.
        :param gcc_bin: The GCC binary (or docker image spec) used to
            pre-process generated sources.
        :param site_data_base: The base path of a directory that will be used to
            store installed files.
        :param sort_order: An optional numeric value that should be used to
            order this dataset relative to others. Lowest value sorts first.
        :param csmith_bin: The path of the Csmith binary to use. If not
            provided, the version of Csmith shipped with CompilerGym is used.
        :param csmith_includes: The path of the Csmith includes directory. If
            not provided, the includes of the Csmith shipped with CompilerGym is
            used.
        """
        super().__init__(
            name="generator://csmith-v0",
            description="Random conformant C99 programs",
            references={
                "Paper": "http://web.cse.ohio-state.edu/~rountev.1/5343/pdf/pldi11.pdf",
                "Homepage": "https://embed.cs.utah.edu/csmith/",
            },
            license="BSD",
            site_data_base=site_data_base,
            sort_order=sort_order,
            benchmark_class=CsmithBenchmark,
        )
        self.gcc_bin = gcc_bin
        self.csmith_bin_path = csmith_bin or _CSMITH_BIN
        self.csmith_includes_path = csmith_includes or _CSMITH_INCLUDES
        # Lockfile for cross-process coordination of install().
        self._install_lockfile = self.site_data_path / ".install.LOCK"

    @property
    def size(self) -> int:
        # Actually 2^32 - 1, but practically infinite for all intents and
        # purposes.
        return 0

    @memoized_property
    def gcc(self):
        # Defer instantiation of Gcc from the constructor as it will fail if the
        # given Gcc is not available. Memoize the result as initialization is
        # expensive.
        return Gcc(bin=self.gcc_bin)

    def benchmark_uris(self) -> Iterable[str]:
        # One URI per possible 32-bit seed, generated lazily.
        return (f"{self.name}/{i}" for i in range(UINT_MAX))

    def benchmark(self, uri: str) -> CsmithBenchmark:
        # The final URI component is the integer seed.
        return self.benchmark_from_seed(int(uri.split("/")[-1]))

    def _random_benchmark(self, random_state: np.random.Generator) -> Benchmark:
        seed = random_state.integers(UINT_MAX)
        return self.benchmark_from_seed(seed)

    @property
    def installed(self) -> bool:
        # Installed iff the parent install is done and headers were copied.
        return super().installed and (self.site_data_path / "includes").is_dir()

    def install(self) -> None:
        """Copy the Csmith headers into the site data directory (idempotent)."""
        super().install()
        if self.installed:
            return
        # Thread lock + inter-process file lock; re-check after acquiring.
        with _CSMITH_INSTALL_LOCK, InterProcessLock(self._install_lockfile):
            if (self.site_data_path / "includes").is_dir():
                return
            # Copy the Csmith headers into the dataset's site directory path because
            # in bazel builds this includes directory is a symlink, and we need
            # actual files that we can use in a docker volume.
            shutil.copytree(
                self.csmith_includes_path,
                self.site_data_path / "includes.tmp",
            )
            # Atomic directory rename to prevent race on install().
            (self.site_data_path / "includes.tmp").rename(
                self.site_data_path / "includes"
            )

    def benchmark_from_seed(
        self, seed: int, max_retries: int = 3, retry_count: int = 0
    ) -> CsmithBenchmark:
        """Get a benchmark from a uint32 seed.
        :param seed: A number in the range 0 <= n < 2^32.
        :param max_retries: Maximum number of Csmith invocations to attempt.
        :param retry_count: Current retry depth (used by the recursive retry).
        :return: A benchmark instance.
        :raises OSError: If Csmith fails.
        :raises BenchmarkInitError: If the C program generated by Csmith cannot
            be lowered to LLVM-IR.
        """
        if retry_count >= max_retries:
            raise OSError(
                f"Csmith failed after {retry_count} {plural(retry_count, 'attempt', 'attempts')} "
                f"with seed {seed}"
            )
        self.install()
        # Run csmith with the given seed and pipe the output to clang to
        # assemble a bitcode.
        self.logger.debug("Exec csmith --seed %d", seed)
        csmith = subprocess.Popen(
            [str(self.csmith_bin_path), "--seed", str(seed)],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        # Generate the C source.
        src, stderr = csmith.communicate(timeout=300)
        if csmith.returncode:
            # Log the failure (best effort) and retry recursively with the
            # same seed, bounded by max_retries.
            try:
                stderr = "\n".join(
                    truncate(stderr.decode("utf-8"), max_line_len=200, max_lines=20)
                )
                logging.warning("Csmith failed with seed %d: %s", seed, stderr)
            except UnicodeDecodeError:
                # Failed to interpret the stderr output, generate a generic
                # error message.
                logging.warning("Csmith failed with seed %d", seed)
            return self.benchmark_from_seed(
                seed, max_retries=max_retries, retry_count=retry_count + 1
            )
        # Pre-process the source.
        with tempfile.TemporaryDirectory() as tmpdir:
            src_file = f"{tmpdir}/src.c"
            with open(src_file, "wb") as f:
                f.write(src)
            preprocessed_src = self.gcc(
                "-E",
                "-I",
                str(self.site_data_path / "includes"),
                "-o",
                "-",
                src_file,
                cwd=tmpdir,
                timeout=60,
                volumes={
                    str(self.site_data_path / "includes"): {
                        "bind": str(self.site_data_path / "includes"),
                        "mode": "ro",
                    }
                },
            )
            return self.benchmark_class.create(
                f"{self.name}/{seed}", preprocessed_src.encode("utf-8"), src
            )
| 35.756098
| 98
| 0.620509
| 1,067
| 8,796
| 4.977507
| 0.324274
| 0.019582
| 0.020335
| 0.027114
| 0.123141
| 0.100358
| 0.041047
| 0.016946
| 0
| 0
| 0
| 0.010086
| 0.289905
| 8,796
| 245
| 99
| 35.902041
| 0.840218
| 0.309686
| 0
| 0.048611
| 0
| 0.006944
| 0.106824
| 0.022119
| 0
| 0
| 0
| 0.004082
| 0
| 1
| 0.090278
| false
| 0
| 0.111111
| 0.041667
| 0.305556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
46089e52d9f3438c0d2bdc953655d9f0dafbf49b
| 444
|
py
|
Python
|
dans_pymodules/power_of_two.py
|
DanielWinklehner/dans_pymodules
|
04dfdaeccc171712cad6eb24202608e2eda21eca
|
[
"MIT"
] | null | null | null |
dans_pymodules/power_of_two.py
|
DanielWinklehner/dans_pymodules
|
04dfdaeccc171712cad6eb24202608e2eda21eca
|
[
"MIT"
] | null | null | null |
dans_pymodules/power_of_two.py
|
DanielWinklehner/dans_pymodules
|
04dfdaeccc171712cad6eb24202608e2eda21eca
|
[
"MIT"
] | null | null | null |
__author__ = "Daniel Winklehner"
__doc__ = "Find out if a number is a power of two"
def power_of_two(number):
    """
    Return True if the input is a power of 2 (i.e. 1, 2, 4, 8, 16, 32, ...).

    Bug fix: the original loop only terminated once ``res`` (number % 2)
    became non-zero, yet the success test afterwards required ``res == 0``,
    so the function could never return True. It also printed debug output on
    every iteration. Note that 1 (== 2**0) is considered a power of two.
    """
    if number <= 0:
        # Zero and negatives are never powers of two.
        return False
    # Halve until the value becomes odd; a power of two reduces to exactly 1.
    while number % 2 == 0:
        number /= 2.0
    return number == 1
| 19.304348
| 66
| 0.529279
| 66
| 444
| 3.409091
| 0.575758
| 0.093333
| 0.071111
| 0.088889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.051195
| 0.34009
| 444
| 22
| 67
| 20.181818
| 0.716724
| 0.202703
| 0
| 0
| 0
| 0
| 0.215569
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0
| 0
| 0.272727
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
460b1c41c9223f051fce73e3d45305d26f20419f
| 4,092
|
py
|
Python
|
api_yamdb/reviews/models.py
|
LHLHLHE/api_yamdb
|
bda83815a47f3fda03d54220dfe41e9263ff1b05
|
[
"MIT"
] | null | null | null |
api_yamdb/reviews/models.py
|
LHLHLHE/api_yamdb
|
bda83815a47f3fda03d54220dfe41e9263ff1b05
|
[
"MIT"
] | null | null | null |
api_yamdb/reviews/models.py
|
LHLHLHE/api_yamdb
|
bda83815a47f3fda03d54220dfe41e9263ff1b05
|
[
"MIT"
] | null | null | null |
import datetime as dt
from django.db import models
from django.core.validators import MinValueValidator, MaxValueValidator
from django.core.exceptions import ValidationError
from users.models import CustomUser
def validate_year(value):
    """Validate that a title's release year is not in the future.

    Returns the value unchanged when valid; raises ValidationError otherwise.
    """
    current_year = dt.datetime.now().year
    if value > current_year:
        raise ValidationError(
            'Год выпуска превышает текущий!')
    return value
class Category(models.Model):
    """Category of a title (e.g. film, book); referenced by Title."""
    # Human-readable category name.
    name = models.CharField(max_length=256, verbose_name='Название')
    # Unique URL-safe identifier.
    slug = models.SlugField(
        max_length=50,
        unique=True,
        verbose_name='Идентификатор')

    class Meta:
        ordering = ('name',)
        verbose_name = 'Категория'
        verbose_name_plural = 'Категории'

    def __str__(self):
        return self.slug
class Genre(models.Model):
    """Genre of a title; linked to Title through GenreTitle."""
    # Human-readable genre name.
    name = models.CharField(max_length=256, verbose_name='Название')
    # Unique URL-safe identifier.
    slug = models.SlugField(
        max_length=50,
        unique=True,
        verbose_name='Идентификатор')

    class Meta:
        ordering = ('name',)
        verbose_name = 'Жанр'
        verbose_name_plural = 'Жанры'

    def __str__(self):
        return self.slug
class Title(models.Model):
    """A creative work (film, book, ...) that users review."""
    name = models.TextField(verbose_name='Название')
    # Release year; validate_year rejects years in the future.
    year = models.IntegerField(
        validators=[validate_year],
        verbose_name='Год выпуска')
    description = models.TextField(
        blank=True,
        verbose_name='Описание')
    # Many-to-many via the explicit GenreTitle through-model.
    genre = models.ManyToManyField(
        Genre,
        through='GenreTitle',
        verbose_name='Жанры')
    # Optional category; kept (as NULL) if the category is deleted.
    category = models.ForeignKey(
        Category,
        on_delete=models.SET_NULL,
        blank=True,
        null=True,
        related_name='titles',
        verbose_name='Категория')

    class Meta:
        ordering = ('name',)
        verbose_name = 'Произведение'
        verbose_name_plural = 'Произведения'

    def __str__(self):
        return (
            f'name: {self.name}, '
            f'year: {self.year}, '
        )
class GenreTitle(models.Model):
    """Through-model linking titles and genres (many-to-many)."""
    # Genre side; kept (as NULL) if the genre is deleted.
    genre = models.ForeignKey(
        Genre,
        on_delete=models.SET_NULL,
        blank=True,
        null=True)
    # Title side; the link row is removed with the title.
    title = models.ForeignKey(Title, on_delete=models.CASCADE)

    def __str__(self):
        return f'{self.genre} --- {self.title}'
class Review(models.Model):
    """A user's review of a title, with a 1-10 score.

    Each author may review a given title at most once (unique constraint).
    """
    title = models.ForeignKey(
        Title,
        on_delete=models.CASCADE,
        verbose_name='Произведение',
    )
    text = models.TextField(
        verbose_name='текст',
    )
    author = models.ForeignKey(
        CustomUser,
        on_delete=models.CASCADE,
        verbose_name='Автор'
    )
    # Rating constrained to the inclusive range 1..10.
    score = models.IntegerField(
        validators=[
            MinValueValidator(1),
            MaxValueValidator(10)
        ],
        verbose_name='Оценка'
    )
    # Set automatically when the review is created.
    pub_date = models.DateTimeField(
        auto_now_add=True,
        verbose_name='Дата публикации'
    )

    class Meta:
        # One review per (author, title) pair.
        constraints = [
            models.UniqueConstraint(
                fields=['author', 'title'],
                name='unique review'
            )
        ]
        verbose_name = 'Отзыв'
        verbose_name_plural = 'Отзывы'
        default_related_name = 'reviews'

    def __str__(self):
        # First 60 characters of the review text.
        return self.text[:60]
class Comment(models.Model):
    """A user's comment attached to a review."""
    review = models.ForeignKey(
        Review,
        on_delete=models.CASCADE,
        related_name='comments',
        verbose_name='Отзыв',
    )
    text = models.TextField(verbose_name='Текст')
    author = models.ForeignKey(
        CustomUser,
        on_delete=models.CASCADE,
        related_name='comments',
        verbose_name='Автор'
    )
    # Set automatically when the comment is created.
    pub_date = models.DateTimeField(
        auto_now_add=True,
        verbose_name='Дата публикации'
    )

    class Meta:
        verbose_name = 'Комментарий'
        verbose_name_plural = 'Комментарии'

    def __str__(self):
        return self.text
| 24.650602
| 77
| 0.608016
| 413
| 4,092
| 5.823245
| 0.285714
| 0.128067
| 0.040748
| 0.039917
| 0.441164
| 0.427027
| 0.384615
| 0.360499
| 0.321414
| 0.256133
| 0
| 0.005148
| 0.287879
| 4,092
| 165
| 78
| 24.8
| 0.820178
| 0.043255
| 0
| 0.389313
| 0
| 0
| 0.106729
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053435
| false
| 0
| 0.038168
| 0.045802
| 0.381679
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
460d6b3dd6eff2aa0cc70e0bbc8f6441baed0341
| 6,796
|
py
|
Python
|
makesense/graph.py
|
sieben/makesense
|
485e71903bcc9446482f21bb5d0c7a392ca1efca
|
[
"Apache-2.0"
] | 5
|
2015-02-03T12:28:55.000Z
|
2019-03-20T08:11:22.000Z
|
makesense/graph.py
|
sieben/makesense
|
485e71903bcc9446482f21bb5d0c7a392ca1efca
|
[
"Apache-2.0"
] | 4
|
2016-05-16T07:26:19.000Z
|
2016-06-23T22:22:10.000Z
|
makesense/graph.py
|
sieben/makesense
|
485e71903bcc9446482f21bb5d0c7a392ca1efca
|
[
"Apache-2.0"
] | 1
|
2016-05-16T07:28:53.000Z
|
2016-05-16T07:28:53.000Z
|
# -*- coding: utf-8 -*-
import json
import pdb
import os
from os.path import join as pj
import networkx as nx
import pandas as pd
from networkx.readwrite.json_graph import node_link_data
def chain():
    """Build and return the fixed 15-node tree-like radio topology.

    Nodes are numbered 1 (root) through 15; edges connect each level
    horizontally and link successive levels ("Trans height" edges).

    Bug fix: the original built the graph and then discarded it — it never
    returned ``g``, so callers had no way to use the topology. Returning the
    graph is backward-compatible (callers that ignored the result still work).
    """
    g = nx.Graph()
    # Horizontal edges within each level.
    for i in range(11, 15):
        g.add_edge(i, i + 1)
    for i in range(7, 10):
        g.add_edge(i, i + 1)
    for i in range(4, 6):
        g.add_edge(i, i + 1)
    for i in range(2, 3):
        g.add_edge(i, i + 1)
    g.add_node(1)
    # Trans height: edges from each node down to the next level.
    g.add_edge(1, 2)
    g.add_edge(1, 3)
    g.add_edge(2, 4)
    g.add_edge(2, 5)
    g.add_edge(3, 5)
    g.add_edge(3, 6)
    g.add_edge(4, 7)
    g.add_edge(4, 8)
    g.add_edge(5, 8)
    g.add_edge(5, 9)
    g.add_edge(6, 9)
    g.add_edge(6, 10)
    g.add_edge(7, 11)
    g.add_edge(7, 12)
    g.add_edge(8, 12)
    g.add_edge(8, 13)
    g.add_edge(9, 13)
    g.add_edge(9, 14)
    g.add_edge(10, 14)
    g.add_edge(10, 15)
    return g
def tree():
    """Dump graph ``g`` as JSON and render it with a spectral layout.

    NOTE(review): broken as written — ``g``, ``json_graph`` and ``plt`` are
    not defined or imported at module scope; calling this raises NameError.
    Presumably it once lived inside chain() (which builds ``g``) and relied
    on matplotlib/json_graph imports that were removed — confirm intent.
    """
    with open("graph_radio.json", "w") as f:
        f.write(json_graph.dumps(g,sort_keys=True,
                indent=4, separators=(',', ': ') ))
    # Drawing
    pos = nx.spectral_layout(g)
    nx.draw(g, pos, node_color="g")
    # Highlight node 1 (the root) in blue.
    nx.draw_networkx_nodes(g, pos, nodelist=[1], node_color="b")
    plt.savefig("topology_tree.pdf", format="pdf")
    plt.show()
def plot_graph_chain(folder):
    """Build a 7-node directed chain (root = node 1), dump JSON, plot PDF.

    NOTE(review): ``json_graph`` and ``plt`` are not imported in this module —
    calling this raises NameError. Presumably matplotlib.pyplot and
    networkx's json_graph were once imported; confirm before use.
    """
    g = nx.DiGraph()
    N = 7
    # Edges point from each node toward its predecessor (toward the root).
    for i in range(1, N):
        g.add_edge(i + 1, i)
    g.add_node(1, root=True)
    with open("radio_tree.json", "w") as f:
        f.write(json_graph.dumps(g, sort_keys=True,
                indent=4, separators=(',', ': ')))
    pos = nx.circular_layout(g)
    nx.draw(g, pos=pos)
    nx.draw_networkx_nodes(g, pos, node_color='g')
    # Root drawn in blue.
    nx.draw_networkx_nodes(g, pos, nodelist=[1], node_color='b')
    nx.draw_networkx_edges(g, pos, edge_color="r", arrows=True)
    plt.savefig(pj(folder, "topology_chain.pdf"), format="pdf")
def flower():
    """Build a wheel graph with a short tail ("flower"), dump JSON, plot PDF.

    NOTE(review): ``wheel_graph``, ``json_graph`` and ``plt`` are not
    imported in this module — calling this raises NameError. Presumably
    ``from networkx import wheel_graph`` and matplotlib imports were removed.
    """
    g = wheel_graph(7)
    # Tail: 8 -> 7 -> 6 plus a chord 6 -> 1.
    g.add_edge(6, 1)
    g.add_edge(7, 6)
    g.add_edge(8, 7)
    with open("radio_graph.json", "w") as f:
        f.write(json_graph.dumps(g, sort_keys=True,
                indent=4, separators=(',', ': ')))
    pos = nx.spring_layout(g)
    nx.draw(g, pos=pos)
    nx.draw_networkx_nodes(g,pos,
                           node_color='g')
    # Tail tip (node 8) drawn in blue.
    nx.draw_networkx_nodes(g,pos,
                           nodelist=[8],
                           node_color='b')
    #nx.draw_networkx_edges(g, pos, edge_color="r", arrows=True)
    plt.savefig("topology_fleur.pdf", format="pdf")
    plt.show()
def plot_graph(self):
    """
    Plot the transmission graph of the simulation.
    TODO: Draw arrows and have a directed graph.
    http://goo.gl/Z697dH
    TODO: Graph with big nodes for big transmissions

    NOTE(review): this takes ``self`` but is defined at module level — it
    looks pasted out of a class. ``plt``, ``PJ``, ``DictReader`` and
    ``update_report`` are undefined here; calling it raises NameError.
    """
    fig = plt.figure()
    ax1 = fig.add_subplot(111)
    ax1.set_title("Transmission / RPL tree")
    ax1.axis("off")
    # Color value keyed by mote type; non-servers fall back to 0.25 below.
    val_color = {"udp_server": 0.5714285714285714}
    pos = {node: data["pos"]
           for node, data in self.radio_tree.nodes(data=True)}
    # color for all nodes
    node_color = [val_color.get(data["mote_type"], 0.25)
                  for node, data in self.radio_tree.nodes(data=True)]
    # Drawing the nodes
    nx.draw_networkx_nodes(self.radio_tree, pos, node_color=node_color, ax=ax1)
    nx.draw_networkx_labels(self.radio_tree, pos, ax=ax1)
    # Drawing radio edges
    nx.draw_networkx_edges(self.radio_tree, pos, edgelist=self.radio_tree.edges(),
                           width=8, alpha=0.5, ax=ax1)
    # Adding the depth of each node.
    with open(PJ(self.result_dir, "depth.csv")) as depth_f:
        reader = DictReader(depth_f)
        for row in reader:
            node = int(row["node"])
            depth = row["depth"]
            # Annotate slightly offset from the node position.
            ax1.text(pos[node][0] + 5, pos[node][1] + 5, depth,
                     bbox=dict(facecolor='red', alpha=0.5),
                     horizontalalignment='center')
    # Drawing RPL edges
    nx.draw_networkx_edges(
        self.rpl_tree, pos, edge_color='r', nodelist=[], arrows=True, ax=ax1)
    img_path = PJ(self.img_dir, "graph.pdf")
    fig.savefig(img_path, format="pdf")
    update_report(self.result_dir, "plot_graph", {
        "img_src": "img/graph.pdf",
        "comment": """
        When the edge is thick it means edges are in an RPL instance.
        Otherwise it means that the two nodes can see each others.
        """,
        "text": """
        We generate a random geometric graph then use information coming
        to the RPL root to construct the gateway representation of the RPL
        tree. We add into this tree representation the traffic generated.
        """})
def transmission_graph(self):
    """
    Plot the transmission graph of the simulation.

    NOTE(review): takes ``self`` at module level (pasted out of a class);
    ``plt``, ``itertools`` and ``distance`` are undefined in this module —
    calling it raises NameError. Confirm the intended home of this code.
    """
    settings = self.settings["transmission_graph"]
    output_path = pj(self.result_folder_path, *settings["output_path"])
    fig_rplinfo, ax_transmission_graph = plt.subplots()
    net = nx.Graph()
    # nodes
    mote_types = self.settings["mote_types"]
    motes = self.settings["motes"]
    position = {}
    for mote in motes:
        mote_type = mote["mote_type"]
        mote_id = mote["mote_id"]
        position[mote_id] = (mote["x"], mote["y"])
        # Group mote ids under their type for per-type coloring below.
        mote_types[mote_type] \
            .setdefault("nodes", []) \
            .append(mote["mote_id"])
    # edges
    transmitting_range = self.settings["transmitting_range"]
    # Connect every pair of motes within radio range (excludes self-pairs
    # via the 0 < distance test).
    for couple in itertools.product(motes, motes):
        if 0 < distance(couple) <= transmitting_range:
            net.add_edge(couple[0]["mote_id"],
                         couple[1]["mote_id"])
    for mote_type in mote_types:
        color = mote_types[mote_type]["color"]
        nodelist = mote_types[mote_type]["nodes"]
        nx.draw_networkx_nodes(net, position,
                               nodelist=nodelist,
                               node_color=color,
                               ax=ax_transmission_graph)
    nx.draw_networkx_edges(net, pos=position, ax=ax_transmission_graph)
    # labels
    nx.draw_networkx_labels(net, position, ax=ax_transmission_graph)
    plt.axis('off')
    plt.savefig(output_path)  # save as PNG
    return ax_transmission_graph
def rpl_graph(folder):
    """
    Build up the RPL representation at the gateway.

    Reads parent announcements from results/messages.csv and writes the
    resulting directed tree to results/graph/rpl_graph.json.
    """
    graph_folder = pj(folder, "results", "graph")
    if not os.path.exists(graph_folder):
        os.makedirs(graph_folder)
    messages = pd.read_csv(pj(folder, "results", "messages.csv"))
    parent_rows = messages[messages.message_type == "parent"]
    tree = nx.DiGraph()
    # One edge per "parent" message: mote -> announced parent node.
    for _, row in parent_rows.iterrows():
        tree.add_edge(row["mote_id"], row["node"])
    with open(pj(graph_folder, "rpl_graph.json"), "w") as out:
        out.write(json.dumps(node_link_data(tree),
                             sort_keys=True, indent=4))
| 27.626016
| 82
| 0.59741
| 1,003
| 6,796
| 3.874377
| 0.217348
| 0.03088
| 0.057643
| 0.034225
| 0.306999
| 0.241894
| 0.221307
| 0.221307
| 0.199434
| 0.17473
| 0
| 0.026464
| 0.266039
| 6,796
| 245
| 83
| 27.738776
| 0.752606
| 0.074897
| 0
| 0.1
| 0
| 0
| 0.131274
| 0
| 0
| 0
| 0
| 0.008163
| 0
| 1
| 0.04375
| false
| 0
| 0.04375
| 0
| 0.09375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
460e2d1d15ba01da3b9d59848ee03f3a71e7df89
| 5,511
|
py
|
Python
|
FFTNet_dilconv.py
|
mimbres/FFTNet
|
3a6bfb4731bab2e0a59fc3a1ddb55f19f84aeba2
|
[
"Apache-2.0"
] | null | null | null |
FFTNet_dilconv.py
|
mimbres/FFTNet
|
3a6bfb4731bab2e0a59fc3a1ddb55f19f84aeba2
|
[
"Apache-2.0"
] | null | null | null |
FFTNet_dilconv.py
|
mimbres/FFTNet
|
3a6bfb4731bab2e0a59fc3a1ddb55f19f84aeba2
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 7 09:46:10 2018
@author: sungkyun
FFTNet model using 2x1 dil-conv
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# Models with Preset (for convenience)
'''
dim_input: dimension of input (256 for 8-bit mu-law input)
num_layer: number of layers (11 in paper). receptive field = 2^11 (2,048)
io_ch: number of input(=output) channels in each fft layers
skip_ch: number of skip-channels, only required for fft-residual net.
Annotations:
B: batch dimension
C: channel dimension
L: length dimension
'''
def fftnet_base(input_dim=256, num_layer=11, io_ch=256):
    """Construct a plain FFTNet (no skip channels, biased convolutions)."""
    return FFTNet(
        input_dim=input_dim,
        num_layer=num_layer,
        io_ch=io_ch,
        skip_ch=0,
        bias=True,
    )
def fftnet_residual(input_dim=256, num_layer=11, io_ch=256, skip_ch=256):
    """Build an FFTNet with skip channels (residual variant).

    Bug fix: the keyword was misspelled ``bais=True``; ``FFTNet.__init__``
    only accepts ``bias``, so calling this factory always raised TypeError.
    """
    return FFTNet(input_dim=input_dim, num_layer=num_layer, io_ch=io_ch,
                  skip_ch=skip_ch, bias=True)
# FFT_Block: define a basic FFT Block
'''
FFT_Block:
- using 2x1 dilated-conv, instead of LR split 1x1 conv.
- described in the paper, section 2.2.
- in case of the first layer used in the first FFT_Block,
we use nn.embedding layer for one-hot index(0-255) entries.
'''
class FFT_Block(nn.Module):
def __init__(self, cond_dim=26, io_ch=int, recep_sz=int, bias=True):
super(FFT_Block, self).__init__()
self.cond_dim=cond_dim # Number of dimensions of condition input
self.io_ch = io_ch
self.recep_sz = recep_sz # Size of receptive field: i.e., the 1st layer has receptive field of 2^11(=2,048). 2nd has 2^10.
self.bias = bias # If True, use bias in 1x1 conv.
self.dilation = int(recep_sz / 2)
self.conv_2x1_LR = nn.Conv1d(in_channels=self.io_ch, out_channels=self.io_ch,
kernel_size=2, stride=1, dilation=self.dilation, bias=self.bias)
self.conv_2x1_VLR = nn.Conv1d(in_channels=self.cond_dim, out_channels=self.io_ch,
kernel_size=2, stride=1, dilation=self.dilation, bias=self.bias)
self.conv_1x1_last = nn.Conv1d(in_channels=self.io_ch, out_channels=self.io_ch,
kernel_size=1, stride=1, bias=self.bias)
return None
def forward(self, x, cond):
z = self.conv_2x1_LR(x) # Eq(1), z = w_L*x_L + w_R*x_R
z = z + self.conv_2x1_VLR(cond) # Eq(2), z = (WL ∗ xL + WR ∗ xR) + (VL ∗ hL + VR ∗ hR)
x = F.relu(self.conv_1x1_last(F.relu(z))) # x = ReLU(conv1x1(ReLU(z)))
return x
'''
FFTNet:
- [11 FFT_blocks] --> [FC_layer] --> [softmax]
'''
class FFTNet(nn.Module):
    """FFTNet: [num_layer FFT_Blocks] -> [FC layer] -> (softmax in the loss).

    Maps a sequence of mu-law class indices (B x L ints in [0, input_dim))
    plus a per-step condition signal to per-step class logits. Softmax is
    intentionally omitted: in PyTorch it is folded into the CE loss.
    """
    def __init__(self, input_dim=256, cond_dim=26, num_layer=11, io_ch=256, skip_ch=0, bias=True):
        """
        Args:
            input_dim: number of input classes (256 for 8-bit mu-law).
            cond_dim: channel dimension of the condition input.
            num_layer: number of FFT blocks (11 in the paper; receptive
                field = 2^num_layer).
            io_ch: in/out channel count of every block (256 in the paper).
            skip_ch: skip channels -- accepted but NOT implemented yet.
            bias: if True, use bias in the 2x1 convs.
        """
        super(FFTNet, self).__init__()
        self.input_dim = input_dim  # 256 (=num_classes)
        self.cond_dim = cond_dim    # 26
        self.num_layer = num_layer  # 11
        self.io_ch = io_ch          # 256 ch. in the paper
        self.skip_ch = skip_ch      # Not implemented yet (no skip channel in the paper)
        self.bias = bias            # If True, use bias in 2x1 conv.
        self.max_recep_sz = int(pow(2, self.num_layer))  # 2^11, max receptive field size
        # Embedding layer: one-hot index -> dense io_ch-dim vector per step.
        self.input_embedding_layer = nn.Embedding(num_embeddings=self.input_dim,
                                                  embedding_dim=self.io_ch)
        # Constructing FFT Blocks: receptive fields shrink layer by layer.
        blocks = nn.ModuleList()
        for l in range(self.num_layer):
            recep_sz = int(pow(2, self.num_layer-l))  # 2048, 1024, ..., 2 (for num_layer=11)
            blocks.append( FFT_Block(cond_dim=self.cond_dim,
                                     io_ch=self.io_ch,
                                     recep_sz=recep_sz,
                                     bias=self.bias) )
        self.fft_blocks=blocks
        # Final FC layer: per-step logits.
        # NOTE(review): out_features is io_ch rather than input_dim; the two
        # are equal (256) under the defaults -- confirm if they ever differ.
        self.fc = nn.Linear(in_features=self.io_ch, out_features=self.io_ch)
        return None

    def forward(self, x, cond, gen_mod=False):
        """Run the net.

        Args:
            x: LongTensor of class indices, shape (B, L).
            cond: condition tensor, presumably (B, cond_dim, L) -- confirm
                against the caller.
            gen_mod: if True (generation), keep only the last time step.

        Returns:
            (B, L, io_ch) logits in training mode; (B, 1, io_ch) in
            generation mode.
        """
        # Left-pad x with max_recep_sz dummy samples so every output step
        # sees a full causal receptive field.
        zpad_sz = int(self.max_recep_sz)
        x = F.pad(x, (zpad_sz, 0), 'constant', 128) # 128? or 0?  (original author's open question)
        # Embedding(x):
        x = self.input_embedding_layer(x)  # In : BxL, Out: BxLxC
        x = x.permute(0,2,1)               # Out: BxCxL
        # FFT_Blocks: each block shortens the sequence by its dilation, so
        # cond is re-padded per layer with a matching (halving) amount.
        for l in range(self.num_layer):
            # Padding cond:
            zpad_sz = int(self.max_recep_sz/pow(2, l))
            padded_cond = F.pad(cond, (zpad_sz, 0), 'constant', 0)
            x = self.fft_blocks[l](x, padded_cond)
        if gen_mod is True:
            x = x[:,:,-1]                     # In generator mode, take the last one sample only.
            x = x.reshape(-1, 1, self.io_ch)  # (BxC) --> (Bx1xC)
        else:
            x = x[:,:,:-1]        # In training mode, right-omit 1 is required.
            x = x.permute(0,2,1)  # (BxCxL) --> (BxLxC)
        x = self.fc(x)  # (BxLxC)
        # NOTE: in PyTorch, softmax() is included in CE loss.
        return x
| 40.822222
| 147
| 0.551987
| 793
| 5,511
| 3.650694
| 0.253468
| 0.033161
| 0.033161
| 0.027634
| 0.296028
| 0.248359
| 0.240069
| 0.188256
| 0.142314
| 0.124698
| 0
| 0.045067
| 0.339684
| 5,511
| 135
| 148
| 40.822222
| 0.749382
| 0.189802
| 0
| 0.238806
| 0
| 0
| 0.00427
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.089552
| false
| 0
| 0.059701
| 0.029851
| 0.268657
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4612cb4af177cb85c305bcbf02040e60659a77f5
| 3,008
|
py
|
Python
|
keras_textclassification/conf/path_config.py
|
atom-zh/Keras-TextClassification
|
26c549e8e23c6a10905c2dcef7eef557dc43c932
|
[
"MIT"
] | null | null | null |
keras_textclassification/conf/path_config.py
|
atom-zh/Keras-TextClassification
|
26c549e8e23c6a10905c2dcef7eef557dc43c932
|
[
"MIT"
] | null | null | null |
keras_textclassification/conf/path_config.py
|
atom-zh/Keras-TextClassification
|
26c549e8e23c6a10905c2dcef7eef557dc43c932
|
[
"MIT"
] | null | null | null |
# -*- coding: UTF-8 -*-
# !/usr/bin/python
# @time     :2019/6/5 21:04
# @author   :Mo
# @function :file of path
import os
import pathlib
import sys

# Project root (parent of this conf/ package) and the repository top
# (three levels up), both normalized to forward slashes so the derived
# paths behave the same on Windows and POSIX.
path_root = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)).replace('\\', '/')
path_top = str(pathlib.Path(os.path.abspath(__file__)).parent.parent.parent).replace('\\', '/')

# Embedding resources.
path_embedding_user_dict = f'{path_root}/data/embeddings/user_dict.txt'
path_embedding_random_char = f'{path_root}/data/embeddings/term_char.txt'
path_embedding_random_word = f'{path_root}/data/embeddings/term_word.txt'
path_embedding_bert = f'{path_root}/data/embeddings/chinese_L-12_H-768_A-12/'
path_embedding_xlnet = f'{path_root}/data/embeddings/chinese_xlnet_mid_L-24_H-768_A-12/'
path_embedding_albert = f'{path_root}/data/embeddings/albert_base_zh'
path_embedding_vector_word2vec_char = f'{path_root}/data/embeddings/multi_label_char.vec'
path_embedding_vector_word2vec_word = f'{path_root}/data/embeddings/multi_label_word.vec'
path_embedding_vector_word2vec_char_bin = f'{path_root}/data/embeddings/multi_label_char.bin'
path_embedding_vector_word2vec_word_bin = f'{path_root}/data/embeddings/multi_label_word.bin'

# Classification data: baidu qa 2019.
path_baidu_qa_2019_train = f'{path_root}/data/baidu_qa_2019/baike_qa_train.csv'
path_baidu_qa_2019_valid = f'{path_root}/data/baidu_qa_2019/baike_qa_valid.csv'

# Toutiao (byte) multi-label news classification.
path_byte_multi_news_train = f'{path_root}/data/byte_multi_news/train.csv'
path_byte_multi_news_valid = f'{path_root}/data/byte_multi_news/valid.csv'
path_byte_multi_news_label = f'{path_root}/data/byte_multi_news/labels.csv'

# Sentence-similarity data: sim webank.
path_sim_webank_train = f'{path_root}/data/sim_webank/train.csv'
path_sim_webank_valid = f'{path_root}/data/sim_webank/valid.csv'
path_sim_webank_test = f'{path_root}/data/sim_webank/test.csv'

# Multi-label classification data, 2021.
path_multi_label_train = f'{path_root}/data/multi_label/train.csv'
path_multi_label_valid = f'{path_root}/data/multi_label/valid.csv'
path_multi_label_labels = f'{path_root}/data/multi_label/labels.csv'
path_multi_label_tests = f'{path_root}/data/multi_label/tests.csv'

# Dataset abstraction layer: the generic names point at the active corpus.
path_label = path_multi_label_labels
path_train = path_multi_label_train
path_valid = path_multi_label_valid
path_tests = path_multi_label_tests
path_edata = f"{path_root}/../out/error_data.csv"

# fast_text config.
path_out = f"{path_top}/out/"
# Model directory.
path_model_dir = f"{path_root}/data/model/fast_text/"
# Trained model file.
path_model = f'{path_root}/data/model/fast_text/model_fast_text.h5'
# Hyper-parameter save location.
path_hyper_parameters = f'{path_root}/data/model/fast_text/hyper_parameters.json'
# Fine-tuned embedding save location.
path_fineture = f"{path_root}/data/model/fast_text/embedding_trainable.h5"
# Category -> labels index mapping.
path_category = f'{path_root}/data/multi_label/category2labels.json'
# label<->id mappings (l2i_i2l).
path_l2i_i2l = f'{path_root}/data/multi_label/l2i_i2l.json'
| 41.777778
| 94
| 0.772939
| 469
| 3,008
| 4.528785
| 0.221748
| 0.120527
| 0.158192
| 0.103578
| 0.53484
| 0.227401
| 0.126177
| 0.028249
| 0
| 0
| 0
| 0.024719
| 0.112367
| 3,008
| 71
| 95
| 42.366197
| 0.770787
| 0.101396
| 0
| 0
| 0
| 0
| 0.370214
| 0.366003
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.073171
| 0
| 0.073171
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4613a39367cc38fb7bf09761273d5ec4ce7cfaaf
| 5,540
|
py
|
Python
|
parser_tool/tests/test_htmlgenerator.py
|
Harvard-ATG/visualizing_russian_tools
|
e8e5cf8c5b7eee0b6855594ad41b3ccd70a2d467
|
[
"BSD-3-Clause"
] | 2
|
2020-07-10T14:17:03.000Z
|
2020-11-17T09:18:26.000Z
|
parser_tool/tests/test_htmlgenerator.py
|
eelegiap/visualizing_russian_tools
|
9c36baebc384133c7c27d7a7c4e0cedc8cb84e74
|
[
"BSD-3-Clause"
] | 13
|
2019-03-17T13:27:31.000Z
|
2022-01-18T17:03:14.000Z
|
parser_tool/tests/test_htmlgenerator.py
|
eelegiap/visualizing_russian_tools
|
9c36baebc384133c7c27d7a7c4e0cedc8cb84e74
|
[
"BSD-3-Clause"
] | 2
|
2019-10-19T16:37:44.000Z
|
2020-06-22T13:30:20.000Z
|
# -*- coding: utf-8 -*-
import unittest
from xml.etree import ElementTree as ET
from parser_tool import tokenizer
from parser_tool import htmlgenerator
class TestHtmlGenerator(unittest.TestCase):
    """Unit tests for htmlgenerator's token -> HTML rendering."""

    def _maketokendict(self, **kwargs):
        """Build a token dict with defaults; any field may be overridden
        via keyword arguments (token, index, offset, tokentype, canonical,
        form_ids, level)."""
        token_text = kwargs.get("token", "")
        token_dict = {
            "token": token_text,
            "index": kwargs.get("index", 0),
            "offset": kwargs.get("offset", 0),
            "tokentype": kwargs.get("tokentype", tokenizer.TOKEN_WORD),
            "canonical": kwargs.get("canonical", tokenizer.canonical(token_text)),
            "form_ids": kwargs.get("form_ids", []),
            "level": kwargs.get("level", ""),
        }
        return token_dict

    def test_render_token_russian_word(self):
        """A parsed Russian token renders as a <span> carrying its form ids
        and level in data- attributes."""
        token_text = "первоку́рсник"
        token_dict = self._maketokendict(token=token_text, tokentype=tokenizer.TOKEN_RUS, level="3A", form_ids=["174128"])
        rendered = htmlgenerator.render_token(token_dict)
        node_type, el = rendered['node_type'], rendered['element']
        self.assertEqual(htmlgenerator.ELEMENT_NODE, node_type)
        self.assertEqual("span", el.tag)
        self.assertEqual({
            "class": "word parsed level3",
            "data-form-ids": ",".join(token_dict['form_ids']),
            "data-level": token_dict['level']
        }, el.attrib)
        self.assertEqual(token_text, el.text)

    def test_render_token_english_word(self):
        """A non-Russian word renders as a bare <span class="word">."""
        token_text = "hypothetical"
        token_dict = self._maketokendict(token=token_text, tokentype=tokenizer.TOKEN_WORD)
        rendered = htmlgenerator.render_token(token_dict)
        node_type, el = rendered['node_type'], rendered['element']
        self.assertEqual(htmlgenerator.ELEMENT_NODE, node_type)
        self.assertEqual("span", el.tag)
        self.assertEqual({"class": "word"}, el.attrib)
        self.assertEqual(token_text, el.text)

    def test_render_token_with_multiple_spaces(self):
        """Runs of spaces become a text node; each space is doubled into
        two non-breaking spaces (U+00A0) so the browser preserves them."""
        token_text = " " * 3
        expected_text = token_text.replace(" ", "\u00A0\u00A0")
        token_dict = self._maketokendict(token=token_text, tokentype=tokenizer.TOKEN_SPACE)
        rendered = htmlgenerator.render_token(token_dict)
        self.assertEqual(htmlgenerator.TEXT_NODE, rendered['node_type'])
        self.assertEqual(expected_text, rendered['text'])

    def test_render_token_with_punctuation(self):
        """Punctuation renders as a plain text node, unchanged."""
        token_text = "')."
        expected_text = token_text
        # NOTE(review): tokentype is TOKEN_SPACE although the text is
        # punctuation -- looks like a copy-paste from the spaces test;
        # the asserted behavior (TEXT_NODE passthrough) still holds.
        token_dict = self._maketokendict(token=token_text, tokentype=tokenizer.TOKEN_SPACE)
        rendered = htmlgenerator.render_token(token_dict)
        self.assertEqual(htmlgenerator.TEXT_NODE, rendered['node_type'])
        self.assertEqual(expected_text, rendered['text'])

    def test_tokens_with_leading_punct_to_html(self):
        """Leading punctuation before a parsed word must not break the
        generated markup."""
        # (собака) dog
        tokens = [
            self._maketokendict(token="(", tokentype=tokenizer.TOKEN_PUNCT),
            self._maketokendict(token="собака", tokentype=tokenizer.TOKEN_RUS, level="1E", form_ids=["7599"]),
            # NOTE(review): ")" is tagged TOKEN_RUS (not TOKEN_PUNCT); the
            # expected HTML below indeed wraps it in a <span class="word">,
            # so the tag appears intentional here -- confirm.
            self._maketokendict(token=")", tokentype=tokenizer.TOKEN_RUS),
            self._maketokendict(token=" ", tokentype=tokenizer.TOKEN_SPACE),
            self._maketokendict(token="dog", tokentype=tokenizer.TOKEN_WORD),
        ]
        html = htmlgenerator.tokens2html(tokens)
        expected_html = '<pre class="words">(<span data-form-ids="7599" data-level="1E" class="word parsed level1">собака</span><span class="word">)</span> <span class="word">dog</span></pre>'
        self.assertEqual(expected_html, html)

    def test_tokens2html(self):
        """End-to-end check of tokens2html: container element, child count,
        and how word/space tokens map to elements, text and tails."""
        tokens = [
            self._maketokendict(token="A", tokentype=tokenizer.TOKEN_WORD),
            self._maketokendict(token=" ", tokentype=tokenizer.TOKEN_SPACE),
            self._maketokendict(token="первоку́рсник", tokentype=tokenizer.TOKEN_RUS, level="3A", form_ids=["174128"]),
            self._maketokendict(token=" ", tokentype=tokenizer.TOKEN_SPACE),
            self._maketokendict(token="|", tokentype=tokenizer.TOKEN_PUNCT),
            self._maketokendict(token="первоку́рсница", tokentype=tokenizer.TOKEN_RUS, level="3A", form_ids=["174128"]),
            self._maketokendict(token=" ", tokentype=tokenizer.TOKEN_SPACE),
        ]
        html = htmlgenerator.tokens2html(tokens)
        root = ET.fromstring(html)
        # Check the root element (e.g. container)
        self.assertEqual("pre", root.tag)
        self.assertEqual({"class": "words"}, root.attrib)
        # Check that we have the expected number of child elements (1 element for each word or russian token)
        expected_word_elements = sum([1 for t in tokens if t['tokentype'] in (tokenizer.TOKEN_WORD, tokenizer.TOKEN_RUS)])
        self.assertEqual(expected_word_elements, len(root))
        # Now check the first few tokens...
        # 1) Check that the first child contains the text of the first token
        self.assertEqual(tokens[0]['token'], root[0].text)
        self.assertEqual("span", root[0].tag)
        self.assertEqual({"class": "word"}, root[0].attrib)
        # 2) Check that the first child's tail contains the text of the second token since it's a space token
        self.assertEqual(tokens[1]['token'], root[0].tail)
        # 3) Check that the second child contains the text of the third token
        self.assertEqual(tokens[2]['token'], root[1].text)
        self.assertEqual("span", root[1].tag)
        self.assertEqual({'class': 'word parsed level3', 'data-form-ids': '174128', 'data-level': '3A'}, root[1].attrib)
| 49.026549
| 192
| 0.658845
| 656
| 5,540
| 5.382622
| 0.179878
| 0.097706
| 0.110734
| 0.061456
| 0.533277
| 0.480317
| 0.448032
| 0.448032
| 0.448032
| 0.441518
| 0
| 0.015743
| 0.208845
| 5,540
| 112
| 193
| 49.464286
| 0.789185
| 0.079964
| 0
| 0.298851
| 0
| 0.011494
| 0.121855
| 0.014937
| 0
| 0
| 0
| 0
| 0.264368
| 1
| 0.08046
| false
| 0
| 0.045977
| 0
| 0.149425
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4615c8d476cced9b6746382173a9829cad6f16c7
| 995
|
bzl
|
Python
|
external_plugin_deps.bzl
|
michalgagat/plugins_oauth
|
47cc344013bd43a4ac508c578f2d93f37a166ee6
|
[
"Apache-2.0",
"MIT"
] | 143
|
2015-03-09T21:18:39.000Z
|
2022-03-02T13:27:12.000Z
|
external_plugin_deps.bzl
|
michalgagat/plugins_oauth
|
47cc344013bd43a4ac508c578f2d93f37a166ee6
|
[
"Apache-2.0",
"MIT"
] | 162
|
2015-03-15T04:00:41.000Z
|
2022-02-24T07:29:17.000Z
|
external_plugin_deps.bzl
|
michalgagat/plugins_oauth
|
47cc344013bd43a4ac508c578f2d93f37a166ee6
|
[
"Apache-2.0",
"MIT"
] | 97
|
2015-02-27T18:35:20.000Z
|
2022-01-08T13:17:21.000Z
|
load("//tools/bzl:maven_jar.bzl", "maven_jar")
def external_plugin_deps(omit_commons_codec = True):
    """Declare the external Maven jar dependencies of this plugin.

    Args:
      omit_commons_codec: when False, also fetch commons-codec (for trees
        that do not already provide it).
    """
    jackson_vers = "2.10.2"

    specs = [
        {
            "name": "scribejava-core",
            "artifact": "com.github.scribejava:scribejava-core:6.9.0",
            "sha1": "ed761f450d8382f75787e8fee9ae52e7ec768747",
        },
        {
            "name": "jackson-annotations",
            "artifact": "com.fasterxml.jackson.core:jackson-annotations:" + jackson_vers,
            "sha1": "3a13b6105946541b8d4181a0506355b5fae63260",
        },
        {
            "name": "jackson-databind",
            "artifact": "com.fasterxml.jackson.core:jackson-databind:" + jackson_vers,
            "sha1": "0528de95f198afafbcfb0c09d2e43b6e0ea663ec",
            "deps": ["@jackson-annotations//jar"],
        },
    ]
    if not omit_commons_codec:
        specs.append({
            "name": "commons-codec",
            "artifact": "commons-codec:commons-codec:1.4",
            "sha1": "4216af16d38465bbab0f3dff8efa14204f7a399a",
        })

    for spec in specs:
        maven_jar(**spec)
| 34.310345
| 84
| 0.629146
| 92
| 995
| 6.641304
| 0.413043
| 0.07856
| 0.07856
| 0.062193
| 0.124386
| 0.124386
| 0
| 0
| 0
| 0
| 0
| 0.151678
| 0.251256
| 995
| 28
| 85
| 35.535714
| 0.668456
| 0
| 0
| 0.148148
| 0
| 0
| 0.455276
| 0.376884
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0
| 0
| 0
| 0.037037
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|