Dataset schema, 113 columns (column | dtype). The three repo-stat groups share one layout, so they are written once with a {max_stars, max_issues, max_forks} prefix; the n-gram columns are grouped the same way.

| Column | Dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| {max_stars,max_issues,max_forks}_repo_path | string |
| {max_stars,max_issues,max_forks}_repo_name | string |
| {max_stars,max_issues,max_forks}_repo_head_hexsha | string |
| {max_stars,max_issues,max_forks}_repo_licenses | list |
| {max_stars,max_issues,max_forks}_count | int64 |
| max_stars_repo_stars_event_{min,max}_datetime | string |
| max_issues_repo_issues_event_{min,max}_datetime | string |
| max_forks_repo_forks_event_{min,max}_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_{2,3,4}grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_{5,6,7,8,9,10}grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_* / qsc_codepython_* (the same 41 names without the _quality_signal suffix) | int64, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null) |
| effective | string |
| hits | int64 |
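The quality-signal columns are what make a dump like this filterable. As a minimal sketch (column names are taken from the schema above; the parquet file name is a placeholder, since the backing file is not named in this dump), one might keep only Python files that parse cleanly and are not flagged as autogenerated:

```python
# Minimal filtering sketch. "stack_sample.parquet" is a placeholder name;
# point it at whatever file actually backs this dump.
import pandas as pd

df = pd.read_parquet("stack_sample.parquet")

mask = (
    (df["lang"] == "Python")
    # the file parses into a Python AST
    & (df["qsc_codepython_cate_ast_quality_signal"] == 1)
    # not flagged as autogenerated
    & (df["qsc_code_cate_autogen_quality_signal"] == 0)
    # limit near-duplicate content
    & (df["qsc_code_frac_chars_dupe_10grams_quality_signal"] < 0.5)
)
print(df.loc[mask, ["hexsha", "max_stars_repo_name", "size"]].head())
```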
Record 1
hexsha: 094d973f1a0b76ddedf08b03767accf5c0cfd497 | size: 7,467 | ext: py | lang: Python
max_stars:  mlcomponents/encoderdecoder.py @ microsoft/aaai21-copy-that (head 7dfb2ebabbbf1165a33c2430ef2f2571e487b4fd), licenses ["MIT"], count 7, events 2021-06-21T17:13:23.000Z to 2022-02-25T06:28:24.000Z
max_issues: same path/repo/head, licenses ["MIT"], count null, events null to null
max_forks:  same path/repo/head, licenses ["MIT"], count 2, events 2021-09-13T12:32:16.000Z to 2022-02-19T13:28:35.000Z
content:
import logging
from typing import Optional, Dict, Any, List, Tuple, NamedTuple

import torch

from data.edits import Edit
from dpu_utils.ptutils import BaseComponent
from mlcomponents.seqdecoding import SeqDecoder
from mlcomponents.seqencoder import SequenceEncoder


class EncoderDecoder(BaseComponent):
    LOGGER = logging.getLogger('EncoderDecoder')

    def __init__(self, name: str, input_sequence_encoder: SequenceEncoder,
                 output_sequence_decoder: SeqDecoder,
                 hyperparameters: Optional[Dict[str, Any]] = None) -> None:
        super(EncoderDecoder, self).__init__(name, hyperparameters)
        self.__input_sequence_encoder = input_sequence_encoder
        self.__output_sequence_decoder = output_sequence_decoder

    @classmethod
    def default_hyperparameters(cls) -> Dict[str, Any]:
        return {}

    def _finalize_component_metadata_and_model(self) -> None:
        pass

    @property
    def input_sequence_encoder(self):
        return self.__input_sequence_encoder

    @property
    def output_sequence_decoder(self):
        return self.__output_sequence_decoder

    def _load_metadata_from_sample(self, data_to_load: Edit) -> None:
        self.__input_sequence_encoder.load_metadata_from_sample(data_to_load.input_sequence)
        self.__output_sequence_decoder.load_metadata_from_sample(SeqDecoder.InputOutputSequence(
            input_sequence=data_to_load.input_sequence,
            output_sequence=data_to_load.output_sequence
        ))

    TensorizedData = NamedTuple('EncoderDecoderTensorizedData', [
        ('input_sequence', Any),
        ('output_sequence', Any),
    ])

    def load_data_from_sample(self, data_to_load: Edit) -> Optional['EncoderDecoder.TensorizedData']:
        return self.TensorizedData(
            input_sequence=self.__input_sequence_encoder.load_data_from_sample(
                [SeqDecoder.START] + data_to_load.input_sequence + [SeqDecoder.END]),
            output_sequence=self.__output_sequence_decoder.load_data_from_sample(SeqDecoder.InputOutputSequence(
                input_sequence=[SeqDecoder.START] + data_to_load.input_sequence + [SeqDecoder.END],
                output_sequence=data_to_load.output_sequence
            ))
        )

    def initialize_minibatch(self) -> Dict[str, Any]:
        return {
            'input_sequences': self.__input_sequence_encoder.initialize_minibatch(),
            'output_sequences': self.__output_sequence_decoder.initialize_minibatch(),
        }

    def extend_minibatch_by_sample(self, datapoint: 'EncoderDecoder.TensorizedData',
                                   accumulated_minibatch_data: Dict[str, Any]) -> bool:
        continue_extending = self.__input_sequence_encoder.extend_minibatch_by_sample(
            datapoint=datapoint.input_sequence,
            accumulated_minibatch_data=accumulated_minibatch_data['input_sequences'])
        continue_extending &= self.__output_sequence_decoder.extend_minibatch_by_sample(
            datapoint=datapoint.output_sequence,
            accumulated_minibatch_data=accumulated_minibatch_data['output_sequences'])
        return continue_extending

    def finalize_minibatch(self, accumulated_minibatch_data: Dict[str, Any]) -> Dict[str, Any]:
        return {
            'input_sequences': self.__input_sequence_encoder.finalize_minibatch(accumulated_minibatch_data['input_sequences']),
            'output_sequences': self.__output_sequence_decoder.finalize_minibatch(accumulated_minibatch_data['output_sequences'])
        }

    def forward(self, *, input_sequences: Dict[str, Any], output_sequences: Dict[str, Any]):
        input_encoding = self.__input_sequence_encoder.forward(
            input_sequence_data=input_sequences,
            return_embedded_sequence=True
        )
        memories, memories_lengths, output_state, input_sequence_token_embeddings = input_encoding
        decoder_loss = self.__output_sequence_decoder.forward(
            memories=memories, memories_lengths=memories_lengths,
            initial_state=output_state,
            input_sequence_token_embeddings=input_sequence_token_embeddings,
            **output_sequences)
        return decoder_loss

    def greedy_decode(self, input_sequences: Dict[str, Any],
                      ground_input_sequences: List[List[str]],
                      max_length: int = 50) -> List[Tuple[List[List[str]], List[float]]]:
        with torch.no_grad():
            ground_input_sequences, initial_state, memories, memory_lengths = self.__prepare_decoding(
                ground_input_sequences, input_sequences)
            return self.__output_sequence_decoder.greedy_decode(
                memories, memory_lengths,
                initial_state=initial_state, max_length=max_length,
                memories_str_representations=[[SeqDecoder.START] + g + [SeqDecoder.END] for g in ground_input_sequences])

    def beam_decode(self, input_sequences: Dict[str, Any],
                    ground_input_sequences: List[List[str]],
                    max_length: int = 150) -> List[Tuple[List[List[str]], List[float]]]:
        with torch.no_grad():
            ground_input_sequences, initial_state, memories, memory_lengths = self.__prepare_decoding(
                ground_input_sequences, input_sequences)
            return self.__output_sequence_decoder.beam_decode(
                memories, memory_lengths,
                initial_state=initial_state, max_length=max_length,
                memories_str_representations=[[SeqDecoder.START] + g + [SeqDecoder.END] for g in ground_input_sequences],
            )

    def __prepare_decoding(self, ground_input_sequences, input_sequences):
        memories, memory_lengths, output_state = self.__input_sequence_encoder.forward(
            input_sequence_data=input_sequences)
        return ground_input_sequences, output_state, memories, memory_lengths

    def compute_likelihood(self, *, input_sequences: Dict[str, Any], output_sequences: Dict[str, Any],
                           return_debug_info: bool = False):
        with torch.no_grad():
            memories, memories_lengths, output_state = self.__input_sequence_encoder.forward(
                input_sequence_data=input_sequences)
            return self.__output_sequence_decoder.compute_likelihood(
                memories=memories,
                memories_lengths=memories_lengths,
                initial_state=output_state,
                return_debug_info=return_debug_info,
                **output_sequences)
avg_line_length 57.438462 | max_line_length 171 | alphanum_fraction 0.613633
qsc_code_*_quality_signal: num_words 692, num_chars 7,467, mean_word_length 6.160405, frac_words_unique 0.151734, frac_chars_top_2grams 0.085386, frac_chars_top_3grams 0.068966, frac_chars_top_4grams 0.064509, frac_chars_dupe_5grams 0.617406, dupe_6grams 0.533896, dupe_7grams 0.440066, dupe_8grams 0.362186, dupe_9grams 0.362186, dupe_10grams 0.362186, frac_chars_replacement_symbols 0, frac_chars_digital 0.000983, frac_chars_whitespace 0.31887, size_file_byte 7,467, num_lines 129, num_chars_line_max 172, num_chars_line_mean 57.883721, frac_chars_alphabet 0.8372, frac_chars_comments 0, cate_xml_start 0, frac_lines_dupe_lines 0.203884, cate_autogen 0, frac_lines_long_string 0, frac_chars_string_length 0.034478, frac_chars_long_word_length 0.01172, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0, frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1, frac_lines_func_ratio 0.145631, cate_var_zero false, frac_lines_pass 0.009709, frac_lines_import 0.067961, frac_lines_simplefunc 0.058252, score_lines_no_logic 0.359223, frac_lines_print 0
unsuffixed qsc_* mirror columns: all 0 (frac_words_unique and frac_lines_string_concat null) | effective 1 | hits 0
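To make the frac_* values above concrete, here is a small sketch of how two of them could be computed. These are plausible readings of the column names, not the dataset's exact reference implementation, which may tokenize or normalize differently:

```python
from collections import Counter

def frac_lines_dupe_lines(text: str) -> float:
    # Fraction of non-empty lines that occur more than once in the file.
    lines = [ln for ln in text.splitlines() if ln.strip()]
    if not lines:
        return 0.0
    counts = Counter(lines)
    return sum(c for c in counts.values() if c > 1) / len(lines)

def frac_chars_whitespace(text: str) -> float:
    # Fraction of all characters that are whitespace.
    return sum(ch.isspace() for ch in text) / len(text) if text else 0.0
```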
Record 2
hexsha: 094ecb7e5b2ee93093ff48eabf2ef2a4ed487787 | size: 8,195 | ext: py | lang: Python
max_stars:  mplStyle/MplTickStyle.py @ khanfarhan10/mplStyle (head f657f54c6c101811b8bf0c44f4b16d4f4926685d), licenses ["BSD-3-Clause"], count 39, events 2015-03-08T23:05:01.000Z to 2022-02-07T16:03:35.000Z
max_issues: same path/repo/head, licenses ["BSD-3-Clause"], count null, events null to null
max_forks:  same path/repo/head, licenses ["BSD-3-Clause"], count 23, events 2015-03-08T19:56:59.000Z to 2021-07-15T15:16:26.000Z
content:
#===========================================================================
#
# Copyright (c) 2014, California Institute of Technology.
# U.S. Government Sponsorship under NASA Contract NAS7-03001 is
# acknowledged. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#===========================================================================
""": A class containing tick mark style information."""

__version__ = "$Revision: #1 $"

#===========================================================================
from . import types as S
from .MplBasicLineStyle import MplBasicLineStyle
from .MplTextStyle import MplTextStyle

import matplotlib.axis as mplaxis
#===========================================================================

__all__ = ['MplTickStyle']

#===========================================================================
class MplTickStyle(S.SubStyle):
    """: Style properties for managing matplotlib axis tick elements.
    """

    labels = S.property.SubStyle(MplTextStyle, doc="""
        The style properties for any text labels placed at tick marks along the
        primary axis edge.

        If this is on the X-Axis, then the primary edge is the bottom.
        If this is on the Y-Axis, then the primary edge is the left.

        = SEE ALSO
        - :ref:`MplTextStyle <mplStyle_MplTextStyle>`
        """)

    secondaryLabels = S.property.SubStyle(MplTextStyle, doc="""
        The style properties for any text labels placed at tick marks along the
        secondary axis edge.

        If this is on the X-Axis, then the secondary edge is the top.
        If this is on the Y-Axis, then the secondary edge is the right.

        = SEE ALSO
        - :ref:`MplTextStyle <mplStyle_MplTextStyle>`
        """)

    marks = S.property.SubStyle(MplBasicLineStyle, doc="""
        The style properties for the tick marks along the primary axis edge.

        If this is on the X-Axis, then the primary edge is the bottom.
        If this is on the Y-Axis, then the primary edge is the left.

        = SEE ALSO
        - :ref:`MplBasicLineStyle <mplStyle_MplBasicLineStyle>`
        """)

    secondaryMarks = S.property.SubStyle(MplBasicLineStyle, doc="""
        The style properties for the tick marks along the secondary axis edge.

        If this is on the X-Axis, then the secondary edge is the top.
        If this is on the Y-Axis, then the secondary edge is the right.

        = SEE ALSO
        - :ref:`MplBasicLineStyle <mplStyle_MplBasicLineStyle>`
        """)

    grid = S.property.SubStyle(MplBasicLineStyle, doc="""
        The style properties for the grid lines.

        Grid lines are present for each tick mark. This means that if there is no
        tick locator for an axis, then there are no ticks to use for grid lines.
        Setting the visibility of the tick marks to True will ensure that a tick
        locator is present to use for generating grid lines.

        = SEE ALSO
        - :ref:`MplBasicLineStyle <mplStyle_MplBasicLineStyle>`
        """)

    length = S.property.Float(min=0.0, doc="""
        The length of the ticks (in points).
        """)

    width = S.property.Float(min=0.0, doc="""
        The width of the ticks (in points).
        """)

    pad = S.property.Float(doc="""
        The spacing between the ticks and their labels (in points).
        """)

    #-----------------------------------------------------------------------
    def apply(self, obj, defaults={}, **kwargs):
        """: Apply this style to the given object using the supplied defaults.

        = NOTE
        - This can apply to any matplotlib Tick.

        = INPUT VARIABLES
        - obj       The object to apply the style to.
        - defaults  Keyword-value dictionary with default values to use if a
                    property value is not specified.
        - kwargs    Keyword-value dictionary whose values will supersede
                    any values set by the properties of this sub-style.
        """
        if not isinstance(obj, mplaxis.Tick):
            msg = "Unable to apply this sub-style to the given element. " \
                  "Expected a matplotlib 'Tick' and instead received the " \
                  "following:\n%s" % (obj,)
            raise Exception(msg)

        # Labels
        subKwargs = kwargs.get('labels', {})
        subDefaults = S.lib.resolveDefaults(defaults, ['text', 'labels'])
        self.labels.apply(obj.label1, subDefaults, **subKwargs)

        value = self.labels.getValue('visible', subDefaults, **subKwargs)
        if value is not None:
            obj.label1On = value

        # Secondary Labels
        subKwargs = kwargs.get('secondaryLabels', {})
        subDefaults = S.lib.resolveDefaults(defaults,
                                            ['text', 'labels', 'secondaryLabels'])
        self.secondaryLabels.apply(obj.label2, subDefaults, **subKwargs)

        value = self.secondaryLabels.getValue('visible',
                                              subDefaults, **subKwargs)
        if value is not None:
            obj.label2On = value

        # Marks
        subKwargs = kwargs.get('marks', {})
        subDefaults = S.lib.resolveDefaults(defaults, ['marks'])
        self.marks.apply(obj.tick1line, subDefaults, **subKwargs)

        value = self.marks.getValue('visible', subDefaults, **subKwargs)
        if value is not None:
            obj.tick1On = value

        # Secondary Marks
        subKwargs = kwargs.get('secondaryMarks', {})
        subDefaults = S.lib.resolveDefaults(defaults, ['secondaryMarks'])
        self.secondaryMarks.apply(obj.tick2line, subDefaults, **subKwargs)

        value = self.secondaryMarks.getValue('visible',
                                             subDefaults, **subKwargs)
        if value is not None:
            obj.tick2On = value

        # Grid
        subKwargs = kwargs.get('grid', {})
        subDefaults = S.lib.resolveDefaults(defaults, ['grid'])
        self.grid.apply(obj.gridline, subDefaults, **subKwargs)

        value = self.grid.getValue('visible', subDefaults, **subKwargs)
        if value is not None:
            obj.gridOn = value

        # Activate the grid as appropriate
        #FUTURE: This should be here using Tick.major, but matplotlib
        #FUTURE: needs to be fixed first.
        #FUTURE   obj.grid(self.grid.visible)

        #FUTURE: Setup minor tick locators (as necessary)

        # Length
        value = self.getValue('length', defaults, **kwargs)
        if value is not None:
            obj._size = value
            obj.tick1line.set_markersize(obj._size)
            obj.tick2line.set_markersize(obj._size)

        # Width
        value = self.getValue('width', defaults, **kwargs)
        if value is not None:
            obj._width = value
            obj.tick1line.set_markeredgewidth(obj._width)
            obj.tick2line.set_markeredgewidth(obj._width)

        # Pad
        value = self.getValue('pad', defaults, **kwargs)
        if value is not None:
            obj.set_pad(value)

    #-----------------------------------------------------------------------
avg_line_length 37.591743 | max_line_length 80 | alphanum_fraction 0.634899
qsc_code_*_quality_signal: num_words 988, num_chars 8,195, mean_word_length 5.241903, frac_words_unique 0.275304, frac_chars_top_2grams 0.038617, frac_chars_top_3grams 0.017378, frac_chars_top_4grams 0.015447, frac_chars_dupe_5grams 0.3715, dupe_6grams 0.333269, dupe_7grams 0.294651, dupe_8grams 0.276115, dupe_9grams 0.247345, dupe_10grams 0.247345, frac_chars_replacement_symbols 0, frac_chars_digital 0.004691, frac_chars_whitespace 0.219646, size_file_byte 8,195, num_lines 217, num_chars_line_max 81, num_chars_line_mean 37.764977, frac_chars_alphabet 0.80516, frac_chars_comments 0.356437, cate_xml_start 0, frac_lines_dupe_lines 0.35514, cate_autogen 0, frac_lines_long_string 0, frac_chars_string_length 0.369637, frac_chars_long_word_length 0.039604, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0, frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1, frac_lines_func_ratio 0.009346, cate_var_zero false, frac_lines_pass 0, frac_lines_import 0.037383, frac_lines_simplefunc 0, score_lines_no_logic 0.130841, frac_lines_print 0
unsuffixed qsc_* mirror columns: all 0 (frac_words_unique and frac_lines_string_concat null) | effective 1 | hits 0
Record 3
hexsha: 095359d3e1736ca0199f424b5cea96012cb5fc24 | size: 3,286 | ext: py | lang: Python
max_stars:  micolog/cache2.py @ tclh123/micolog (head 9b6fcddb9f147fe20a0cbfe0e89eda07f69d0f68), licenses ["MIT"], count null, events null to null
max_issues: same path/repo/head, licenses ["MIT"], count null, events null to null
max_forks:  same path/repo/head, licenses ["MIT"], count null, events null to null
content:
#-------------------------------------------------------------------------------
# Name:        cache.py
# Purpose:
#
# Author:      xuming
#
# Created:     23-01-2011
# Copyright:   (c) xuming 2011
# Licence:     GPL
#-------------------------------------------------------------------------------
#!/usr/bin/env python
"""A simple cache wrapper for micolog.

The main purpose of this module is to provide a common layer for all
methods that need to be cached.
"""
from google.appengine.api import memcache
from utils import format_date
from datetime import datetime
from settings import ENABLE_MEMCACHE


def vcache(key="", time=0, args=()):
    """
    Cache for a normal method that returns some object.

    example::

        @vcache("blog.hotposts", args=('count'))
        def hotposts(self, count=8):
            return Entry.all().filter('entrytype =', 'post').filter("published =", True).order('-readtimes').fetch(count)

    args:
        key: key name for memcache
        args: the list of cached args
        time: relative number of seconds from current time.
    """
    def _decorate(method):
        def _wrapper(*cargs, **kwargs):
            if not ENABLE_MEMCACHE:
                return method(*cargs, **kwargs)
            skey = key
            if hasattr(cargs[0], "vkey"):
                skey = key + cargs[0].vkey
            # Append each cached keyword argument to the memcache key.
            for arg in args:
                if arg in kwargs:
                    skey += "_" + str(arg) + "_" + str(kwargs[arg])
            result = memcache.get(skey)
            if result is None:
                result = method(*cargs, **kwargs)
                memcache.set(skey, result, time)
            return result
        return _wrapper
    return _decorate


def cache(key="", time=0):
    """
    Cache for a request handler method, such as get or post.
    It will cache the web page.

    example::

        @cache(time=600)
        def get(self, tags=None):
            ...

    args:
        key: optional key name; request.path_qs is used as default.
        time: relative number of seconds from current time.
    """
    def _decorate(method):
        def _wrapper(*args, **kwargs):
            if not ENABLE_MEMCACHE:
                method(*args, **kwargs)
                return
            # args[0] is the BaseRequestHandler object
            request = args[0].request
            response = args[0].response
            skey = key + request.path_qs
            #logging.info('skey:' + skey)
            # Cached value layout: (body, last_modified[, status_code[, headers]])
            html = memcache.get(skey)
            if html:
                #logging.info('cache:' + skey)
                response.last_modified = html[1]
                ilen = len(html)
                if ilen >= 3:
                    response.set_status(html[2])
                if ilen >= 4:
                    for hkey, value in html[3].items():
                        response.headers[hkey] = value
                response.out.write(html[0])
            else:
                if 'last-modified' not in response.headers:
                    response.last_modified = format_date(datetime.utcnow())
                method(*args, **kwargs)
                result = response.body
                status_code = response.status_int
                memcache.set(skey, (result, response.last_modified, status_code, response.headers), time)
        return _wrapper
    return _decorate
avg_line_length 30.71028 | max_line_length 121 | alphanum_fraction 0.529215
qsc_code_*_quality_signal: num_words 360, num_chars 3,286, mean_word_length 4.761111, frac_words_unique 0.394444, frac_chars_top_2grams 0.028005, frac_chars_top_3grams 0.035006, frac_chars_top_4grams 0.023337, frac_chars_dupe_5grams 0.109685, dupe_6grams 0.080513, dupe_7grams 0.080513, dupe_8grams 0.080513, dupe_9grams 0.080513, dupe_10grams 0.080513, frac_chars_replacement_symbols 0, frac_chars_digital 0.013004, frac_chars_whitespace 0.321363, size_file_byte 3,286, num_lines 107, num_chars_line_max 122, num_chars_line_mean 30.71028, frac_chars_alphabet 0.755605, frac_chars_comments 0.357882, cate_xml_start 0, frac_lines_dupe_lines 0.2, cate_autogen 0, frac_lines_long_string 0, frac_chars_string_length 0.009505, frac_chars_long_word_length 0, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0, frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1, frac_lines_func_ratio 0.12, cate_var_zero false, frac_lines_pass 0, frac_lines_import 0.08, frac_lines_simplefunc 0, score_lines_no_logic 0.34, frac_lines_print 0
unsuffixed qsc_* mirror columns: all 0 (frac_words_unique and frac_lines_string_concat null) | effective 1 | hits 0
Record 4
hexsha: 095b1d80f095695d78ca8e32322128326b38659a | size: 5,112 | ext: py | lang: Python
max_stars:  LeetCode-All-Solution/Python3/LC-0189-Rotate-Array.py @ YuweiYin/Algorithm_YuweiYin (head 28648fac59c5a4e3c907978cbd1b3e662ba18fd5), licenses ["MIT"], count null, events null to null
max_issues: same path/repo/head, licenses ["MIT"], count null, events null to null
max_forks:  same path/repo/head, licenses ["MIT"], count null, events null to null
content:
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""=================================================================
@Project : Algorithm_YuweiYin/LeetCode-All-Solution/Python3
@File    : LC-0189-Rotate-Array.py
@Author  : [YuweiYin](https://github.com/YuweiYin)
@Date    : 2022-01-02
=================================================================="""

import sys
import time
from typing import List

"""
LeetCode - 0189 - (Medium) - Rotate Array
https://leetcode.com/problems/rotate-array/

Description:
    Given an array, rotate the array to the right by k steps, where k is non-negative.

Example 1:
    Input: nums = [1,2,3,4,5,6,7], k = 3
    Output: [5,6,7,1,2,3,4]
    Explanation:
        rotate 1 steps to the right: [7,1,2,3,4,5,6]
        rotate 2 steps to the right: [6,7,1,2,3,4,5]
        rotate 3 steps to the right: [5,6,7,1,2,3,4]

Example 2:
    Input: nums = [-1,-100,3,99], k = 2
    Output: [3,99,-1,-100]
    Explanation:
        rotate 1 steps to the right: [99,-1,-100,3]
        rotate 2 steps to the right: [3,99,-1,-100]

Constraints:
    1 <= nums.length <= 10^5
    -2^31 <= nums[i] <= 2^31 - 1
    0 <= k <= 10^5

Follow up:
    Try to come up with as many solutions as you can.
    There are at least three different ways to solve this problem.
    Could you do it in-place with O(1) extra space?
"""


class Solution:
    def rotate(self, nums: List[int], k: int) -> None:
        """
        Do not return anything, modify nums in-place instead.
        """
        # exception case
        if not isinstance(nums, list) or len(nums) <= 1 or k <= 0:
            return
        # main method
        self._rotate_double_reverse(nums, k)

    # Warning: the following method is correct only if gcd(len_num, k) == 1
    # def _rotate_gcd1(self, nums: List[int], k: int) -> None:
    #     len_num = len(nums)
    #     if k > len_num:
    #         k %= len_num  # avoid unnecessary rotate
    #     # old_start_index, old_end_index = 0, len_num - 1
    #     # new_start_index = len_num - k
    #     # new_end_index = new_start_index - 1
    #     # nums[old_start_index]...nums[new_end_index] go right (gap = +k)
    #     # nums[new_start_index]...nums[old_end_index] go left (gap = -(len_num - k))
    #     forward_move_gap = k
    #     backward_move_gap = len_num - k
    #     watershed = len_num - k
    #     # move one by one in order to get O(1) space efficiency
    #     # if O(n) space, then just init a new list and append numbers, quite easy
    #     cur_move_index = 0
    #     temp_from_num = nums[cur_move_index]  # store the number that is going to replace another number
    #     move_counter = 0  # move len_num times in total
    #     while move_counter < len_num:
    #         if cur_move_index < watershed:  # go right (gap = +k)
    #             temp_to_num = nums[cur_move_index + forward_move_gap]  # store the number that is going to be replaced
    #             nums[cur_move_index + forward_move_gap] = temp_from_num
    #             temp_from_num = temp_to_num
    #             cur_move_index = cur_move_index + forward_move_gap
    #         else:  # go left (gap = -(len_num - k))
    #             temp_to_num = nums[cur_move_index - backward_move_gap]  # store the number that is going to be replaced
    #             nums[cur_move_index - backward_move_gap] = temp_from_num
    #             temp_from_num = temp_to_num
    #             cur_move_index = cur_move_index - backward_move_gap
    #
    #         move_counter += 1

    # 1. reverse the whole list; 2. split; 3. reverse the two small lists respectively; 4. combine.
    def _rotate_double_reverse(self, nums: List[int], k: int) -> None:
        len_num = len(nums)
        if k > len_num:
            k %= len_num  # avoid unnecessary rotate
        # split nums[0]...nums[k] and nums[k]...nums[len_num]
        watershed = len_num - k
        nums.reverse()
        # Way 1:
        # nums[0: k] = reversed(nums[0: k])
        # nums[k: len_num] = reversed(nums[k: len_num])
        # Way 2
        self._reverse_list_in_place(nums, 0, k - 1)
        self._reverse_list_in_place(nums, k, len_num - 1)

    @staticmethod
    def _reverse_list_in_place(nums: List[int], start_index: int, end_index: int) -> None:
        while start_index < end_index:
            temp_num = nums[start_index]
            nums[start_index] = nums[end_index]
            nums[end_index] = temp_num
            start_index += 1
            end_index -= 1


def main():
    # Example 1: Output: [5,6,7,1,2,3,4]
    nums = [1, 2, 3, 4, 5, 6, 7]
    k = 3
    # Example 2: Output: [3,99,-1,-100]
    # nums = [-1, -100, 3, 99]
    # k = 2
    # Example 3: Output: [4, 5, 6, 1, 2, 3]
    # nums = [1, 2, 3, 4, 5, 6]
    # k = 3

    # init instance
    solution = Solution()

    # run & time
    start = time.process_time()
    solution.rotate(nums, k)
    ans = nums
    end = time.process_time()

    # show answer
    print('\nAnswer:')
    print(ans)

    # show time consumption
    print('Running Time: %.5f ms' % ((end - start) * 1000))


if __name__ == "__main__":
    sys.exit(main())
avg_line_length 34.77551 | max_line_length 121 | alphanum_fraction 0.569836
qsc_code_*_quality_signal: num_words 760, num_chars 5,112, mean_word_length 3.648684, frac_words_unique 0.253947, frac_chars_top_2grams 0.043274, frac_chars_top_3grams 0.047602, frac_chars_top_4grams 0.01154, frac_chars_dupe_5grams 0.332853, dupe_6grams 0.318067, dupe_7grams 0.247025, dupe_8grams 0.178868, dupe_9grams 0.150739, dupe_10grams 0.150739, frac_chars_replacement_symbols 0, frac_chars_digital 0.049808, frac_chars_whitespace 0.285211, size_file_byte 5,112, num_lines 146, num_chars_line_max 122, num_chars_line_mean 35.013699, frac_chars_alphabet 0.709086, frac_chars_comments 0.499218, cate_xml_start 0, frac_lines_dupe_lines 0, cate_autogen 0, frac_lines_long_string 0, frac_chars_string_length 0.024723, frac_chars_long_word_length 0, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0, frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1, frac_lines_func_ratio 0.108108, cate_var_zero false, frac_lines_pass 0, frac_lines_import 0.081081, frac_lines_simplefunc 0, score_lines_no_logic 0.243243, frac_lines_print 0.081081
unsuffixed qsc_* mirror columns: all 0 (frac_words_unique and frac_lines_string_concat null) | effective 1 | hits 0
Record 5
hexsha: 095c5cd039268240cc1f5e992dfc55ceb574b9a1 | size: 921 | ext: py | lang: Python
max_stars:  d1/repeatedcharacter.py @ jwbat/python-datastructures (head 82ff91f1ee8c76a382bd5d43cdefe4ffcde00528), licenses ["MIT"], count null, events null to null
max_issues: same path/repo/head, licenses ["MIT"], count null, events null to null
max_forks:  same path/repo/head, licenses ["MIT"], count null, events null to null
content:
'''
One fcn returns the first nonrepeated character from string, s.
The other returns the first repeated character from the string.
'''

def print_dict(d):
    for k, v in d.items():
        print('\t', k, '=> ', v)

def char_count_dict(s):
    d = dict()
    for char in s:
        count = d.get(char, 0)
        d[char] = count + 1
    return d

def first_nonrepeated_char(s, d):
    for char in s:
        if d[char] == 1:
            return char
    return None

def first_repeated_char(s):
    st = set()
    for char in s:
        if char in st:
            return char
        st.add(char)
    return None

#s = 'Where have I heard that?'
#s = 'Wherefore art thou Romeo?'
s = 'Grilled cheeses are great with mustard.'
s = s.lower()
d = char_count_dict(s)
print('\n string: ', s)
print('\n\t 1st nonrepeated char: ', first_nonrepeated_char(s, d))
print('\t 1st repeated char:    ', first_repeated_char(s), '\n')
avg_line_length 20.931818 | max_line_length 66 | alphanum_fraction 0.599349
qsc_code_*_quality_signal: num_words 147, num_chars 921, mean_word_length 3.666667, frac_words_unique 0.346939, frac_chars_top_2grams 0.044527, frac_chars_top_3grams 0.050093, frac_chars_top_4grams 0.055659, frac_chars_dupe_5grams 0.12616, dupe_6grams 0, dupe_7grams 0, dupe_8grams 0, dupe_9grams 0, dupe_10grams 0, frac_chars_replacement_symbols 0, frac_chars_digital 0.007463, frac_chars_whitespace 0.27253, size_file_byte 921, num_lines 43, num_chars_line_max 67, num_chars_line_mean 21.418605, frac_chars_alphabet 0.797015, frac_chars_comments 0.205212, cate_xml_start 0, frac_lines_dupe_lines 0.259259, cate_autogen 0, frac_lines_long_string 0, frac_chars_string_length 0.148611, frac_chars_long_word_length 0, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0, frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1, frac_lines_func_ratio 0.148148, cate_var_zero false, frac_lines_pass 0, frac_lines_import 0, frac_lines_simplefunc 0, score_lines_no_logic 0.333333, frac_lines_print 0.185185
unsuffixed qsc_* mirror columns: all 0 (frac_words_unique and frac_lines_string_concat null) | effective 1 | hits 0
Record 6
hexsha: 095ca8346512b61fba41d3997f22f02f7c9433ae | size: 2,652 | ext: py | lang: Python
max_stars:  alipay/aop/api/domain/KoubeiCateringPosDishcateTransferModel.py @ snowxmas/alipay-sdk-python-all (head 96870ced60facd96c5bce18d19371720cbda3317), licenses ["Apache-2.0"], count 213, events 2018-08-27T16:49:32.000Z to 2021-12-29T04:34:12.000Z
max_issues: same path/repo/head, licenses ["Apache-2.0"], count 29, events 2018-09-29T06:43:00.000Z to 2021-09-02T03:27:32.000Z
max_forks:  same path/repo/head, licenses ["Apache-2.0"], count 59, events 2018-08-27T16:59:26.000Z to 2022-03-25T10:08:15.000Z
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json

from alipay.aop.api.constant.ParamConstants import *


class KoubeiCateringPosDishcateTransferModel(object):

    def __init__(self):
        self._cate_id = None
        self._cook_id = None
        self._dish_ids = None
        self._shop_id = None

    @property
    def cate_id(self):
        return self._cate_id

    @cate_id.setter
    def cate_id(self, value):
        self._cate_id = value

    @property
    def cook_id(self):
        return self._cook_id

    @cook_id.setter
    def cook_id(self, value):
        self._cook_id = value

    @property
    def dish_ids(self):
        return self._dish_ids

    @dish_ids.setter
    def dish_ids(self, value):
        if isinstance(value, list):
            self._dish_ids = list()
            for i in value:
                self._dish_ids.append(i)

    @property
    def shop_id(self):
        return self._shop_id

    @shop_id.setter
    def shop_id(self, value):
        self._shop_id = value

    def to_alipay_dict(self):
        params = dict()
        if self.cate_id:
            if hasattr(self.cate_id, 'to_alipay_dict'):
                params['cate_id'] = self.cate_id.to_alipay_dict()
            else:
                params['cate_id'] = self.cate_id
        if self.cook_id:
            if hasattr(self.cook_id, 'to_alipay_dict'):
                params['cook_id'] = self.cook_id.to_alipay_dict()
            else:
                params['cook_id'] = self.cook_id
        if self.dish_ids:
            if isinstance(self.dish_ids, list):
                for i in range(0, len(self.dish_ids)):
                    element = self.dish_ids[i]
                    if hasattr(element, 'to_alipay_dict'):
                        self.dish_ids[i] = element.to_alipay_dict()
            if hasattr(self.dish_ids, 'to_alipay_dict'):
                params['dish_ids'] = self.dish_ids.to_alipay_dict()
            else:
                params['dish_ids'] = self.dish_ids
        if self.shop_id:
            if hasattr(self.shop_id, 'to_alipay_dict'):
                params['shop_id'] = self.shop_id.to_alipay_dict()
            else:
                params['shop_id'] = self.shop_id
        return params

    @staticmethod
    def from_alipay_dict(d):
        if not d:
            return None
        o = KoubeiCateringPosDishcateTransferModel()
        if 'cate_id' in d:
            o.cate_id = d['cate_id']
        if 'cook_id' in d:
            o.cook_id = d['cook_id']
        if 'dish_ids' in d:
            o.dish_ids = d['dish_ids']
        if 'shop_id' in d:
            o.shop_id = d['shop_id']
        return o
avg_line_length 28.212766 | max_line_length 67 | alphanum_fraction 0.553922
qsc_code_*_quality_signal: num_words 349, num_chars 2,652, mean_word_length 3.905444, frac_words_unique 0.148997, frac_chars_top_2grams 0.102715, frac_chars_top_3grams 0.096845, frac_chars_top_4grams 0.061629, frac_chars_dupe_5grams 0.275128, dupe_6grams 0.272194, dupe_7grams 0.030814, dupe_8grams 0, dupe_9grams 0, dupe_10grams 0, frac_chars_replacement_symbols 0, frac_chars_digital 0.001147, frac_chars_whitespace 0.342383, size_file_byte 2,652, num_lines 93, num_chars_line_max 68, num_chars_line_mean 28.516129, frac_chars_alphabet 0.78039, frac_chars_comments 0.015837, cate_xml_start 0, frac_lines_dupe_lines 0.103896, cate_autogen 0, frac_lines_long_string 0, frac_chars_string_length 0.071374, frac_chars_long_word_length 0, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0, frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1, frac_lines_func_ratio 0.142857, cate_var_zero false, frac_lines_pass 0, frac_lines_import 0.025974, frac_lines_simplefunc 0.051948, score_lines_no_logic 0.272727, frac_lines_print 0
unsuffixed qsc_* mirror columns: all 0 (frac_words_unique and frac_lines_string_concat null) | effective 1 | hits 0
Record 7
hexsha: 0961afc6929e36d21b0d3df4440d37f44a042e71 | size: 3,557 | ext: py | lang: Python
max_stars:  main.py @ darshanvjani/Wrapper_EVAI_Pytorch (head cd3f11ad8b36f5d512288b8e5f7e15174bd2bbd1), licenses ["MIT"], count null, events null to null
max_issues: same path/repo/head, licenses ["MIT"], count null, events null to null
max_forks:  same path/repo/head, licenses ["MIT"], count null, events null to null
content:
import torch
import torchvision
import torchvision.transforms as transforms
import albumentations
import numpy as np
# from __future__ import print_function
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import torchvision

from Wrapper_EVAI_Pytorch.dataloader import albumentation as A
from Wrapper_EVAI_Pytorch.utils.helper import *
from Wrapper_EVAI_Pytorch.utils.gradcam import *
from Wrapper_EVAI_Pytorch.utils.plot_metrics import *
from Wrapper_EVAI_Pytorch.utils.test import *
from Wrapper_EVAI_Pytorch.utils.train import *
from Wrapper_EVAI_Pytorch.models import resnet


class main():
    def __init__(self, device):
        self.classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
        self.device = device
        self.train_losses = []
        self.test_losses = []
        self.train_accuracy = []
        self.test_accuracy = []
        self.plot_train_acc = []
        self.lrs = []

    def dataloading_aubumentation(self, mean, std, batch_size):
        albu_obj = A.CIFAR10Albumentation()
        train_transform = albu_obj.train_transform(mean, std)
        test_transform = albu_obj.test_transform(mean, std)
        trainset = torchvision.datasets.CIFAR10(root='/content', train=True, download=True, transform=train_transform)
        testset = torchvision.datasets.CIFAR10(root='/content', train=False, download=True, transform=test_transform)
        train_dataloader = torch.utils.data.DataLoader(trainset, num_workers=2, shuffle=True, batch_size=batch_size)
        test_dataloader = torch.utils.data.DataLoader(testset, num_workers=2, shuffle=True, batch_size=batch_size)
        self.train_dataloader = train_dataloader
        self.test_dataloader = test_dataloader
        self.trainset = trainset
        self.testset = testset

    def show_augmented_img(self, no_of_images):
        helper.plot_images(self.trainset, no_of_images, self.classes)

    def model(self, model_name, set_seed_no, show_summery):
        if model_name == 'resnet34':
            net = resnet.ResNet34()
            self.net = net
        if set_seed_no is not None:
            set_seed(set_seed_no, True)
        if show_summery == True:
            model_summary(self.net, (3, 32, 32))
        return net

    def train_model(self, optimizer, epochs, lam_reg, schedular, criterian, show_plots=True):
        for epoch in range(epochs):
            train(self.net, self.device, self.train_dataloader, optimizer, epoch, self.train_accuracy,
                  self.train_losses, lam_reg, schedular, criterian, self.lrs)
            test(self.net, self.device, self.test_dataloader, self.test_accuracy, self.test_losses, criterian)
        if show_plots == True:
            plot_metrics([self.train_accuracy, self.train_losses, self.test_accuracy, self.test_losses])
            conf_matrix = compute_confusion_matrix(self.net, self.test_dataloader, self.device)
            plot_confusion_matrix(conf_matrix)

    def examination(self, no_of_images):
        wrong_pred = wrong_predictions(self.net, self.test_dataloader, no_of_images, self.device, self.classes)
        target_layers = ["layer1", "layer2", "layer3", "layer4"]
        gradcam_output, probs, predicted_classes = generate_gradcam(wrong_pred[:10], self.net, target_layers, self.device)
        plot_gradcam(gradcam_output, target_layers, self.classes, (3, 32, 32), predicted_classes, wrong_pred[:10])
avg_line_length 39.966292 | max_line_length 153 | alphanum_fraction 0.699185
qsc_code_*_quality_signal: num_words 449, num_chars 3,557, mean_word_length 5.287305, frac_words_unique 0.280624, frac_chars_top_2grams 0.033698, frac_chars_top_3grams 0.044229, frac_chars_top_4grams 0.064869, frac_chars_dupe_5grams 0.277169, dupe_6grams 0.176917, dupe_7grams 0.033698, dupe_8grams 0.033698, dupe_9grams 0.033698, dupe_10grams 0, frac_chars_replacement_symbols 0, frac_chars_digital 0.010608, frac_chars_whitespace 0.204948, size_file_byte 3,557, num_lines 88, num_chars_line_max 154, num_chars_line_mean 40.420455, frac_chars_alphabet 0.828854, frac_chars_comments 0.010402, cate_xml_start 0, frac_lines_dupe_lines 0.03125, cate_autogen 0, frac_lines_long_string 0, frac_chars_string_length 0.025701, frac_chars_long_word_length 0, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0, frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1, frac_lines_func_ratio 0.09375, cate_var_zero false, frac_lines_pass 0.015625, frac_lines_import 0.265625, frac_lines_simplefunc 0, score_lines_no_logic 0.390625, frac_lines_print 0
unsuffixed qsc_* mirror columns: all 0 (frac_words_unique and frac_lines_string_concat null) | effective 1 | hits 0
Record 8
hexsha: 117383a0b92d5b2084b9f6d9093f315d7ed4bad1 | size: 1,501 | ext: py | lang: Python
max_stars:  util/visualizer.py @ google/tim-gan (head 0139ad452b3c74e3c12791ebb719ea5979eb0d1f), licenses ["Apache-2.0"], count 7, events 2020-11-17T21:38:46.000Z to 2022-02-15T02:33:10.000Z
max_issues: same path/repo/head, licenses ["Apache-2.0"], count null, events null to null
max_forks:  same path/repo/head, licenses ["Apache-2.0"], count 2, events 2021-01-23T12:13:29.000Z to 2021-03-27T21:20:49.000Z
content:
# Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import os
import ntpath
import time
from . import util
from . import html
import scipy.misc
from io import BytesIO


def save_images(webpage, visuals, image_path, win_size=512):
    image_dir = webpage.get_image_dir()
    short_path = ntpath.basename(image_path[0])
    name = os.path.splitext(short_path)[0]

    webpage.add_header(name)
    ims = []
    txts = []
    links = []

    for label, image_numpy in visuals.items():
        if label.startswith('output'):
            fulllabel = label
            label = 'output'
        else:
            fulllabel = label
        image_name = '%s_%s.jpg' % (name, label)
        save_path = os.path.join(image_dir, image_name)
        util.save_image(image_numpy, save_path)

        ims.append(image_name)
        txts.append(fulllabel)
        links.append(image_name)
    webpage.add_images(ims, txts, links, width=win_size)
avg_line_length 30.632653 | max_line_length 80 | alphanum_fraction 0.688874
qsc_code_*_quality_signal: num_words 214, num_chars 1,501, mean_word_length 4.724299, frac_words_unique 0.523364, frac_chars_top_2grams 0.059347, frac_chars_top_3grams 0.025717, frac_chars_top_4grams 0.031652, frac_chars_dupe_5grams 0, dupe_6grams 0, dupe_7grams 0, dupe_8grams 0, dupe_9grams 0, dupe_10grams 0, frac_chars_replacement_symbols 0, frac_chars_digital 0.01045, frac_chars_whitespace 0.171219, size_file_byte 1,501, num_lines 48, num_chars_line_max 81, num_chars_line_mean 31.270833, frac_chars_alphabet 0.802251, frac_chars_comments 0.429047, cate_xml_start 0, frac_lines_dupe_lines 0.068966, cate_autogen 0, frac_lines_long_string 0, frac_chars_string_length 0.024941, frac_chars_long_word_length 0, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0, frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1, frac_lines_func_ratio 0.034483, cate_var_zero false, frac_lines_pass 0, frac_lines_import 0.275862, frac_lines_simplefunc 0, score_lines_no_logic 0.310345, frac_lines_print 0
unsuffixed qsc_* mirror columns: all 0 (frac_words_unique and frac_lines_string_concat null) | effective 1 | hits 0
Record 9
hexsha: 117624463673d5965908bc5b693e638ca894611b | size: 407 | ext: py | lang: Python
max_stars:  authorize/apis/base_api.py @ aryeh/py-authorize (head e0a2e8d9828efa2146b22cb855fa28d723913e41), licenses ["MIT"], count 30, events 2015-03-13T01:31:52.000Z to 2021-06-11T08:49:43.000Z
max_issues: same path/repo/head, licenses ["MIT"], count 41, events 2015-01-30T20:01:05.000Z to 2022-03-31T23:11:56.000Z
max_forks:  same path/repo/head, licenses ["MIT"], count 34, events 2015-01-11T20:22:03.000Z to 2022-03-28T20:34:22.000Z
content:
import colander

from authorize.exceptions import AuthorizeInvalidError


class BaseAPI(object):

    def __init__(self, api):
        self.api = api
        self.config = api.config

    def _deserialize(self, schema, params={}):
        try:
            deserialized = schema.deserialize(params)
        except colander.Invalid as e:
            raise AuthorizeInvalidError(e)
        return deserialized
avg_line_length 22.611111 | max_line_length 54 | alphanum_fraction 0.65602
qsc_code_*_quality_signal: num_words 41, num_chars 407, mean_word_length 6.390244, frac_words_unique 0.609756, frac_chars_top_2grams 0.053435, frac_chars_top_3grams 0, frac_chars_top_4grams 0, frac_chars_dupe_5grams 0, dupe_6grams 0, dupe_7grams 0, dupe_8grams 0, dupe_9grams 0, dupe_10grams 0, frac_chars_replacement_symbols 0, frac_chars_digital 0, frac_chars_whitespace 0.27027, size_file_byte 407, num_lines 17, num_chars_line_max 55, num_chars_line_mean 23.941176, frac_chars_alphabet 0.882155, frac_chars_comments 0, cate_xml_start 0, frac_lines_dupe_lines 0, cate_autogen 0, frac_lines_long_string 0, frac_chars_string_length 0, frac_chars_long_word_length 0, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0, frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1, frac_lines_func_ratio 0.166667, cate_var_zero false, frac_lines_pass 0, frac_lines_import 0.166667, frac_lines_simplefunc 0, score_lines_no_logic 0.5, frac_lines_print 0
unsuffixed qsc_* mirror columns: all 0 (frac_words_unique and frac_lines_string_concat null) | effective 1 | hits 0
Record 10
hexsha: 11766920d1f9e38bf6634e7585ed79d49348e3c1 | size: 1,237 | ext: py | lang: Python
max_stars:  stable_baselines/trpo_mpi/run_mujoco.py @ emadboctorx/stable-baselines (head 9bce185538e8bf69836371286e23919fd85eec64), licenses ["MIT"], count null, events null to null
max_issues: same path/repo/head, licenses ["MIT"], count null, events null to null
max_forks:  same path/repo/head, licenses ["MIT"], count null, events null to null
content:
import stable_baselines.common.tf_util as tf_util
from mpi4py import MPI

from stable_baselines import logger
from stable_baselines.common.cmd_util import make_mujoco_env, mujoco_arg_parser
from stable_baselines.common.policies import MlpPolicy
from stable_baselines.trpo_mpi import TRPO


def train(env_id, num_timesteps, seed):
    with tf_util.single_threaded_session():
        rank = MPI.COMM_WORLD.Get_rank()
        if rank == 0:
            logger.configure()
        else:
            logger.configure(format_strs=[])
            logger.set_level(logger.DISABLED)
        workerseed = seed + 10000 * MPI.COMM_WORLD.Get_rank()
        env = make_mujoco_env(env_id, workerseed)
        model = TRPO(
            MlpPolicy,
            env,
            timesteps_per_batch=1024,
            max_kl=0.01,
            cg_iters=10,
            cg_damping=0.1,
            entcoeff=0.0,
            gamma=0.99,
            lam=0.98,
            vf_iters=5,
            vf_stepsize=1e-3,
        )
        model.learn(total_timesteps=num_timesteps)
        env.close()


def main():
    args = mujoco_arg_parser().parse_args()
    train(args.env, num_timesteps=args.num_timesteps, seed=args.seed)


if __name__ == '__main__':
    main()
avg_line_length 28.767442 | max_line_length 79 | alphanum_fraction 0.631366
qsc_code_*_quality_signal: num_words 160, num_chars 1,237, mean_word_length 4.575, frac_words_unique 0.4625, frac_chars_top_2grams 0.102459, frac_chars_top_3grams 0.103825, frac_chars_top_4grams 0.068306, frac_chars_dupe_5grams 0.051913, dupe_6grams 0, dupe_7grams 0, dupe_8grams 0, dupe_9grams 0, dupe_10grams 0, frac_chars_replacement_symbols 0, frac_chars_digital 0.032511, frac_chars_whitespace 0.278901, size_file_byte 1,237, num_lines 42, num_chars_line_max 80, num_chars_line_mean 29.452381, frac_chars_alphabet 0.788117, frac_chars_comments 0, cate_xml_start 0, frac_lines_dupe_lines 0, cate_autogen 0, frac_lines_long_string 0, frac_chars_string_length 0.006467, frac_chars_long_word_length 0, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0, frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1, frac_lines_func_ratio 0.055556, cate_var_zero false, frac_lines_pass 0, frac_lines_import 0.166667, frac_lines_simplefunc 0, score_lines_no_logic 0.222222, frac_lines_print 0
unsuffixed qsc_* mirror columns: all 0 (frac_words_unique and frac_lines_string_concat null) | effective 1 | hits 0
Record 11
hexsha: 11767e77d6369d63f87f78e8e4f1ac800b3e2e86 | size: 929 | ext: py | lang: Python
max_stars:  Day-16/part1.py @ archanpatkar/advent2020 (head 86065eb744c885ce0e29ea8228b8e8ebbd38c939), licenses ["MIT"], count null, events null to null
max_issues: same path/repo/head, licenses ["MIT"], count null, events null to null
max_forks:  same path/repo/head, licenses ["MIT"], count null, events null to null
content:
import sys
sys.path.append("..")
from common import *

def parse(data):
    data = list(map(lambda s: s.strip(), filter(lambda s: len(s) > 1, data.split("\n"))))
    conds = {}
    for i in range(20):
        kv = data[i].split(":")
        cs = kv[1].split("or")
        conds[kv[0]] = ([int(v) for v in cs[0].split("-")],
                        [int(v) for v in cs[1].split("-")])
    myticket = [int(n) for n in data[21].split(",")]
    nearbytickets = [[int(n) for n in data[l].split(",")] for l in range(23, len(data))]
    return (conds, myticket, nearbytickets)

data = aoci(parse)
p(data[0])

count = 0
error_rate = 0
for tic in data[2]:
    for val in tic:
        flag = False
        for field in data[0]:
            c = data[0][field]
            if bi(val, c[0][0], c[0][1]) or bi(val, c[1][0], c[1][1]):
                flag = True
        if not flag:
            count += 1
            error_rate += val

print(error_rate)
print(count)
avg_line_length 28.151515 | max_line_length 87 | alphanum_fraction 0.52099
qsc_code_*_quality_signal: num_words 154, num_chars 929, mean_word_length 3.123377, frac_words_unique 0.350649, frac_chars_top_2grams 0.049896, frac_chars_top_3grams 0.029106, frac_chars_top_4grams 0.033264, frac_chars_dupe_5grams 0.108108, dupe_6grams 0.108108, dupe_7grams 0, dupe_8grams 0, dupe_9grams 0, dupe_10grams 0, frac_chars_replacement_symbols 0, frac_chars_digital 0.038806, frac_chars_whitespace 0.278794, size_file_byte 929, num_lines 33, num_chars_line_max 88, num_chars_line_mean 28.151515, frac_chars_alphabet 0.679104, frac_chars_comments 0, cate_xml_start 0, frac_lines_dupe_lines 0, cate_autogen 0, frac_lines_long_string 0, frac_chars_string_length 0.011828, frac_chars_long_word_length 0, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0, frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1, frac_lines_func_ratio 0.033333, cate_var_zero false, frac_lines_pass 0, frac_lines_import 0.066667, frac_lines_simplefunc 0, score_lines_no_logic 0.133333, frac_lines_print 0.066667
unsuffixed qsc_* mirror columns: all 0 (frac_words_unique and frac_lines_string_concat null) | effective 1 | hits 0
Record 12
hexsha: 11791aa64e3598994f4ffc54bf2c561214125d3a | size: 1,931 | ext: py | lang: Python
max_stars:  NewQshop/AppStore/Buyer/views.py @ bestwishfang/FlaskFrameWork (head e5f2af0b82be6d5b32febadc244a72aadceaa58b), licenses ["MIT"], count null, events null to null
max_issues: same path/repo/head, licenses ["MIT"], count 3, events 2020-04-30T15:16:56.000Z to 2022-02-13T08:02:39.000Z
max_forks:  same path/repo/head, licenses ["MIT"], count null, events null to null
content:
import time
import json
import datetime

from flask import request
from flask import render_template
from flask_restful import Resource

from . import buyer, apibuyer
from AppStore.models import Class_Info


@buyer.route('/buyer/index/')
def index():
    return 'Hello World! This is buyer index.'


@buyer.route('/api/buyer/page/')
def page():
    return render_template('buyer/page.html')


@buyer.route('/buyer/data/')
def demo_data():
    return render_template('buyer/datapage.html')


@apibuyer.resource('/api/buyer/')
class BuyerApi(Resource):
    def __init__(self, *args, **kwargs):
        super(BuyerApi, self).__init__(*args, **kwargs)
        self.ret = {
            'code': 200,
            'version': 1.0,
            'frame': 'flask 1.1.1',
            'data': []
        }

    def get(self):
        class_all = Class_Info.query.all()
        print(type(class_all))  # <class 'list'>
        for obj in class_all:
            obj_data = {
                'class_num': obj.class_num,
                'class_name': obj.class_name,
                'entrance_time': obj.entrance_time.strftime('%Y-%m-%d'),
                'college': obj.college,
            }
            # print(type(obj.entrance_time))
            # new_date = obj.entrance_time.strftime('%Y-%m-%d')
            # print(new_date)
            # print(type(new_date))
            self.ret['data'].append(obj_data)
        # print(self.ret)
        return self.ret

    def post(self):
        data = request.form
        class_obj = Class_Info()
        class_obj.class_num = data.get('class_num')
        class_obj.class_name = data.get('class_name')
        class_obj.entrance_time = data.get('entrance_time')
        class_obj.college = data.get('college')
        class_obj.save()
        self.ret['data'] = '保存成功'  # "saved successfully"
        return self.ret

    def put(self):
        return self.ret

    def delete(self):
        return self.ret
avg_line_length 25.407895 | max_line_length 72 | alphanum_fraction 0.578975
qsc_code_*_quality_signal: num_words 239, num_chars 1,931, mean_word_length 4.493724, frac_words_unique 0.292887, frac_chars_top_2grams 0.052142, frac_chars_top_3grams 0.055866, frac_chars_top_4grams 0.044693, frac_chars_dupe_5grams 0.048417, dupe_6grams 0.048417, dupe_7grams 0.048417, dupe_8grams 0, dupe_9grams 0, dupe_10grams 0, frac_chars_replacement_symbols 0, frac_chars_digital 0.00581, frac_chars_whitespace 0.286898, size_file_byte 1,931, num_lines 75, num_chars_line_max 73, num_chars_line_mean 25.746667, frac_chars_alphabet 0.774147, frac_chars_comments 0.077162, cate_xml_start 0, frac_lines_dupe_lines 0.075472, cate_autogen 0, frac_lines_long_string 0, frac_chars_string_length 0.139718, frac_chars_long_word_length 0, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0, frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1, frac_lines_func_ratio 0.150943, cate_var_zero false, frac_lines_pass 0, frac_lines_import 0.150943, frac_lines_simplefunc 0.09434, score_lines_no_logic 0.45283, frac_lines_print 0.018868
unsuffixed qsc_* mirror columns: all 0 (frac_words_unique and frac_lines_string_concat null) | effective 1 | hits 0
Record 13
hexsha: 1180dde3665390d3a54ac55203c8a6a39a04cbc1 | size: 1,993 | ext: py | lang: Python
max_stars:  addpapers.py @ tarrow/librarybase-pwb (head 6c86aba7cbcb6200bfade0170aea8be3593cafec), licenses ["MIT"], count 1, events 2017-05-23T14:14:16.000Z to 2017-05-23T14:14:16.000Z
max_issues: same path/repo/head, licenses ["MIT"], count 7, events 2015-12-09T10:14:37.000Z to 2016-01-19T12:55:33.000Z
max_forks:  same path/repo/head, licenses ["MIT"], count null, events null to null
content:
import queryCiteFile
import librarybase
import pywikibot

from epmclib.getPMCID import getPMCID
from epmclib.exceptions import IDNotResolvedException

import queue
import threading
import time


def rununthreaded():
    citefile = queryCiteFile.CiteFile()
    citations = citefile.findRowsWithIDType('pmc')
    for idx, citation in enumerate(citations[10513:]):
        addpaper(idx, citation)


def runthreaded():
    threads = []
    for i in range(10):
        # Pass the worker function itself; calling it here would block forever.
        t = threading.Thread(target=worker)
        t.start()
        threads.append(t)

    citefile = queryCiteFile.CiteFile()
    citations = citefile.findRowsWithIDType('pmc')
    for citation in enumerate(citations[10513:]):
        q.put(citation)
    q.join()

    for i in range(10):
        q.put(None)
    for t in threads:
        t.join()


def worker():
    while True:
        idx, citation = q.get()
        addpaper(idx, citation)
        q.task_done()


def addpaper(idx, citation):
    start = time.time()
    print(citation)
    if citation is None:
        return
    print('trying to add {} number {}'.format(citation[5], idx))
    site = pywikibot.Site("librarybase", "librarybase")
    item = librarybase.JournalArticlePage(site)
    pmcidobj = getPMCID(citation[5])
    try:
        pmcidobj.getBBasicMetadata()
    except IDNotResolvedException:
        print('Couldn\'t find in EPMC:' + citation[5])
        return
    metadata = pmcidobj.metadata
    print("Got metadata in:" + str(time.time() - start))
    if not item.articleAlreadyExists(metadata['pmcid']):
        print('Item doesn\'t seem to exist. Setting metadata for: ' + metadata['pmcid'])
        item.setMetaData(metadata)
        print("set metadata in" + str(time.time() - start))
    else:
        print("{} already exists. Doing nothing".format(metadata['pmcid']))


q = queue.Queue()
rununthreaded()
avg_line_length 29.746269 | max_line_length 93 | alphanum_fraction 0.606121
qsc_code_*_quality_signal: num_words 208, num_chars 1,993, mean_word_length 5.802885, frac_words_unique 0.394231, frac_chars_top_2grams 0.045568, frac_chars_top_3grams 0.047225, frac_chars_top_4grams 0.062966, frac_chars_dupe_5grams 0.235294, dupe_6grams 0.159072, dupe_7grams 0.11599, dupe_8grams 0.11599, dupe_9grams 0, dupe_10grams 0, frac_chars_replacement_symbols 0, frac_chars_digital 0.011888, frac_chars_whitespace 0.282489, size_file_byte 1,993, num_lines 67, num_chars_line_max 94, num_chars_line_mean 29.746269, frac_chars_alphabet 0.832168, frac_chars_comments 0, cate_xml_start 0, frac_lines_dupe_lines 0.175439, cate_autogen 0, frac_lines_long_string 0, frac_chars_string_length 0.081432, frac_chars_long_word_length 0, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0, frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1, frac_lines_func_ratio 0.070175, cate_var_zero false, frac_lines_pass 0, frac_lines_import 0.140351, frac_lines_simplefunc 0, score_lines_no_logic 0.245614, frac_lines_print 0.122807
unsuffixed qsc_* mirror columns: all 0 (frac_words_unique and frac_lines_string_concat null) | effective 1 | hits 0
Record 14
hexsha: 118141ccfae972e74c17e2b33370161c604fe8c4 | size: 2,491 | ext: py | lang: Python
max_stars:  day18/script2.py @ Moremar/advent_of_code_2019 (head 572200ad8c24efd38fb3fac428d086bcd7090ca9), licenses ["Apache-2.0"], count null, events null to null
max_issues: same path/repo/head, licenses ["Apache-2.0"], count null, events null to null
max_forks:  same path/repo/head, licenses ["Apache-2.0"], count null, events null to null
content:
from day18.script1 import parse_matrix, read_char_matrix, alpha_lower, solve_world

# We could use the same logic as part 1 with a state made of a distance and 4 (x, y) pairs (one for each bot).
# This works with examples, but there are too many combinations for the real input so it runs for very long.
#
# To get it quicker, we will resolve the 4 sub-mazes independently, assuming we have all keys from the other 3 mazes.
# Then we sum the 4 results.
#
# There could be cases where this logic does not work (basically if the keys from other mazes that we assume we have
# cannot be obtained in the order we assumed, because they require the current bot to get keys in a different order).
#
# I did not have such problematic cases with my input file and this logic gave the correct result.


def solve(world):
    (world1, keys1, world2, keys2, world3, keys3, world4, keys4) = world
    return solve_world(world1, keys1) \
        + solve_world(world2, keys2) \
        + solve_world(world3, keys3) \
        + solve_world(world4, keys4)


def parse(file_name):
    matrix = read_char_matrix(file_name)
    middle_i = (len(matrix) + 1) // 2
    middle_j = (len(matrix[0]) + 1) // 2

    # split into 4 sub-matrices
    (mx1, mx2, mx3, mx4) = [], [], [], []
    (keys1, keys2, keys3, keys4) = [], [], [], []
    for i in range(middle_i):
        row = []
        for j in range(middle_j):
            row.append(matrix[i][j])
            if matrix[i][j] in alpha_lower:
                keys1.append(matrix[i][j])
        mx1.append(row)
    for i in range(middle_i):
        row = []
        for j in range(middle_j, len(matrix[0])):
            row.append(matrix[i][j])
            if matrix[i][j] in alpha_lower:
                keys2.append(matrix[i][j])
        mx2.append(row)
    for i in range(middle_i, len(matrix)):
        row = []
        for j in range(middle_j):
            row.append(matrix[i][j])
            if matrix[i][j] in alpha_lower:
                keys3.append(matrix[i][j])
        mx3.append(row)
    for i in range(middle_i, len(matrix)):
        row = []
        for j in range(middle_j, len(matrix[0])):
            row.append(matrix[i][j])
            if matrix[i][j] in alpha_lower:
                keys4.append(matrix[i][j])
        mx4.append(row)

    return parse_matrix(mx1), keys1, \
        parse_matrix(mx2), keys2, \
        parse_matrix(mx3), keys3, \
        parse_matrix(mx4), keys4


if __name__ == '__main__':
    print(solve(parse("data2.txt")))
avg_line_length 34.123288 | max_line_length 116 | alphanum_fraction 0.607788
qsc_code_*_quality_signal: num_words 378, num_chars 2,491, mean_word_length 3.902116, frac_words_unique 0.338624, frac_chars_top_2grams 0.056949, frac_chars_top_3grams 0.065085, frac_chars_top_4grams 0.075932, frac_chars_dupe_5grams 0.267119, dupe_6grams 0.255593, dupe_7grams 0.255593, dupe_8grams 0.255593, dupe_9grams 0.249492, dupe_10grams 0.249492, frac_chars_replacement_symbols 0, frac_chars_digital 0.031561, frac_chars_whitespace 0.27499, size_file_byte 2,491, num_lines 72, num_chars_line_max 117, num_chars_line_mean 34.597222, frac_chars_alphabet 0.785161, frac_chars_comments 0.283019, cate_xml_start 0, frac_lines_dupe_lines 0.425532, cate_autogen 0, frac_lines_long_string 0, frac_chars_string_length 0.009583, frac_chars_long_word_length 0, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0, frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1, frac_lines_func_ratio 0.042553, cate_var_zero false, frac_lines_pass 0, frac_lines_import 0.021277, frac_lines_simplefunc 0, score_lines_no_logic 0.106383, frac_lines_print 0.021277
unsuffixed qsc_* mirror columns: all 0 (frac_words_unique and frac_lines_string_concat null) | effective 1 | hits 0
Record 15
hexsha: 1183ffbe924869fb7a592b3839706503e0ba51ef | size: 1,029 | ext: py | lang: Python
max_stars:  test/test_clamp.py @ XiaoxuanZhangCM/igakit (head 71bc4237a561272f6189d067688c182aa705c5eb), licenses ["BSD-2-Clause"], count 2, events 2022-03-21T09:38:55.000Z to 2022-03-24T23:33:55.000Z
max_issues: same path/repo/head, licenses ["BSD-2-Clause"], count 1, events 2022-03-24T22:50:35.000Z to 2022-03-27T06:33:16.000Z
max_forks:  same path/repo/head, licenses ["BSD-2-Clause"], count null, events null to null
content:
import numpy as np
from igakit.cad import circle, Pi

def make_crv(p, u):
    c = circle(radius=1, angle=Pi/2)
    c.rotate(Pi/4)
    c.elevate(0, p-2)
    c.refine(0, u)
    return c

def check_crv(c):
    u0, u1 = c.breaks(0)[[0, -1]]
    u = np.linspace(u0, u1, 100)
    x, y, z = c(u).T
    r = np.hypot(x, y)
    return np.allclose(r, 1)

def test_clamp():
    for p in range(2, 6):
        for u in ([], [0.5], [1/3.0, 2/3.0], [0.1, 0.9]):
            c = make_crv(p, u)
            check_crv(c)
            for continuity in range(c.degree[0]):
                for side in (0, 1, None):
                    cc = c.copy()
                    cc.unclamp(0, continuity=continuity, side=side)
                    check_crv(cc)
                    cc.clamp(0, side=side)
                    check_crv(cc)
                    cc.clamp(0)
                    check_crv(cc)
                    assert np.allclose(cc.knots[0], c.knots[0])
                    assert np.allclose(cc.array, c.array)

if __name__ == '__main__':
    test_clamp()
avg_line_length 27.810811 | max_line_length 67 | alphanum_fraction 0.477162
qsc_code_*_quality_signal: num_words 163, num_chars 1,029, mean_word_length 2.907975, frac_words_unique 0.368098, frac_chars_top_2grams 0.084388, frac_chars_top_3grams 0.063291, frac_chars_top_4grams 0.037975, frac_chars_dupe_5grams 0.109705, dupe_6grams 0.109705, dupe_7grams 0.109705, dupe_8grams 0.109705, dupe_9grams 0, dupe_10grams 0, frac_chars_replacement_symbols 0, frac_chars_digital 0.060278, frac_chars_whitespace 0.371234, size_file_byte 1,029, num_lines 36, num_chars_line_max 68, num_chars_line_mean 28.583333, frac_chars_alphabet 0.672334, frac_chars_comments 0, cate_xml_start 0, frac_lines_dupe_lines 0.09375, cate_autogen 0, frac_lines_long_string 0, frac_chars_string_length 0.007775, frac_chars_long_word_length 0, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0, frac_lines_assert 0.0625
qsc_codepython_*_quality_signal: cate_ast 1, frac_lines_func_ratio 0.09375, cate_var_zero false, frac_lines_pass 0, frac_lines_import 0.0625, frac_lines_simplefunc 0, score_lines_no_logic 0.21875, frac_lines_print 0
unsuffixed qsc_* mirror columns: all 0 (frac_words_unique and frac_lines_string_concat null) | effective 1 | hits 0
Record 16
hexsha: 1186952effff0749fbfa4fc2d6b7b1c8935379fb | size: 4,261 | ext: py | lang: Python
max_stars:  autoload/vim_ext/vim_opt.py @ nielsmadan/venom (head 87dee312fe8dbaa64fe29fba7a2ccd5f41a55b4a), licenses ["0BSD"], count 1, events 2022-01-26T04:57:28.000Z to 2022-01-26T04:57:28.000Z
max_issues: same path/repo/head, licenses ["0BSD"], count null, events null to null
max_forks:  same path/repo/head, licenses ["0BSD"], count 1, events 2021-08-19T08:11:58.000Z to 2021-08-19T08:11:58.000Z
content:
import vim
_BOOL_OPTS = set(('allowrevins', 'altkeymap', 'antialias', 'autochdir', 'arabic', 'arabicshape',
'autoindent', 'autoread', 'autowrite', 'backup', 'ballooneval', 'binary',
'bioskey', 'bomb', 'buflisted', 'buftype', 'cindent', 'compatible', 'confirm',
'conskey', 'copyindent', 'cscoperelative', 'cscopetag', 'cscopeverbose',
'cursorbind', 'cursorcolumn', 'cursorline', 'delcombine', 'diff', 'digraph',
'edcompatible', 'endofline', 'equalalways', 'equalprg', 'errorbells', 'esckeys',
'expandtab', 'exrc', 'fkmap', 'foldenable', 'fsync', 'gdefault', 'guipty',
'hidden', 'hlsearch', 'hkmap', 'hkmapp', 'icon', 'ignorecase', 'imcmdline', 'imdisable',
'incsearch', 'infercase', 'insertmode', 'joinspaces', 'lazyredraw', 'linebreak', 'lisp',
'list', 'loadplugins', 'macatsui', 'magic', 'modeline', 'modifiable', 'modified',
'more', 'mouse', 'mousefocus', 'mousehide', 'number', 'opendevice', 'paste',
'preserveindent', 'previewwindow', 'prompt',
))
_NUM_OPTS = set(('aleph', 'balloondelay', 'cmdheight', 'cmdwinheight', 'columns', 'concellevel',
'cscopepathcomp', 'cscopetagorder', 'foldcolumn', 'foldlevel', 'foldlevelstart',
'foldminlines', 'foldnestmax', 'guiheadroom', 'history', 'iminsert', 'imsearch',
'laststatus', 'lines', 'linespace', 'matchtime', 'maxcombine', 'maxfuncdepth',
'maxmem', 'maxmempattern', 'maxmemtot', 'menuitems', 'modelines', 'mousetime',
'mzquantum', 'numberwidth', 'previewheight', 'pumheight',
))
_STR_OPTS = set(('ambiwidth', 'background', 'backspace', 'backupcopy', 'backupdir', 'backupext',
'backupskip', 'balloonexpr', 'breakat', 'browsedir', 'bufhidden', 'casemap',
'cdpath', 'cedit', 'charconvert', 'cinkeys', 'cinoptions', 'cinwords',
'clipboard', 'colorcolumn', 'comments', 'commentstring', 'complete',
'completefunc', 'completeopt', 'concealcursor', 'cpoptions', 'cryptmethod',
'cscopeprg', 'cscopequickfix', 'debug', 'define', 'dictionary', 'diffexpr',
'diffopt', 'directory', 'display', 'eadirection', 'encoding', 'errorfile',
'errorformat', 'eventignore', 'fileencoding', 'fileencodings', 'fileformat',
'fileformats', 'filetype', 'fillchars', 'foldclose', 'foldexpr', 'foldignore',
'foldmarker', 'foldmethod', 'foldopen', 'foldtext', 'formatoptions',
'formatlistpat', 'formatprg', 'formatexpr', 'grepformat', 'grepprg',
'guicursor', 'guifont', 'guifontset', 'guifontwide', 'guioptions',
'guitablabel', 'guitabtooltip', 'helpfile', 'helpheight', 'helplang',
'highlight', 'iconstring', 'imactivatekey', 'include', 'includeexpr', 'indentexpr',
'indentkeys', 'isfname', 'isindent', 'iskeyword', 'isprint', 'key', 'keymap',
'keymodel', 'keywordprg', 'langmap', 'langmenu', 'lispwords', 'listchars',
'makeef', 'makeprg', 'matchpairs', 'mkspellmem', 'mousemodel', 'mouseshape',
'nrformats', 'omnifunc', 'operatorfunc', 'osfiletype', 'paragraphs', 'pastetoggle',
'patchexpr', 'patchmode', 'path', 'printdevice', 'printencoding', 'printexpr',
'printfont', 'printheader', 'printmbcharset', 'printmbfont', 'printoptions',
'quoteescape',
))
class _opt(object):
def __getattr__(self, name):
# print "TRYING TO GET %s" % name
if name in _BOOL_OPTS:
return vim.eval('&' + name) == '1'
elif name in _NUM_OPTS:
return int(vim.eval('&' + name), 0)
elif name in _STR_OPTS:
return vim.eval('&' + name)
raise AttributeError(name)
def __setattr__(self, name, val):
# print "TRYING TO SET %s TO %s" % (name, val)
if name in _BOOL_OPTS:
if val:
vim.command('set %s' % name)
else:
vim.command('set no%s' % name)
elif name in _NUM_OPTS or name in _STR_OPTS:
vim.command('set %s=%s' % (name, val))
vim.opt = _opt()
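# Example usage from Vim's embedded Python (a sketch, not part of the source):
#   if vim.opt.expandtab: ...      # boolean read via eval('&expandtab')
#   n = vim.opt.cmdheight          # numeric read
#   vim.opt.hlsearch = False       # runs ':set nohlsearch'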
| 59.180556
| 106
| 0.553157
| 312
| 4,261
| 7.477564
| 0.814103
| 0.008573
| 0.014145
| 0.010287
| 0.030004
| 0
| 0
| 0
| 0
| 0
| 0
| 0.000642
| 0.268716
| 4,261
| 71
| 107
| 60.014085
| 0.748074
| 0.017836
| 0
| 0.086207
| 0
| 0
| 0.485892
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034483
| false
| 0
| 0.017241
| 0
| 0.12069
| 0.051724
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1188f7d771d6606445861a8c6c54d6fe5f5f1785
| 40,198
|
py
|
Python
|
statestream/utils/shared_memory.py
|
boschresearch/statestream
|
3ea93b2e0434cfaf5d546f37b2068dc0a0b8c281
|
[
"Apache-2.0",
"MIT"
] | 9
|
2019-02-21T14:25:26.000Z
|
2021-07-21T08:14:32.000Z
|
statestream/utils/shared_memory.py
|
VolkerFischer/statestream
|
3ea93b2e0434cfaf5d546f37b2068dc0a0b8c281
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
statestream/utils/shared_memory.py
|
VolkerFischer/statestream
|
3ea93b2e0434cfaf5d546f37b2068dc0a0b8c281
|
[
"Apache-2.0",
"MIT"
] | 1
|
2019-03-04T03:17:00.000Z
|
2019-03-04T03:17:00.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017 - for information on the respective copyright owner
# see the NOTICE file and/or the repository https://github.com/boschresearch/statestream
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import numpy as np
import importlib
import SharedArray
from statestream.utils.helper import is_scalar_shape
from statestream.utils.shared_memory_layout import SharedMemoryLayout
from statestream.meta.network import get_item_type
from statestream.meta.network import S2L
from statestream.meta.neuron_pool import np_shm_layout, np_init
from statestream.meta.synapse_pool import sp_shm_layout, sp_init
def shared_layout(net, param):
"""Generates shared-memory layout from net and param.
"""
layout = {}
for t in ["np", "sp", "plast", "if"]:
for i,I in net[S2L(t)].items():
# Begin with empty layout.
layout[i] = {}
# Empty tmem layout structure. Rest will be filled during shm creation.
layout[i]["tmem"] = []
for tmem in range(len(param["core"]["temporal_memory"])):
layout[i]["tmem"].append({"parameter": {}, "variables": {}})
if t == "plast":
layout[i]["tmem"][tmem]["updates"] = {}
# Get item shm data layout.
if t == "np":
layout[i].update(np_shm_layout(i, net, param))
elif t == "sp":
layout[i].update(sp_shm_layout(i, net, param))
elif t == "plast":
plast_shm_layout \
= getattr(importlib.import_module("statestream.meta.plasticities." + I["type"]),
"plast_shm_layout")
layout[i].update(plast_shm_layout(i, net, param))
layout[i]["updates"] = {}
for par in I["parameter"]:
if par[1] not in layout[i]["updates"]:
layout[i]["updates"][par[1]] = {}
shml = layout[par[1]]["parameter"][par[2]]
layout[i]["updates"][par[1]][par[2]] = shml
elif t == "if":
if_shm_layout \
= getattr(importlib.import_module("statestream.interfaces.process_if_" + I["type"]),
"if_shm_layout")
layout[i].update(if_shm_layout(i, net, param))
return layout
class SharedMemory(object):
def __init__(self, net, param, session_id=None, force_id=None):
self.net = net
self.param = param
# Get list of all existing shared memory arrays.
shm_list = SharedArray.list()
shm_list_name = []
for i in range(len(shm_list)):
if sys.version[0] == "2":
shm_list_name.append(shm_list[i].name)
elif sys.version[0] == "3":
shm_list_name.append(shm_list[i].name.decode("utf-8"))
# Initially start with invalid session id.
self.session_id = None
# Begin with empty structure holding the entire layout.
self.dat = {}
# Estimate of bytes reserved in shared memory.
self.log_lines = []
self.log_bytes = []
# Build layout.
# ---------------------------------------------------------------------
self.layout = shared_layout(self.net, self.param)
# Dependent on given session_id initialize shared memory.
# ---------------------------------------------------------------------
if session_id is None:
if force_id is None:
# Determine next free session id.
for tmp_session_id in range(2**10):
id_taken = False
for i in range(len(shm_list)):
if shm_list_name[i].find("statestream." + str(tmp_session_id) + ".") != -1:
id_taken = True
break
# Take the first free session id and break.
if not id_taken:
self.session_id = tmp_session_id
session_name = "statestream." + str(self.session_id) + "."
break
else:
self.session_id = force_id
session_name = "statestream." + str(self.session_id) + "."
# Allocate all shared memory.
# ---------------------------------------------------------
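# The names created below follow the scheme
#   statestream.<session>.core.proc_id.<item>
#   statestream.<session>.net.<item>.state
#   statestream.<session>.net.<item>.<parameter|variables>.<name>
#   statestream.<session>.net.tmem.<t>.<item>...
# so that delete() can later free everything by its session prefix.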
for t in ["np", "sp", "plast", "if"]:
for i, I in self.net[S2L(t)].items():
# Allocate process identifiers.
shm_name = session_name + "core.proc_id." + i
SharedArray.create(shm_name, 1, dtype=np.int32)
# Allocate shm for neuron pool states.
if t == "np":
shm_name = session_name + "net." + i + ".state"
SharedArray.create(shm_name,
self.layout[i]["state"].shape,
dtype=self.layout[i]["state"].dtype)
# Allocate also tmem for np states.
for tmem in range(len(param["core"]["temporal_memory"])):
self.layout[i]["tmem"][tmem]["state"] = self.layout[i]["state"]
tmem_shm_name = session_name + "net.tmem." + str(tmem) + "." + i + ".state"
SharedArray.create(tmem_shm_name,
self.layout[i]["state"].shape,
dtype=self.layout[i]["state"].dtype)
# Allocate parameters and variables (incl. tmem).
for T in ["parameter", "variables"]:
shm_name = session_name + "net." + i + "." + T
for d,d_l in self.layout[i][T].items():
dat_name = shm_name + "." + d
if is_scalar_shape(d_l.shape):
SharedArray.create(dat_name, 1, dtype=d_l.dtype)
else:
SharedArray.create(dat_name, d_l.shape, dtype=d_l.dtype)
# Allocate also tmem for parameters / variables.
for tmem in range(len(param["core"]["temporal_memory"])):
self.layout[i]["tmem"][tmem][T][d] = d_l
tmem_shm_name = session_name + "net.tmem." + str(tmem) \
+ "." + i + "." + T + "." + d
if is_scalar_shape(d_l.shape):
SharedArray.create(tmem_shm_name, 1, dtype=d_l.dtype)
else:
SharedArray.create(tmem_shm_name, d_l.shape, dtype=d_l.dtype)
# Allocate shm for plasticity updates (incl. tmem).
for i,I in self.net["plasticities"].items():
shm_name = session_name + "net." + i + ".updates."
for par in I["parameter"]:
shml = self.layout[par[1]]["parameter"][par[2]]
dat_name = shm_name + par[0] + "." + par[1] + "." + par[2]
if is_scalar_shape(shml.shape):
SharedArray.create(dat_name, 1, dtype=shml.dtype)
else:
SharedArray.create(dat_name, shml.shape, dtype=shml.dtype)
# Allocate also tmem for updates.
for tmem in range(len(param["core"]["temporal_memory"])):
if par[1] not in self.layout[i]["tmem"][tmem]["updates"]:
self.layout[i]["tmem"][tmem]["updates"][par[1]] = {}
self.layout[i]["tmem"][tmem]["updates"][par[1]][par[2]] = shml
tmem_shm_name = session_name + "net.tmem." + str(tmem) \
+ "." + i + ".updates." \
+ par[0] + "." + par[1] + "." + par[2]
if is_scalar_shape(shml.shape):
SharedArray.create(tmem_shm_name, 1, dtype=shml.dtype)
else:
SharedArray.create(tmem_shm_name, shml.shape, dtype=shml.dtype)
else:
# Set session name for shm.
session_name = "statestream." + str(session_id) + "."
# Check if shared memory for this session_id was already created.
for i in range(len(shm_list)):
if shm_list_name[i].find(session_name) != -1:
self.session_id = session_id
break
assert (self.session_id is not None), \
"Error: SharedMemory() Given session_id was not found: " \
+ str(session_id) + " " + session_name
# Attach all shared memory.
# ---------------------------------------------------------------------
self.proc_id = {}
for t in ["np", "sp", "plast", "if"]:
for i,I in self.net[S2L(t)].items():
self.dat[i] = {}
# Begin with empty list of dicts for temporal memory.
self.dat[i]["tmem"] = [{} for tmem in range(len(param["core"]["temporal_memory"]))]
# Process relevant memory.
shm_name = session_name + "core.proc_id." + i
self.proc_id[i] = SharedArray.attach(shm_name)
# Network data shared memory for neuron pool states.
if t == "np":
shm_name = session_name + "net." + i + ".state"
self.dat[i]["state"] = SharedArray.attach(shm_name)
self.log_lines += [str(i) + ".state"]
self.log_bytes += [self.dat[i]["state"].nbytes]
# Attach also tmem for np states.
for tmem in range(len(param["core"]["temporal_memory"])):
tmem_dat_name = session_name + "net.tmem." + str(tmem) + "." + i + ".state"
self.dat[i]["tmem"][tmem]["state"] = SharedArray.attach(tmem_dat_name)
self.log_lines += [str(i) + ".tmem." + str(tmem) + ".state"]
self.log_bytes += [self.dat[i]["tmem"][tmem]["state"].nbytes]
# Network data shared memory for plasticity updates.
if t == "plast":
# Begin with empty dict also for temporal memory.
self.dat[i]["updates"] = {}
for tmem in range(len(param["core"]["temporal_memory"])):
self.dat[i]["tmem"][tmem]["updates"] = {}
shm_name = session_name + "net." + i + ".updates."
for par in I["parameter"]:
# First time add parameter for specific item (incl. tmem).
if par[1] not in self.dat[i]["updates"]:
self.dat[i]["updates"][par[1]] = {}
for tmem in range(len(param["core"]["temporal_memory"])):
self.dat[i]["tmem"][tmem]["updates"][par[1]] = {}
# Specify shm update id.
dat_name = shm_name + par[0] + "." + par[1] + "." + par[2]
# Attach shm.
self.dat[i]["updates"][par[1]][par[2]] = SharedArray.attach(dat_name)
self.log_lines += [str(i) + ".updates." + str(par[1]) + "." + str(par[2])]
self.log_bytes += [self.dat[i]["updates"][par[1]][par[2]].nbytes]
# Attach also tmem for updates.
for tmem in range(len(param["core"]["temporal_memory"])):
tmem_shm_name = session_name + "net.tmem." + str(tmem) \
+ "." + i + ".updates." \
+ par[0] + "." + par[1] + "." + par[2]
self.dat[i]["tmem"][tmem]["updates"][par[1]][par[2]] \
= SharedArray.attach(tmem_shm_name)
self.log_lines += [str(i) + ".tmem." + str(tmem) \
+ ".updates." + str(par[1]) + "." + str(par[2])]
self.log_bytes += [self.dat[i]["tmem"][tmem]["updates"][par[1]][par[2]].nbytes]
# Network data shared memory for variables and parameter.
for T in ["parameter", "variables"]:
# Begin with empty dict also for temporal memory.
self.dat[i][T] = {}
for tmem in range(len(param["core"]["temporal_memory"])):
self.dat[i]["tmem"][tmem][T] = {}
# Determine shm id item "prefix".
shm_name = session_name + "net." + i + "." + T
# Loop over all vars/pars of this item.
for d,d_l in self.layout[i][T].items():
dat_name = shm_name + "." + d
self.dat[i][T][d] = SharedArray.attach(dat_name)
self.log_lines += [str(i) + "." + str(T) + "." + str(d)]
self.log_bytes += [self.dat[i][T][d].nbytes]
# Attach also tmem for parameter / variables.
for tmem in range(len(param["core"]["temporal_memory"])):
tmem_shm_name = session_name + "net.tmem." + str(tmem) \
+ "." + i + "." + T + "." + d
self.dat[i]["tmem"][tmem][T][d] = SharedArray.attach(tmem_shm_name)
self.log_lines += [str(i) + ".tmem." + str(tmem) + "." \
+ str(T) + "." + str(d)]
self.log_bytes += [self.dat[i]["tmem"][tmem][T][d].nbytes]
def delete(self):
"""Method to free statestream shared memory of the particular session.
"""
if self.session_id is not None:
shm_list = SharedArray.list()
shm_list_name = []
for i in range(len(shm_list)):
if sys.version[0] == "2":
shm_list_name.append(shm_list[i].name)
elif sys.version[0] == "3":
shm_list_name.append(shm_list[i].name.decode("utf-8"))
for i in range(len(shm_list)):
if shm_list_name[i].find("statestream." + str(self.session_id) + ".") != -1:
SharedArray.delete(shm_list_name[i])
def add_sys_client(self, client_param):
"""Create shared memory for a single system client.
"""
client_shm_name = 'statestream.' \
+ str(self.session_id) + '.' \
+ 'sys_clients.' \
+ str(client_param['name']) + '.'
# Create and attach client specific shared memory.
for T in ['parameter', 'variables']:
if T in client_param:
for pv,PV in client_param[T].items():
shm_name = client_shm_name + T + '.' + pv
try:
SharedArray.create(shm_name, PV['shape'], dtype=np.float32)
except Exception:
dat = SharedArray.attach(shm_name)
if dat.shape != PV['shape']:
print('\nError: Shared memory: Tried to create already existing memory: ' + shm_name)
def update_sys_client(self):
"""Update this instance of shared memory to existing clients.
"""
# Determine all clients, currently in shared memory.
clients = {}
shm_list = SharedArray.list()
client_shm_name = 'statestream.' \
+ str(self.session_id) + '.' \
+ 'sys_clients.'
for shm_name_raw in shm_list:
if sys.version[0] == "2":
shm_name = shm_name_raw.name
elif sys.version[0] == "3":
shm_name = shm_name_raw.name.decode("utf-8")
if shm_name.startswith(client_shm_name):
shm_name_split = shm_name.split('.')
client_name = shm_name_split[3]
if client_name not in clients:
clients[client_name] = {
'parameter': {},
'variables': {}
}
clients[client_name][shm_name_split[4]][shm_name_split[5]] \
= shm_name
# Update client shared memory dat and layout.
for c,C in clients.items():
if c not in self.dat:
self.dat[c] = {
'parameter': {},
'variables': {}
}
self.layout[c] = {
'parameter': {},
'variables': {}
}
for t,T in C.items():
for d,D in T.items():
self.dat[c][t][d] = SharedArray.attach(D)
self.layout[c][t][d] = SharedMemoryLayout('np',
self.dat[c][t][d].shape,
self.dat[c][t][d].dtype,
0.0)
# Determine all items in dat / layout which are not in shared memory.
# Remove deprecated shared memory from layout and dat.
remove_items = []
for i,I in self.layout.items():
if i not in clients and i not in self.net['neuron_pools'] \
and i not in self.net['synapse_pools'] \
and i not in self.net['plasticities'] \
and i not in self.net['interfaces']:
remove_items.append(i)
for i in remove_items:
self.dat.pop(i)
self.layout.pop(i)
def remove_sys_client(self, client_name):
"""Remove shared memory for system client.
"""
client_shm_name = 'statestream.' \
+ str(self.session_id) + '.' \
+ 'sys_clients.' \
+ str(client_name) + '.'
# Delete shared memory.
for T in ['parameter', 'variables']:
for d,d_l in self.layout[client_name][T].items():
shm_name = client_shm_name + T + '.' + str(d)
try:
SharedArray.delete(shm_name)
except Exception:
print("\nERROR: Unable to delete non-existing shared memory: " + str(shm_name) + "\n")
def pprint_list(self, what=""):
"""Return a list of lines containing shm info about what.
"""
lines = []
w = what.split(".")
if len(w) > 1:
if len(w[1]) == 1:
if w[1] == "n":
i_type = "neuron_pools"
elif w[1] == "s":
i_type = "synapse_pools"
elif w[1] == "p":
i_type = "plasticities"
elif w[1] == "i":
i_type = "interfaces"
else:
return []
if what in ["shm", "shm."]:
lines.append("[n]euron pools")
lines.append("[s]ynapse pools")
lines.append("[p]lasticities")
lines.append("[i]nterfaces")
if len(w) == 2:
# shm.i_type
if w[1] != "":
cntr = 0
for i in self.net[i_type]:
if cntr == 0:
# Append new line.
lines.append(" " + i.ljust(18))
else:
# Append to existing line.
lines[-1] = lines[-1] + i.ljust(18)
if cntr < 3:
cntr += 1
else:
cntr = 0
elif len(w) == 3:
# shm.i_type.item_name
if w[1] != "":
cntr = 0
for i in self.net[i_type]:
if i.startswith(w[2]):
if cntr == 0:
# Append new line.
lines.append(" " + i.ljust(18))
else:
# Append to existing line.
lines[-1] = lines[-1] + i.ljust(18)
if cntr < 3:
cntr += 1
else:
cntr = 0
elif len(w) == 4:
# shm.i_type.item_name.data_type
if w[1] != "":
if w[2] in self.net[i_type]:
# Assuming all classes of data begin
# with a different letter.
if w[3] == "":
for e in self.dat[w[2]]:
lines.append(" [" + e[0] + "]" + e[1:])
else:
dat_type = "x"
if w[3][0] in ["v", "p"]:
if w[3] == "v":
dat_type = "variables"
else:
dat_type = "parameter"
for vp in self.dat[w[2]][dat_type]:
lines.append(" " + vp)
elif w[3].startswith("s"):
if w[3] == "s":
lines.append(" shape: " + str(self.layout[w[2]]["state"].shape))
lines.append(" type: " + str(self.layout[w[2]]["state"].dtype))
nbytes = self.dat[w[2]]["state"].nbytes
lines.append(" memory: " + str(nbytes) + " B")
if w[3].startswith("s[") and w[3][-1] == "]":
# Get data.
s = eval("self.dat[w[2]]['state']" + w[3][1:])
if len(s.shape) == 0:
lines.append(" value: " + str(s))
if len(s.shape) == 1:
for i in range(min(s.shape[0], 16)):
lines.append(str(i).ljust(4) + " " + str(s[i]))
if s.shape[0] >= 16:
lines.append("...")
elif len(w) == 5:
if w[1] != "":
if w[2] in self.net[i_type]:
dat_type = "x"
if w[3][0] in ["v", "p"]:
if w[3] == "v":
dat_type = "variables"
else:
dat_type = "parameter"
for vp in self.dat[w[2]][dat_type]:
if vp.startswith(w[4]) and len(w[4]) < len(vp):
lines.append(" " + vp)
if vp == w[4]:
lines.append(" shape: " + str(self.layout[w[2]][dat_type][vp].shape))
lines.append(" type: " + str(self.layout[w[2]][dat_type][vp].dtype))
nbytes = self.dat[w[2]][dat_type][vp].nbytes
lines.append(" memory: " + str(nbytes) + " B")
if w[4].startswith(vp) and w[4][-1] == "]" and "[" in w[4]:
# Get data.
s = eval("self.dat[w[2]][dat_type][vp]" + w[4][len(vp):])
if len(s.shape) == 0:
lines.append(" value: " + str(s))
if len(s.shape) == 1:
for i in range(min(s.shape[0], 16)):
lines.append(str(i).ljust(4) + " " + str(s[i]))
if s.shape[0] >= 16:
lines.append("...")
return lines
def init(self, what=[], mode=None):
"""Method to recusively initialize a subset of the network.
what:
[] Initialize everything.
["state"] Initialize all states.
["parameter"] Initialize all parameter.
["variables"] Initialize all variables.
["updates"] Initialize all updates.
[np_id, "state"] Initialize state of neuron pool np_id.
[item_id, Initialize parameter par_id of item item_id.
"parameter",
par_id]
[item_id, Initialize variable var_id of item item_id.
"variables",
var_id]
[plast_id, Initialize updates [tar_id, par_id] of plasticity plast_id.
"updates",
tar_id,
par_id]
"""
# Do not initialize meta-variables.
if len(what) >= 1:
if what[0] in self.dat \
and what[0] not in self.net['neuron_pools'] \
and what[0] not in self.net['synapse_pools'] \
and what[0] not in self.net['plasticities'] \
and what[0] not in self.net['interfaces']:
return
# Adjust mode in some cases.
if isinstance(mode, list):
if "external_models" in self.net:
# In case of external model init, set mode here to none.
if mode[0] in self.net["external_models"]:
mode = None
# Determine item to be set and its type.
item_id = None
item_type = None
if len(what) >= 1:
item_id = what[0]
if item_id == "state":
# Initialize all states.
for n in self.net["neuron_pools"]:
self.init([n, "state"], mode=mode)
# Done with initialization.
return None
elif item_id in ["parameter", "variables"]:
# Initialize all parameters or variables.
for i in self.dat:
for d, d_l in self.layout[i][item_id].items():
self.init([i, item_id, d], mode=mode)
# Done with initialization.
return None
elif item_id == "updates":
# Initialize all updates.
for i in self.net["plasticities"]:
for target_i in self.dat[i]["updates"]:
for target_p in self.dat[i]["updates"][target_i]:
self.init([i, "updates", target_i, target_p], mode=mode)
# Done with initialization.
return None
else:
# Assume what[0] is an item.
# Determine item type.
item_type = get_item_type(self.net, item_id)
else:
# len(what) ought to be zero here, so everything should be set.
self.init(["state"], mode=0.0)
self.init(["parameter"], mode=mode)
self.init(["variables"], mode=0.0)
self.init(["updates"], mode=0.0)
# Done with initialization.
return None
# Dependent on len of what, determine what is to be set.
set_flag = False
if len(what) == 1:
# Re-init a single item.
if item_type == "np":
pass
elif item_type == "sp":
pass
elif item_type == "plast":
pass
elif item_type == "if":
pass
# TODO
elif len(what) == 2:
if what[1] in ["state"]:
dat_name = "__state__"
dat_layout = self.layout[item_id]["state"]
set_flag = True
else:
raise NameError("SharedMemory.init() inconsistent what parameter for what of length " \
+ str(len(what)) + ".")
elif len(what) == 3:
if what[1] in ["parameter", "variables"]:
dat_name = what[2]
dat_layout = self.layout[item_id][what[1]][what[2]]
set_flag = True
else:
raise NameError("SharedMemory.init() inconsistent what parameter for what of length " \
+ str(len(what)) + ".")
elif len(what) == 4:
if what[1] == "updates":
dat_name = [what[2], [what[3]]]
dat_layout = self.layout[item_id]["updates"][what[2]][what[3]]
set_flag = True
else:
raise NameError("SharedMemory.init() inconsistent what parameter for what of length " \
+ str(len(what)) + ".")
else:
raise NameError("SharedMemory.init() Unexpected what of length " + str(len(what)) + ".")
# Set if something is to be set.
if set_flag:
if item_type == "np":
value = np_init(self.net, item_id, dat_name, dat_layout, mode=mode)
elif item_type == "sp":
value = sp_init(self.net, item_id, dat_name, dat_layout, mode=mode)
elif item_type == "plast":
# Determine plasticity type.
plast_type = self.net["plasticities"][item_id]["type"]
# Get correct plasticity initializer.
try:
plast_init \
= getattr(importlib.import_module("statestream.meta.plasticities." + plast_type),
"plast_init")
value = plast_init(self.net, item_id, dat_name, dat_layout, mode=mode)
except Exception:
value = None
elif item_type == "if":
# Determine interface type.
if_type = self.net["interfaces"][item_id]["type"]
# Get correct plasticity initializer.
try:
if_init = getattr(importlib.import_module("statestream.interfaces." + if_type),
"if_init")
value = if_init(self.net, item_id, dat_name, dat_layout, mode=mode)
except Exception:
value = None
# Fallback if invalid value.
if value is None:
value = self.init_fallback(item_id, dat_name, dat_layout, mode=mode)
# Finally set value.
self.set_shm(what, value)
def init_fallback(self, item_id, dat_name, dat_layout, mode=None):
"""Fallback to default initialization.
"""
# Get local dictionary.
if item_id in self.net["neuron_pools"]:
p = self.net["neuron_pools"][item_id]
elif item_id in self.net["synapse_pools"]:
p = self.net["synapse_pools"][item_id]
elif item_id in self.net["plasticities"]:
p = self.net["plasticities"][item_id]
elif item_id in self.net["interfaces"]:
p = self.net["interfaces"][item_id]
# Dependent on scalar or not, try to initialize.
if is_scalar_shape(dat_layout.shape):
# Scalar values.
if mode is None:
dat_value = np.array(p.get(dat_name, dat_layout.default),
dtype=dat_layout.dtype)
else:
if mode in ["one", 1.0]:
dat_value = np.array(1.0, dtype=dat_layout.dtype)
elif mode in ["zero", 0.0]:
dat_value = np.array(0.0, dtype=dat_layout.dtype)
else:
dat_value = np.array(mode, dtype=dat_layout.dtype)
else:
# If mode is None, set to default.
if mode is None:
dat_value = np.ones(dat_layout.shape, dtype=dat_layout.dtype)
try:
dat_value *= dat_layout.default
except Exception:
dat_value *= 0
print("Warning: No valid initialization for " + str(dat_name) \
+ " of item " + str(item_id) + ". Set to zero.")
else:
# Dependent on specified mode set value.
if mode in ["zero", 0.0]:
dat_value = np.zeros(dat_layout.shape, dtype=dat_layout.dtype)
elif mode in ["one", 1.0]:
dat_value = np.ones(dat_layout.shape, dtype=dat_layout.dtype)
# Return initialized value.
return dat_value
def set_shm(self, which, value):
"""Method to set a specific array in shared memory to value.
"""
if len(which) == 2:
if self.layout[which[0]][which[1]].min is not None:
value = np.maximum(value, self.layout[which[0]][which[1]].min)
if self.layout[which[0]][which[1]].max is not None:
value = np.minimum(value, self.layout[which[0]][which[1]].max)
shape = self.layout[which[0]][which[1]].shape
if is_scalar_shape(shape):
self.dat[which[0]][which[1]][0] = value
elif value.shape == self.dat[which[0]][which[1]].shape:
self.dat[which[0]][which[1]][:] = value
else:
print("\nError set_shm: incompatible shapes: " \
+ str(value.shape) + " " \
+ str(self.dat[which[0]][which[1]].shape) \
+ " for " + str(which))
elif len(which) == 3:
if self.layout[which[0]][which[1]][which[2]].min is not None:
value = np.maximum(value, self.layout[which[0]][which[1]][which[2]].min)
if self.layout[which[0]][which[1]][which[2]].max is not None:
value = np.minimum(value, self.layout[which[0]][which[1]][which[2]].max)
shape = self.layout[which[0]][which[1]][which[2]].shape
if is_scalar_shape(shape):
self.dat[which[0]][which[1]][which[2]][0] = value
elif value.shape == self.dat[which[0]][which[1]][which[2]].shape:
self.dat[which[0]][which[1]][which[2]][:] = value
else:
print("\nError set_shm: incompatible shapes: " \
+ str(value.shape) + " " \
+ str(self.dat[which[0]][which[1]][which[2]].shape) \
+ " for " + str(which))
elif len(which) == 4:
if self.layout[which[0]][which[1]][which[2]][which[3]].min is not None:
value = np.maximum(value, self.layout[which[0]][which[1]][which[2]][which[3]].min)
if self.layout[which[0]][which[1]][which[2]][which[3]].max is not None:
value = np.minimum(value, self.layout[which[0]][which[1]][which[2]][which[3]].max)
shape = self.layout[which[0]][which[1]][which[2]][which[3]].shape
if is_scalar_shape(shape):
self.dat[which[0]][which[1]][which[2]][which[3]][0] = value
elif value.shape == self.dat[which[0]][which[1]][which[2]][which[3]].shape:
self.dat[which[0]][which[1]][which[2]][which[3]][:] = value
else:
print("\nError set_shm: incompatible shapes: " \
+ str(value.shape) + " " \
+ str(self.dat[which[0]][which[1]][which[2]][which[3]].shape) \
+ " for " + str(which))
elif len(which) == 5:
if self.layout[which[0]][which[1]][which[2]][which[3]][which[4]].min is not None:
value = np.maximum(value, self.layout[which[0]][which[1]][which[2]][which[3]][which[4]].min)
if self.layout[which[0]][which[1]][which[2]][which[3]][which[4]].max is not None:
value = np.minimum(value, self.layout[which[0]][which[1]][which[2]][which[3]][which[4]].max)
shape = self.layout[which[0]][which[1]][which[2]][which[3]][which[4]].shape
if is_scalar_shape(shape):
self.dat[which[0]][which[1]][which[2]][which[3]][which[4]][0] = value
elif value.shape == self.dat[which[0]][which[1]][which[2]][which[3]][which[4]].shape:
self.dat[which[0]][which[1]][which[2]][which[3]][which[4]][:] = value
else:
print("\nError set_shm: incompatible shapes: " \
+ str(value.shape) + " " \
+ str(self.dat[which[0]][which[1]][which[2]][which[3]][which[4]].shape) \
+ " for " + str(which))
else:
raise NameError("SharedMemory.set_shm() expected item \
specification of length 2-5, got " + str(len(which)))
def get_shm(self, which):
"""Method to get a specific array in shared memory.
"""
if len(which) == 2:
shape = self.layout[which[0]][which[1]].shape
if is_scalar_shape(shape):
return self.dat[which[0]][which[1]][0]
else:
return self.dat[which[0]][which[1]][:]
elif len(which) == 3:
shape = self.layout[which[0]][which[1]][which[2]].shape
if is_scalar_shape(shape):
return self.dat[which[0]][which[1]][which[2]][0]
else:
return self.dat[which[0]][which[1]][which[2]][:]
elif len(which) == 4:
shape = self.layout[which[0]][which[1]][which[2]][which[3]].shape
if is_scalar_shape(shape):
return self.dat[which[0]][which[1]][which[2]][which[3]][0]
else:
return self.dat[which[0]][which[1]][which[2]][which[3]][:]
elif len(which) == 5:
shape = self.layout[which[0]][which[1]][which[2]][which[3]][which[4]].shape
if is_scalar_shape(shape):
return self.dat[which[0]][which[1]][which[2]][which[3]][which[4]][0]
else:
return self.dat[which[0]][which[1]][which[2]][which[3]][which[4]][:]
else:
raise NameError("SharedMemory.get_shm() expected item \
specification of length 2-5, got " + str(len(which)))
def update_net(self, net):
"""Update the given net (its parameters, etc.) from shared memory.
"""
# Search all parameters of the network in shared memory.
for i in self.dat:
# Determine item type.
i_type = get_item_type(self.net, i)
if i_type is not None:
for p in self.dat[i]['parameter']:
# TODO: For now update of metas is not done.
try:
if p in net[S2L(i_type)][i] and is_scalar_shape(self.layout[i]['parameter'][p].shape):
net[S2L(i_type)][i][p] = float(self.dat[i]['parameter'][p][0])
except Exception:
pass
| 48.962241
| 113
| 0.449898
| 4,472
| 40,198
| 3.933587
| 0.077818
| 0.026263
| 0.030015
| 0.032744
| 0.597863
| 0.541186
| 0.499233
| 0.44807
| 0.399864
| 0.362231
| 0
| 0.017502
| 0.414399
| 40,198
| 820
| 114
| 49.021951
| 0.729779
| 0.133838
| 0
| 0.439739
| 0
| 0
| 0.081343
| 0.004881
| 0
| 0
| 0
| 0.002439
| 0.001629
| 1
| 0.019544
| false
| 0.008143
| 0.022801
| 0
| 0.071661
| 0.013029
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
118a041ae6d96652625804e1d8039c4ff86c7d07
| 3,957
|
py
|
Python
|
core/views.py
|
MichelAtieno/Rogue-Nation
|
c4d78b42b5e6312043de2308a591951d0ba297b8
|
[
"Unlicense"
] | null | null | null |
core/views.py
|
MichelAtieno/Rogue-Nation
|
c4d78b42b5e6312043de2308a591951d0ba297b8
|
[
"Unlicense"
] | 11
|
2020-06-05T22:42:25.000Z
|
2022-03-11T23:58:46.000Z
|
core/views.py
|
MichelAtieno/Rogue-Nation
|
c4d78b42b5e6312043de2308a591951d0ba297b8
|
[
"Unlicense"
] | null | null | null |
from django.db.models import Count, Q
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.shortcuts import render, get_object_or_404
from .models import NewsItem, SignUp, Artist, Athlete, Category
# Create your views here.
def search(request):
queryset = NewsItem.objects.all()
query = request.GET.get('q')
if query:
queryset = queryset.filter(
Q(title__icontains=query)|
Q(news_story__icontains=query)
).distinct()
context = {
'queryset': queryset
}
return render(request, 'search_results.html', context)
def get_category_count():
queryset = NewsItem.objects.values('categories__title').annotate(Count('categories__title'))
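# Each row is a dict like
# {'categories__title': 'Sports', 'categories__title__count': 3}
# ('Sports' is hypothetical; '<field>__count' is Django's default alias).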
return queryset
def get_category():
queryset = Athlete.objects.values('categories__title')
return queryset
def home(request):
queryset = NewsItem.objects.filter(featured=True)
latest = NewsItem.objects.order_by('-date')[0:3]
if request.method == "POST":
email = request.POST["email"]
new_signup = SignUp()
new_signup.email = email
new_signup.save()
context = {
'object_list': queryset,
'latest': latest
}
return render(request, "home_page.html", context)
def news(request):
category_count = get_category_count()
# print(category_count)
most_recent = NewsItem.objects.order_by('-date')[0:6]
news = NewsItem.objects.all()
paginator = Paginator(news, 6)
page_request_var = 'page'
page = request.GET.get(page_request_var)
try:
paginated_queryset = paginator.page(page)
except PageNotAnInteger:
paginated_queryset = paginator.page(1)
except EmptyPage:
paginated_queryset = paginator.page(paginator.num_pages)
context = {
'queryset': paginated_queryset,
'most_recent': most_recent,
'page_request_var': page_request_var,
'category_count': category_count
}
return render(request, "news.html", context)
def post(request, id):
news = get_object_or_404(NewsItem, id=id)
context = {
'news': news
}
return render(request, 'post.html', context)
def news_letter(request):
return render(request, 'news_letter.html')
def get_artist(request):
artists = Artist.objects.all()
context = {
'artists': artists
}
return render(request, 'artists.html', context)
def artist_profile(request, id):
artist = get_object_or_404(Artist, id=id)
queryset = NewsItem.objects.all()
query = artist.name
queryset = queryset.filter(
Q(title__icontains=query)|
Q(news_story__icontains=query)
).distinct()
context = {
'artist': artist,
'queryset': queryset
}
return render(request, 'artist_profile.html', context)
def get_athlete(request):
category = get_category()
athletes = Athlete.objects.all()
all_categories = Category.objects.all()
context = {
'athletes':athletes,
'category': category,
'all_categories': all_categories
}
return render(request, 'athletes.html', context)
def athlete_profile(request, id):
athlete = get_object_or_404(Athlete, id=id)
queryset = NewsItem.objects.all()
query = athlete.name
queryset = queryset.filter(
Q(title__icontains=query)|
Q(news_story__icontains=query)
).distinct()
context = {
'athlete': athlete,
'queryset': queryset
}
return render(request, 'athlete_profile.html', context)
def category_profile(request, id):
one_category = get_object_or_404(Category, id=id)
cat_queryset = Athlete.objects.all()
cat_query = one_category.title
cat_queryset = cat_queryset.filter(Q(categories__title__icontains=cat_query)).distinct()
context = {
'one_category': one_category,
'queryset': cat_queryset
}
return render(request, 'category.html', context)
| 28.06383
| 96
| 0.668183
| 452
| 3,957
| 5.637168
| 0.181416
| 0.047096
| 0.074568
| 0.027473
| 0.225667
| 0.147174
| 0.125981
| 0.098509
| 0.098509
| 0.098509
| 0
| 0.006794
| 0.218853
| 3,957
| 140
| 97
| 28.264286
| 0.817535
| 0.011372
| 0
| 0.252174
| 0
| 0
| 0.098029
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.104348
| false
| 0
| 0.034783
| 0.008696
| 0.243478
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
118a9196e211cb95ce33a333ef7a6481cddafdd3
| 2,133
|
py
|
Python
|
process-invoices/update_invoice_statuses.py
|
MITLibraries/alma-scripts
|
c312692a71a83dc0b5e60761bc3e7b37d7d42099
|
[
"Apache-2.0"
] | null | null | null |
process-invoices/update_invoice_statuses.py
|
MITLibraries/alma-scripts
|
c312692a71a83dc0b5e60761bc3e7b37d7d42099
|
[
"Apache-2.0"
] | 16
|
2021-07-23T20:46:29.000Z
|
2022-03-10T19:34:10.000Z
|
process-invoices/update_invoice_statuses.py
|
MITLibraries/alma-scripts
|
c312692a71a83dc0b5e60761bc3e7b37d7d42099
|
[
"Apache-2.0"
] | null | null | null |
import datetime
import glob
import sys
import requests
from defusedxml import ElementTree as ET
sys.path.append("..")
from llama.alma import Alma_API_Client
import llama.config as config
TODAY = datetime.date.today()
count_total_invoices = 0
count_invoices_updated = 0
count_invoice_errors = 0
# Update empty invoice XML file that gets posted to Alma to use today's date
tree = ET.parse("empty_invoice.xml")
root = tree.getroot()
voucher_date = root.find(".//voucher_date")
voucher_date.text = TODAY.strftime("%Y-%m-%dT12:%M:%SZ")
tree.write("output-files/empty.xml")
# Update invoices status in Alma for all invoice IDs in
# output-files/invoice_ids_YYYYMMDDhhmmss.txt and
# output-files/invoice_special_YYYYMMDDhhmmss.txt
alma_client = Alma_API_Client(config.get_alma_api_key("ALMA_API_ACQ_READ_WRITE_KEY"))
alma_client.set_content_headers("application/xml", "application/xml")
today_string = TODAY.strftime("%Y%m%d")
invoice_files = glob.glob(f"output-files/invoice_ids_{today_string}*.txt")
special_invoice_files = glob.glob(f"output-files/invoice_special_{today_string}*.txt")
with open(invoice_files[0]) as f:
invoice_ids = f.readlines()
with open(special_invoice_files[0]) as f:
invoice_ids.extend(f.readlines())
for item in invoice_ids:
count_total_invoices += 1
invoice_id = item.strip()
print("Marking invoice as Paid in Alma")
try:
paid_xml = alma_client.mark_invoice_paid(
invoice_id, "output-files/empty.xml")
print(f"Invoice #{invoice_id} marked as Paid in Alma\n")
with open(f"output-files/paid_{invoice_id}.xml", "w") as f:
f.write(paid_xml)
count_invoices_updated += 1
except requests.HTTPError as e:
count_invoice_errors += 1
print(f"Error marking invoice #{invoice_id} as paid in Alma")
print(f"{e.response.text}\n")
print("'update_invoice_statuses' process complete")
print("Summary:")
print(f" Total invoices processed: {count_total_invoices}")
print(f" Invoices marked as paid in Alma: {count_invoices_updated}")
print(
f" Invoices not successfully marked as paid in Alma: {count_invoice_errors}"
)
| 35.55
| 86
| 0.735115
| 322
| 2,133
| 4.65528
| 0.307453
| 0.051368
| 0.026684
| 0.040027
| 0.12942
| 0.117412
| 0.086724
| 0.052035
| 0
| 0
| 0
| 0.004972
| 0.15143
| 2,133
| 59
| 87
| 36.152542
| 0.823204
| 0.105016
| 0
| 0
| 0
| 0
| 0.34979
| 0.152311
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.148936
| 0
| 0.148936
| 0.191489
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
118a963b9511f86ed9bf75aa58347aecde3de782
| 4,117
|
py
|
Python
|
src/converter/convert_mobilenetv2.py
|
xiongzhiyao/pytorch-segmentation
|
a13b1aa1b316d06f050deef29d5be8b6e99460e7
|
[
"MIT"
] | 359
|
2018-11-22T04:13:19.000Z
|
2022-03-08T09:05:47.000Z
|
src/converter/convert_mobilenetv2.py
|
xiongzhiyao/pytorch-segmentation
|
a13b1aa1b316d06f050deef29d5be8b6e99460e7
|
[
"MIT"
] | 21
|
2018-12-07T22:37:09.000Z
|
2021-01-22T02:18:28.000Z
|
src/converter/convert_mobilenetv2.py
|
xiongzhiyao/pytorch-segmentation
|
a13b1aa1b316d06f050deef29d5be8b6e99460e7
|
[
"MIT"
] | 73
|
2018-11-22T06:16:54.000Z
|
2021-03-30T18:26:47.000Z
|
import argparse
from pathlib import Path
import tensorflow as tf
import torch
from models.net import SPPNet
def convert_mobilenetv2(ckpt_path, num_classes):
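# TensorFlow checkpoints store conv kernels as (H, W, in, out) and depthwise
# kernels as (H, W, in, multiplier), while PyTorch expects (out, in, H, W);
# hence the transposes in the converters below.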
def conv_converter(pt_layer, tf_layer_name, depthwise=False, bias=False):
if depthwise:
pt_layer.weight.data = torch.Tensor(
reader.get_tensor(f'{tf_layer_name}/depthwise_weights').transpose(2, 3, 0, 1))
else:
pt_layer.weight.data = torch.Tensor(reader.get_tensor(f'{tf_layer_name}/weights').transpose(3, 2, 0, 1))
if bias:
pt_layer.bias.data = torch.Tensor(reader.get_tensor(f'{tf_layer_name}/biases'))
def bn_converter(pt_layer, tf_layer_name):
pt_layer.bias.data = torch.Tensor(reader.get_tensor(f'{tf_layer_name}/beta'))
pt_layer.weight.data = torch.Tensor(reader.get_tensor(f'{tf_layer_name}/gamma'))
pt_layer.running_mean.data = torch.Tensor(reader.get_tensor(f'{tf_layer_name}/moving_mean'))
pt_layer.running_var.data = torch.Tensor(reader.get_tensor(f'{tf_layer_name}/moving_variance'))
def block_converter(pt_layer, tf_layer_name):
if hasattr(pt_layer, 'expand'):
conv_converter(pt_layer.expand.conv, f'{tf_layer_name}/expand')
bn_converter(pt_layer.expand.bn, f'{tf_layer_name}/expand/BatchNorm')
conv_converter(pt_layer.depthwise.conv, f'{tf_layer_name}/depthwise', depthwise=True)
bn_converter(pt_layer.depthwise.bn, f'{tf_layer_name}/depthwise/BatchNorm')
conv_converter(pt_layer.project.conv, f'{tf_layer_name}/project')
bn_converter(pt_layer.project.bn, f'{tf_layer_name}/project/BatchNorm')
reader = tf.train.NewCheckpointReader(ckpt_path)
model = SPPNet(num_classes, enc_type='mobilenetv2', dec_type='maspp')
# MobileNetV2
conv_converter(model.encoder.conv, 'MobilenetV2/Conv')
bn_converter(model.encoder.bn, 'MobilenetV2/Conv/BatchNorm')
block_converter(model.encoder.block0, 'MobilenetV2/expanded_conv')
block_converter(model.encoder.block1, 'MobilenetV2/expanded_conv_1')
block_converter(model.encoder.block2, 'MobilenetV2/expanded_conv_2')
block_converter(model.encoder.block3, 'MobilenetV2/expanded_conv_3')
block_converter(model.encoder.block4, 'MobilenetV2/expanded_conv_4')
block_converter(model.encoder.block5, 'MobilenetV2/expanded_conv_5')
block_converter(model.encoder.block6, 'MobilenetV2/expanded_conv_6')
block_converter(model.encoder.block7, 'MobilenetV2/expanded_conv_7')
block_converter(model.encoder.block8, 'MobilenetV2/expanded_conv_8')
block_converter(model.encoder.block9, 'MobilenetV2/expanded_conv_9')
block_converter(model.encoder.block10, 'MobilenetV2/expanded_conv_10')
block_converter(model.encoder.block11, 'MobilenetV2/expanded_conv_11')
block_converter(model.encoder.block12, 'MobilenetV2/expanded_conv_12')
block_converter(model.encoder.block13, 'MobilenetV2/expanded_conv_13')
block_converter(model.encoder.block14, 'MobilenetV2/expanded_conv_14')
block_converter(model.encoder.block15, 'MobilenetV2/expanded_conv_15')
block_converter(model.encoder.block16, 'MobilenetV2/expanded_conv_16')
# SPP
conv_converter(model.spp.aspp0.conv, 'aspp0')
bn_converter(model.spp.aspp0.bn, 'aspp0/BatchNorm')
conv_converter(model.spp.image_pooling.conv, 'image_pooling')
bn_converter(model.spp.image_pooling.bn, 'image_pooling/BatchNorm')
conv_converter(model.spp.conv, 'concat_projection')
bn_converter(model.spp.bn, 'concat_projection/BatchNorm')
# Logits
conv_converter(model.logits, 'logits/semantic', bias=True)
return model
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('ckpt_path')
parser.add_argument('num_classes', type=int)
parser.add_argument('output_path')
args = parser.parse_args()
ckpt_path = args.ckpt_path
num_classes = args.num_classes
output_path = Path(args.output_path)
output_path.parent.mkdir(parents=True, exist_ok=True)
model = convert_mobilenetv2(ckpt_path, num_classes)
torch.save(model.state_dict(), output_path)
| 45.744444
| 116
| 0.747389
| 552
| 4,117
| 5.268116
| 0.211957
| 0.125172
| 0.137208
| 0.151995
| 0.282669
| 0.178817
| 0.126204
| 0.126204
| 0.126204
| 0.126204
| 0
| 0.023001
| 0.134078
| 4,117
| 89
| 117
| 46.258427
| 0.792707
| 0.005344
| 0
| 0
| 0
| 0
| 0.251528
| 0.211929
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.073529
| 0
| 0.147059
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
118bb9cc42104087368b917dcb655edae791e512
| 2,624
|
py
|
Python
|
extract-subimages-videos.py
|
rzaluska/fcnn-conferences
|
509946a4d342451f29e7b8706b6ff46b0af20f36
|
[
"MIT"
] | 1
|
2018-04-07T05:55:48.000Z
|
2018-04-07T05:55:48.000Z
|
extract-subimages-videos.py
|
rzaluska/fcnn-conferences
|
509946a4d342451f29e7b8706b6ff46b0af20f36
|
[
"MIT"
] | null | null | null |
extract-subimages-videos.py
|
rzaluska/fcnn-conferences
|
509946a4d342451f29e7b8706b6ff46b0af20f36
|
[
"MIT"
] | null | null | null |
# script for extracting patches from video frames suitable for neural network
# training
from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array, array_to_img
from PIL import Image
import sys
import os
import glob
from os.path import basename, splitext
import numpy as np
def acceptable(a):
if np.average(a) > 0.95 * 255:
return False
return True
overlap = 8
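# Patches are taken on a grid with stride subimage_size // overlap, so with
# overlap = 8 consecutive patches share 7/8 of their extent.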
source_path = './reducted-conferences-videos-equations/'
destination_path = './conferences-videos-equations-samples-512/'
for file_name in glob.glob(source_path+ "*.jpg"):
name_without_extenstion = splitext(basename(file_name))[0]
gt_file_path = source_path + name_without_extenstion + ".gt.jpg"
print(file_name)
print(gt_file_path)
source_image = load_img(file_name, grayscale=False)
try:
groud_img = load_img(gt_file_path, grayscale=True)
except FileNotFoundError:
#groud_img = Image.new('RGB', (source_image.size[0], source_image.size[1]), (255, 255, 255))
continue
size_list = [512]
for size_x in size_list:
for size_y in size_list:
subimage_size = (size_x, size_y)
num_of_subimages_horizontal = source_image.size[0] // (subimage_size[0] // overlap)
num_of_subimages_vertical = source_image.size[1] // (subimage_size[1] // overlap)
rest_h = source_image.size[0] - num_of_subimages_horizontal * (subimage_size[0] // overlap)
rest_v = source_image.size[1] - num_of_subimages_vertical * (subimage_size[1] // overlap)
for i in range(num_of_subimages_horizontal):
for j in range(num_of_subimages_vertical):
x = i * (subimage_size[0] // overlap)
y = j * (subimage_size[1] // overlap)
w = x + (subimage_size[0])
h = y + (subimage_size[1])
crop_rect = (x,y,w,h)
if w > source_image.size[0] or h > source_image.size[1]:
continue
chunk_file_name = "{dir}{name}-{sizex}-{sizey}-{i}-{j}".format(dir=destination_path, i=i, j=j, name=name_without_extenstion, sizex=size_x, sizey=size_y)
gt_sub_image = groud_img.crop(crop_rect)
if not acceptable(img_to_array(gt_sub_image)):
continue
print(chunk_file_name)
gt_sub_image.save(chunk_file_name + ".gt.jpg")
sub_image = source_image.crop(crop_rect)
sub_image.save(chunk_file_name+ ".jpg")
| 39.164179
| 172
| 0.626905
| 352
| 2,624
| 4.389205
| 0.272727
| 0.071197
| 0.07767
| 0.041424
| 0.059547
| 0.032362
| 0
| 0
| 0
| 0
| 0
| 0.020376
| 0.270579
| 2,624
| 66
| 173
| 39.757576
| 0.786834
| 0.066692
| 0
| 0.1
| 0
| 0
| 0.057716
| 0.048301
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02
| false
| 0
| 0.16
| 0
| 0.22
| 0.06
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
118d551ca0c80c0e51a840c724d05bb79e776a14
| 2,113
|
py
|
Python
|
src/extract_cdi/__init__.py
|
igorgbr/extract_cdi
|
66d8aa56d0099c04ef491908ab246389b79fdc7e
|
[
"MIT"
] | null | null | null |
src/extract_cdi/__init__.py
|
igorgbr/extract_cdi
|
66d8aa56d0099c04ef491908ab246389b79fdc7e
|
[
"MIT"
] | null | null | null |
src/extract_cdi/__init__.py
|
igorgbr/extract_cdi
|
66d8aa56d0099c04ef491908ab246389b79fdc7e
|
[
"MIT"
] | null | null | null |
import os
import time
import json
from random import random
from datetime import datetime
import pandas as pd
import seaborn as sns
import requests
class SearchAndExtractData(object):
def __init__(self, file: str, graph_name: str) -> None:
self.file = file
self.graph_name = graph_name
def create_csv(self) -> None:
ENDPOINT = "ConsultarTaxaDICetip.aspx"
URL = f"https://www2.cetip.com.br/ConsultarTaxaDi/{ENDPOINT}"
# Create the date and time variables
for _ in range(0, 10):
data_e_hora = datetime.now()
data = datetime.strftime(data_e_hora, "%Y/%m/%d")
hora = datetime.strftime(data_e_hora, "%H:%M:%S")
# Fetch the CDI rate from the B3 website
try:
response = requests.get(URL)
response.raise_for_status()
except requests.HTTPError:
print("Dado não encontrado, continuando.")
cdi = None
except Exception as exc:
print("Erro, parando a execução.")
raise exc
else:
dado = json.loads(response.text)
cdi = float(dado["taxa"].replace(",", "."))
# Check whether the "taxa-cdi.csv" file exists
if os.path.exists(f"./{self.file}") is False:
with open(
file=f"./{self.file}", mode="w", encoding="utf8"
) as fp:
fp.write("data,hora,taxa\n")
# Append the data to the "taxa-cdi.csv" file
with open(file=f"./{self.file}", mode="a", encoding="utf8") as fp:
fp.write(f"{data},{hora},{cdi}\n")
time.sleep(2 + (random() - 0.5))
print("Sucesso")
def create_graph(self) -> None:
# Extract the hora and taxa columns
df = pd.read_csv(f"./{self.file}")
# Save the plot
grafico = sns.lineplot(x=df["hora"], y=df["taxa"])
_ = grafico.set_xticklabels(labels=df["hora"], rotation=90)
grafico.get_figure().savefig(f"{self.graph_name}.png")
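# Example usage (a sketch; the file and graph names are hypothetical):
#   extractor = SearchAndExtractData(file="taxa-cdi.csv", graph_name="taxa-cdi")
#   extractor.create_csv()
#   extractor.create_graph()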
| 32.507692
| 78
| 0.549456
| 260
| 2,113
| 4.376923
| 0.480769
| 0.042179
| 0.031634
| 0.036907
| 0.128295
| 0.084359
| 0.043937
| 0
| 0
| 0
| 0
| 0.00838
| 0.322291
| 2,113
| 64
| 79
| 33.015625
| 0.786313
| 0.097018
| 0
| 0
| 0
| 0
| 0.155708
| 0.035245
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.177778
| 0
| 0.266667
| 0.066667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
118ff67c852ea38f217b5c566e77f4efa9b7fe30
| 9,368
|
py
|
Python
|
FirmsLocations/Retrieve/density_assignation.py
|
tgquintela/Firms_locations
|
476680cbc3eb1308811633d24810049e215101a0
|
[
"MIT"
] | null | null | null |
FirmsLocations/Retrieve/density_assignation.py
|
tgquintela/Firms_locations
|
476680cbc3eb1308811633d24810049e215101a0
|
[
"MIT"
] | null | null | null |
FirmsLocations/Retrieve/density_assignation.py
|
tgquintela/Firms_locations
|
476680cbc3eb1308811633d24810049e215101a0
|
[
"MIT"
] | null | null | null |
"""
Assign a geographic density value to points.
"""
from scipy.spatial import KDTree
from scipy.spatial.distance import cdist
from scipy.stats import norm
from scipy.optimize import minimize
import numpy as np
def general_density_assignation(locs, parameters, values=None, locs2=None):
"Density assignation function."
# Creation of the kdtree for retrieving neighs
if locs2 is None:
leafsize = int(locs.shape[0]/float(10.))
kdtree = KDTree(locs, leafsize=leafsize)
else:
leafsize = int(locs2.shape[0]/float(10.))
kdtree = KDTree(locs2, leafsize=leafsize)
parameters = preparation_parameters(parameters)
M = compute_measure(locs=locs, kdtree=kdtree, values=values, **parameters)
## Recurrent measure (TODO)[better before with the population?]
return M
# method, params (weitghted count, ...)
# method, params (linear, trapezoid,...)
###############################################################################
############################### Compute measure ###############################
###############################################################################
def compute_measure(locs, kdtree, max_r, values, method, params):
"Retrieve the neighs indices and the neighs descriptors and weights."
## Computation of the measure based in the distances as weights.
M = np.zeros(locs.shape[0])
for i in range(locs.shape[0]):
neighs, dist = get_self_neighs_i(locs, kdtree, max_r, i)
M[i] = compute_measure_i(neighs, dist, values[neighs], method, params)
return M
def get_self_neighs_i(locs, kdtree, max_r, i):
"Retrieving neighs and distance."
loc = locs[i, :]
neighs = kdtree.query_ball_point(loc, max_r)
neighs.remove(i)
dist = cdist(locs[[i], :], locs[neighs, :])[0]
return neighs, dist
def compute_measure_i(neighs, dist, values, method, params):
"Swither function between different possible options to compute density."
if method == 'weighted_count':
measure = compute_measure_wcount(neighs, dist, params)
elif method == 'weighted_avg':
measure = compute_measure_wavg(neighs, dist, values, params)
return measure
def compute_measure_wcount(neighs, dist, params):
"""Measure to compute density only based on the weighted count of selected
elements around the point considered.
"""
weights = from_distance_to_weights(dist, **params)
measure = np.sum(weights)
return measure
def compute_measure_wavg(neighs, dist, values, params):
"""Measure to compute density based on the weighted average of selected
elements around the point considered.
"""
weights = from_distance_to_weights(dist, **params)
measure = np.sum(weights * values)
return measure
###############################################################################
############################# Distance to weights #############################
###############################################################################
def from_distance_to_weights(dist, method, params):
"Function which transforms the distance given to weights."
if method == 'linear':
weights = dist2weights_linear(dist, **params)
elif method == 'Trapezoid':
weights = dist2weights_trapez(dist, **params)
elif method == 'inverse_prop':
weights = dist2weights_invers(dist, **params)
elif method == 'exponential':
weights = dist2weights_exp(dist, **params)
elif method == 'gaussian':
weights = dist2weights_gauss(dist, **params)
elif method == 'surgaussian':
weights = dist2weights_surgauss(dist, **params)
elif method == 'sigmoid':
weights = dist2weights_sigmoid(dist, **params)
return weights
def dist2weights_linear(dist, max_r, max_w=1, min_w=0):
"Linear distance weighting."
weights = (max_r - dist)*((max_w-min_w)/float(max_r))+min_w
return weights
def dist2weights_trapez(dist, max_r, r2, max_w=1, min_w=0):
"Trapezoidal distance weighting."
if type(dist) == np.ndarray:
weights = dist2weights_linear(dist-r2, max_r-r2, max_w, min_w)
weights[dist <= r2] = max_w
else:
if dist <= r2:
weights = max_w
else:
weights = dist2weights_linear(dist-r2, max_r-r2, max_w, min_w)
return weights
def dist2weights_invers(dist, max_r, max_w=1, min_w=1e-8, rescale=True):
"Inverse distance weighting."
if min_w == 0:
tau = 1.
else:
tau = (max_w/min_w-1)/max_r
if rescale:
floor_f = 1./float(1.+tau*max_r)
weights = max_w/(1.-floor_f) * (1./float(1.+tau*dist)-floor_f)
else:
weights = max_w/float(1.+tau*dist)
return weights
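# Derivation of tau above: requiring max_w / (1 + tau*max_r) == min_w at
# dist == max_r gives tau = (max_w/min_w - 1) / max_r.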
def dist2weights_exp(dist, max_r, max_w=1, min_w=1e-8, rescale=True):
"Exponential distanve weighting."
if min_w == 0:
C = 1.
else:
C = -np.log(min_w/max_w)
if rescale:
weights = max_w/(1.-np.exp(-C)) * np.exp(-C*dist/max_r)
else:
weights = max_w * np.exp(-C*dist/max_r)
return weights
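# Derivation of C above: requiring max_w * exp(-C) == min_w at dist == max_r
# gives C = -log(min_w / max_w).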
def dist2weights_gauss(dist, max_r, max_w=1, min_w=1e-3, S=None, rescale=True):
"Gaussian distance weighting."
if S is None:
S = set_scale_surgauss(max_r, max_w, min_w)
if rescale:
A = max_w/(norm.pdf(0)-norm.pdf(max_r, scale=S))
weights = A*norm.pdf(dist, scale=S)
else:
A = max_w/norm.pdf(0)
weights = A*norm.pdf(dist, scale=S)
return weights
def dist2weights_surgauss(dist, max_r, max_w=1, min_w=1e-3, S=None,
rescale=True):
"Survival gaussian distance weighting."
if S is None:
S = set_scale_surgauss(max_r, max_w, min_w)
if rescale:
A = max_w/(norm.sf(0, scale=S)-norm.sf(max_r, scale=S))
weights = A*(norm.sf(dist, scale=S)-norm.sf(max_r, scale=S))
else:
A = max_w/norm.sf(0)
weights = A*norm.sf(dist, scale=S)
return weights
def dist2weights_sigmoid(dist, max_r, max_w=1, min_w=1e-3, r_char=0, B=None,
rescale=True):
"Sigmoid-like distance weighting"
C = r_char*max_r
if B is None:
B = set_scale_sigmoid(max_r, max_w, min_w, r_char)
sigmoid = lambda x: 1./(1.+B*np.exp(x+C))
if rescale:
floor_f = sigmoid(max_r)
weights = max_w/(sigmoid(0)-floor_f)*(sigmoid(dist)-floor_f)
else:
weights = 1./(1.+B*np.exp(dist+C))
return weights
###############################################################################
############################# Set scale functions #############################
###############################################################################
def set_scales_kernel(method, max_r, max_w, min_w, r_char=None):
"Switcher function for set scale functions."
if method == 'surgaussian':
scale = set_scale_surgauss(max_r, max_w, min_w)
elif method == 'gaussian':
scale = set_scale_gauss(max_r, max_w, min_w)
elif method == 'sigmoid':
scale = set_scale_sigmoid(max_r, max_w, min_w, r_char)
return scale
def set_scale_surgauss(max_r, max_w, min_w):
"Set the scale factor of the surgauss kernel."
A = max_w/norm.sf(0)
scale = minimize(lambda x: (A*norm.sf(max_r, scale=x)-min_w)**2,
x0=np.array([max_r]), method='BFGS',
tol=1e-8, bounds=(0, None))
scale = scale['x'][0]
return scale
def set_scale_gauss(max_r, max_w, min_w):
"Set the scale factor of the gauss kernel."
A = max_w/norm.pdf(0)
scale = minimize(lambda x: (A*norm.pdf(max_r, scale=x)-min_w)**2,
x0=np.array([max_r]), method='BFGS',
tol=1e-8, bounds=(0, None))
scale = scale['x'][0]
return scale
def set_scale_sigmoid(max_r, max_w, min_w, r_char):
"Set scale for sigmoidal functions."
C = r_char*max_r
sigmoid_c = lambda B: 1./(1.+B*np.exp(max_r+C)) - min_w
B = minimize(lambda x: sigmoid_c(x)**2,
x0=np.array([1]), method='BFGS',
tol=1e-8, bounds=(0, None))
return B['x'][0]
###############################################################################
############################# Preparation inputs #############################
###############################################################################
def preparation_parameters(parameters):
"Function to put into coherence the selected parameters."
method = parameters['params']['method']
params = parameters['params']['params']
if method == 'gaussian':
bool_scale = 'S' in params
if not bool_scale:
scale = set_scale_gauss(params['max_r'], params['max_w'],
params['min_w'])
parameters['params']['params']['S'] = scale
elif method == 'surgaussian':
bool_scale = 'S' in params
if not bool_scale:
scale = set_scale_surgauss(params['max_r'], params['max_w'],
params['min_w'])
parameters['params']['params']['S'] = scale
elif method == 'sigmoid':
bool_scale = 'B' in params
if not bool_scale:
scale = set_scale_sigmoid(params['max_r'], params['max_w'],
params['min_w'], params['r_char'])
parameters['params']['params']['B'] = scale
return parameters
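# Hedged example (not in the original): preparation_parameters fills in the
# kernel scale when the caller omits it, using the nested layout it expects:
# parameters = {'params': {'method': 'gaussian',
#                          'params': {'max_r': 10., 'max_w': 1., 'min_w': 1e-3}}}
# parameters = preparation_parameters(parameters)
# assert 'S' in parameters['params']['params']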
| 34.315018 | 79 | 0.570773 | 1,214 | 9,368 | 4.228171 | 0.135914 | 0.032729 | 0.02182 | 0.024937 | 0.473602 | 0.404052 | 0.338204 | 0.289499 | 0.265147 | 0.226768 | 0 | 0.013344 | 0.224061 | 9,368 | 272 | 80 | 34.441176 | 0.692805 | 0.137383 | 0 | 0.385417 | 0 | 0 | 0.121266 | 0 | 0 | 0 | 0 | 0.003676 | 0 | 1 | 0.098958 | false | 0 | 0.026042 | 0 | 0.223958 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1190f1b038385208afbc477445817bdebba87dc5 | 561 | py | Python
nexus/pylon/sources/specific/biorxiv.py | leoll2/hyperboria | 30a0ae466b290208f690560160ef1f5c16e4a744 | ["Unlicense"] | null | null | null
nexus/pylon/sources/specific/biorxiv.py | leoll2/hyperboria | 30a0ae466b290208f690560160ef1f5c16e4a744 | ["Unlicense"] | null | null | null
nexus/pylon/sources/specific/biorxiv.py | leoll2/hyperboria | 30a0ae466b290208f690560160ef1f5c16e4a744 | ["Unlicense"] | null | null | null |
from typing import AsyncIterable
from nexus.pylon.sources.base import (
DoiSource,
PreparedRequest,
)
class BiorxivSource(DoiSource):
base_url = 'https://dx.doi.org'
async def resolve(self) -> AsyncIterable[PreparedRequest]:
async with self.get_resolve_session() as session:
url = f'{self.base_url}/{self.doi}'
async with session.get(
url,
timeout=self.resolve_timeout
) as resp:
yield PreparedRequest(method='get', url=str(resp.url) + '.full.pdf')
| 28.05 | 84 | 0.616756 | 63 | 561 | 5.412698 | 0.52381 | 0.041056 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.27451 | 561 | 19 | 85 | 29.526316 | 0.837838 | 0 | 0 | 0 | 0 | 0 | 0.099822 | 0.046346 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.133333 | 0 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1193d717cb8b9aa0587bf651757ef62435cc6b62 | 3,645 | py | Python
addLatLon.py | amnh-sciviz/amnh-time-machine | c75c75c6bd3ee91d81cb4b0181a292de27eab9c8 | ["MIT"] | null | null | null
addLatLon.py | amnh-sciviz/amnh-time-machine | c75c75c6bd3ee91d81cb4b0181a292de27eab9c8 | ["MIT"] | null | null | null
addLatLon.py | amnh-sciviz/amnh-time-machine | c75c75c6bd3ee91d81cb4b0181a292de27eab9c8 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
import argparse
from difflib import SequenceMatcher
import os
from pprint import pprint
import sys
import lib.eac_utils as eac
import lib.io_utils as io
# input
parser = argparse.ArgumentParser()
parser.add_argument('-in', dest="INPUT_FILE", default="data/eac_dates.csv", help="File with EAC data (from collectDates.py)")
parser.add_argument('-countries', dest="COUNTRIES_FILE", default="data/countries.csv", help="File with countries data")
parser.add_argument('-states', dest="STATES_FILE", default="data/states.csv", help="File with states data")
parser.add_argument('-keys', dest="KEYS", default="name,dateplace,dateevent", help="List of keys to check in order of priority (first is highest priority)")
parser.add_argument('-out', dest="OUTPUT_FILE", default="data/eac_expeditions.csv", help="File for output")
a = parser.parse_args()
MIN_MATCH_LEN = 4
placeKeys = a.KEYS.strip().split(",")
keysToAdd = ["lon", "lat", "match"]
# Make sure output dirs exist
io.makeDirectories(a.OUTPUT_FILE)
_, countries = io.readCsv(a.COUNTRIES_FILE)
_, states = io.readCsv(a.STATES_FILE)
# https://stackoverflow.com/questions/18715688/find-common-substring-between-two-strings
def findLongestCommonSubstring(string1, string2):
match = SequenceMatcher(None, string1, string2).find_longest_match(0, len(string1), 0, len(string2))
    if match.size:  # find_longest_match always returns a Match; size 0 means no common substring
        return string1[match.a: match.a + match.size]
    else:
        return None
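# Example (illustrative, not in the original script):
# findLongestCommonSubstring("galapagos islands", "ecuador galapagos")  # -> "galapagos"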
def listIntersection(a, b):
return list(set(a).intersection(set(b)))
def isValidMatch(candidate, match):
if match is None:
return False
candidate = candidate.lower()
match = match.lower()
valid = True
stopWords = ["and", "the", "to", "of", "united", "american", "island", "islands", "north", "south", "southern", "northern", "east", "west", "eastern", "western", "central", "columbia", "african"]
aList = [word.strip('[]()') for word in candidate.split()]
bList = [word.strip('[]()') for word in match.split()]
intersections = listIntersection(aList, bList)
intersections = [word for word in intersections if word not in stopWords and len(word) > 3]
if len(intersections) <= 0:
valid = False
return valid
def findPlace(value, pool):
value = value.lower()
matches = []
for i, candidate in enumerate(pool):
match = findLongestCommonSubstring(value, candidate["name"].lower())
if isValidMatch(value, candidate["name"]):
matches.append((i,match))
matches = [m for m in matches if len(m[1]) >= MIN_MATCH_LEN]
if len(matches) > 0:
matches = sorted(matches, key=lambda m:-len(m[1]))
place = pool[matches[0][0]]
print("%s = %s" % (value, place["name"]))
return (place["longitude"], place["latitude"], place["name"])
else:
return (None, None, None)
# retrieve expeditions
expeditions = []
fieldNames, eacData = io.readCsv(a.INPUT_FILE)
for key in keysToAdd:
if key not in fieldNames:
fieldNames.append(key)
# eacData = [e for e in eacData if e["type"]=="Expedition"]
entryCount = len(eacData)
for i, entry in enumerate(eacData):
if entry["type"]=="Expedition":
for key in placeKeys:
lon, lat, match = findPlace(entry[key], countries+states)
if match:
eacData[i].update({
"lon": lon,
"lat": lat,
"match": match
})
break
sys.stdout.write('\r')
sys.stdout.write("%s%%" % round(1.0*(i+1)/entryCount*100,2))
sys.stdout.flush()
io.writeCsv(a.OUTPUT_FILE, eacData, fieldNames)
| 35.735294 | 199 | 0.652126 | 471 | 3,645 | 4.989384 | 0.341826 | 0.019149 | 0.03617 | 0.019149 | 0.015319 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011259 | 0.195885 | 3,645 | 101 | 200 | 36.089109 | 0.790515 | 0.060631 | 0 | 0.051282 | 0 | 0 | 0.160082 | 0.014047 | 0 | 0 | 0 | 0 | 0 | 1 | 0.051282 | false | 0 | 0.089744 | 0.012821 | 0.230769 | 0.025641 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
11958f77466ed28b0ddf34aab10041bc97b2f55d | 912 | py | Python
Solutions/problem07.py | WalrusCow/euler | b5bfa67c87c7043f521cde32e7212c0fffdbacd9 | ["MIT"] | null | null | null
Solutions/problem07.py | WalrusCow/euler | b5bfa67c87c7043f521cde32e7212c0fffdbacd9 | ["MIT"] | null | null | null
Solutions/problem07.py | WalrusCow/euler | b5bfa67c87c7043f521cde32e7212c0fffdbacd9 | ["MIT"] | null | null | null |
# Project Euler Problem 7
# Created on: 2012-06-13
# Created by: William McDonald
import math
import time
# Short list of prime numbers under 20
primeList = [2, 3, 5, 7, 11, 13, 17, 19]
# Returns True if n is prime, otherwise False
def isPrime(n):
prime = True
for i in primeList:
if n % i == 0:
prime = False
break
if i > math.floor(math.sqrt(n)):
break
return prime
# Returns the nth prime number (1-indexed)
def getPrime(n):
    # Extend the cached list until it holds at least n primes, then index it.
    # (The original extended one prime too far and could return the (n+1)th prime.)
    p = primeList[-1] + 2
    while len(primeList) < n:
        if isPrime(p):
            primeList.append(p)
        p += 2
    return primeList[n - 1]
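# Sanity checks (added for illustration; not in the original solution):
assert getPrime(1) == 2
assert getPrime(6) == 13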
start = time.time()
ans = getPrime(10001)
cost = time.time() - start
print(ans)
print("Time: {}".format(cost))
| 24 | 46 | 0.551535 | 124 | 912 | 4.056452 | 0.508065 | 0.095427 | 0.083499 | 0.087475 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.058043 | 0.338816 | 912 | 38 | 47 | 24 | 0.776119 | 0.202851 | 0 | 0.074074 | 0 | 0 | 0.011696 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.074074 | 0 | 0.259259 | 0.074074 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
119622f084f9dfff43411b649a9c89be1e105982 | 1,820 | py | Python
cogs/ban.py | QuentiumYT/QuentiumBot | 1673d24d93f13f464b1175424529c4d58abb5c00 | ["MIT"] | 9 | 2019-11-14T10:12:00.000Z | 2021-12-17T13:05:40.000Z
cogs/ban.py | QuentiumYT/QuentiumBot | 1673d24d93f13f464b1175424529c4d58abb5c00 | ["MIT"] | null | null | null
cogs/ban.py | QuentiumYT/QuentiumBot | 1673d24d93f13f464b1175424529c4d58abb5c00 | ["MIT"] | 4 | 2020-08-20T21:24:52.000Z | 2021-12-17T13:05:17.000Z |
import discord
from discord.ext import commands
from QuentiumBot import HandleData, get_translations
# Basic command configs
cmd_name = "ban"
tran = get_translations()
aliases = [] if not tran[cmd_name]["fr"]["aliases"] else tran[cmd_name]["fr"]["aliases"].split("/")
class BanAdminRights(commands.Cog):
"""Ban command in Administration Rights section"""
def __init__(self, client):
self.client = client
@commands.command(
name=cmd_name,
aliases=aliases,
pass_context=True
)
@commands.guild_only()
async def ban_cmd(self, ctx, *, member: discord.Member = None):
# Get specific server data
if isinstance(ctx.channel, discord.TextChannel):
data = await HandleData.retrieve_data(self, ctx.message.guild)
lang_server = data[0]
else:
lang_server = "en"
cmd_tran = tran[cmd_name][lang_server]
# Doesn't respond to bots
        if not ctx.message.author.bot:
# Check user perms
if not ctx.message.author.guild_permissions.ban_members:
return await ctx.send(cmd_tran["msg_perm_ban_user"].format(ctx.message.author.name))
# Check bot perms
if not ctx.message.guild.me.guild_permissions.ban_members:
return await ctx.send(cmd_tran["msg_perm_ban_bot"])
# No member, aborting
if not member:
return await ctx.send(cmd_tran["msg_mention_user"].format(ctx.message.author.name))
# Ban the member
await member.ban()
embed = discord.Embed(color=0xFF1111)
embed.description = cmd_tran["msg_user_baned"].format(member.name)
await ctx.send(embed=embed)
def setup(client):
client.add_cog(BanAdminRights(client))
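# Hedged sketch (an assumption, not from this repository): get_translations()
# is expected to return a nested dict shaped roughly like
# {'ban': {'fr': {'aliases': 'bannir/expulser', ...},
#          'en': {'aliases': '', 'msg_perm_ban_user': '{}, you cannot ban members.', ...}}}
# so that tran[cmd_name][lang_server] above resolves to one language's strings.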
| 36.4 | 100 | 0.637912 | 227 | 1,820 | 4.947137 | 0.365639 | 0.053428 | 0.05699 | 0.040071 | 0.28228 | 0.186999 | 0.133571 | 0.108638 | 0.108638 | 0.108638 | 0 | 0.004438 | 0.257143 | 1,820 | 49 | 101 | 37.142857 | 0.826183 | 0.101099 | 0 | 0 | 0 | 0 | 0.053571 | 0 | 0 | 0 | 0.004926 | 0 | 0 | 1 | 0.057143 | false | 0.028571 | 0.085714 | 0 | 0.257143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
119816fffb1e07970d7a5eddf0acda8930b0e4b4 | 3,057 | py | Python
tpp_tensorflow/models/semisparse.py | gfrogat/tpp_tensorflow | 711dd8cc0a8155ce6b6e5663afb2331b55748d30 | ["MIT"] | null | null | null
tpp_tensorflow/models/semisparse.py | gfrogat/tpp_tensorflow | 711dd8cc0a8155ce6b6e5663afb2331b55748d30 | ["MIT"] | null | null | null
tpp_tensorflow/models/semisparse.py | gfrogat/tpp_tensorflow | 711dd8cc0a8155ce6b6e5663afb2331b55748d30 | ["MIT"] | null | null | null |
from tensorflow.keras import Model, layers, regularizers
class SemiSparseInput(Model):
def __init__(self, params):
super(SemiSparseInput, self).__init__()
# Correctly handle SELU
dropout = layers.AlphaDropout if params.activation == "selu" else layers.Dropout
kernel_init = (
"lecun_normal" if params.activation == "selu" else params.kernel_init
)
kernel_reg = (
regularizers.l2(params.reg_l2_rate)
if params.reg_l2_rate is not None
else None
)
self.input_dropout_maccs_fp = dropout(
rate=params.input_dropout_rate,
seed=params.input_dropout_seed,
name="input_dropout_maccs_fp",
)
self.input_maccs_fp = layers.Dense(
256,
activation=params.activation,
kernel_initializer=kernel_init,
kernel_regularizer=kernel_reg,
name="dense_maccs_fp",
)
self.input_dropout_rdkit_fp = dropout(
rate=params.input_dropout_rate,
seed=params.input_dropout_seed,
name="input_dropout_rdkit_fp",
)
self.input_rdkit_fp = layers.Dense(
2048,
activation=params.activation,
kernel_initializer=kernel_init,
kernel_regularizer=kernel_reg,
name="dense_rdkit_fp",
)
self.input_dropout_pubchem_fp = dropout(
rate=params.input_dropout_rate,
seed=params.input_dropout_seed,
name="input_dropout_pubchem_fp",
)
self.input_pubchem_fp = layers.Dense(
1024,
activation=params.activation,
kernel_initializer=kernel_init,
kernel_regularizer=kernel_reg,
name="dense_pubchem_fp",
)
self.input_shed = layers.Dense(
8,
activation=params.activation,
kernel_initializer=kernel_init,
kernel_regularizer=kernel_reg,
name="dense_shed",
)
self.input_dropout_cats2d = dropout(
rate=params.input_dropout_rate,
seed=params.input_dropout_seed,
name="input_dropout_cats2d",
)
self.input_cats2d = layers.Dense(
32,
activation=params.activation,
kernel_initializer=kernel_init,
kernel_regularizer=kernel_reg,
name="dense_cats2d",
)
def call(self, features, training=False):
x1 = self.input_dropout_maccs_fp(features["maccs_fp"])
x1 = self.input_maccs_fp(x1)
x2 = self.input_dropout_rdkit_fp(features["rdkit_fp"])
x2 = self.input_rdkit_fp(x2)
x3 = self.input_dropout_pubchem_fp(features["pubchem_fp"])
x3 = self.input_pubchem_fp(x3)
x4 = self.input_shed(features["shed"])
x5 = self.input_dropout_cats2d(features["cats2d"])
x5 = self.input_cats2d(x5)
x = layers.concatenate([x1, x2, x3, x4, x5], axis=1)
return x
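# Minimal smoke test (a sketch under assumed fingerprint sizes; the real
# feature dimensions come from the TPP preprocessing pipeline and may differ).
if __name__ == "__main__":
    from types import SimpleNamespace
    import tensorflow as tf

    params = SimpleNamespace(activation="relu", kernel_init="glorot_uniform",
                             reg_l2_rate=None, input_dropout_rate=0.2,
                             input_dropout_seed=42)
    model = SemiSparseInput(params)
    features = {"maccs_fp": tf.random.uniform((4, 167)),
                "rdkit_fp": tf.random.uniform((4, 2048)),
                "pubchem_fp": tf.random.uniform((4, 881)),
                "shed": tf.random.uniform((4, 60)),
                "cats2d": tf.random.uniform((4, 210))}
    print(model(features).shape)  # (4, 3368): 256 + 2048 + 1024 + 8 + 32 columns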
| 31.515464 | 88 | 0.59928 | 330 | 3,057 | 5.212121 | 0.187879 | 0.139535 | 0.074419 | 0.093023 | 0.543023 | 0.433721 | 0.433721 | 0.433721 | 0.433721 | 0.433721 | 0 | 0.020564 | 0.315996 | 3,057 | 96 | 89 | 31.84375 | 0.802009 | 0.006869 | 0 | 0.2875 | 0 | 0 | 0.069216 | 0.022413 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025 | false | 0 | 0.0125 | 0 | 0.0625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
119876ff369ecd32448c59ea7ad56ae2b54cfef2 | 4,628 | py | Python
AT/neo4j_functions.py | seakers/daphne-brain | 1d703d468cd503a21395f986dd72e67b6e556451 | ["MIT"] | null | null | null
AT/neo4j_functions.py | seakers/daphne-brain | 1d703d468cd503a21395f986dd72e67b6e556451 | ["MIT"] | null | null | null
AT/neo4j_functions.py | seakers/daphne-brain | 1d703d468cd503a21395f986dd72e67b6e556451 | ["MIT"] | null | null | null |
# Testing for neo4j query functions
from neo4j import GraphDatabase, basic_auth
# setup neo4j database connection
driver = GraphDatabase.driver("bolt://13.58.54.49:7687", auth=basic_auth("neo4j", "goSEAKers!"))
session = driver.session()
# Function that can take the intersection of multiple symptom queries
# Fabricate detection skill output (list of dictionaries)
symptom1, symptom2, symptom3 = {}, {}, {}
symptom1['measurement'], symptom1['relationship'] = 'ppO2', 'Exceeds_LWL'
symptom2['measurement'], symptom2['relationship'] = 'ppCO2', 'Exceeds_UWL'
symptom3['measurement'], symptom3['relationship'] = 'Water Level', 'Exceeds_UWL'
symptoms = [symptom1, symptom2, symptom3]
def diagnose_symptoms_neo4j(symptoms, session):
# build the query based on the symptoms list
query = ''
for id, symp in enumerate(symptoms):
query = query + 'MATCH (m' + str(id) + ':Measurement)-[r' + str(id) + ':' + symp['relationship'] + ']->(g:Anomaly) '
query = query + 'WHERE '
for id, symp in enumerate(symptoms):
if ((id + 1) < len(symptoms)):
query = query + 'm' + str(id) + '.Name=\'' + symp['measurement'] + '\' and '
else:
query = query + 'm' + str(id) + '.Name=\'' + symp['measurement'] + '\' RETURN DISTINCT g.Title'
print(query)
# query the database
result = session.run(query)
diagnosis = [node[0] for node in result]
return diagnosis
print(diagnose_symptoms_neo4j(symptoms, session))
# Function that can take an anomaly or list of anomalies (names) and query the related procedures
def get_related_procedures(anomaly, session):
query = ''
if type(anomaly) is list:
for id, anom in enumerate(anomaly):
query = query + 'MATCH (a' + str(id) + ':Anomaly)-[r' + str(id) + ':Solution]->(p:Procedure) '
query = query + 'WHERE '
for id, anom in enumerate(anomaly):
if ((id + 1) < len(anomaly)):
query = query + 'a' + str(id) + '.Title=\'' + anom + '\' and '
else:
query = query + 'a' + str(id) + '.Title=\'' + anom + '\' RETURN DISTINCT p.Title'
else:
query = query + 'MATCH (a:Anomaly)-[r:Solution]->(p:Procedure) WHERE a.Title=\'' + str(anomaly) + '\' RETURN DISTINCT p.Title'
print(query)
result = session.run(query)
procedures = [node[0] for node in result]
if not procedures:
return None
else:
return procedures
print(get_related_procedures('CDRA Failure', session))
print(get_related_procedures(['CDRA Failure', 'Excess CO2 in Cabin'], session))
# Function that can take a specific procedure and return all steps, substeps, and subsubsteps as an ordered list
def get_procedure_steps(procedure, session, detail=3):
# detail denotes the level of steps to return. (1->steps, 2->steps&substeps, 3->steps&substeps&subsubsteps)
if (detail == 1):
#Return only highest level steps
query1 = 'MATCH(p:Procedure)-[r:Has]->(s) WHERE p.Title=\'' + procedure + '\' and (s:Step) RETURN s.Title ORDER BY s.Step, s.SubStep, s.SubSubStep, s.Note'
query2 = 'MATCH(p:Procedure)-[r:Has]->(s) WHERE p.Title=\'' + procedure + '\' and (s:Step) RETURN s.Action ORDER BY s.Step, s.SubStep, s.SubSubStep, s.Note'
elif (detail == 2):
#Return Steps and substeps
query1 = 'MATCH(p:Procedure)-[r:Has]->(s) WHERE p.Title=\'' + procedure + '\' and (s:Step OR s:SubStep) RETURN s.Title ORDER BY s.Step, s.SubStep, s.SubSubStep, s.Note'
query2 = 'MATCH(p:Procedure)-[r:Has]->(s) WHERE p.Title=\'' + procedure + '\' and (s:Step OR s:SubStep) RETURN s.Action ORDER BY s.Step, s.SubStep, s.SubSubStep, s.Note'
else:
#Return all steps, substeps, and subsubsteps
query1 = 'MATCH(p:Procedure)-[r:Has]->(s) WHERE p.Title=\'' + procedure + '\' RETURN s.Title ORDER BY s.Step, s.SubStep, s.SubSubStep, s.Note'
query2 = 'MATCH(p:Procedure)-[r:Has]->(s) WHERE p.Title=\'' + procedure + '\' RETURN s.Action ORDER BY s.Step, s.SubStep, s.SubSubStep, s.Note'
steps = []
result1 = session.run(query1)
step_titles = [node[0] for node in result1]
result2 = session.run(query2)
step_actions = [node[0] for node in result2]
for id, step in enumerate(step_titles):
steps.append(step_titles[id] + ' - ' + step_actions[id])
if not steps:
return None
else:
return steps
print(get_procedure_steps('Zeolite Filter Swapout', session, 3))
'''
# Function that returns the equipment required for a procedure or list of procedures
def get_procedure_equipment(procedure, session):
return equipment
'''
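# Hedged alternative (not in the original file): the same diagnosis query built
# with Cypher parameters for the measurement names, which avoids the manual
# quote escaping above. Relationship types cannot be parameterized in Cypher,
# so they are still interpolated. Assumes the same Measurement/Anomaly schema.
def diagnose_symptoms_parameterized(symptoms, session):
    match = ' '.join('MATCH (m%d:Measurement)-[:%s]->(g:Anomaly)'
                     % (i, s['relationship']) for i, s in enumerate(symptoms))
    where = ' AND '.join('m%d.Name = $name%d' % (i, i)
                         for i in range(len(symptoms)))
    params = {'name%d' % i: s['measurement'] for i, s in enumerate(symptoms)}
    result = session.run('%s WHERE %s RETURN DISTINCT g.Title' % (match, where), params)
    return [record[0] for record in result]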
| 46.747475 | 177 | 0.642178 | 617 | 4,628 | 4.774716 | 0.226904 | 0.033944 | 0.03055 | 0.032587 | 0.418873 | 0.350645 | 0.250849 | 0.233876 | 0.210115 | 0.210115 | 0 | 0.015808 | 0.207217 | 4,628 | 98 | 178 | 47.22449 | 0.787135 | 0.143258 | 0 | 0.298507 | 0 | 0.089552 | 0.373087 | 0.072032 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044776 | false | 0 | 0.014925 | 0 | 0.134328 | 0.089552 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
119a0cbab86f26fb6ab15f22092ddf49f71f6f94 | 5,486 | py | Python
build.py | refaim/wots | dad9918c603293982a598fb5d6c73ade1a6080e1 | ["MIT"] | 2 | 2018-07-14T19:45:38.000Z | 2019-04-21T07:17:20.000Z
build.py | refaim/wots | dad9918c603293982a598fb5d6c73ade1a6080e1 | ["MIT"] | 155 | 2018-07-07T00:33:31.000Z | 2021-08-16T17:55:05.000Z
build.py | refaim/wots | dad9918c603293982a598fb5d6c73ade1a6080e1 | ["MIT"] | null | null | null |
import datetime
import math
import os
import sys
import PyQt5
import dotenv
from PyInstaller.archive.pyz_crypto import PyiBlockCipher
from PyInstaller.building.api import PYZ, EXE, COLLECT
from PyInstaller.building.build_main import Analysis
from app import version
from app.core.utils import OsUtils, PathUtils
APP_NAME = 'wizard'
PROJECT_ROOT = os.path.dirname(os.path.realpath(__file__))
BUILD_DIRECTORY = os.path.join(PROJECT_ROOT, 'build')
DISTR_DIRECTORY = os.path.join(PROJECT_ROOT, 'dist', APP_NAME)
RESOURCES_DIRECTORY = os.path.join(PROJECT_ROOT, 'res')
BINARY_RESOURCE_EXTENSIONS = {'.png'}
dotenv.load_dotenv(os.path.join(PROJECT_ROOT, '.env'))
extra_path = []
app_version_file = None
app_version_ints = None
if OsUtils.is_windows():
extra_path.append(os.path.join(os.path.dirname(PyQt5.__file__), 'Qt', 'bin'))
extra_path.append(os.path.dirname(sys.executable))
if OsUtils.is_win10():
for program_files_var in ['ProgramFiles', 'ProgramFiles(x86)']:
for arch in ['x86', 'x64']:
dll_path = os.path.join(os.getenv(program_files_var), 'Windows Kits\\10\\Redist\\ucrt\\DLLs', arch)
if os.path.isdir(dll_path):
extra_path.append(dll_path)
app_version = version.VERSION
app_version_ints = [int(x) for x in app_version.split('.')]
while len(app_version_ints) < 4:
app_version_ints.append(0)
app_version_file = os.path.join(BUILD_DIRECTORY, 'exe_version.txt')
with open(os.path.join(PROJECT_ROOT, 'exe_version.template.txt')) as version_template_fobj:
with open(app_version_file, 'w') as version_fobj:
version_fobj.write(version_template_fobj.read().format(
version_string=str(app_version),
version_tuple=tuple(app_version_ints),
current_year=datetime.datetime.today().year))
txt_resources = []
if os.path.exists('.env'):
txt_resources.append(('.env', '.'))
bin_resources = []
for filename in os.listdir(RESOURCES_DIRECTORY):
target = txt_resources
if os.path.splitext(filename)[1] in BINARY_RESOURCE_EXTENSIONS:
target = bin_resources
target.append((os.path.join(RESOURCES_DIRECTORY, filename), os.path.relpath(RESOURCES_DIRECTORY, PROJECT_ROOT)))
block_cipher = None
cipher_key = os.getenv('PYINSTALLER_CIPHER_KEY')
if cipher_key:
block_cipher = PyiBlockCipher(key=cipher_key)
a = Analysis([os.path.join(PROJECT_ROOT, 'app', 'wizard.py')],
pathex=extra_path, binaries=bin_resources, datas=txt_resources, hiddenimports=[], hookspath=[],
runtime_hooks=[], excludes=[], win_no_prefer_redirects=True, win_private_assemblies=True,
cipher=block_cipher)
pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher)
exe = EXE(pyz, a.scripts, exclude_binaries=True, name=APP_NAME, debug=False, strip=False, upx=False, console=False, version=app_version_file)
COLLECT(exe, a.binaries, a.zipfiles, a.datas, strip=False, upx=False, name=APP_NAME)
if OsUtils.is_windows():
nsis_license = os.path.join(BUILD_DIRECTORY, 'license.txt')
with open(os.path.join(PROJECT_ROOT, 'LICENSE')) as src_license_fobj:
with open(nsis_license, 'w') as dst_license_fobj:
dst_license_fobj.write(src_license_fobj.read().replace('\n', '\r\n'))
with open(os.path.join(PROJECT_ROOT, 'setup.template.nsi')) as nsis_template_fobj:
config = nsis_template_fobj.read()
distr_directories = []
distr_files = []
for root, dirs, files in os.walk(DISTR_DIRECTORY):
for dir_name in dirs:
distr_directories.append(os.path.join(root, dir_name))
for file_name in files:
distr_files.append(os.path.join(root, file_name))
def make_inst_path(path: str) -> str:
return PathUtils.quote(os.path.join('$INSTDIR', os.path.relpath(path, DISTR_DIRECTORY)))
def add_command(commands: list, command: str) -> None:
commands.append((' ' * 4) + command)
indent = ' ' * 4
install_commands = []
for path in distr_directories:
add_command(install_commands, 'CreateDirectory {}'.format(make_inst_path(path)))
for path in distr_files:
add_command(install_commands, 'File {} {}'.format(PathUtils.quote('/oname={}'.format(os.path.relpath(path, DISTR_DIRECTORY))), PathUtils.quote(path)))
uninstall_commands = []
for path in distr_files:
add_command(uninstall_commands, 'Delete {}'.format(make_inst_path(path)))
for path in reversed(distr_directories):
add_command(uninstall_commands, 'RMDir {}'.format(make_inst_path(path)))
arch = 'x64' if OsUtils.is_x64() else 'x86'
NSIS_VARS = {
'%license_file%': os.path.basename(nsis_license),
'%version_major%': str(app_version_ints[0]),
'%version_minor%': str(app_version_ints[1]),
'%version_build%': str(app_version_ints[2]),
'%install_size_kb%': str(math.ceil(PathUtils.get_folder_size(DISTR_DIRECTORY) / 1024)),
'%program_arch%': arch,
'%exe_name%': APP_NAME,
'%setup_name%': 'WizardOfTheSearch_v{}_Setup_{}'.format(version.VERSION, arch),
'%distr_directory%': DISTR_DIRECTORY,
'%install_commands%': '\r\n'.join(install_commands),
'%uninstall_commands%': '\r\n'.join(uninstall_commands),
}
for k, v in NSIS_VARS.items():
config = config.replace(k, v)
with open(os.path.join(BUILD_DIRECTORY, 'setup.nsi'), 'w') as nsis_fobj:
nsis_fobj.write(config)
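# Illustration (hypothetical template line, not from the original script) of the
# substitution loop above: with app version '1.2.0' and APP_NAME 'wizard',
#   Name "%exe_name% %version_major%.%version_minor%"
# is rewritten to
#   Name "wizard 1.2"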
| 42.527132 | 158 | 0.693584 | 744 | 5,486 | 4.857527 | 0.245968 | 0.046486 | 0.047039 | 0.037631 | 0.18041 | 0.098506 | 0.056447 | 0.034864 | 0 | 0 | 0 | 0.006598 | 0.171163 | 5,486 | 128 | 159 | 42.859375 | 0.788212 | 0 | 0 | 0.037037 | 0 | 0 | 0.093693 | 0.018957 | 0 | 0 | 0 | 0 | 0 | 1 | 0.018519 | false | 0 | 0.111111 | 0.009259 | 0.138889 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
119a349c2ca5822591f4b6677156eec1b27631d0 | 1,939 | py | Python
server/constants.py | chrononyan/ok | 1c83e419dd8d5ef64c1e03a7f8a218e65a9fb7cf | ["Apache-2.0"] | 148 | 2018-07-03T02:08:30.000Z | 2022-03-26T04:03:35.000Z
server/constants.py | chrononyan/ok | 1c83e419dd8d5ef64c1e03a7f8a218e65a9fb7cf | ["Apache-2.0"] | 856 | 2015-01-10T04:27:20.000Z | 2018-06-27T14:43:23.000Z
server/constants.py | chrononyan/ok | 1c83e419dd8d5ef64c1e03a7f8a218e65a9fb7cf | ["Apache-2.0"] | 69 | 2015-01-26T08:06:55.000Z | 2018-06-25T12:46:03.000Z |
"""App constants"""
import os
STUDENT_ROLE = 'student'
GRADER_ROLE = 'grader'
STAFF_ROLE = 'staff'
INSTRUCTOR_ROLE = 'instructor'
LAB_ASSISTANT_ROLE = 'lab assistant'
ROLE_DISPLAY_NAMES = {
STUDENT_ROLE: 'Student',
GRADER_ROLE: 'Reader',
STAFF_ROLE: 'Teaching Assistant',
INSTRUCTOR_ROLE: 'Instructor',
LAB_ASSISTANT_ROLE: 'Lab Assistant',
}
VALID_ROLES = [STUDENT_ROLE, LAB_ASSISTANT_ROLE, GRADER_ROLE, STAFF_ROLE,
INSTRUCTOR_ROLE]
STAFF_ROLES = [GRADER_ROLE, STAFF_ROLE, INSTRUCTOR_ROLE]
SCORE_KINDS = ['composition', 'correctness', 'effort', 'total', 'partner a', 'partner b',
'regrade', 'revision', 'checkpoint 1', 'checkpoint 2',
'private', 'autograder', 'error']
API_PREFIX = '/api'
OAUTH_SCOPES = ['all', 'email']
OAUTH_OUT_OF_BAND_URI = 'urn:ietf:wg:oauth:2.0:oob'
COMMON_LANGUAGES = ['python', 'java', 'c', 'scheme', 'lisp', 'javascript']
COURSE_ENDPOINT_FORMAT = r'^[\w\-]+/[\w\-]+/(fa|sp|su|wi|au|yr)\d\d$'  # raw strings: \w and \d are regex escapes
ASSIGNMENT_ENDPOINT_FORMAT = COURSE_ENDPOINT_FORMAT[:-1] + r'/\w+$'
GRADES_BUCKET = 'ok_grades_bucket'
TIMEZONE = 'America/Los_Angeles'
ISO_DATETIME_FMT = '%Y-%m-%d %H:%M:%S'
APPLICATION_ROOT = os.getenv('APPLICATION_ROOT', '/')
# The default autograder url
# Each course can configure their own autograder url in course.edit view
AUTOGRADER_URL = os.getenv('AUTOGRADER_URL', 'https://autograder.cs61a.org')
SENDGRID_KEY = os.getenv("SENDGRID_KEY")
FORBIDDEN_ROUTE_NAMES = [
'about',
'admin',
'api',
'comments',
'login',
'logout',
'oauth',
'rq',
'testing-login',
]
FORBIDDEN_ASSIGNMENT_NAMES = []
# Service Providers
GOOGLE = "GOOGLE"
MICROSOFT = "MICROSOFT"
# Maximum file size to show in browser, in characters
DIFF_SIZE_LIMIT = 64 * 1024 # 64KB
SOURCE_SIZE_LIMIT = 10 * 1024 * 1024 # 10MB
MAX_UPLOAD_FILE_SIZE = 25 * 1024 * 1024 # 25MB
# Email client format for to field
EMAIL_FORMAT = "{name} <{email}>"
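# Illustrative check (not part of the module): the endpoint formats accept
# identifiers such as these:
# import re
# re.match(COURSE_ENDPOINT_FORMAT, 'cal/cs61a/fa21')             # course
# re.match(ASSIGNMENT_ENDPOINT_FORMAT, 'cal/cs61a/fa21/proj01')  # assignment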
| 28.101449 | 89 | 0.684373 | 248 | 1,939 | 5.096774 | 0.556452 | 0.047468 | 0.050633 | 0.037975 | 0.178797 | 0.134494 | 0.082278 | 0.082278 | 0 | 0 | 0 | 0.024044 | 0.163486 | 1,939 | 68 | 90 | 28.514706 | 0.75524 | 0.118618 | 0 | 0 | 0 | 0.020408 | 0.310731 | 0.038915 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.020408 | 0 | 0.020408 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
119efd61f102f9d7b866310597894dc025bd5e5a | 466 | py | Python
AlgoritimoRandomize.py | falluk/algoritimoDeBuscas | 6dbca79ef60f2820f5e81110bc4104bdc46496b1 | ["MIT"] | 1 | 2021-07-05T13:24:04.000Z | 2021-07-05T13:24:04.000Z
AlgoritimoRandomize.py | falluk/algoritimoDeBuscas | 6dbca79ef60f2820f5e81110bc4104bdc46496b1 | ["MIT"] | null | null | null
AlgoritimoRandomize.py | falluk/algoritimoDeBuscas | 6dbca79ef60f2820f5e81110bc4104bdc46496b1 | ["MIT"] | null | null | null |
# Created to randomize a list of 20,000 employees, returning only 1,000 employees across several distinct roles.
import pandas as pd
import random
base = pd.read_excel("usuarios - energisa.xlsx")  # read_excel accepts neither encoding nor error_bad_lines (those are read_csv options)
sort1 = base.sample(15000)
sort2 = sort1.sample(10000)
sort3 = sort2.sample(7500)
sort4 = sort3.sample(5000)
sort5 = sort4.sample(2500)
sorteado = sort5.sample(1000)
sorteado.to_excel("Lista Randomizada.xlsx")
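# Note (equivalent approach, not in the original): a single sample() call draws
# the same uniform random subset in one step; random_state makes it reproducible.
# sorteado = base.sample(1000, random_state=42)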
| 22.190476 | 118 | 0.776824 | 66 | 466 | 5.424242 | 0.727273 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.114634 | 0.120172 | 466 | 20 | 119 | 23.3 | 0.758537 | 0.251073 | 0 | 0 | 0 | 0 | 0.16092 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
11a158752080d596792054d55693dc41df752af9 | 7,625 | py | Python
app.py | Build-Week-2106FT-AirBnB-3/front-end | 0df6f9814387a36002a1aaa8feff1f17fcb30b78 | ["CC0-1.0"] | null | null | null
app.py | Build-Week-2106FT-AirBnB-3/front-end | 0df6f9814387a36002a1aaa8feff1f17fcb30b78 | ["CC0-1.0"] | 1 | 2021-06-24T00:17:40.000Z | 2021-06-24T00:18:42.000Z
app.py | Build-Week-2106FT-AirBnB-3/pricing | 0df6f9814387a36002a1aaa8feff1f17fcb30b78 | ["CC0-1.0"] | null | null | null |
# -*- coding: utf-8 -*-
# https://towardsdatascience.com/build-a-machine-learning-simulation-tool-with-dash-b3f6fd512ad6
# We start with the import of standard ML librairies
import pandas as pd
import numpy as np
from sklearn.datasets import make_regression
from sklearn.ensemble import RandomForestRegressor
# We add all Plotly and Dash necessary librairies
import plotly.graph_objects as go
import pickle
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_daq as daq
from dash.dependencies import Input, Output
from sklearn.pipeline import make_pipeline
from sklearn.impute import SimpleImputer
from category_encoders import OneHotEncoder
from sklearn.model_selection import train_test_split
df = pd.read_csv('clean_data.csv')
# importing our model
# target='price'
# X = df.drop(columns=target)
# y = df[target]
# # Let's split into a test and
# X_train, X_t, y_train, y_t = train_test_split(X,y, test_size=.2, random_state=7)
# # Let's split our test data into validation and test
# X_val, X_test, y_val, y_test = train_test_split(X_t,y_t, test_size=.2, random_state=7)
# model = make_pipeline(OneHotEncoder(use_cat_names=True),
# SimpleImputer(),
# RandomForestRegressor(random_state=70))
# model.fit(X_train,y_train)
infile = open('random_forest_model', 'rb')
model = pickle.load(infile)
infile.close()
# # # We create a DataFrame to store the features' importance and their corresponding label
f_impor = model.named_steps['randomforestregressor'].feature_importances_
col_names = model.named_steps['onehotencoder'].get_feature_names()
df_feature_importances = pd.DataFrame(f_impor, columns=["Importance"], index=col_names)
df_feature_importances = df_feature_importances.sort_values("Importance", ascending=False).head(10)
# Create the bar chart and limit it to the top 10 features
# # We create a Features Importance Bar Chart
fig_features_importance = go.Figure()
fig_features_importance.add_trace(go.Bar(x=df_feature_importances.index,
y=df_feature_importances["Importance"],
marker_color='rgb(171, 226, 251)')
)
fig_features_importance.update_layout(title_text='<b>Features Importance of the model<b>', title_x=0.5)
# The command below can be activated in a standard notebook to display the chart
# fig_features_importance.show()
# We record the name, min, mean and max of the three most important features
dropdown_1_label = df_feature_importances.index[0]
dropdown_1_min = round(df[dropdown_1_label].min(),5)
dropdown_1_mean = round(df[dropdown_1_label].mean(),5)
dropdown_1_max = round(df[dropdown_1_label].max(),5)
dropdown_2_label = df_feature_importances.index[1]
dropdown_2_min = round(df[dropdown_2_label].min(),5)
dropdown_2_mean = round(df[dropdown_2_label].mean(),5)
dropdown_2_max = round(df[dropdown_2_label].max(),5)
dropdown_3_label = df_feature_importances.index[5]
dropdown_3_min = round(df[dropdown_3_label].min(),5)
dropdown_3_mean = round(df[dropdown_3_label].mean(),5)
dropdown_3_max = round(df[dropdown_3_label].max(),5)
###############################################################################
app = dash.Dash()
server = app.server
# The page structure will be:
# Features Importance Chart
# <H4> Feature #1 name
# Slider to update Feature #1 value
# <H4> Feature #2 name
# Slider to update Feature #2 value
# <H4> Feature #3 name
# Slider to update Feature #3 value
# <H2> Updated Prediction
# Callback function with the sliders' values as inputs and the prediction as output
# We apply basic HTML formatting to the layout
app.layout = html.Div(style={'textAlign': 'center', 'width': '800px', 'font-family': 'Verdana'},
children=[
# The same logic is applied to the following names / sliders
html.H1(children="Simulation Tool"),
#Dash Graph Component calls the fig_features_importance parameters
dcc.Graph(figure=fig_features_importance),
# We display the most important feature's name
html.H4(children=dropdown_1_label),
# The Dash Slider is built according to Feature #1 ranges
dcc.Slider(
id='X1_slider',
min=dropdown_1_min,
max=dropdown_1_max,
step=0.029311,
value=dropdown_1_mean,
marks={i: '{}°'.format(i) for i in np.arange(dropdown_1_min, dropdown_1_max)}
),
# The same logic is applied to the following names / sliders
html.H4(children=dropdown_2_label),
dcc.Slider(
id='X2_slider',
min=dropdown_2_min,
max=dropdown_2_max,
step=0.080384,
value=dropdown_2_mean,
marks={i: '{}°'.format(i) for i in np.arange(dropdown_2_min, dropdown_2_max)}
),
html.H4(children=dropdown_3_label),
dcc.Slider(
id='X3_slider',
min=dropdown_3_min,
max=dropdown_3_max,
step=0.6,
value=dropdown_3_mean,
        marks={i: '{} people'.format(i) for i in np.arange(dropdown_3_min, dropdown_3_max)},  # slider #3 uses its own range (was copied from slider #2)
),
# The prediction result will be displayed and updated here
html.H2(id="prediction_result")
])
# The callback function will provide one "Output" in the form of a string (=children)
@app.callback(Output(component_id="prediction_result",component_property="children"),
# The values corresponding to the three sliders are obtained by calling their id and value property
[Input("X1_slider","value"), Input("X2_slider","value"), Input("X3_slider","value")])
# The input variable are set in the same order as the callback Inputs
def update_prediction(X1, X2, X3):
    # We create a NumPy array in the shape the model expects. The feature list
    # below is left over from the tutorial template linked at the top of this file;
    # except for X1, X2 and X3, all other parameters are fixed values or column means.
input_X = np.array([258668827,
1,
1,
1,
X1,
X2,
X3,
df["bedrooms"].mean(),
df['beds'].mean(),
df['number_of_reviews'].mean(),
df["review_scores_rating"].mean(),
1,
1]).reshape(1, -1)
# Prediction is calculated based on the input_X array
prediction = model.named_steps['randomforestregressor'].predict(input_X)
    # And returned to the Output of the callback function
return "Prediction in Yen: {}".format(round(prediction[0]))
# return 'this is working'
if __name__ == "__main__":
app.run_server()
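# Dash serves on http://127.0.0.1:8050 by default (port assumed from Dash's
# standard default); run `python app.py` and open that URL in a browser.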
| 39.921466 | 114 | 0.6 | 947 | 7,625 | 4.6283 | 0.291447 | 0.030801 | 0.030801 | 0.022815 | 0.145791 | 0.065024 | 0.055441 | 0.055441 | 0.055441 | 0.055441 | 0 | 0.027366 | 0.300328 | 7,625 | 190 | 115 | 40.131579 | 0.793814 | 0.322885 | 0 | 0.104167 | 0 | 0 | 0.087 | 0.008362 | 0 | 0 | 0 | 0 | 0 | 1 | 0.010417 | false | 0 | 0.270833 | 0 | 0.291667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
11a1d2a8f067924755b1bb004f5652117e69edcd | 1,787 | py | Python
balloon_learning_environment/env/gym.py | johannah/balloon-learning-environment | cdb2e582f2b03c41f037bf76142d31611f5e0316 | ["Apache-2.0"] | 64 | 2021-11-09T08:49:02.000Z | 2022-03-30T17:33:54.000Z
balloon_learning_environment/env/gym.py | johannah/balloon-learning-environment | cdb2e582f2b03c41f037bf76142d31611f5e0316 | ["Apache-2.0"] | null | null | null
balloon_learning_environment/env/gym.py | johannah/balloon-learning-environment | cdb2e582f2b03c41f037bf76142d31611f5e0316 | ["Apache-2.0"] | 5 | 2021-11-14T18:56:42.000Z | 2022-03-18T16:22:31.000Z |
# coding=utf-8
# Copyright 2022 The Balloon Learning Environment Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Balloon Learning Environment gym utilities."""
import contextlib
def register_env() -> None:
"""Register the Gym environment."""
# We need to import Gym's registration module inline or else we'll
# get a circular dependency that will result in an error when importing gym
from gym.envs import registration # pylint: disable=g-import-not-at-top
env_id = 'BalloonLearningEnvironment-v0'
env_entry_point = 'balloon_learning_environment.env.balloon_env:BalloonEnv'
# We guard registration by checking if our env is already registered
  # This is necessary because the plugin system will load our module
# which also calls this function. If multiple `register()` calls are
# made this will result in a warning to the user.
registered = env_id in registration.registry.env_specs
if not registered:
with contextlib.ExitStack() as stack:
# This is a workaround for Gym 0.21 which didn't support
# registering into the root namespace with the plugin system.
if hasattr(registration, 'namespace'):
stack.enter_context(registration.namespace(None))
registration.register(id=env_id, entry_point=env_entry_point)
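# Hedged usage sketch (not part of the original module; the import path is
# assumed from this file's location in the package):
# import gym
# from balloon_learning_environment.env.gym import register_env
# register_env()
# env = gym.make('BalloonLearningEnvironment-v0')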
| 43.585366 | 77 | 0.758254 | 262 | 1,787 | 5.118321 | 0.538168 | 0.044743 | 0.058166 | 0.023863 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008796 | 0.172916 | 1,787 | 40 | 78 | 44.675 | 0.898512 | 0.673195 | 0 | 0 | 0 | 0 | 0.168784 | 0.15245 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.181818 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
11a1e1b730dc4e433d8e1358594ee3d9a8526d1b | 15,181 | py | Python
models/multimodal_transformer.py | XiaoJake/MTTR | c383c5b151e3c97aeb45cd2fb4bf08719016498b | ["Apache-2.0"] | 516 | 2021-11-30T03:22:41.000Z | 2022-03-31T19:48:59.000Z
models/multimodal_transformer.py | codwest/MTTR | c383c5b151e3c97aeb45cd2fb4bf08719016498b | ["Apache-2.0"] | 15 | 2021-12-07T02:43:24.000Z | 2022-03-27T15:59:32.000Z
models/multimodal_transformer.py | codwest/MTTR | c383c5b151e3c97aeb45cd2fb4bf08719016498b | ["Apache-2.0"] | 57 | 2021-11-30T08:49:51.000Z | 2022-03-25T19:41:08.000Z |
"""
MTTR Multimodal Transformer class.
Modified from DETR https://github.com/facebookresearch/detr
"""
import copy
import os
from typing import Optional
import torch
import torch.nn.functional as F
from torch import nn, Tensor
from einops import rearrange, repeat
from transformers import RobertaModel, RobertaTokenizerFast
from models.position_encoding_2d import PositionEmbeddingSine2D
os.environ["TOKENIZERS_PARALLELISM"] = "false" # this disables a huggingface tokenizer warning (printed every epoch)
class MultimodalTransformer(nn.Module):
def __init__(self, num_encoder_layers=3, num_decoder_layers=3,
text_encoder_type="roberta-base", freeze_text_encoder=True, **kwargs):
super().__init__()
self.d_model = kwargs['d_model']
encoder_layer = TransformerEncoderLayer(**kwargs)
self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers)
decoder_layer = TransformerDecoderLayer(**kwargs)
self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, norm=nn.LayerNorm(self.d_model),
return_intermediate=True)
self.pos_encoder_2d = PositionEmbeddingSine2D()
self._reset_parameters()
self.text_encoder = RobertaModel.from_pretrained(text_encoder_type)
self.text_encoder.pooler = None # this pooler is never used, this is a hack to avoid DDP problems...
self.tokenizer = RobertaTokenizerFast.from_pretrained(text_encoder_type)
self.freeze_text_encoder = freeze_text_encoder
if freeze_text_encoder:
for p in self.text_encoder.parameters():
p.requires_grad_(False)
self.txt_proj = FeatureResizer(
input_feat_size=self.text_encoder.config.hidden_size,
output_feat_size=self.d_model,
dropout=kwargs['dropout'],
)
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def forward(self, vid_embeds, vid_pad_mask, text_queries, obj_queries):
device = vid_embeds.device
t, b, _, h, w = vid_embeds.shape
txt_memory, txt_pad_mask = self.forward_text(text_queries, device)
# add temporal dim to txt memory & padding mask:
txt_memory = repeat(txt_memory, 's b c -> s (t b) c', t=t)
txt_pad_mask = repeat(txt_pad_mask, 'b s -> (t b) s', t=t)
vid_embeds = rearrange(vid_embeds, 't b c h w -> (h w) (t b) c')
# Concat the image & text embeddings on the sequence dimension
encoder_src_seq = torch.cat((vid_embeds, txt_memory), dim=0)
seq_mask = torch.cat((rearrange(vid_pad_mask, 't b h w -> (t b) (h w)'), txt_pad_mask), dim=1)
# vid_pos_embed is: [T*B, H, W, d_model]
vid_pos_embed = self.pos_encoder_2d(rearrange(vid_pad_mask, 't b h w -> (t b) h w'), self.d_model)
# use zeros in place of pos embeds for the text sequence:
pos_embed = torch.cat((rearrange(vid_pos_embed, 't_b h w c -> (h w) t_b c'), torch.zeros_like(txt_memory)), dim=0)
memory = self.encoder(encoder_src_seq, src_key_padding_mask=seq_mask, pos=pos_embed) # [S, T*B, C]
vid_memory = rearrange(memory[:h*w, :, :], '(h w) (t b) c -> t b c h w', h=h, w=w, t=t, b=b)
txt_memory = memory[h*w:, :, :]
txt_memory = rearrange(txt_memory, 's t_b c -> t_b s c')
txt_memory = [t_mem[~pad_mask] for t_mem, pad_mask in zip(txt_memory, txt_pad_mask)] # remove padding
# add T*B dims to query embeds (was: [N, C], where N is the number of object queries):
obj_queries = repeat(obj_queries, 'n c -> n (t b) c', t=t, b=b)
tgt = torch.zeros_like(obj_queries) # [N, T*B, C]
# hs is [L, N, T*B, C] where L is number of layers in the decoder
hs = self.decoder(tgt, memory, memory_key_padding_mask=seq_mask, pos=pos_embed, query_pos=obj_queries)
hs = rearrange(hs, 'l n (t b) c -> l t b n c', t=t, b=b)
return hs, vid_memory, txt_memory
def forward_text(self, text_queries, device):
tokenized_queries = self.tokenizer.batch_encode_plus(text_queries, padding='longest', return_tensors='pt')
tokenized_queries = tokenized_queries.to(device)
with torch.inference_mode(mode=self.freeze_text_encoder):
encoded_text = self.text_encoder(**tokenized_queries)
# Transpose memory because pytorch's attention expects sequence first
txt_memory = rearrange(encoded_text.last_hidden_state, 'b s c -> s b c')
txt_memory = self.txt_proj(txt_memory) # change text embeddings dim to model dim
# Invert attention mask that we get from huggingface because its the opposite in pytorch transformer
txt_pad_mask = tokenized_queries.attention_mask.ne(1).bool() # [B, S]
return txt_memory, txt_pad_mask
def num_parameters(self):
return sum(p.numel() for p in self.parameters() if p.requires_grad)
class TransformerEncoder(nn.Module):
def __init__(self, encoder_layer, num_layers, norm=None):
super().__init__()
self.layers = _get_clones(encoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
def forward(self, src,
mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
output = src
for layer in self.layers:
output = layer(output, src_mask=mask, src_key_padding_mask=src_key_padding_mask, pos=pos)
if self.norm is not None:
output = self.norm(output)
return output
class TransformerDecoder(nn.Module):
def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):
super().__init__()
self.layers = _get_clones(decoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
self.return_intermediate = return_intermediate
def forward(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
output = tgt
intermediate = []
for layer in self.layers:
output = layer(output, memory, tgt_mask=tgt_mask,
memory_mask=memory_mask,
tgt_key_padding_mask=tgt_key_padding_mask,
memory_key_padding_mask=memory_key_padding_mask,
pos=pos, query_pos=query_pos)
if self.return_intermediate:
intermediate.append(self.norm(output))
if self.norm is not None:
output = self.norm(output)
if self.return_intermediate:
intermediate.pop()
intermediate.append(output)
if self.return_intermediate:
return torch.stack(intermediate)
return output.unsqueeze(0)
class TransformerEncoderLayer(nn.Module):
def __init__(self, d_model, nheads, dim_feedforward=2048, dropout=0.1,
activation="relu", normalize_before=False, **kwargs):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nheads, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self,
src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
q = k = self.with_pos_embed(src, pos)
src2 = self.self_attn(q, k, value=src, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)[0]
src = src + self.dropout1(src2)
src = self.norm1(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src
def forward_pre(self, src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
src2 = self.norm1(src)
q = k = self.with_pos_embed(src2, pos)
src2 = self.self_attn(q, k, value=src2, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)[0]
src = src + self.dropout1(src2)
src2 = self.norm2(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))
src = src + self.dropout2(src2)
return src
def forward(self, src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
if self.normalize_before:
return self.forward_pre(src, src_mask, src_key_padding_mask, pos)
return self.forward_post(src, src_mask, src_key_padding_mask, pos)
class TransformerDecoderLayer(nn.Module):
def __init__(self, d_model, nheads, dim_feedforward=2048, dropout=0.1,
activation="relu", normalize_before=False, **kwargs):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nheads, dropout=dropout)
self.multihead_attn = nn.MultiheadAttention(d_model, nheads, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.norm3 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
q = k = self.with_pos_embed(tgt, query_pos)
tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)[0]
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout3(tgt2)
tgt = self.norm3(tgt)
return tgt
def forward_pre(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
tgt2 = self.norm1(tgt)
q = k = self.with_pos_embed(tgt2, query_pos)
tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout1(tgt2)
tgt2 = self.norm2(tgt)
tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)[0]
tgt = tgt + self.dropout2(tgt2)
tgt2 = self.norm3(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
tgt = tgt + self.dropout3(tgt2)
return tgt
def forward(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
if self.normalize_before:
return self.forward_pre(tgt, memory, tgt_mask, memory_mask,
tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
return self.forward_post(tgt, memory, tgt_mask, memory_mask,
tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
class FeatureResizer(nn.Module):
"""
This class takes as input a set of embeddings of dimension C1 and outputs a set of
embedding of dimension C2, after a linear transformation, dropout and normalization (LN).
"""
def __init__(self, input_feat_size, output_feat_size, dropout, do_ln=True):
super().__init__()
self.do_ln = do_ln
# Object feature encoding
self.fc = nn.Linear(input_feat_size, output_feat_size, bias=True)
self.layer_norm = nn.LayerNorm(output_feat_size, eps=1e-12)
self.dropout = nn.Dropout(dropout)
def forward(self, encoder_features):
x = self.fc(encoder_features)
if self.do_ln:
x = self.layer_norm(x)
output = self.dropout(x)
return output
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
raise RuntimeError(F"activation should be relu/gelu, not {activation}.")
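# Minimal smoke test (a sketch, not in the original file): one encoder layer
# should preserve the [sequence, batch, d_model] shape of its input.
if __name__ == '__main__':
    layer = TransformerEncoderLayer(d_model=64, nheads=8)
    src = torch.rand(10, 2, 64)
    assert layer(src).shape == src.shape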
| 44.259475 | 122 | 0.625585 | 1,960 | 15,181 | 4.60051 | 0.131633 | 0.047577 | 0.059 | 0.058556 | 0.545082 | 0.488189 | 0.465121 | 0.448708 | 0.418432 | 0.407009 | 0 | 0.010033 | 0.277781 | 15,181 | 342 | 123 | 44.388889 | 0.812386 | 0.075687 | 0 | 0.43985 | 0 | 0 | 0.025173 | 0.001573 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086466 | false | 0 | 0.033835 | 0.015038 | 0.221805 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
11a22b195401a97025bc1265b213cb97ff210032 | 403 | py | Python
docker_sdk_api/shared/helpers/get_model_zip.py | BMW-InnovationLab/BMW-Semantic-Segmentation-Training-GUI | 902f35a7e367e635898f687b16a830db892fbaa5 | ["Apache-2.0"] | 20 | 2021-07-13T13:08:57.000Z | 2022-03-29T09:38:00.000Z
docker_sdk_api/shared/helpers/get_model_zip.py | BMW-InnovationLab/BMW-Semantic-Segmentation-Training-GUI | 902f35a7e367e635898f687b16a830db892fbaa5 | ["Apache-2.0"] | null | null | null
docker_sdk_api/shared/helpers/get_model_zip.py | BMW-InnovationLab/BMW-Semantic-Segmentation-Training-GUI | 902f35a7e367e635898f687b16a830db892fbaa5 | ["Apache-2.0"] | 2 | 2021-07-12T08:42:53.000Z | 2022-03-04T18:41:25.000Z |
import os
from typing import Dict
def get_downloadable_zip(folder_path: str) -> Dict[str, str]:
servable_models: Dict[str, str] = {}
    # os.walk already recurses, so check each visited directory's files directly;
    # the original nested listdir pass re-scanned subdirectories and missed
    # zip files sitting in folder_path itself.
    for root, dirs, files in os.walk(folder_path):
        for f in files:
            if f.endswith(".zip"):
                servable_models[f] = os.path.basename(root)
    return servable_models
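# Example (illustrative): with <folder_path>/unet/model.zip on disk, the walk
# above yields {'model.zip': 'unet'}, mapping each archive to its folder name.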
| 28.785714 | 63 | 0.622829 | 55 | 403 | 4.436364 | 0.490909 | 0.172131 | 0.081967 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.272953 | 403 | 13 | 64 | 31 | 0.832765 | 0 | 0 | 0 | 0 | 0 | 0.009926 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.2 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
11a302e0300bce122a82770aa16b84ca6e8d73b5 | 6,065 | py | Python
groups/views.py | 3crabs/class-book | f5de12be816aa9be889d8413007be8eb4abdf45f | ["WTFPL"] | 1 | 2020-11-19T14:49:41.000Z | 2020-11-19T14:49:41.000Z
groups/views.py | 3crabs/class-book | f5de12be816aa9be889d8413007be8eb4abdf45f | ["WTFPL"] | null | null | null
groups/views.py | 3crabs/class-book | f5de12be816aa9be889d8413007be8eb4abdf45f | ["WTFPL"] | null | null | null |
from django.core.mail import EmailMessage
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render

from accounting.models import Attendance, Result
from accounting.templatetags import my_tags
from class_book import settings
from groups.models import Group, Student
from subjects.models import Subject
import xlwt


def groups(request):
    if request.POST:
        item = Group(name=request.POST['name'])
        item.save()
    object_list = Group.objects.all().order_by("name")
    return render(request, 'groups/index.html', locals())


def group(request, pk):
    group = Group.objects.get(id=pk)
    if 'delete' in request.POST:
        group.delete()
        object_list = Group.objects.all().order_by("name")
        return render(request, 'groups/index.html', locals())
    group = Group.objects.get(id=pk)
    subject_list = Subject.objects.all().order_by("name")
    return render(request, 'groups/info.html', locals())


def group_students(request, pk):
    if request.POST:
        item = Student(
            name=request.POST['name'],
            email=request.POST['email'],
            group_id=pk,
        )
        item.save()
        group = Group.objects.get(id=pk)
        subjects = group.subjects.all()
        for subject in subjects:
            for lesson in subject.lesson_set.all():
                attendance = Attendance()
                attendance.student = item
                attendance.lesson = lesson
                attendance.save()
            for task in subject.task_set.all():
                result = Result()
                result.student = item
                result.task = task
                result.save()
    group = Group.objects.get(id=pk)
    return render(request, 'groups/info.html', locals())


def group_student(request, pk, id):
    student = Student.objects.get(id=id)
    if 'delete' in request.POST:
        student.delete()
    group = Group.objects.get(id=pk)
    subject_list = Subject.objects.all().order_by("name")
    return render(request, 'groups/info.html', locals())


def group_subjects(request, pk):
    if request.POST:
        group = Group.objects.get(id=pk)
        subject = Subject.objects.get(id=request.POST['subject'])
        group.subjects.add(subject)
        group.save()
        group = Group.objects.get(id=pk)
        for student in group.student_set.all():
            for lesson in subject.lesson_set.all():
                attendance = Attendance()
                attendance.student = student
                attendance.lesson = lesson
                attendance.save()
            for task in subject.task_set.all():
                result = Result()
                result.student = student
                result.task = task
                result.save()
    group = Group.objects.get(id=pk)
    subject_list = Subject.objects.all().order_by("name")
    return render(request, 'groups/info.html', locals())


def group_subject(request, pk, id):
    subject = Subject.objects.get(id=id)
    if 'delete' in request.POST:
        group = Group.objects.get(id=pk)
        group.subjects.remove(subject)
        group.save()
        group = Group.objects.get(id=pk)
        subject_list = Subject.objects.all().order_by("name")
        return render(request, 'groups/info.html', locals())
    else:
        group = Group.objects.get(id=pk)
        itogs = {}
        for student in group.student_set.all():
            itogs[student.id] = student.id + 1
        print(itogs)
        return render(request, 'accouting/index.html', locals())


def create_xls_(group, subject):
    book = xlwt.Workbook(encoding="utf-8")
    sheet = book.add_sheet(group.name)
    sheet.write(0, 0, "Успеваемость группы " + group.name + " по предмету " + subject.name)
    row = 1
    col = 0
    sheet.write(row, col, "Посещаемость")
    row += 1
    sheet.write(row, col, "Студент")
    col += 1
    for lesson in subject.lesson_set.all():
        sheet.write(row, col, lesson.name)
        col += 1
    sheet.write(row, col, "Посещаемость")
    row += 1
    col = 0
    for student in group.student_set.all():
        sheet.write(row, col, student.name)
        col += 1
        for attendance in student.attendance_set.filter(lesson__subject_id=subject.id):
            sheet.write(row, col, attendance.visit)
            col += 1
        sheet.write(row, col, my_tags.lessons(student, subject))
        row += 1
        col = 0
    sheet.write(row, col, "Результаты")
    row += 1
    sheet.write(row, col, "Студент")
    col += 1
    for task in subject.task_set.all():
        sheet.write(row, col, task.name)
        col += 1
    sheet.write(row, col, "Успеваемость")
    row += 1
    col = 0
    for student in group.student_set.all():
        sheet.write(row, col, student.name)
        col += 1
        for result in student.result_set.filter(task__subject_id=subject.id):
            sheet.write(row, col, result.rating)
            col += 1
        sheet.write(row, col, my_tags.tasks(student, subject))
        row += 1
        col = 0
    path = "groups/static/docs/spreadsheet-" + str(group.id) + "-" + str(subject.id) + ".xlsx"
    book.save(path)
    return path


def create_xls(request, pk, id):
    group = Group.objects.get(id=pk)
    subject = group.subjects.get(id=id)
    path = create_xls_(group, subject)
    file = open(path, 'rb')
    response = HttpResponse(file, content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
    response['Content-Disposition'] = 'attachment; filename=table.xlsx'
    return response


def sending(request, pk, id):
    group = Group.objects.get(id=pk)
    students = group.student_set.all()
    emails = [student.email for student in students]
    email = EmailMessage(
        'Результаты',
        'Здравствуй, вот ваша успеваемость',
        settings.EMAIL_HOST_USER,
        emails
    )
    path = create_xls_(group, Subject.objects.get(id=id))
    email.attach_file(path)
    email.send()
    return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
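
The create_xls_ view above advances a (row, col) cursor across an xlwt sheet; a standalone sketch of that pattern, with hypothetical data and no Django models:

import xlwt  # same library the view uses

book = xlwt.Workbook(encoding="utf-8")
sheet = book.add_sheet("demo")

rows = [("Student", "Lesson 1", "Lesson 2"),
        ("Alice", 1, 0),
        ("Bob", 1, 1)]
for row_ix, values in enumerate(rows):
    for col_ix, value in enumerate(values):
        sheet.write(row_ix, col_ix, value)

# Note: xlwt emits the legacy binary .xls format; the view above saves with
# an .xlsx extension, which some spreadsheet apps will warn about.
book.save("demo.xls")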
| 32.433155
| 115
| 0.616983
| 755
| 6,065
| 4.886093
| 0.156291
| 0.024397
| 0.0553
| 0.060721
| 0.580645
| 0.545405
| 0.506912
| 0.446191
| 0.384657
| 0.31987
| 0
| 0.005342
| 0.259192
| 6,065
| 186
| 116
| 32.607527
| 0.815713
| 0
| 0
| 0.51875
| 0
| 0
| 0.082935
| 0.015829
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05625
| false
| 0
| 0.05625
| 0
| 0.18125
| 0.00625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
11a8090bef6d5fb982bc2e421b4aadbc73c27dfc
| 3,861
|
py
|
Python
|
src/tree/tree_builder.py
|
rpSebastian/LeducPoker
|
5bbdf61d885bcb23490410ef871de924c58bbf01
|
[
"MIT"
] | 1
|
2020-05-22T15:45:22.000Z
|
2020-05-22T15:45:22.000Z
|
src/tree/tree_builder.py
|
rpSebastian/LeducPoker
|
5bbdf61d885bcb23490410ef871de924c58bbf01
|
[
"MIT"
] | null | null | null |
src/tree/tree_builder.py
|
rpSebastian/LeducPoker
|
5bbdf61d885bcb23490410ef871de924c58bbf01
|
[
"MIT"
] | 1
|
2020-05-31T03:01:42.000Z
|
2020-05-31T03:01:42.000Z
|
from settings import constants
from game import bet_sizing, card_tools, card_to_string
from base import Node
import torch


class PokerTreeBuilder():
    def __init__(self):
        pass

    def build_tree(self, params):
        root = Node()
        root.street = params.root_node.street
        root.bets = params.root_node.bets.clone()
        root.current_player = params.root_node.current_player
        root.board = params.root_node.board.clone()
        root.board_string = params.root_node.board_string
        self.build_tree_dfs(root)
        return root

    def build_tree_dfs(self, current_node):
        current_node.pot = torch.min(current_node.bets).item()
        children = self.get_children_nodes(current_node)
        current_node.children = children
        for child in children:
            self.build_tree_dfs(child)

    def get_children_nodes(self, parent_node):
        if parent_node.terminal:
            return []
        chance_node = parent_node.current_player == constants.players.chance
        if chance_node:
            return self.get_children_chance_nodes(parent_node)
        else:
            return self.get_children_player_nodes(parent_node)

    def get_children_chance_nodes(self, parent_node):
        children = []
        next_boards = card_tools.get_second_round_boards()
        for board in next_boards:
            chance_node = Node(parent_node)
            chance_node.current_player = constants.players.P1
            chance_node.street = parent_node.street + 1
            chance_node.board = board
            chance_node.board_string = card_to_string.cards_to_string(board)
            chance_node.num_bets = 0
            chance_node.action = chance_node.board_string
            children.append(chance_node)
        return children

    def get_children_player_nodes(self, parent_node):
        children = []
        # fold action
        fold_node = Node(parent_node)
        fold_node.terminal = True
        fold_node.action = "fold"
        fold_node.node_type = constants.node_types.terminal_fold
        children.append(fold_node)
        # P1 start check action
        if (parent_node.current_player == constants.players.P1 and
                parent_node.bets[0] == parent_node.bets[1]):
            check_node = Node(parent_node)
            check_node.action = "check"
            children.append(check_node)
        # raise -> ( P1 / P2 call ) -> chance
        # P1 check -> (P2 check ) -> chance
        elif parent_node.street == 0 and (
            parent_node.bets[0] != parent_node.bets[1] or
            parent_node.bets[0] == parent_node.bets[1] and
            parent_node.current_player == constants.players.P2
        ):
            chance_node = Node(parent_node)
            chance_node.current_player = constants.players.chance
            chance_node.bets[:] = chance_node.bets.max()
            chance_node.action = "call" if parent_node.bets[0] != parent_node.bets[1] else "check"
            children.append(chance_node)
        # call -> terminal
        else:
            terminal_call_node = Node(parent_node)
            terminal_call_node.current_player = 1 - constants.players.P2
            terminal_call_node.terminal = True
            terminal_call_node.node_type = constants.node_types.terminal_call
            terminal_call_node.bets[:] = terminal_call_node.bets.max()
            terminal_call_node.action = "call"
            children.append(terminal_call_node)
        # raise action
        possible_bets = bet_sizing.get_possible_bets(parent_node)
        for possible_bet in possible_bets:
            raise_node = Node(parent_node)
            raise_node.bets = possible_bet
            raise_node.num_bets += 1
            raise_node.action = "raise"
            children.append(raise_node)
        return children


tree_builder = PokerTreeBuilder()
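
A self-contained sketch of the depth-first construction build_tree_dfs performs, with a toy node in place of the repo's base.Node (all names here are illustrative, not the repo's API):

class ToyNode:
    def __init__(self):
        self.children = []

def build_dfs(node, depth, branching=2, max_depth=2):
    # mirror build_tree_dfs: create the children, then recurse into each
    if depth == max_depth:
        return
    node.children = [ToyNode() for _ in range(branching)]
    for child in node.children:
        build_dfs(child, depth + 1, branching, max_depth)

def count(node):
    return 1 + sum(count(c) for c in node.children)

root = ToyNode()
build_dfs(root, 0)
print(count(root))  # 7 nodes for branching=2, max_depth=2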
| 38.227723
| 98
| 0.644911
| 474
| 3,861
| 4.934599
| 0.154008
| 0.111159
| 0.047884
| 0.046174
| 0.22018
| 0.197093
| 0.140231
| 0.107738
| 0.082086
| 0.053869
| 0
| 0.007854
| 0.27454
| 3,861
| 100
| 99
| 38.61
| 0.827205
| 0.034447
| 0
| 0.121951
| 0
| 0
| 0.007256
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.073171
| false
| 0.012195
| 0.04878
| 0
| 0.207317
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
11ab85dad8fb08a5c5eee01b9be2f4e803d8712c
| 50,062
|
py
|
Python
|
src/htsql/core/tr/bind.py
|
sirex/htsql
|
52275f6a584b412c109822d2ed2a5e69ac522cdf
|
[
"Apache-2.0"
] | null | null | null |
src/htsql/core/tr/bind.py
|
sirex/htsql
|
52275f6a584b412c109822d2ed2a5e69ac522cdf
|
[
"Apache-2.0"
] | null | null | null |
src/htsql/core/tr/bind.py
|
sirex/htsql
|
52275f6a584b412c109822d2ed2a5e69ac522cdf
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2006-2013, Prometheus Research, LLC
#
"""
:mod:`htsql.core.tr.bind`
=========================
This module implements the binding process.
"""
from ..util import maybe, listof, tupleof, similar
from ..adapter import Adapter, Protocol, adapt, adapt_many
from ..domain import (Domain, BooleanDomain, IntegerDomain, DecimalDomain,
FloatDomain, UntypedDomain, EntityDomain, RecordDomain, ListDomain,
IdentityDomain, VoidDomain)
from ..classify import normalize
from ..error import Error, translate_guard, choices_guard, point
from ..syn.syntax import (Syntax, CollectSyntax, SelectSyntax, ApplySyntax,
FunctionSyntax, PipeSyntax, OperatorSyntax, PrefixSyntax,
ProjectSyntax, FilterSyntax, LinkSyntax, DetachSyntax, AttachSyntax,
AssignSyntax, ComposeSyntax, LocateSyntax, IdentitySyntax, GroupSyntax,
IdentifierSyntax, UnpackSyntax, ReferenceSyntax, LiftSyntax,
StringSyntax, LabelSyntax, NumberSyntax, RecordSyntax, DirectSyntax)
from .binding import (Binding, WrappingBinding, CollectBinding, RootBinding,
HomeBinding, TableBinding, ChainBinding, ColumnBinding,
QuotientBinding, KernelBinding, ComplementBinding, LocateBinding,
SieveBinding, AttachBinding, SortBinding, CastBinding, IdentityBinding,
ImplicitCastBinding, RescopingBinding, AssignmentBinding,
DefineBinding, DefineReferenceBinding, DefineCollectionBinding,
DefineLiftBinding, SelectionBinding, WildSelectionBinding,
DirectionBinding, TitleBinding, RerouteBinding,
ReferenceRerouteBinding, AliasBinding, LiteralBinding, FormulaBinding,
VoidBinding, Recipe, LiteralRecipe, SelectionRecipe, FreeTableRecipe,
AttachedTableRecipe, ColumnRecipe, KernelRecipe, ComplementRecipe,
IdentityRecipe, ChainRecipe, SubstitutionRecipe, BindingRecipe,
ClosedRecipe, PinnedRecipe, AmbiguousRecipe)
from .lookup import (lookup_attribute, lookup_reference, lookup_complement,
lookup_attribute_set, lookup_reference_set, expand, direct, guess_tag,
identify, unwrap)
from .signature import IsEqualSig, AndSig
from .coerce import coerce
from .decorate import decorate
class BindingState(object):
def __init__(self, root, environment=None):
assert isinstance(root, RootBinding)
# The root lookup scope.
self.root = root
# The current lookup scope.
self.scope = root
# The stack of previous lookup scopes.
self.scope_stack = []
# References in the root scope.
self.environment = environment
if self.environment is not None:
collection = {}
for name, recipe in self.environment:
name = normalize(name)
collection[name] = recipe
if collection:
self.scope = DefineCollectionBinding(
self.scope, collection, True, self.scope.syntax)
def push_scope(self, scope):
"""
Sets the new lookup scope.
This function stores the current scope in the stack and makes
the given binding the new lookup scope. Use the :attr:`scope`
attribute to get the current scope; :meth:`pop_scope` to restore
the previous scope.
`scope` (:class:`htsql.core.tr.binding.Binding`)
The new lookup scope.
"""
# Sanity check on the argument.
assert isinstance(scope, Binding)
# Ensure that the root scope was set.
assert self.root is not None
# Save the current lookup scope.
self.scope_stack.append(self.scope)
# Assign the new lookup scope.
self.scope = scope
def pop_scope(self):
"""
Restores the previous lookup scope.
This functions restores the previous lookup scope from the stack.
Use the :attr:`scope` attribute to get the current scope;
:meth:`push_scope` to change the current scope.
"""
# Restore the previous lookup scope from the stack.
self.scope = self.scope_stack.pop()
def bind(self, syntax, scope=None):
"""
Binds the given syntax node using the current binding state.
Returns a binding node.
`syntax` (:class:`htsql.core.tr.syntax.Syntax`)
The syntax node to bind.
`scope` (:class:`htsql.core.tr.binding.Binding` or ``None``)
If set, the lookup scope is set to `scope` when
binding the syntax node.
"""
with translate_guard(syntax):
if scope is not None:
self.push_scope(scope)
binding = Bind.__prepare__(syntax, self)()
if scope is not None:
self.pop_scope()
return binding
def use(self, recipe, syntax, scope=None):
"""
Applies a recipe to produce a binding node.
Returns a binding node.
`recipe` (:class:`htsql.core.tr.binding.Recipe`)
The recipe to apply.
`syntax` (:class:`htsql.core.tr.syntax.Syntax`)
The syntax node associated with the recipe.
`scope` (:class:`htsql.core.tr.binding.Binding` or ``None``)
If set, the lookup scope is set to `scope` when
binding the syntax node.
"""
# If passed, set the new lookup scope.
if scope is not None:
self.push_scope(scope)
# Realize and apply `BindByRecipe` adapter.
with translate_guard(syntax):
binding = BindByRecipe.__invoke__(recipe, syntax, self)
# Restore the old lookup scope.
if scope is not None:
self.pop_scope()
# Return the generated binding node.
return binding
def call(self, syntax, scope=None):
"""
Binds a global function or a global identifier.
Returns a binding node.
`syntax` (:class:`htsql.core.tr.syntax.Syntax`)
The syntax node to bind.
`scope` (:class:`htsql.core.tr.binding.Binding` or ``None``)
If set, the lookup context is set to `scope` when
binding the syntax node.
"""
# If passed, set the new lookup scope.
if scope is not None:
self.push_scope(scope)
# Realize and apply `BindByName` protocol.
with translate_guard(syntax):
binding = BindByName.__invoke__(syntax, self)
# Restore the old lookup scope.
if scope is not None:
self.pop_scope()
# Return the generated binding node.
return binding
class Bind(Adapter):
"""
Translates a syntax node to a binding node.
This is an interface adapter; see subclasses for implementations.
The binding process resolves identifiers against database objects,
resolves and validates operators and function calls, and determines
the types of all expressions.
The :class:`Bind` adapter has the following signature::
Bind: (Syntax, BindingState) -> Binding
The adapter is polymorphic on the `Syntax` argument.
`syntax` (:class:`htsql.core.tr.syntax.Syntax`)
The syntax node to bind.
`state` (:class:`BindingState`)
The current state of the binding process.
"""
adapt(Syntax)
def __init__(self, syntax, state):
assert isinstance(syntax, Syntax)
assert isinstance(state, BindingState)
self.syntax = syntax
self.state = state
def __call__(self):
# The default implementation raises an error. It is actually
# unreachable since we provide an implementation for all syntax nodes.
raise Error("Unable to bind a node")
def hint_choices(choices):
# Generate a hint from a list of choices.
assert isinstance(choices, listof(unicode))
if not choices:
return None
chunks = ["did you mean:"]
if len(choices) == 1:
chunks.append("'%s'" % choices[0].encode('utf-8'))
else:
chunks.append(", ".join("'%s'" % choice.encode('utf-8')
for choice in choices[:-1]))
chunks.append("or")
chunks.append("'%s'" % choices[-1].encode('utf-8'))
return " ".join(chunks)
class BindCollect(Bind):
adapt(CollectSyntax)
def __call__(self):
## FIXME: an empty segment syntax should not be generated.
#if self.syntax.arm is None:
# raise Error("output columns are not specified",
# self.syntax.mark)
# Bind the segment expression.
if self.syntax.arm is not None:
seed = self.state.bind(self.syntax.arm)
if isinstance(seed, AssignmentBinding):
with translate_guard(seed):
if len(seed.terms) != 1:
raise Error("Qualified definition is not allowed"
" for an in-segment assignment")
if seed.parameters is not None:
raise Error("Parameterized definition is not allowed"
" for an in-segment assignment")
name, is_reference = seed.terms[0]
if is_reference:
recipe = BindingRecipe(self.state.bind(seed.body))
else:
recipe = SubstitutionRecipe(self.state.scope, [],
None, seed.body)
recipe = ClosedRecipe(recipe)
syntax = seed.syntax
if isinstance(syntax, AssignSyntax):
syntax = syntax.larm
seed = self.state.use(recipe, syntax)
else:
seed = self.state.scope
seed = Select.__invoke__(seed, self.state)
domain = ListDomain(seed.domain)
return CollectBinding(self.state.scope, seed, domain,
self.syntax)
class Select(Adapter):
adapt(Domain)
@classmethod
def __dispatch__(interface, binding, *args, **kwds):
assert isinstance(binding, Binding)
return (type(binding.domain),)
def __init__(self, binding, state):
self.binding = binding
self.state = state
def __call__(self):
domain = coerce(self.binding.domain)
if domain is None:
# FIXME: separate implementation for VoidDomain with a better error
# message.
raise Error("Output column must be scalar")
return ImplicitCastBinding(self.binding, domain, self.binding.syntax)
class SelectRecord(Select):
adapt_many(EntityDomain,
RecordDomain)
def __call__(self):
recipes = expand(self.binding, with_syntax=True, with_wild=True,
with_class=True)
if recipes is None:
return super(SelectRecord, self).__call__()
elements = []
for syntax, recipe in recipes:
element = self.state.use(recipe, syntax, scope=self.binding)
element = Select.__invoke__(element, self.state)
elements.append(element)
fields = [decorate(element) for element in elements]
domain = RecordDomain(fields)
binding = SelectionBinding(self.binding, elements, domain,
self.binding.syntax)
return binding
class SelectList(Select):
adapt(ListDomain)
def __call__(self):
return self.binding
class SelectIdentity(Select):
adapt(IdentityDomain)
def __call__(self):
return self.binding
class SelectUntyped(Select):
adapt(UntypedDomain)
def __call__(self):
return self.binding
class BindSelect(Bind):
adapt(SelectSyntax)
def __call__(self):
scope = self.state.bind(self.syntax.larm)
return self.state.bind(self.syntax.rarm, scope=scope)
class BindRecord(Bind):
adapt(RecordSyntax)
def __call__(self):
# Extract selector elements.
elements = []
scope = self.state.scope
self.state.push_scope(scope)
for arm in self.syntax.arms:
binding = self.state.bind(arm)
# Handle in-selector assignments.
if isinstance(binding, AssignmentBinding):
with translate_guard(binding):
if len(binding.terms) != 1:
raise Error("Qualified definition is not allowed"
" for an in-selector assignment")
if binding.parameters is not None:
raise Error("Parameterized definition is not allowed"
" for an in-selector assignment")
name, is_reference = binding.terms[0]
if is_reference:
recipe = BindingRecipe(self.state.bind(binding.body))
else:
recipe = SubstitutionRecipe(scope, [],
None, binding.body)
recipe = ClosedRecipe(recipe)
syntax = binding.syntax
if isinstance(syntax, AssignSyntax):
syntax = syntax.larm.larms[0]
binding = self.state.use(recipe, syntax)
if is_reference:
scope = DefineReferenceBinding(scope, name,
recipe, scope.syntax)
else:
scope = DefineBinding(scope, name, None,
recipe, scope.syntax)
self.state.pop_scope()
self.state.push_scope(scope)
# Extract nested selectors, if any.
bindings = []
recipes = expand(binding, with_wild=True)
if recipes is not None:
seed = binding
for syntax, recipe in recipes:
binding = self.state.use(recipe, syntax)
binding = RescopingBinding(binding, seed, binding.syntax)
bindings.append(binding)
else:
bindings.append(binding)
# Handle in-selector direction decorators.
order = []
for binding in bindings:
direction = direct(binding)
if direction is not None:
order.append(binding)
if order:
scope = SortBinding(scope, order, None, None, scope.syntax)
self.state.pop_scope()
self.state.push_scope(scope)
elements.extend(bindings)
self.state.pop_scope()
# Generate a selection scope.
fields = [decorate(element) for element in elements]
domain = RecordDomain(fields)
return SelectionBinding(scope, elements, domain, self.syntax)
class BindApply(Bind):
adapt(ApplySyntax)
def __call__(self):
# Look for the parameterized attribute in the current local scope.
recipe = lookup_attribute(self.state.scope,
self.syntax.name, len(self.syntax.arguments))
if recipe is not None:
binding = self.state.use(recipe, self.syntax)
# If not found, look for a global function.
else:
binding = self.state.call(self.syntax)
return binding
class BindOperator(Bind):
adapt(OperatorSyntax)
def __call__(self):
# Look for the operator in the global scope. We skip the local scope
# as there is no way to add an operator to a local scope.
return self.state.call(self.syntax)
class BindProject(Bind):
adapt(ProjectSyntax)
def __call__(self):
# Get the seed of the quotient.
seed = self.state.bind(self.syntax.larm)
# get the kernel expressions.
elements = []
binding = self.state.bind(self.syntax.rarm, scope=seed)
recipes = expand(binding, with_syntax=True)
if recipes is not None:
for syntax, recipe in recipes:
element = self.state.use(recipe, syntax, scope=binding)
element = RescopingBinding(element, binding, element.syntax)
elements.append(element)
else:
elements.append(binding)
# Validate types of the kernel expressions.
kernels = []
for element in elements:
domain = coerce(element.domain)
with translate_guard(element):
if domain is None:
raise Error("Expected a scalar column")
kernel = ImplicitCastBinding(element, domain, element.syntax)
kernels.append(kernel)
# Generate the quotient scope.
quotient = QuotientBinding(self.state.scope, seed, kernels,
self.syntax)
# Assign names to the kernel and the complement links when possible.
binding = quotient
name = guess_tag(seed)
if name is not None:
recipe = ComplementRecipe(quotient)
recipe = ClosedRecipe(recipe)
binding = DefineBinding(binding, name, None, recipe, self.syntax)
for index, kernel in enumerate(kernels):
name = guess_tag(kernel)
if name is not None:
recipe = KernelRecipe(quotient, index)
recipe = ClosedRecipe(recipe)
binding = DefineBinding(binding, name, None, recipe,
self.syntax)
return binding
class BindFilter(Bind):
adapt(FilterSyntax)
def __call__(self):
# Get the sieve base.
base = self.state.bind(self.syntax.larm)
# Bind the filter and force the Boolean type on it.
filter = self.state.bind(self.syntax.rarm, scope=base)
filter = ImplicitCastBinding(filter, coerce(BooleanDomain()),
filter.syntax)
# Produce a sieve scope.
return SieveBinding(base, filter, self.syntax)
class BindLink(Bind):
adapt(LinkSyntax)
def __call__(self):
# Bind the origin images.
origin_images = []
binding = self.state.bind(self.syntax.larm)
recipes = expand(binding, with_syntax=True)
if recipes is not None:
for syntax, recipe in recipes:
element = self.state.use(recipe, syntax)
element = RescopingBinding(element, binding, element.syntax)
origin_images.append(element)
else:
origin_images.append(binding)
# Bind the target scope.
home = HomeBinding(self.state.scope, self.syntax)
seed = self.state.bind(self.syntax.rarm, scope=home)
# Bind the target images; if not provided, reuse the syntax node
# of the origin images.
binding = seed
target_images = []
recipes = expand(seed, with_syntax=True)
if recipes is None:
binding = self.state.bind(self.syntax.larm, scope=seed)
recipes = expand(binding, with_syntax=True)
if recipes is not None:
for syntax, recipe in recipes:
element = self.state.use(recipe, syntax, scope=seed)
element = RescopingBinding(element, binding, element.syntax)
target_images.append(element)
else:
target_images.append(binding)
# Correlate origin and target images.
if len(origin_images) != len(target_images):
raise Error("Found unbalanced origin and target columns")
images = []
for origin_image, target_image in zip(origin_images, target_images):
domain = coerce(origin_image.domain, target_image.domain)
if domain is None:
raise Error("Cannot coerce origin and target columns"
" to a common type")
origin_image = ImplicitCastBinding(origin_image, domain,
origin_image.syntax)
target_image = ImplicitCastBinding(target_image, domain,
target_image.syntax)
images.append((origin_image, target_image))
# Generate a link scope.
return AttachBinding(self.state.scope, seed, images, None, self.syntax)
class BindAttach(Bind):
adapt(AttachSyntax)
def __call__(self):
home = HomeBinding(self.state.scope, self.syntax)
seed = self.state.bind(self.syntax.rarm, scope=home)
recipe = BindingRecipe(seed)
scope = self.state.scope
scope = DefineLiftBinding(scope, recipe, self.syntax)
name = guess_tag(seed)
if name is not None:
scope = DefineBinding(scope, name, None, recipe, self.syntax)
condition = self.state.bind(self.syntax.larm, scope=scope)
condition = ImplicitCastBinding(condition, coerce(BooleanDomain()),
condition.syntax)
return AttachBinding(self.state.scope, seed, [], condition, self.syntax)
class BindDetach(Bind):
adapt(DetachSyntax)
def __call__(self):
# Make the home scope.
home = HomeBinding(self.state.scope, self.syntax)
# Bind the operand against the home scope.
return self.state.bind(self.syntax.arm, scope=home)
class BindAssign(Bind):
adapt(AssignSyntax)
def __call__(self):
# Parse the left side of the assignment. It takes one of the forms:
# $reference := ...
# identifier := ...
# identifier(parameter,...) := ...
# parent. ... .identifier(parameter,...) := ...
# parent. ... .$identifier(parameter,...) := ...
# The dot-separated names and reference indicators.
terms = []
parameters = None
syntax = self.syntax.larm
for idx, arm in enumerate(syntax.larms):
if isinstance(arm, ReferenceSyntax):
with translate_guard(arm):
if idx < len(syntax.larms)-1:
raise Error("Expected an identifier")
terms.append((arm.identifier.name, True))
else:
terms.append((arm.name, False))
if syntax.rarms is not None:
parameters = []
for arm in syntax.rarms:
if isinstance(arm, ReferenceSyntax):
parameters.append((arm.identifier.name, True))
else:
parameters.append((arm.name, False))
# The right side of the assignment expression.
body = self.syntax.rarm
# Generate an assignment node.
return AssignmentBinding(self.state.scope, terms, parameters, body,
self.syntax)
class BindCompose(Bind):
adapt(ComposeSyntax)
def __call__(self):
# Expression:
# parent . child
# evaluates `child` in the scope of `parent`.
scope = self.state.bind(self.syntax.larm)
binding = self.state.bind(self.syntax.rarm, scope=scope)
return binding
class BindLocate(Bind):
adapt(LocateSyntax)
def __call__(self):
seed = self.state.bind(self.syntax.larm)
recipe = identify(seed)
with translate_guard(seed):
if recipe is None:
raise Error("Cannot determine identity")
identity = self.state.use(recipe, self.syntax.rarm, scope=seed)
location = self.state.bind(self.syntax.rarm, scope=seed)
with translate_guard(self.syntax.rarm):
if identity.domain.width != location.width:
raise Error("Found ill-formed locator")
def convert(identity, elements):
assert isinstance(identity, IdentityBinding)
images = []
for field in identity.elements:
if isinstance(field.domain, IdentityDomain):
total_width = 0
items = []
while total_width < field.domain.width:
assert elements
element = elements.pop(0)
if (total_width == 0 and
isinstance(element, IdentityBinding) and
element.width == field.domain.width):
items = element.elements[:]
total_width = element.width
elif isinstance(element, IdentityBinding):
items.append(element)
total_width += element.width
else:
items.append(element)
total_width += 1
with translate_guard(self.syntax.rarm):
if total_width > field.domain.width:
raise Error("Found ill-formed locator")
images.extend(convert(field, items))
else:
assert elements
element = elements.pop(0)
with translate_guard(self.syntax.larm):
if isinstance(element, IdentityBinding):
raise Error("Found ill-formed locator")
item = ImplicitCastBinding(element, field.domain,
element.syntax)
images.append((item, field))
return images
elements = location.elements[:]
while len(elements) == 1 and isinstance(elements[0], IdentityBinding):
elements = elements[0].elements[:]
images = convert(identity, elements)
return LocateBinding(self.state.scope, seed, images, None, self.syntax)
class BindIdentity(Bind):
adapt(IdentitySyntax)
def __call__(self):
elements = []
for arm in self.syntax.arms:
element = self.state.bind(arm)
identity = unwrap(element, IdentityBinding, is_deep=False)
if identity is not None:
element = identity
elements.append(element)
return IdentityBinding(self.state.scope, elements, self.syntax)
class BindGroup(Bind):
adapt(GroupSyntax)
def __call__(self):
# Bind the expression in parenthesis, then wrap the result
# to attach the original syntax node.
binding = self.state.bind(self.syntax.arm)
return WrappingBinding(binding, self.syntax)
class BindIdentifier(Bind):
adapt(IdentifierSyntax)
def __call__(self):
# Look for the identifier in the current lookup scope.
recipe = lookup_attribute(self.state.scope, self.syntax.name)
if recipe is not None:
binding = self.state.use(recipe, self.syntax)
# If not found, try the global scope.
else:
binding = self.state.call(self.syntax)
return binding
class BindUnpack(Bind):
adapt(UnpackSyntax)
def __call__(self):
# Get all public columns in the current lookup scope.
recipes = expand(self.state.scope, with_syntax=True, with_wild=True,
with_class=True, with_link=True)
if recipes is None:
raise Error("Cannot expand '*' since output columns"
" are not defined")
# If a position is given, extract a specific element.
if self.syntax.index is not None:
index = self.syntax.index
index -= 1
if not (0 <= index < len(recipes)):
raise Error("Expected value in range 1-%s" % len(recipes))
syntax, recipe = recipes[index]
syntax = point(syntax, self.syntax)
return self.state.use(recipe, syntax)
# Otherwise, generate a selection node.
elements = []
for syntax, recipe in recipes:
syntax = point(syntax, self.syntax)
element = self.state.use(recipe, syntax)
elements.append(element)
fields = [decorate(element) for element in elements]
domain = RecordDomain(fields)
return WildSelectionBinding(self.state.scope, elements, domain,
self.syntax)
class BindDirect(Bind):
adapt(DirectSyntax)
def __call__(self):
base = self.state.bind(self.syntax.arm)
direction = {u'+': +1, u'-': -1}[self.syntax.symbol]
return DirectionBinding(base, direction, self.syntax)
class BindReference(Bind):
adapt(ReferenceSyntax)
def __call__(self):
# Look for a reference, complain if not found.
recipe = lookup_reference(self.state.scope,
self.syntax.identifier.name)
if recipe is None:
model = self.syntax.identifier.name.lower()
names = lookup_reference_set(self.state.scope)
choices = [u"$"+name for name in sorted(names)
if similar(model, name)]
with choices_guard(choices):
raise Error("Found unknown reference", self.syntax)
return self.state.use(recipe, self.syntax)
class BindLift(Bind):
adapt(LiftSyntax)
def __call__(self):
# Look for a complement, complain if not found.
recipe = lookup_complement(self.state.scope)
if recipe is None:
raise Error("'^' could only be used in a quotient scope")
return self.state.use(recipe, self.syntax)
class BindString(Bind):
adapt_many(StringSyntax,
LabelSyntax)
def __call__(self):
# Bind a quoted literal. Note that a quoted literal does not
# necessarily represent a string value; its initial domain is untyped.
binding = LiteralBinding(self.state.scope,
self.syntax.text,
UntypedDomain(),
self.syntax)
return binding
class BindNumber(Bind):
adapt(NumberSyntax)
def __call__(self):
# Bind an unquoted (numeric) literal.
# Create an untyped literal binding.
binding = LiteralBinding(self.state.scope,
self.syntax.text,
UntypedDomain(),
self.syntax)
# Cast the binding to an appropriate numeric type.
if self.syntax.is_float:
domain = coerce(FloatDomain())
elif self.syntax.is_decimal:
domain = coerce(DecimalDomain())
elif self.syntax.is_integer:
domain = coerce(IntegerDomain())
binding = ImplicitCastBinding(binding, domain, self.syntax)
return binding
class BindByName(Protocol):
"""
Binds an application node.
This is an abstract protocol interface that provides a mechanism
for name-based dispatch of application syntax nodes.
The :class:`BindByName` interface has the following signature::
BindByName: (ApplicationSyntax, BindingState) -> Binding
BindByName: (IdentifierSyntax, BindingState) -> Binding
The protocol is polymorphic on the name and the number of arguments
of the syntax node.
To add an implementation of the interface, define a subclass
of :class:`BindByName` and specify its name and expected number
of arguments using function :func:`call`.
Class attributes:
`names` (a list of names or pairs `(name, length)`)
List of names the component matches.
Here `name` is a non-empty string, `length` is an integer or
``None``, where ``-1`` indicates any number of arguments, ``None``
means no arguments are accepted.
"""
names = []
@classmethod
def __dominates__(component, other):
# Determine if the component dominates another component
# assuming that they match the same dispatch key.
# A component implementing a protocol interface dominates
# another component if one of the following two conditions
# holds:
# (1) The component is a subclass of the other component.
if issubclass(component, other):
return True
# (2) The component and the other component match the
# same name, but the former requires a fixed number of
# arguments while the latter accepts a node with any
# number of arguments.
for name in component.__names__:
arity = -1
if isinstance(name, tuple):
name, arity = name
name = name.lower()
for other_name in other.__names__:
other_arity = -1
if isinstance(other_name, tuple):
other_name, other_arity = other_name
other_name = other_name.lower()
if name == other_name:
if arity != -1 and other_arity == -1:
return True
return False
@classmethod
def __matches__(component, dispatch_key):
# Check if the component matches the given function name
# and the number of arguments.
assert isinstance(dispatch_key, tupleof(unicode, maybe(int)))
# The name and the number of arguments of the call node.
key_name, key_arity = dispatch_key
# We want to compare names case insensitive. Unfortunately,
# we cannot use `normalize` from `htsql.core.tr.lookup` since it
# mangles symbols.
key_name = key_name.lower()
# Check if any of the component names matches the given name.
for name in component.__names__:
# `name` could be either a string or a pair of a string
# and an integer. The former assumes that the component
# accepts call nodes with any number of arguments.
arity = -1
if isinstance(name, tuple):
name, arity = name
name = name.lower()
# Check if the component name matches the node name.
if name == key_name:
if ((arity == key_arity) or
(arity == -1 and key_arity is not None)):
return True
# None of the names matched the dispatch key.
return False
@classmethod
def __dispatch__(interface, syntax, *args, **kwds):
assert isinstance(syntax, (ApplySyntax, IdentifierSyntax))
# We override `dispatch` since, as opposed to regular protocol
# interfaces, we also want to take into account not only the
# function name, but also the number of arguments.
if isinstance(syntax, ApplySyntax):
name = syntax.name
arity = len(syntax.arguments)
elif isinstance(syntax, IdentifierSyntax):
name = syntax.name
arity = None
return (name, arity)
def __init__(self, syntax, state):
assert isinstance(syntax, (ApplySyntax, IdentifierSyntax))
assert isinstance(state, BindingState)
self.syntax = syntax
self.state = state
# Extract commonly accessed attributes of the call node.
if isinstance(syntax, ApplySyntax):
self.name = syntax.name
self.arguments = syntax.arguments
elif isinstance(syntax, IdentifierSyntax):
self.name = syntax.name
self.arguments = None
def __call__(self):
# The default implementation; override in subclasses.
# Generate a hint with a list of alternative names.
model = self.name.lower()
arity = None
if self.arguments is not None:
arity = len(self.arguments)
attributes = lookup_attribute_set(self.state.scope)
global_attributes = set()
for component_name in BindByName.__catalogue__():
component_arity = -1
if isinstance(component_name, tuple):
component_name, component_arity = component_name
if isinstance(component_name, str):
component_name = component_name.decode('utf-8')
component_name = component_name.lower()
global_attributes.add((component_name, component_arity))
all_attributes = sorted(attributes|global_attributes)
choices = []
if not choices and arity is None:
names = lookup_reference_set(self.state.scope)
if model in names:
choices = ["a reference '$%s'" % model.encode('utf-8')]
if not choices and arity is None:
if any(model == sample
for sample, sample_arity in all_attributes
if sample_arity is not None):
choices = ["a function '%s'" % model.encode('utf-8')]
if not choices and arity is None:
choices = [sample
for sample, sample_arity in all_attributes
if sample_arity is None and sample != model
and similar(model, sample)]
if not choices and arity is not None \
and not isinstance(self.syntax, OperatorSyntax):
arities = [sample_arity
for sample, sample_arity in all_attributes
if sample == model and
sample_arity not in [None, -1, arity]]
if arities:
required_arity = []
arities.sort()
if len(arities) == 1:
required_arity.append(str(arities[0]))
else:
required_arity.append(", ".join(str(sample_arity)
for sample_arity in arities[:-1]))
required_arity.append("or")
required_arity.append(str(arities[-1]))
if required_arity[-1] == "1":
required_arity.append("argument")
else:
required_arity.append("arguments")
required_arity = " ".join(required_arity)
raise Error("Function '%s' requires %s; got %s"
% (self.syntax.identifier,
required_arity, arity))
if not choices and arity is not None:
if any(model == sample
for sample, sample_arity in all_attributes
if sample_arity is None):
choices = ["an attribute '%s'" % model.encode('utf-8')]
if not choices and arity is not None:
choices = [sample
for sample, sample_arity in all_attributes
if sample_arity in [-1, arity] and sample != model
and similar(model, sample)]
scope_name = guess_tag(self.state.scope)
if scope_name is not None:
scope_name = scope_name.encode('utf-8')
with choices_guard(choices):
if isinstance(self.syntax, (FunctionSyntax, PipeSyntax)):
raise Error("Found unknown function",
self.syntax.identifier)
if isinstance(self.syntax, OperatorSyntax):
raise Error("Found unknown operator",
self.syntax.symbol)
if isinstance(self.syntax, PrefixSyntax):
raise Error("Found unknown unary operator",
self.syntax.symbol)
if isinstance(self.syntax, IdentifierSyntax):
raise Error("Found unknown attribute",
"%s.%s" % (scope_name, self.syntax)
if scope_name is not None else str(self.syntax))
class BindByRecipe(Adapter):
"""
Applies a recipe to generate a binding node.
This is an abstract adapter that generates new binding nodes
from binding recipes. The :class:`BindByRecipe` interface
has the following signature::
BindByRecipe: (Recipe, Syntax, BindingState) -> Binding
The adapter is polymorphic by the first argument.
`recipe` (:class:`htsql.core.tr.binding.Recipe`)
A recipe to apply.
`syntax` (:class:`htsql.core.tr.syntax.Syntax`)
The syntax node associated with the recipe.
`state` (:class:`BindingState`)
The current binding state.
"""
adapt(Recipe)
def __init__(self, recipe, syntax, state):
assert isinstance(recipe, Recipe)
assert isinstance(syntax, Syntax)
assert isinstance(state, BindingState)
self.recipe = recipe
self.syntax = syntax
self.state = state
def __call__(self):
# The default implementation should not be reachable.
raise Error("unable to bind a node")
class BindByLiteral(BindByRecipe):
adapt(LiteralRecipe)
def __call__(self):
return LiteralBinding(self.state.scope,
self.recipe.value,
self.recipe.domain,
self.syntax)
class BindBySelection(BindByRecipe):
adapt(SelectionRecipe)
def __call__(self):
elements = []
for recipe in self.recipe.recipes:
element = self.state.use(recipe, self.syntax)
elements.append(element)
fields = [decorate(element) for element in elements]
domain = RecordDomain(fields)
return SelectionBinding(self.state.scope, elements, domain, self.syntax)
class BindByFreeTable(BindByRecipe):
adapt(FreeTableRecipe)
def __call__(self):
# Produce a free table scope.
return TableBinding(self.state.scope,
self.recipe.table,
self.syntax)
class BindByAttachedTable(BindByRecipe):
adapt(AttachedTableRecipe)
def __call__(self):
return ChainBinding(self.state.scope, self.recipe.joins, self.syntax)
class BindByColumn(BindByRecipe):
adapt(ColumnRecipe)
def __call__(self):
# Generate a link associated with the column.
link = None
if self.recipe.link is not None:
link = self.state.use(self.recipe.link, self.syntax)
# Produce a column scope.
return ColumnBinding(self.state.scope, self.recipe.column,
link, self.syntax)
class BindByKernel(BindByRecipe):
adapt(KernelRecipe)
def __call__(self):
# Generate a kernel expression of a quotient scope.
return KernelBinding(self.state.scope, self.recipe.quotient,
self.recipe.index, self.syntax)
class BindByComplement(BindByRecipe):
adapt(ComplementRecipe)
def __call__(self):
# Generate a complement link to a quotient scope.
return ComplementBinding(self.state.scope,
self.recipe.quotient, self.syntax)
class BindByIdentity(BindByRecipe):
adapt(IdentityRecipe)
def __call__(self):
elements = [self.state.use(recipe, self.syntax)
for recipe in self.recipe.elements]
return IdentityBinding(self.state.scope, elements, self.syntax)
class BindBySubstitution(BindByRecipe):
adapt(SubstitutionRecipe)
def __call__(self):
# Bind the given syntax node in place of an identifier
# or a function call.
# Check if the recipe has a qualifier.
if self.recipe.terms:
# Find the same identifier in the base scope.
assert isinstance(self.syntax, IdentifierSyntax)
name, is_reference = self.recipe.terms[0]
arity = None
if (len(self.recipe.terms) == 1 and
self.recipe.parameters is not None):
arity = len(self.recipe.parameters)
recipe = lookup_attribute(self.recipe.base, self.syntax.name)
if recipe is None:
raise Error("Found unknown attribute", self.syntax)
binding = self.state.use(recipe, self.syntax)
# Check if the term is a reference.
if is_reference:
# Must be the last term in the assignment.
assert len(self.recipe.terms) == 1
# Bind the reference against the scope where it is defined.
body = self.state.bind(self.recipe.body, scope=binding)
recipe = BindingRecipe(body)
# Augment the scope with the tail of the recipe.
else:
recipe = SubstitutionRecipe(binding, self.recipe.terms[1:],
self.recipe.parameters,
self.recipe.body)
recipe = ClosedRecipe(recipe)
if is_reference:
binding = DefineReferenceBinding(binding, name,
recipe, self.syntax)
else:
binding = DefineBinding(binding, name, arity,
recipe, self.syntax)
return binding
# Otherwise, bind the syntax node associated with the recipe.
# Bind against the current scope, but route all lookup requests
# to the scope where the recipe was defined.
scope = self.state.scope
scope = RerouteBinding(scope, self.recipe.base, scope.syntax)
# Bind the parameters.
if self.recipe.parameters is not None:
assert isinstance(self.syntax, ApplySyntax)
assert len(self.syntax.arguments) == len(self.recipe.parameters)
for (name, is_reference), syntax in zip(self.recipe.parameters,
self.syntax.arguments):
binding = self.state.bind(syntax)
recipe = BindingRecipe(binding)
recipe = ClosedRecipe(recipe)
if is_reference:
scope = DefineReferenceBinding(scope, name,
recipe, scope.syntax)
else:
scope = DefineBinding(scope, name, None,
recipe, scope.syntax)
# Bind the syntax node associated with the recipe.
binding = self.state.bind(self.recipe.body, scope=scope)
# Hide all references defined there.
binding = ReferenceRerouteBinding(binding, self.state.scope,
binding.syntax)
return binding
class BindByBinding(BindByRecipe):
adapt(BindingRecipe)
def __call__(self):
return self.recipe.binding
class BindByClosed(BindByRecipe):
adapt(ClosedRecipe)
def __call__(self):
# Generate a binding from the given recipe.
binding = self.state.use(self.recipe.recipe, self.syntax)
# Force the current syntax node to the binding.
return AliasBinding(binding, self.syntax)
class BindByChain(BindByRecipe):
adapt(ChainRecipe)
def __call__(self):
binding = self.state.scope
for recipe in self.recipe.recipes:
binding = self.state.use(recipe, self.syntax, scope=binding)
return binding
class BindByPinned(BindByRecipe):
adapt(PinnedRecipe)
def __call__(self):
# Bind the given recipe in the specified scope.
binding = self.state.use(self.recipe.recipe, self.syntax,
scope=self.recipe.scope)
return binding
class BindByAmbiguous(BindByRecipe):
adapt(AmbiguousRecipe)
def __call__(self):
syntax = self.syntax
if isinstance(self.syntax, (FunctionSyntax, PipeSyntax)):
syntax = self.syntax.identifier
hint = None
choices = []
if self.recipe.alternatives:
choices = [str(alternative)
for alternative in self.recipe.alternatives]
with choices_guard(choices):
raise Error("Found ambiguous name", syntax)
def bind(syntax, environment=None):
recipes = []
if environment is not None:
for name in sorted(environment):
value = environment[name]
if value.data is None:
recipe = LiteralRecipe(value.data, value.domain)
elif isinstance(value.domain, ListDomain):
item_recipes = [LiteralRecipe(item,
value.domain.item_domain)
for item in value.data]
recipe = SelectionRecipe(item_recipes)
elif isinstance(value.domain, RecordDomain):
item_recipes = [LiteralRecipe(item, profile.domain)
for item, profile in
zip(value.data, value.domain.fields)]
recipe = SelectionRecipe(item_recipes)
elif isinstance(value.domain, IdentityDomain):
def convert(domain, data):
items = []
for element, item in zip(domain.labels, data):
if isinstance(element, IdentityDomain):
item = convert(element, item)
else:
item = LiteralRecipe(item, element)
items.append(item)
return IdentityRecipe(items)
recipe = convert(value.domain, value.data)
else:
recipe = LiteralRecipe(value.data, value.domain)
recipes.append((name, recipe))
root = RootBinding(syntax)
state = BindingState(root, recipes)
if isinstance(syntax, AssignSyntax):
specifier = syntax.larm
with translate_guard(specifier):
if specifier.identifier is None:
raise Error("Expected an identifier")
identifier = specifier.larms[0]
binding = state.bind(syntax.rarm)
binding = Select.__invoke__(binding, state)
binding = TitleBinding(binding, identifier, binding.syntax)
else:
binding = state.bind(syntax)
binding = Select.__invoke__(binding, state)
return binding
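
BindingState's push_scope/pop_scope is a plain stack discipline; a minimal sketch of the same pattern outside HTSQL, with hypothetical names:

class ScopeState:
    def __init__(self, root):
        self.scope = root
        self._stack = []

    def push_scope(self, scope):
        # save the current scope, then make `scope` current
        self._stack.append(self.scope)
        self.scope = scope

    def pop_scope(self):
        # restore whatever was current before the last push
        self.scope = self._stack.pop()

state = ScopeState("root")
state.push_scope("table")
assert state.scope == "table"
state.pop_scope()
assert state.scope == "root"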
| 37.028107
| 80
| 0.583716
| 5,207
| 50,062
| 5.524678
| 0.111773
| 0.042062
| 0.017207
| 0.01241
| 0.390969
| 0.315118
| 0.243056
| 0.199673
| 0.169187
| 0.142212
| 0
| 0.001863
| 0.335184
| 50,062
| 1,351
| 81
| 37.055514
| 0.862478
| 0.181095
| 0
| 0.387872
| 0
| 0
| 0.02647
| 0
| 0
| 0
| 0
| 0.00074
| 0.024027
| 1
| 0.070938
| false
| 0
| 0.012586
| 0.011442
| 0.202517
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
11ad8fe6bba3193be56826f292aa054b4c5199e3
| 2,226
|
py
|
Python
|
locuszoom_plotting_service/gwas/tests/factories.py
|
statgen/locuszoom-hosted
|
ecfcc5f48fefe2869ab277202a661c2575af6abb
|
[
"MIT"
] | null | null | null |
locuszoom_plotting_service/gwas/tests/factories.py
|
statgen/locuszoom-hosted
|
ecfcc5f48fefe2869ab277202a661c2575af6abb
|
[
"MIT"
] | 14
|
2021-01-01T17:16:23.000Z
|
2022-02-28T19:37:28.000Z
|
locuszoom_plotting_service/gwas/tests/factories.py
|
statgen/locuszoom-hosted
|
ecfcc5f48fefe2869ab277202a661c2575af6abb
|
[
"MIT"
] | null | null | null |
import os
import random

from django.db.models import signals
from django.utils import timezone

import factory
from factory.django import DjangoModelFactory

from locuszoom_plotting_service.users.tests.factories import UserFactory

from .. import constants as lz_constants
from .. import models as lz_models


def choose_genome_build() -> str:
    return random.choice(lz_constants.GENOME_BUILDS)[0]


def choose_consortium() -> str:
    return random.choice(['LocusZoom JS', 'LocusZoom Standalone', 'LocusZoom Hosted', 'LocusZoom.org'])


@factory.django.mute_signals(signals.post_save)
class AnalysisFilesetFactory(DjangoModelFactory):
    raw_gwas_file = None  # Only create temp files if has_data trait is True
    ingest_status = 0  # pending (most tests don't run celery tasks, and therefore are "pending" processing)
    ingest_complete = None
    parser_options = factory.Dict({  # Parser options for standard gwas format
        'chrom_col': 1,
        'pos_col': 2,
        'ref_col': 3,
        'alt_col': 4,
        'pvalue_col': 5,
        'is_neg_log_pvalue': False
    })

    class Meta:
        model = lz_models.AnalysisFileset

    class Params:
        # Most samples will be fine with a 0B file. Only provide actual data if explicitly requested.
        has_data = factory.Trait(
            raw_gwas_file=factory.django.FileField(
                from_path=os.path.join(os.path.dirname(__file__), 'fixtures/placeholder.txt'))
        )
        has_completed = factory.Trait(  # Marks pipeline complete (without actually running it)
            ingest_complete=timezone.now(),
            ingest_status=2
        )


class AnalysisInfoFactory(DjangoModelFactory):
    owner = factory.SubFactory(UserFactory)
    label = factory.Faker('sentence', nb_words=2)
    study_name = factory.LazyFunction(choose_consortium)
    files = factory.SubFactory(AnalysisFilesetFactory)
    build = factory.LazyFunction(choose_genome_build)
    is_public = False

    class Meta:
        model = lz_models.AnalysisInfo


class ViewLinkFactory(DjangoModelFactory):
    label = factory.Faker('sentence', nb_words=2)
    gwas = factory.SubFactory(AnalysisInfoFactory)

    class Meta:
        model = lz_models.ViewLink
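
A hedged usage sketch: in factory_boy, the Params traits above are toggled as boolean keyword arguments, so a test could request a fileset with real bytes and a finished pipeline like this (assumes a configured Django test database):

# Inside a Django/pytest test; both flags map to the traits defined
# under AnalysisFilesetFactory.Params above.
fileset = AnalysisFilesetFactory(has_data=True, has_completed=True)
assert fileset.ingest_status == 2
assert fileset.ingest_complete is not None

# Plain call: 0-byte placeholder file, still "pending"
pending = AnalysisFilesetFactory()
assert pending.ingest_status == 0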
| 29.68
| 108
| 0.709344
| 267
| 2,226
| 5.741573
| 0.501873
| 0.020874
| 0.027397
| 0.031311
| 0.092629
| 0.078278
| 0.043053
| 0
| 0
| 0
| 0
| 0.006232
| 0.207098
| 2,226
| 74
| 109
| 30.081081
| 0.862323
| 0.142857
| 0
| 0.098039
| 0
| 0
| 0.08307
| 0.012618
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039216
| false
| 0
| 0.176471
| 0.039216
| 0.627451
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
11b627ad398f9ae3625b734210d1a5d1347b9bf2
| 1,700
|
py
|
Python
|
pantofola_search/management/commands/_private.py
|
phingage/pantofola.io
|
f41036d2e568a45f328e2a7ca81d76a27cd134dc
|
[
"WTFPL"
] | 1
|
2018-06-09T22:20:00.000Z
|
2018-06-09T22:20:00.000Z
|
pantofola_search/management/commands/_private.py
|
phingage/pantofola.io
|
f41036d2e568a45f328e2a7ca81d76a27cd134dc
|
[
"WTFPL"
] | 4
|
2020-02-11T22:01:16.000Z
|
2021-06-10T17:38:56.000Z
|
pantofola_search/management/commands/_private.py
|
phingage/pantofola.io
|
f41036d2e568a45f328e2a7ca81d76a27cd134dc
|
[
"WTFPL"
] | null | null | null |
from pantofola_search.models import *
from pantofola_search.tools.imdb_fetcher import ImdbFetcher


def update_new_movie_info(clean_title, imdb_id, torrent, is_imdb=False):
    my_imdb = ImdbFetcher()
    if not Movie.objects.filter(pk=imdb_id).exists():
        # #[imdb_id,year,max_ratio,[titles[1]]]
        movie_info = my_imdb.query_movie_info(imdb_id, clean_title)
        movie = Movie(imdb_id=movie_info[0],
                      year=movie_info[1],
                      original_title=movie_info[3][0])
        movie.save()
        for aka in movie_info[3][1]:
            movie.title_set.create(title=aka)
        for forg in movie_info[3][2]:
            movie.foreigntitle_set.create(title=forg[0], language=forg[1])
        max_ratio = movie_info[2]
        # print movie_info, tags, lang_tag
    else:
        movie = Movie.objects.get(pk=imdb_id)
        score_title = [movie.original_title]
        for aka_q in movie.title_set.all():
            score_title.append(aka_q.title)
        max_ratio = my_imdb.compute_score(clean_title, score_title)
    alarm_ratio = False
    if float(max_ratio) < 0.5 and not is_imdb:
        alarm_ratio = True
    torrent.movie = movie
    torrent.score = max_ratio
    torrent.broken = alarm_ratio
    # torrent.ready_to_recheck = False
    if is_imdb:
        # torrent.sanitized_name = movie.original_title
        torrent.score = 1
        torrent.broken = False
    torrent.save()


def check_for_title_in_db(clean_title):
    t_e = Torrent.objects.filter(sanitized_name__exact=clean_title,
                                 broken=False, ready_to_recheck=False).first()
    if t_e:
        return t_e.movie.imdb_id
    else:
        return None
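
The score/broken bookkeeping above reduces to one threshold rule; a standalone sketch with the threshold copied from the function and hypothetical data:

def classify(max_ratio, is_imdb):
    # mirrors update_new_movie_info: an explicit IMDb match always wins,
    # otherwise anything under 0.5 title similarity is flagged broken
    if is_imdb:
        return 1, False  # (score, broken)
    return max_ratio, float(max_ratio) < 0.5

assert classify(0.3, is_imdb=False) == (0.3, True)
assert classify(0.9, is_imdb=False) == (0.9, False)
assert classify(0.3, is_imdb=True) == (1, False)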
| 36.956522
| 78
| 0.648824
| 239
| 1,700
| 4.322176
| 0.317992
| 0.087125
| 0.029042
| 0.023233
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011848
| 0.255294
| 1,700
| 46
| 79
| 36.956522
| 0.804107
| 0.086471
| 0
| 0.052632
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.052632
| 0
| 0.157895
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
11b6a22d0d9d730ae6441343ec296d67f55adf10
| 7,663
|
py
|
Python
|
ArcLint.py
|
namur007/ArcLint
|
b17b39cf7fdfeff144339b6f3494d9120eafde90
|
[
"MIT"
] | null | null | null |
ArcLint.py
|
namur007/ArcLint
|
b17b39cf7fdfeff144339b6f3494d9120eafde90
|
[
"MIT"
] | 4
|
2020-07-17T18:11:54.000Z
|
2020-07-26T12:34:57.000Z
|
ArcLint.py
|
namur007/ArcLint
|
b17b39cf7fdfeff144339b6f3494d9120eafde90
|
[
"MIT"
] | null | null | null |
import json
import re
import datetime
import os

import arcpy

regex_flag_dict = {
    # 'ASCII': re.A,  # this is py3 only so wont work in arcgis desktop
    'IGNORECASE': re.I,
    'LOCALE': re.L,
    "MULTILINE": re.M,
    "DOTMATCH": re.S,
    "UNICODE": re.U,
    "VERBOSE": re.X,
}


def main(json_path, feature, output_location=None, output_file_name=None):
    output_file = format_output_file(output_location, output_file_name)
    start_time = datetime.datetime.now()
    start_str = start_time.strftime("%Y-%m-%d %H:%M:%S")
    json_obj = read_json(json_path)
    rule_data = compile_rules(json_obj)
    results = _arc_process(rule_data, feature)
    save_json(format_results(results, start_str), output_file)


def format_output_file(output_location, output_file_name):
    if output_location is None:
        output_location = ""
    if output_file_name is None:
        output_file_name = "results.json"
    if not output_file_name.endswith(".json"):
        output_file_name += ".json"
    return os.path.join(output_location, output_file_name)


def read_json(_json_path):
    js = None
    with open(_json_path, 'r') as fl:
        js = json.loads(fl.read())
    return js


def save_json(_json_data, output_file):
    with open(output_file, 'w') as fl:
        fl.write(json.dumps(_json_data))


def format_results(rule_data, _datetime_str):
    # format fields
    # format groups
    out_fields = {}
    out_groups = {}
    for field in rule_data['Fields']:
        field_result = []
        for rule in rule_data['Fields'][field]:
            if not rule['output']:
                continue
            field_result.append({
                "ruleName": rule['ruleName'],
                "errorIDs": rule['result']
            })
        if len(field_result) == 0:
            continue
        out_fields[field] = field_result
    for group in rule_data['Groups']:
        out_groups[group] = {
            "errorIDs": rule_data['Groups'][group]['result'],
            'description': rule_data['Groups'][group]['description']
        }
    result = {
        "run_datetime": _datetime_str,
        "fields": out_fields,
        "groups": out_groups,
    }
    return result


def _arc_process(rule_data, feature):
    """
    impure function as i am modifying the rule_data
    input = {
        "Rules": rule_dict,
        "Fields": field_dict,
        "Groups": group_dict
    }
    returns dictionary of the rules"""
    fields = [field for field in rule_data['Fields']]
    with arcpy.da.SearchCursor(feature, ["OID@"] + fields) as sc:
        for row in sc:
            _id = row[0]
            for ix, value in enumerate(row[1:]):
                field_rules = rule_data['Fields'][fields[ix]]
                # append ID to each rule if they test = False
                [rule['result'].append(_id) for rule in field_rules if rule['rule'](value)]
            for group_name in rule_data['Groups']:
                group = rule_data['Groups'][group_name]
                group_func = any if group.get('match') == 'any' else all
                group_result = group_func([True if _id in r['result'] else False for r in group['rules']])
                if group_result == True:
                    group['result'].append(_id)
    return rule_data


# region Linters
def regex_lint(value, _regex):
    # if regex is good, return true, else return false
    if len(_regex.findall(str(value))) > 0:
        return True
    else:
        return False


def range_lint(value, firstValue, secondValue, outside):
    lv = min(firstValue, secondValue)
    mx = max(firstValue, secondValue)
    result = True if value >= lv and value <= mx else False
    result = not result if outside else result
    return result


# region builders
def compile_rules(json_obj):
    rule_dict = _compile_global_rules(json_obj)
    field_dict = _compile_field_rules(json_obj, rule_dict)
    group_dict = _compile_group_rules(json_obj, field_dict)
    return {
        "Rules": rule_dict,
        "Fields": field_dict,
        "Groups": group_dict
    }


def _compile_global_rules(json_obj):
    """
    returns
    rule name is either global_RULENAME for global or fieldname_RULENAME for field specific ones
    {
        rule_name: rule_function > str: function
    }
    """
    rule_dict = {}
    for rule in json_obj.get('globalRules', []):
        rule_name = rule.get('ruleName', '').upper()
        nm = 'global_{}'.format(rule_name)
        f = _parse_rule(rule)
        rule_dict[nm] = f
    return rule_dict


def _compile_field_rules(json_obj, rule_dict):
    """
    returns:
    {
        FieldName > str: {
            'result': [] > str: list,
            'ruleName': ruleName > str: str,
            'rule': rule_dict[fieldname_rule_name] > str: function,
        }
    }
    """
    field_dict = {}
    for field in json_obj.get('fields', []):
        field_rules = []
        field_name = field.get('fieldName')
        for rule in field.get('rules', []):
            rule_name = rule.get('ruleName', '').upper()
            rule_type = rule.get('type')
            output_rule = rule.get('output', True)
            nm = None
            if rule_type is None and 'global_{}'.format(rule_name) in rule_dict:
                nm = 'global_{}'.format(rule_name)
            else:
                nm = '{}_{}'.format(field_name, rule_name)
                rule_dict[nm] = _parse_rule(rule)
            field_rules.append({
                'result': [],
                'ruleName': rule_name,
                'rule': rule_dict[nm],
                'output': output_rule
            })
        field_dict[field_name] = field_rules
    return field_dict


def _compile_group_rules(json_obj, field_dict):
    """
    rules are the address to the rule from the field dictionary. when updating the result in the field results, should be available here
    returns
    {
        group_name: {
            "result": [],  # array of ids with errors,
            "match": "all" or "any",  # type of match to test for
            "rules": [group_rules],  # array of the rules for this group
        }
    }
    """
    group_dict = {}
    for group in json_obj.get("ruleGroups", []):
        group_name = group.get("groupName", "")
        match_type = group.get("match", "")
        group_rules = []
        for rule in group.get("rules", []):
            f = rule.get("fieldName")
            rn = rule.get("ruleName", "").upper()
            group_rules += [r for r in field_dict[f] if r['ruleName'] == rn]
        group_dict[group_name] = {
            "result": [],
            "match": match_type,
            "rules": group_rules,
            "description": group.get('description', '')
        }
    return group_dict


def _parse_rule(rule):
    func_dct = {
        'regex': _parse_regex,
        'range': _parse_range
    }
    return func_dct[rule.get('type')](rule)


# region parse rules
def _parse_regex(rule):
    _pattern = rule.get('pattern')
    flags = rule.get('flags', [])
    pat_flags = 0
    for f in flags:
        if f is None:
            continue
        pat_flags |= regex_flag_dict.get(f.upper(), 0)
    _regex = re.compile(_pattern, pat_flags)

    def f(x): return regex_lint(x, _regex)
    return f


def _parse_range(rule):
    f_value = rule.get('fromValue')
    s_value = rule.get('toValue')
    outside = rule.get('outside', False)

    def f(x): return range_lint(x, f_value, s_value, outside)
    return f


if __name__ == "__main__":
    feat = r"C:\Users\scody\Desktop\ArcPro Model\AllPipes2020\Data\ModelNetwork.gdb\facility_junction"
    main('facil_jct.json', feat)
| 27.66426
| 136
| 0.588542
| 964
| 7,663
| 4.431535
| 0.190871
| 0.02809
| 0.026217
| 0.01779
| 0.160346
| 0.095506
| 0.071161
| 0.04073
| 0.020131
| 0
| 0
| 0.002025
| 0.291139
| 7,663
| 276
| 137
| 27.764493
| 0.784426
| 0.141328
| 0
| 0.087209
| 0
| 0
| 0.102556
| 0.01356
| 0
| 0
| 0
| 0
| 0
| 1
| 0.098837
| false
| 0
| 0.02907
| 0.011628
| 0.209302
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
11b7d8f84ea9074863867abdbc15c4a61c060614
| 1,710
|
py
|
Python
|
files/persona_dao.py
|
DaletWolff/Curso_postgresql
|
a9d716236b1a840f104c98a4982eab9b1ad641ba
|
[
"Unlicense"
] | null | null | null |
files/persona_dao.py
|
DaletWolff/Curso_postgresql
|
a9d716236b1a840f104c98a4982eab9b1ad641ba
|
[
"Unlicense"
] | null | null | null |
files/persona_dao.py
|
DaletWolff/Curso_postgresql
|
a9d716236b1a840f104c98a4982eab9b1ad641ba
|
[
"Unlicense"
] | null | null | null |
from persona import Persona
from logger_base import log
from cursor import Cursor
class PersonaDAO:
_SELECCIONAR = 'SELECT * FROM persona ORDER BY id_persona'
_INSERTAR = 'INSERT INTO persona(nombre, apellido, email) VALUES(%s, %s, %s)'
_ACTUALIZAR = 'UPDATE persona SET nombre=%s, apellido=%s, email=%s WHERE id_persona=%s'
_ELIMINAR = 'DELETE FROM persona WHERE id_persona=%s'
@classmethod
def seleccionar(cls):
with Cursor() as cursor:
cursor.execute(cls._SELECCIONAR)
registros = cursor.fetchall()
personas = []
for registro in registros:
persona = Persona(registro[0], registro[1], registro[2], registro[3])
personas.append(persona)
return personas
@classmethod
def insertar(cls, persona):
with Cursor() as cursor:
valores = (persona.nombre, persona.apellido, persona.email)
cursor.execute(cls._INSERTAR, valores)
log.debug(f"Persona insertada: {persona}")
return cursor.rowcount
@classmethod
def actualizar(cls, persona):
with Cursor() as cursor:
valores = (persona.nombre, persona.apellido, persona.email, persona.id_persona)
cursor.execute(cls._ACTUALIZAR, valores)
            log.debug(f'Persona updated: {persona}')
return cursor.rowcount
@classmethod
def eliminar(cls, persona):
with Cursor() as cursor:
valores = (persona.id_persona,)
cursor.execute(cls._ELIMINAR, valores)
            log.debug(f'Persona deleted: {persona}')
return cursor.rowcount
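# A minimal usage sketch; assumes Persona's constructor takes
# (id_persona, nombre, apellido, email) positionally, matching how
# seleccionar() rebuilds rows above. Sample values are invented.
if __name__ == '__main__':
    nueva = Persona(None, 'Ana', 'Diaz', 'ana@example.com')
    PersonaDAO.insertar(nueva)
    for persona in PersonaDAO.seleccionar():
        log.debug(persona)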
| 38
| 92
| 0.612281
| 181
| 1,710
| 5.707182
| 0.292818
| 0.043562
| 0.046467
| 0.0697
| 0.387222
| 0.320426
| 0.185866
| 0.185866
| 0.145208
| 0.145208
| 0
| 0.003309
| 0.292982
| 1,710
| 45
| 93
| 38
| 0.851117
| 0
| 0
| 0.282051
| 0
| 0
| 0.179964
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.102564
| false
| 0
| 0.076923
| 0
| 0.410256
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
11c058f314fcdf27f630e4e67e934c957629b5a4
| 1,000
|
py
|
Python
|
pype9/cmd/convert.py
|
tclose/Pype9
|
23f96c0885fd9df12d9d11ff800f816520e4b17a
|
[
"MIT"
] | null | null | null |
pype9/cmd/convert.py
|
tclose/Pype9
|
23f96c0885fd9df12d9d11ff800f816520e4b17a
|
[
"MIT"
] | null | null | null |
pype9/cmd/convert.py
|
tclose/Pype9
|
23f96c0885fd9df12d9d11ff800f816520e4b17a
|
[
"MIT"
] | 1
|
2021-04-08T12:46:21.000Z
|
2021-04-08T12:46:21.000Z
|
"""
Tool to convert 9ML files between different supported formats (e.g. XML_,
JSON_, YAML_) and 9ML versions.
"""
from argparse import ArgumentParser
from pype9.utils.arguments import nineml_document
from pype9.utils.logging import logger
def argparser():
parser = ArgumentParser(prog='pype9 convert',
description=__doc__)
parser.add_argument('in_file', type=nineml_document,
help="9ML file to be converted")
parser.add_argument('out_file', help="Converted filename")
parser.add_argument('--nineml_version', '-v', type=str, default=None,
help="The version of nineml to output")
return parser
def run(argv):
args = argparser().parse_args(argv)
doc = args.in_file.clone()
kwargs = {}
if args.nineml_version is not None:
kwargs['version'] = args.nineml_version
doc.write(args.out_file, **kwargs)
logger.info("Converted '{}' to '{}'".format(args.in_file, args.out_file))
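# Example invocation (file names are hypothetical):
#   pype9 convert model.xml model.yml --nineml_version 2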
| 33.333333
| 77
| 0.664
| 127
| 1,000
| 5.055118
| 0.503937
| 0.042056
| 0.079439
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007673
| 0.218
| 1,000
| 29
| 78
| 34.482759
| 0.813299
| 0.105
| 0
| 0
| 0
| 0
| 0.166855
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.15
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
11c365d4ccc71a94837656d754364a0fe60f8958
| 3,615
|
py
|
Python
|
Tools/MakeHDF.py
|
Kadantte/VideoSuperResolution
|
4c86e49d81c7a9bea1fe0780d651afc126768df3
|
[
"MIT"
] | 1,447
|
2018-06-04T08:44:07.000Z
|
2022-03-29T06:19:10.000Z
|
Tools/MakeHDF.py
|
Evergreengyq/VideoSuperResolution
|
1d0c54fafaf7a02f0d69408502f90c55f0f76536
|
[
"MIT"
] | 96
|
2018-08-29T01:02:45.000Z
|
2022-01-12T06:00:01.000Z
|
Tools/MakeHDF.py
|
Evergreengyq/VideoSuperResolution
|
1d0c54fafaf7a02f0d69408502f90c55f0f76536
|
[
"MIT"
] | 307
|
2018-06-26T13:35:54.000Z
|
2022-01-21T09:01:54.000Z
|
# Copyright (c): Wenyi Tang 2017-2019.
# Author: Wenyi Tang
# Email: wenyi.tang@intel.com
# Update Date: 2019/4/3 5:03 PM
import argparse
import time
from pathlib import Path
import h5py
import numpy as np
import tqdm
from PIL import Image
__all__ = ["gather_videos_vqp", "gather_videos", "print_dataset"]
parser = argparse.ArgumentParser(description="Make HDF5 datasets")
parser.add_argument("input_dir", help="path of the input root folder.")
parser.add_argument("-o", "--output", help="output hdf file path.")
parser.add_argument("-a", "--append", action='store_true')
parser.add_argument("-t", "--task_name", choices=__all__, help="task name")
parser.add_argument("--compression", type=int, default=None)
parser.add_argument("--glob", help="glob pattern to gather files inside input."
"For recursively glob, use **/*.")
parser.add_argument("--data_format",
choices=('channels_first', 'channels_last'),
default='channels_first', help="data format (default: CHW)")
FLAGS, args = parser.parse_known_args()
def make_hdf_header():
  fd = None  # stays None when no --output is given, so flush_hdf() is a no-op
  if FLAGS.output:
    if FLAGS.append:
      fd = h5py.File(FLAGS.output, 'a')
    else:
      fd = h5py.File(FLAGS.output, 'w')
    fd.attrs['author'] = 'LoSealL'
    fd.attrs['email'] = 'wenyi.tang@intel.com'
    fd.attrs['date'] = time.strftime("%Y-%m-%d")
    fd.attrs['data_format'] = FLAGS.data_format
  return fd
def flush_hdf(fd: h5py.File):
if isinstance(fd, h5py.File):
fd.close()
def gather_videos_vqp(fd: h5py.File):
"""Specified for VQP"""
root = Path(FLAGS.input_dir)
glob = FLAGS.glob or '*'
inputs = sorted(root.glob(glob))
candidates = set(i.parent for i in filter(lambda f: f.is_file(), inputs))
frames_info = {}
for p in tqdm.tqdm(candidates):
seq = [Image.open(f) for f in
filter(lambda f: f.is_file(), sorted(p.rglob('*')))]
cube = np.stack(seq)
if FLAGS.data_format == 'channels_first':
cube = cube.transpose([0, 3, 1, 2])
cube = np.expand_dims(cube, 0)
path = p.relative_to(root)
# ugly
path = path.parent / path.stem.split('_')[0]
key = str(path.as_posix())
    if key not in fd:
fd.create_dataset(key, data=cube,
maxshape=(52,) + cube.shape[1:],
compression=FLAGS.compression)
frames_info[key] = len(seq)
else:
d = fd[key]
cnt = d.shape[0] + 1
d.resize(cnt, 0)
d[-1] = cube
del cube
def gather_videos(fd: h5py.File):
"""Gather videos. Video is defined in a folder containing sequential images."""
root = Path(FLAGS.input_dir)
glob = FLAGS.glob or '*'
inputs = sorted(root.glob(glob))
candidates = set(i.parent for i in filter(lambda f: f.is_file(), inputs))
frames_info = {}
for p in tqdm.tqdm(candidates):
seq = [Image.open(f) for f in
filter(lambda f: f.is_file(), sorted(p.rglob('*')))]
cube = np.stack(seq)
if FLAGS.data_format == 'channels_first':
cube = cube.transpose([0, 3, 1, 2])
path = p.relative_to(root)
key = str(path.as_posix())
fd.create_dataset(key, data=cube, compression=FLAGS.compression)
frames_info[key] = len(seq)
del cube
fd.attrs['frames_info'] = list(frames_info.items())
def print_dataset(*args):
def _print(name, obj):
print(f"key: [{name}], shape: {obj.shape}")
fd = Path(FLAGS.input_dir)
if fd.exists():
with h5py.File(str(fd), 'r') as fd:
fd.visititems(_print)
def main():
fd = make_hdf_header()
globals()[FLAGS.task_name](fd)
flush_hdf(fd)
if __name__ == '__main__':
main()
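# Example invocations (paths are hypothetical):
#   python Tools/MakeHDF.py ./videos -o videos.h5 -t gather_videos --glob "**/*"
#   python Tools/MakeHDF.py videos.h5 -t print_dataset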
| 30.125
| 81
| 0.634302
| 526
| 3,615
| 4.214829
| 0.302281
| 0.028417
| 0.053676
| 0.027064
| 0.372576
| 0.301308
| 0.277853
| 0.277853
| 0.236355
| 0.236355
| 0
| 0.015009
| 0.207469
| 3,615
| 119
| 82
| 30.378151
| 0.758813
| 0.059198
| 0
| 0.347826
| 0
| 0
| 0.145895
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076087
| false
| 0
| 0.076087
| 0
| 0.163043
| 0.054348
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
11c45856fc39f00ce8b427bda4629a69a7f9c3b7
| 1,480
|
py
|
Python
|
modules/ddg_appwv_cookies.py
|
ItWasDNS/DDG-Parser
|
fd63099df7b93a603b9fe2ae4259c232f0555a65
|
[
"MIT"
] | null | null | null |
modules/ddg_appwv_cookies.py
|
ItWasDNS/DDG-Parser
|
fd63099df7b93a603b9fe2ae4259c232f0555a65
|
[
"MIT"
] | null | null | null |
modules/ddg_appwv_cookies.py
|
ItWasDNS/DDG-Parser
|
fd63099df7b93a603b9fe2ae4259c232f0555a65
|
[
"MIT"
] | null | null | null |
"""
Process 'com.duckduckgo.mobile.android/app_webview/Cookies'
"""
import os
import sqlite3
from modules.helpers.ddg_path_handler import process_directory_paths
query_cookies = """
SELECT
host_key,
path,
name,
value,
creation_utc,
last_access_utc,
expires_utc,
secure,
httponly,
persistent,
encrypted_value
FROM cookies;
"""
cookies_template = """--
Host: %s
Path: %s
Cookie Name: %s
Cookie Value: %s
Cookie Creation: %s
Cookie Expiration: %s
"""
def process_appwv_cookies(duckduckgo_path, output_path):
""" Process DDG 'Cookies' database """
with open(os.path.join(output_path, 'appwv_cookies_output.txt'), 'w') as o:
o.write("Processed: 'com.duckduckgo.mobile.android/app_webview/Cookies'\n")
try:
conn = sqlite3.connect(duckduckgo_path + 'app_webview/Cookies')
answer = conn.execute(query_cookies).fetchall()
conn.close()
except sqlite3.OperationalError as e:
o.write("Error: %s" % str(e))
return None
if len(answer) == 0:
o.write("No Cookies Found in app_webview/Cookies")
return None
        for result in answer:
            # query_cookies column order: host_key, path, name, value,
            # creation_utc, last_access_utc, expires_utc -> expiration is index 6
            o.write(cookies_template % (result[0], result[1], result[2],
                                        result[3], result[4], result[6]))
if __name__ == '__main__':
# Set DDG application data path for testing
ddg_path, out_path = process_directory_paths()
# Process artifacts
process_appwv_cookies(ddg_path, out_path)
| 25.964912
| 107
| 0.663514
| 190
| 1,480
| 4.947368
| 0.452632
| 0.042553
| 0.07234
| 0.055319
| 0.091489
| 0.091489
| 0.091489
| 0
| 0
| 0
| 0
| 0.008651
| 0.218919
| 1,480
| 56
| 108
| 26.428571
| 0.804498
| 0.102703
| 0
| 0.090909
| 0
| 0
| 0.323147
| 0.058824
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022727
| false
| 0
| 0.068182
| 0
| 0.136364
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
11c4b04fb594071b02b7ee34e2b0b343fa536a12
| 3,382
|
py
|
Python
|
scripts/redis_performance_test.py
|
Robbybp/IDAES-CLC
|
5498aeab070afe5f3dc57be4cd198250f0f88ff9
|
[
"MIT"
] | null | null | null |
scripts/redis_performance_test.py
|
Robbybp/IDAES-CLC
|
5498aeab070afe5f3dc57be4cd198250f0f88ff9
|
[
"MIT"
] | 1
|
2021-06-01T23:42:14.000Z
|
2021-06-01T23:42:14.000Z
|
scripts/redis_performance_test.py
|
Robbybp/IDAES-CLC
|
5498aeab070afe5f3dc57be4cd198250f0f88ff9
|
[
"MIT"
] | null | null | null |
"""
A simple and short Redis performance test.
"""
__author__ = 'Dan Gunter <dkgunter@lbl.gov>'
__date__ = '8/8/16'
import argparse
import logging
import os
import redis
import subprocess
import sys
import time
_log = logging.getLogger(__name__)
_h = logging.StreamHandler()
_h.setFormatter(logging.Formatter('%(asctime)s %(levelname)10s - %(message)s'))
_log.addHandler(_h)
def run_server(binpath=None):
_log.info("Run Redis server")
if binpath:
server_cmd = os.path.join(binpath, 'redis-server')
else:
server_cmd = 'redis-server'
    # Popen returns a process handle, not an exit code; wait for the
    # server to exit and hand its return code back to the caller
    proc = subprocess.Popen([server_cmd])
    return proc.wait()
def run_performance_test(cmd='set', num_items=1000, list_len=5):
_log.info("Run Performance test")
r = redis.StrictRedis(host='localhost', port=6379, db=0)
data = ['bar'] * list_len
if cmd == 'set':
t0 = time.time()
t1 = redis_set(r, num_items, data)
elif cmd == 'get':
redis_set(r, num_items, data)
t0 = time.time()
t1 = redis_get(r, num_items)
elif cmd == 'mix':
t0 = time.time()
t1 = redis_getset(r, num_items, data)
else:
_log.error('Bad command: {}'.format(cmd))
return
report_timing(True, cmd, t1 - t0, num_items, ['list-length'], ['{}'.format(list_len)])
def redis_set(r, num_items, data):
i = 0
while i < num_items:
key = 'foo' + str(i)
r.set(key, data)
i += 1
return time.time()
def redis_get(r, num_items):
i = 0
while i < num_items:
key = 'foo' + str(i)
data = r.get(key)
i += 1
return time.time()
def redis_getset(r, num_items, data):
i = 0
while i < num_items:
key = 'foo' + str(i)
r.set(key, data)
data2 = r.get(key)
i += 1
return time.time()
def report_timing(readable, mode, dt, n, info_hdr, info):
rate = 1. * n / dt
gap = 1. * dt / n
if readable:
kvp = ', '.join(['{}={}'.format(k, v) for k, v in zip(info_hdr, info)])
print("{}: Processed {:d} items in {:.3f} seconds: {:.1f} items/sec <-> {:.6f} seconds/item. {}"
.format(mode, n, dt, rate, gap, kvp))
    else:
        print('blah')  # placeholder: a machine-readable output format is not implemented
def verbose_add(parser):
"""Add a verbosity argument to an ArgumentParser.
"""
parser.add_argument('-v', '--verbose', dest='vb',
action='count', default=0)
def verbose_set_log(vb, log):
"""Set logging level from verbosity level.
"""
if vb >= 2:
log.setLevel(logging.DEBUG)
elif vb >= 1:
log.setLevel(logging.INFO)
else:
log.setLevel(logging.WARN)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--mode', dest='mode', help='mode: get, set, mix')
parser.add_argument('-n', '--count', dest='count', type=int, help='iterations', default=1000)
parser.add_argument('-z', '--length', dest='len', type=int, help='list length', default=5)
parser.add_argument('-s', '--server', dest='server', action='store_true')
verbose_add(parser)
args = parser.parse_args()
verbose_set_log(args.vb, _log)
if args.server:
retcode = run_server()
_log.info("Redis server stopped")
return retcode
else:
run_performance_test(cmd=args.mode, num_items=args.count, list_len=args.len)
if __name__ == '__main__':
sys.exit(main())
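# Example invocations (assume a Redis server reachable on localhost:6379):
#   python scripts/redis_performance_test.py -m set -n 10000 -z 5 -v
#   python scripts/redis_performance_test.py -m mix -n 5000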
| 28.661017
| 104
| 0.59521
| 469
| 3,382
| 4.121535
| 0.302772
| 0.053802
| 0.032592
| 0.033626
| 0.1821
| 0.148474
| 0.107087
| 0.092085
| 0.092085
| 0.065184
| 0
| 0.017221
| 0.24453
| 3,382
| 117
| 105
| 28.905983
| 0.739335
| 0.0411
| 0
| 0.278351
| 0
| 0.010309
| 0.138605
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.092784
| false
| 0
| 0.072165
| 0
| 0.226804
| 0.020619
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
11c756cc812aa8aa64b2f69c97b3ae507b530f8b
| 1,323
|
py
|
Python
|
Question-1.py
|
sowmyamanojna/CS6910-Deep-Learning-Assignment-1
|
e46d3a82bdfb61d7527ed3daf9250bb4ce228854
|
[
"MIT"
] | null | null | null |
Question-1.py
|
sowmyamanojna/CS6910-Deep-Learning-Assignment-1
|
e46d3a82bdfb61d7527ed3daf9250bb4ce228854
|
[
"MIT"
] | null | null | null |
Question-1.py
|
sowmyamanojna/CS6910-Deep-Learning-Assignment-1
|
e46d3a82bdfb61d7527ed3daf9250bb4ce228854
|
[
"MIT"
] | null | null | null |
print("Importing packages... ", end="")
##############################################################################
import wandb
import numpy as np
from keras.datasets import fashion_mnist
import matplotlib.pyplot as plt
wandb.init(project="trail-1")
print("Done!")
##############################################################################
print("Loading data... ", end="")
# Load the dataset
[(x_train, y_train), (x_test, y_test)] = fashion_mnist.load_data()
# Get the number of classes and their name mappings
num_classes = 10
class_mapping = {0: "T-shirt/top", 1: "Trouser", 2: "Pullover", 3: "Dress", 4: "Coat", 5: "Sandal", 6: "Shirt", 7: "Sneaker", 8: "Bag", 9: "Ankle boot"}
print("Done!")
##############################################################################
# Plotting a figure from each class
plt.figure(figsize=[12, 5])
img_list = []
class_list = []
for i in range(num_classes):
position = np.argmax(y_train==i)
image = x_train[position,:,:]
plt.subplot(2, 5, i+1)
plt.imshow(image)
plt.title(class_mapping[i])
img_list.append(image)
class_list.append(class_mapping[i])
wandb.log({"Question 1": [wandb.Image(img, caption=caption) for img, caption in zip(img_list, class_list)]})
##############################################################################
| 35.756757
| 152
| 0.517763
| 159
| 1,323
| 4.18239
| 0.54717
| 0.054135
| 0.03609
| 0.04812
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017109
| 0.116402
| 1,323
| 36
| 153
| 36.75
| 0.551754
| 0.075586
| 0
| 0.083333
| 0
| 0
| 0.144432
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.208333
| 0
| 0.208333
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
11c77e0e125890c44783034eeeb3c9b9a0ff0a7d
| 1,386
|
py
|
Python
|
app/api/v1/task.py
|
coder-yuan/vue-template-api
|
135f13d7c32b4a2830366fc0b79a1e2a1eda6923
|
[
"MIT"
] | null | null | null |
app/api/v1/task.py
|
coder-yuan/vue-template-api
|
135f13d7c32b4a2830366fc0b79a1e2a1eda6923
|
[
"MIT"
] | null | null | null |
app/api/v1/task.py
|
coder-yuan/vue-template-api
|
135f13d7c32b4a2830366fc0b79a1e2a1eda6923
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : icode_flask_be
# @Package : task
# @Author : jackeroo
# @Time : 2019/11/29 5:25 PM
# @File : task.py
# @Contact :
# @Software : PyCharm
# @Desc :
from app.extensions import celery
from flask_jwt_extended import jwt_required
from app.helper.HttpHelper import HttpHelper
from app.libs.redprint import RedPrint
api = RedPrint('task')
@api.route('/<task_id>', methods=['GET'])
@jwt_required
def get_task_result(task_id):
task = celery.AsyncResult(task_id)
if task.state == 'PENDING':
# job did not start yet
response = {
'state': task.state,
'current': 0,
'total': 1,
'status': 'Pending...'
}
elif task.state != 'FAILURE':
response = {
'state': task.state,
'current': task.info.get('current', 0),
'total': task.info.get('total', 1),
'status': task.info.get('status', '')
}
if 'result' in task.info:
response['result'] = task.info['result']
else:
# something went wrong in the background job
response = {
'state': task.state,
'current': 1,
'total': 1,
'status': str(task.info), # this is the exception raised
}
return HttpHelper.normal_handler(response)
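# Example request (hypothetical URL prefix; the RedPrint is registered elsewhere
# and every call must carry a valid JWT):
#   GET /task/<task_id>
#   -> {"state": "PENDING", "current": 0, "total": 1, "status": "Pending..."}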
| 27.72
| 69
| 0.554113
| 159
| 1,386
| 4.754717
| 0.503145
| 0.063492
| 0.06746
| 0.087302
| 0.115079
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018653
| 0.303752
| 1,386
| 49
| 70
| 28.285714
| 0.764767
| 0.219336
| 0
| 0.242424
| 0
| 0
| 0.136576
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030303
| false
| 0
| 0.121212
| 0
| 0.181818
| 0.030303
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
11cc4762ea46108968ee8aa2c98fc1627da5eca3
| 981
|
py
|
Python
|
pypy/jit/codegen/ppc/test/test_rgenop.py
|
camillobruni/pygirl
|
ddbd442d53061d6ff4af831c1eab153bcc771b5a
|
[
"MIT"
] | 12
|
2016-01-06T07:10:28.000Z
|
2021-05-13T23:02:02.000Z
|
pypy/jit/codegen/ppc/test/test_rgenop.py
|
camillobruni/pygirl
|
ddbd442d53061d6ff4af831c1eab153bcc771b5a
|
[
"MIT"
] | null | null | null |
pypy/jit/codegen/ppc/test/test_rgenop.py
|
camillobruni/pygirl
|
ddbd442d53061d6ff4af831c1eab153bcc771b5a
|
[
"MIT"
] | 2
|
2016-07-29T07:09:50.000Z
|
2016-10-16T08:50:26.000Z
|
import py
from pypy.jit.codegen.ppc.rgenop import RPPCGenOp
from pypy.rpython.lltypesystem import lltype
from pypy.jit.codegen.test.rgenop_tests import AbstractRGenOpTests, FUNC, FUNC2
from ctypes import cast, c_int, c_void_p, CFUNCTYPE
from pypy.jit.codegen.ppc import instruction as insn
# for the individual tests see
# ====> ../../test/rgenop_tests.py
class FewRegisters(RPPCGenOp):
freeregs = {
insn.GP_REGISTER:insn.gprs[3:6],
insn.FP_REGISTER:insn.fprs,
insn.CR_FIELD:insn.crfs[:1],
insn.CT_REGISTER:[insn.ctr]}
class FewRegistersAndScribble(FewRegisters):
DEBUG_SCRIBBLE = True
class TestRPPCGenop(AbstractRGenOpTests):
RGenOp = RPPCGenOp
class TestRPPCGenopNoRegs(TestRPPCGenop):
RGenOp = FewRegisters
def compile(self, runner, argtypes):
py.test.skip("Skip compiled tests w/ restricted register allocator")
class TestRPPCGenopNoRegsAndScribble(TestRPPCGenopNoRegs):
RGenOp = FewRegistersAndScribble
| 29.727273
| 79
| 0.755352
| 118
| 981
| 6.194915
| 0.542373
| 0.043776
| 0.045144
| 0.073871
| 0.057456
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004813
| 0.152905
| 981
| 32
| 80
| 30.65625
| 0.87485
| 0.062181
| 0
| 0
| 0
| 0
| 0.056707
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.272727
| 0
| 0.772727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
11cd2cba6c6fa6a758300d6008e0f69f4e32d609
| 996
|
py
|
Python
|
app/someapp/views.py
|
artas728/monitoring-example-prometheus-grafana
|
2d72f29c19e8a280eca82ca1f25a7fa88453559c
|
[
"MIT"
] | null | null | null |
app/someapp/views.py
|
artas728/monitoring-example-prometheus-grafana
|
2d72f29c19e8a280eca82ca1f25a7fa88453559c
|
[
"MIT"
] | null | null | null |
app/someapp/views.py
|
artas728/monitoring-example-prometheus-grafana
|
2d72f29c19e8a280eca82ca1f25a7fa88453559c
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from django.http import JsonResponse
from .models import TestModel
import json
import redis
import time
redis_cli = redis.Redis(host='127.0.0.1', port=6379, db=0)
@csrf_exempt
def save_to_redis(request):
data = json.loads(request.body.decode())
for key, value in data.items():
redis_cli.rpush(key, value)
return JsonResponse({'success': True, 'data': 'Saved in Redis'})
@csrf_exempt
def endpoint(request):
time.sleep(0.1)
return JsonResponse({'success': True, 'data': 'Request processed'})
@csrf_exempt
def write_to_db(request):
data = json.loads(request.body.decode())
for row in data:
TestModel.objects.create(key1=row['key1'],
key2=row['key2'],
key3=row['key3'],
key4=row['key4'])
return JsonResponse({'success': True, 'data': 'Data has been saved'})
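# Example requests (URL paths are hypothetical; actual routing lives in urls.py):
#   curl -X POST http://localhost:8000/save_to_redis -d '{"foo": "bar"}'
#   curl -X POST http://localhost:8000/write_to_db \
#        -d '[{"key1": "a", "key2": "b", "key3": "c", "key4": "d"}]'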
| 29.294118
| 73
| 0.638554
| 130
| 996
| 4.815385
| 0.423077
| 0.063898
| 0.0623
| 0.138978
| 0.285942
| 0.127796
| 0.127796
| 0.127796
| 0
| 0
| 0
| 0.027487
| 0.232932
| 996
| 33
| 74
| 30.181818
| 0.791885
| 0
| 0
| 0.185185
| 0
| 0
| 0.108652
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.259259
| 0
| 0.481481
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
11d1192c076a5c79df7f15736899d5d72fa6cb5f
| 1,401
|
py
|
Python
|
NewEventReporter/blockmanager/blockmanager.py
|
Deofex/GETNFTBOTV3
|
0b8f1a77925b8f87224b2eaae93560e154b881b8
|
[
"MIT"
] | null | null | null |
NewEventReporter/blockmanager/blockmanager.py
|
Deofex/GETNFTBOTV3
|
0b8f1a77925b8f87224b2eaae93560e154b881b8
|
[
"MIT"
] | null | null | null |
NewEventReporter/blockmanager/blockmanager.py
|
Deofex/GETNFTBOTV3
|
0b8f1a77925b8f87224b2eaae93560e154b881b8
|
[
"MIT"
] | null | null | null |
import logging
import json
import os
# Initialize logger
logger = logging.getLogger(__name__)
class BlockManager():
def __init__(self, config, processedblock=0):
logger.info('Initialize Block Manager')
self.processedblock = int(processedblock)
self.config = config
if os.path.isfile(self.config):
self.load_config()
def set_processedblock(self,processedblock):
if int(processedblock) <= self.processedblock:
            logger.warning('Block will not be set because it is '
                           'lower than or equal to the previous block')
return
logger.info('Set processed block on: {}'.format(processedblock))
self.processedblock = int(processedblock)
blockconfig = {
'processedblock': processedblock
}
with open(self.config, 'w') as config:
json.dump(blockconfig, config)
def load_config(self):
logger.info('Loading config')
with open(self.config, 'r') as config:
loadconfig = json.load(config)
self.set_processedblock(loadconfig['processedblock'])
def get_processedblock(self):
return self.processedblock
if __name__ == '__main__':
blockprocessedconfig = './config/blockprocessed.json'
bm = BlockManager(blockprocessedconfig,300000)
bm.set_processedblock(242443)
print(bm.get_processedblock())
| 31.133333
| 72
| 0.660243
| 145
| 1,401
| 6.22069
| 0.4
| 0.055432
| 0.10643
| 0.077605
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012241
| 0.24197
| 1,401
| 44
| 73
| 31.840909
| 0.8371
| 0.012134
| 0
| 0.057143
| 0
| 0
| 0.149783
| 0.02026
| 0
| 0
| 0
| 0
| 0
| 1
| 0.114286
| false
| 0
| 0.085714
| 0.028571
| 0.285714
| 0.028571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
11d3d683bc5376ecd600cfbd620489e72ca787ca
| 5,299
|
py
|
Python
|
nmf_eval.py
|
logan-wright/INMF
|
611ccdfd4608ec37629975d04e013ab97e05ff31
|
[
"Apache-2.0"
] | 2
|
2017-06-16T19:18:53.000Z
|
2019-04-18T02:11:45.000Z
|
nmf_eval.py
|
logan-wright/INMF
|
611ccdfd4608ec37629975d04e013ab97e05ff31
|
[
"Apache-2.0"
] | null | null | null |
nmf_eval.py
|
logan-wright/INMF
|
611ccdfd4608ec37629975d04e013ab97e05ff31
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 23 20:35:49 2017
@author: wrightad
"""
import numpy as N
import numpy.matlib  # required so N.matlib.repmat is available in scattering_fit
import matplotlib.pyplot as plt
def rmse(v1,v2):
'''
rmse(v1,v2) - Calculates the root mean square error between two vectors
Version 1.0
Created On: Apr 17, 2017
Last Modified: Jun 14, 2017
Author: Logan Wright, logan.wright@colorado.edu
Description:
- Calculates the Root-Mean-Square-Error between two vectors
- Vectors must be the same length
Inputs:
v1 - Numpy 1-dimensional array of arbitrary length
v2 - Numpy 1-dimensional array with a length equal to that of v1
Output:
rmse, the rmse value for the comparison of the two vectors
'''
dims1 = v1.shape
dims2 = v2.shape
if dims1 == dims2:
diff = v1 - v2
err = N.sum(diff**2)/dims1[0]
rms = N.sqrt(err)
else:
print('Dimension Mismatch: v1.shape ~= v2.shape!')
rms = None
return rms
def sid(v1,v2):
'''
sid(v1,v2) - Calculates the spectral information divergence (SID) between
two vectors
Version 1.0
Created On: Apr 17, 2017
Last Modified: Jun 14, 2017
Author: Logan Wright, logan.wright@colorado.edu
Description:
- Calculates the Spectral Information Divergence between two vectors
- Vectors must be the same length
Reference:
Chang, C.-I. (2000), An information-theoretic approach to spectral
variability, similarity, and discrimination for hyperspectral image
analysis, Inf. Theory, IEEE Trans., 46(5), 1927–1932,
doi:10.1109/18.857802.
Inputs:
v1 - Numpy 1-dimensional array of arbitrary length
v2 - Numpy 1-dimensional array with a length equal to that of v1
Output:
SID, the SID value for the comparison of the two vectors
'''
p = v1 / N.sum(v1)
q = v2 / N.sum(v2)
D1 = N.sum(p * N.log(p / q))
D2 = N.sum(q * N.log(q / p))
D_sum = D1 + D2
return D_sum
def scattering_fit(data, function, sigma = 1e-9):
'''
Linear least-squares fit for a function of the form y = a * f(x)
Version 1.0
Created On: Apr 17, 2017
Last Modified: Apr 17, 2017
Author: Logan Wright, logan.wright@colorado.edu
Description:
Reference:
    Inputs:
        data, the y data that the function is to be fit to. Should be a vector
            (N,) or a 2D array with one single dimension.
        function, the function to be scaled with a linear factor to fit the
            data. Again it should be a vector (N,) or a 2D array with one
            single dimension. data and function must be the same length.
OPTIONAL:
sigma, the value small value that determines when iteration stops
Output:
a, a single scalar describing the best-fit value of "a"
'''
# Initialize parametrs, including change and the initial minimum
change = 100 # Arbitrary value greater than sigma
minval = N.sum((data - function) ** 2) # Initial Min
# Calculate the intial multiplicative factor between the data and function,
# and use to set range for calculating minimums
Amin = 0
Amax = (data/function).max()
# Iterate
while change > sigma:
# Create Array of Amplitudes for the fit
Arr = N.linspace(Amin,Amax,100)
Test = N.matmul(N.reshape(Arr,(-1,1)),function)
# Calculate the square difference between the data and the fit guess
diff = Test - N.matlib.repmat(N.reshape(data,(1,-1)),100,1)
# Find Minimum, and calculate the change and difference.
val = N.sum(diff ** 2, axis = 1)
vali = N.argmin(val)
change = minval - val.min()
minval = val.min()
# Calculate New range of "a" for next iteration
Amin = Arr[max(vali-2,0)]
Amax = Arr[min(vali+2,len(Arr)-1)]
result = N.squeeze(Arr[vali] * function)
return result
def bodhaine(wvl):
'''
bodhaine(wvl) - Calculates the Bodhaine aproximation of rayleigh optical depth
Version 1.0
Created On: Apr 17, 2017
Last Modified: June 14, 2017
Author: Logan Wright, logan.wright@colorado.edu
Description:
Reference:
Bodhaine, B. A., N. B. Wood, E. G. Dutton, and J. R. Slusser (1999),
On Rayleigh optical depth calculations, J. Atmos. Ocean. Technol.,
16(11 PART 2), 1854–1861,
doi:10.1175/1520-0426(1999)016<1854:ORODC>2.0.CO;2.
Inputs:
wvl - a vector of wavelengths at which to calculate the rayleigh optical
depth. Wavelength sould be in MICROMETERS
Output:
        tr - vector of rayleigh optical depths corresponding to the wavelengths in the input vector
'''
s = 0.0021520
a = 1.0455996
b = 341.29061
c = 0.90230850
d = 0.0027059889
e = 85.968563
tr = s * (a - b * wvl ** -2 - c * wvl ** 2)/(1 + d * wvl ** -2 - e * wvl ** 2)
return tr
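# A minimal usage sketch (illustrative arrays only):
#   v1 = N.array([1.0, 2.0, 3.0])
#   v2 = N.array([1.1, 1.9, 3.2])
#   rmse(v1, v2)                      # root-mean-square error between the vectors
#   sid(v1, v2)                       # spectral information divergence
#   bodhaine(N.array([0.45, 0.55]))   # Rayleigh optical depth at 450/550 nm (input in micrometers)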
| 32.509202
| 150
| 0.60351
| 749
| 5,299
| 4.268358
| 0.344459
| 0.027526
| 0.014076
| 0.020019
| 0.357523
| 0.329371
| 0.329371
| 0.329371
| 0.30685
| 0.238974
| 0
| 0.072618
| 0.308738
| 5,299
| 163
| 151
| 32.509202
| 0.799618
| 0.647481
| 0
| 0
| 0
| 0
| 0.027517
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0
| 0.043478
| 0
| 0.217391
| 0.021739
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
11d8b360dafd771af3d50fb23f126c256bc27cc5
| 423
|
py
|
Python
|
recieve.py
|
RyuYamamoto/inter-process-communication-py
|
377c73833f230ba1132006c2cda86decd3580a5b
|
[
"MIT"
] | null | null | null |
recieve.py
|
RyuYamamoto/inter-process-communication-py
|
377c73833f230ba1132006c2cda86decd3580a5b
|
[
"MIT"
] | null | null | null |
recieve.py
|
RyuYamamoto/inter-process-communication-py
|
377c73833f230ba1132006c2cda86decd3580a5b
|
[
"MIT"
] | null | null | null |
import socket
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind(('127.0.0.1', 50007))
s.listen(1)
while True:
conn, addr = s.accept()
with conn:
while True:
data = conn.recv(1024)
if not data:
break
print('data: {}, add: {}'.format(data, addr))
                conn.sendall(b'Received: ' + data)
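# A minimal client counterpart sketch (hypothetical send.py) for the receiver above;
# it connects to the same host/port, sends one message, and prints the echoed reply:
#
#   import socket
#   with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as c:
#       c.connect(('127.0.0.1', 50007))
#       c.sendall(b'hello')
#       print(c.recv(1024))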
| 28.2
| 61
| 0.486998
| 52
| 423
| 3.923077
| 0.615385
| 0.117647
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.061069
| 0.380615
| 423
| 14
| 62
| 30.214286
| 0.717557
| 0
| 0
| 0.153846
| 0
| 0
| 0.085106
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.076923
| 0
| 0.076923
| 0.076923
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
11dc5601e32f2a14e2e6dbd6c443d6cb0fdbc322
| 4,503
|
py
|
Python
|
utils.py
|
bbpp222006/elec_nose_plus
|
d79faa47d3fbb63c697501dd521e834bcc8e4814
|
[
"MIT"
] | 1
|
2021-04-08T04:17:04.000Z
|
2021-04-08T04:17:04.000Z
|
utils.py
|
bbpp222006/elec_nose_plus
|
d79faa47d3fbb63c697501dd521e834bcc8e4814
|
[
"MIT"
] | null | null | null |
utils.py
|
bbpp222006/elec_nose_plus
|
d79faa47d3fbb63c697501dd521e834bcc8e4814
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# encoding: utf-8
import torch
import torch.nn as nn
from torch.autograd import Variable
import collections
from tqdm import tqdm
import numpy as np
import cv2
import os
import random
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
class strLabelConverter(object):
"""Convert between str and label.
NOTE:
Insert `blank` to the alphabet for CTC.
Args:
alphabet (list): set of the possible characters.
ignore_case (bool, default=True): whether or not to ignore all of the case.
"""
def __init__(self, alphabet):
alphabet.remove('基线')
self.dict = {}
for i, char in enumerate(alphabet):
# NOTE: 0 is reserved for 'blank' required by ctc
self.dict[char] = i + 1
self.dict['基线'] = 0
self.dict_reverse = {value: key for key, value in self.dict.items()}
def encode(self, text):
"""Support batch or single str.
Args:
text (str or list of str): texts to convert.
Returns:
torch.IntTensor [length_0 + length_1 + ... length_{n - 1}]: encoded texts.
torch.IntTensor [n]: length of each text.
"""
length = []
result = []
for char in text:
length.append(1)
index = self.dict[char]
result.append(index)
text = result
return (torch.IntTensor(text), torch.IntTensor(length))
def decode(self, t, length=1, raw=False):
"""Decode encoded texts back into strs.
Args:
torch.IntTensor [length_0 + length_1 + ... length_{n - 1}]: encoded texts.
torch.IntTensor [n]: length of each text.
Raises:
AssertionError: when the texts and its length does not match.
Returns:
text (str or list of str): texts to convert.
"""
t = t.numpy()
result = []
for value in t:
index = self.dict_reverse[value]
result.append(index)
texts = result
return texts
class averager(object):
"""Compute average for `torch.Variable` and `torch.Tensor`. """
def __init__(self):
self.reset()
def add(self, v):
if isinstance(v, Variable):
count = v.data.numel()
v = v.data.sum()
elif isinstance(v, torch.Tensor):
count = v.numel()
v = v.sum()
self.n_count += count
self.sum += v
def reset(self):
self.n_count = 0
self.sum = 0
def val(self):
res = 0
if self.n_count != 0:
res = self.sum / float(self.n_count)
return res
def props_to_onehot(props):
if isinstance(props, list):
props = np.array(props)
a = np.argmax(props, axis=1)
b = np.zeros((len(a), props.shape[1]))
b[np.arange(len(a)), a] = 1
return b
def onehot_to_num(onehot):
if isinstance(onehot, list):
onehot = np.array(onehot)
b = np.zeros((onehot.shape[0], 1))
for i, h in enumerate(onehot):
b[i, 0] = np.argwhere(onehot[i] == 1)
return b
def draw(preds, x_train_batch,x_label,ax):
predsnp = preds.cpu().detach().numpy()
x_train_batchnp = x_train_batch.cpu().detach().numpy()
x_label = x_label.cpu().detach().numpy()
# print(predsnp.shape, x_train_batchnp.shape) # (2000,6)
predsnp = props_to_onehot(predsnp)
# print(predsnp)
predsnp = onehot_to_num(predsnp)
# print(max(predsnp))
    # classify the raw data with k-means
    estimator = KMeans(n_clusters=2)  # construct the clusterer
    estimator.fit(x_train_batchnp)  # run the clustering
    label_pred = estimator.labels_  # fetch the cluster labels
    # plot the k-means result
if label_pred[0]==1:
label_pred = 1-label_pred
# plt.plot(np.argwhere(label_pred == 0), np.zeros(len(np.argwhere(label_pred == 0)))*x_label,'go-')
# plt.plot(np.argwhere(label_pred == 1), np.ones(len(np.argwhere(label_pred == 1))) * x_label,'go-')
ax.scatter(np.argwhere(label_pred == 0), np.zeros(len(np.argwhere(label_pred == 0)))*x_label, c="green", marker='o',s = 10, label='kmeans')
ax.scatter(np.argwhere(label_pred == 1), np.ones(len(np.argwhere(label_pred == 1)))*x_label, c="green", marker='o',s = 10, label='kmeans')
for i in range(int(max(predsnp))+1):
x= np.argwhere(predsnp == i)[:,0]
y = np.ones(len(x))*i
# plt.plot(x, y, c = "red")
ax.scatter(x, y, c = "red", marker='.', label='pred',s = 5)
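# A minimal usage sketch of the converter (alphabet contents are invented;
# '基线' -- "baseline" -- must be present, as __init__ re-maps it to index 0):
#   conv = strLabelConverter(['基线', 'a', 'b'])
#   text, length = conv.encode(['a', 'b'])   # IntTensors ([1, 2], [1, 1])
#   conv.decode(text)                        # ['a', 'b']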
| 27.457317
| 143
| 0.584055
| 628
| 4,503
| 4.092357
| 0.281847
| 0.045525
| 0.046693
| 0.059144
| 0.224903
| 0.224903
| 0.193774
| 0.193774
| 0.193774
| 0.168872
| 0
| 0.015105
| 0.279591
| 4,503
| 163
| 144
| 27.625767
| 0.777127
| 0.282034
| 0
| 0.068966
| 0
| 0
| 0.01178
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.114943
| false
| 0
| 0.126437
| 0
| 0.321839
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
11dc7d7484bc78800544b03df7488f722be7a5ea
| 2,729
|
py
|
Python
|
down.py
|
pcahan1/CellNet_Cloud
|
a228953946b81ccb304fbd068e33766e134103b6
|
[
"MIT"
] | 1
|
2020-11-13T10:53:27.000Z
|
2020-11-13T10:53:27.000Z
|
down.py
|
pcahan1/CellNet_Cloud
|
a228953946b81ccb304fbd068e33766e134103b6
|
[
"MIT"
] | 2
|
2020-06-28T18:17:59.000Z
|
2020-12-18T14:11:29.000Z
|
down.py
|
pcahan1/CellNet_Cloud
|
a228953946b81ccb304fbd068e33766e134103b6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from __future__ import division
import random
import argparse
import os
parser = argparse.ArgumentParser()
parser.add_argument("input", help="input FASTQ Directory")
parser.add_argument("-n", "--number", type=int, help="number of reads to sample")
args = parser.parse_args()
random.seed(12)
if not args.number:
print("No sample size specified. Defaulting to five million reads.")
args.number = 5000000
# LIST FILES TO BE DOWN-SAMPLED
fastq_files = os.listdir(args.input)
if int(len(fastq_files)) <= 0:
print("No files in listed directory")
exit()
# CREATE OUTPUT DIRECTORY
output_dir = "subset_"+args.input
os.mkdir(output_dir)
for fastq in fastq_files:
print("\tcounting records....")
with open(args.input+"/"+fastq) as inRead:
num_lines = sum([1 for line in inRead])
print("Num lines:" + str(num_lines) )
if int(num_lines % 4) != 0:
print("FILE " + fastq + " CORRUPTED: Number of lines in FASTQ file not divisible by 4. Is file decompressed?")
exit()
total_records = int(num_lines / 4)
number_to_sample = args.number
print("\tsampling " + str(number_to_sample) + " out of " + str(total_records) + " records")
try:
records_to_keep = set(random.sample(range(total_records), number_to_sample))
record_number = 0
with open(args.input+"/"+fastq) as inFile:
with open(output_dir+"/"+"subset_"+fastq, "w") as output:
for tag in inFile:
bases = next(inFile)
sign = next(inFile)
quality = next(inFile)
if record_number in records_to_keep:
output.write(tag)
output.write(bases)
output.write(sign)
output.write(quality)
record_number += 1
except ValueError as e:
if str(e) == "Sample larger than population or is negative":
print("Desired number of reads is greater than number of reads in original file.")
print("No down-sampling is necessary.")
elif str(e) == "sample larger than population":
print("Desired number of reads is greater than number of reads in original file.")
print("No down-sampling is necessary.")
else:
raise
print("Compressing downsampled reads")
os.system("COPYFILE_DISABLE=1 tar cvfz compressed_reads.tgz "+output_dir)
if os.path.getsize("compressed_reads.tgz") >= 4000000000:
print("WARNING: Your archive contains too many FASTQ files. Max size is 4GB.")
else:
print("Archive file size is ~"+str(os.path.getsize("compressed_reads.tgz")/1000000000)+"GB")
| 35.907895
| 118
| 0.629901
| 357
| 2,729
| 4.703081
| 0.366947
| 0.028588
| 0.038714
| 0.02025
| 0.214413
| 0.214413
| 0.113163
| 0.113163
| 0.113163
| 0.113163
| 0
| 0.019307
| 0.259802
| 2,729
| 75
| 119
| 36.386667
| 0.811881
| 0.027116
| 0
| 0.135593
| 0
| 0
| 0.30279
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.067797
| 0
| 0.067797
| 0.220339
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
11deda09dc4cd77f3a703e78c0ad5fb515e8de96
| 3,507
|
py
|
Python
|
CSR/utility.py
|
MoreNiceJay/CAmanager_web
|
29c6e35b9b1b9e8d851b2825df18e34699f6c5d2
|
[
"bzip2-1.0.6"
] | null | null | null |
CSR/utility.py
|
MoreNiceJay/CAmanager_web
|
29c6e35b9b1b9e8d851b2825df18e34699f6c5d2
|
[
"bzip2-1.0.6"
] | 3
|
2020-02-11T23:59:34.000Z
|
2021-06-10T21:19:16.000Z
|
CSR/utility.py
|
MoreNiceJay/CAmanager_web
|
29c6e35b9b1b9e8d851b2825df18e34699f6c5d2
|
[
"bzip2-1.0.6"
] | null | null | null |
from django.shortcuts import render
import sys, json, random, hashlib, calendar, time, datetime, os
import ast
from cryptography.fernet import Fernet
from django.shortcuts import redirect
from django.http import Http404, HttpResponse
import json
from cryptography.hazmat.primitives.serialization import Encoding, PrivateFormat, NoEncryption,load_pem_private_key
from cryptography import x509
from cryptography.x509.oid import NameOID
from cryptography.hazmat.primitives import serialization,hashes
from cryptography.x509 import load_pem_x509_csr
from cryptography.hazmat.primitives.asymmetric import rsa, ec
from cryptography.hazmat.backends import default_backend
class AlgorithmMismatchError(Exception):
    """Raised when an unsupported key algorithm name is requested."""


def generate_private_key(algorithm):
    private_key = None
    if algorithm == "RSA_2048":
        private_key = generate_RSA_private_key(2048)
    elif algorithm == "RSA_4096":
        private_key = generate_RSA_private_key(4096)
    elif algorithm == "ECDSA_P256":
        private_key = generate_ECP256_private_key()
    elif algorithm == "ECDSA_P384":
        private_key = generate_ECP384_private_key()
    else:
        raise AlgorithmMismatchError(algorithm)
    return private_key
def generate_RSA_private_key(KEY_SIZE):
private_key = rsa.generate_private_key(
public_exponent=65537,
key_size=KEY_SIZE,
backend=default_backend()
)
return private_key
def generate_ECP384_private_key():
private_key = ec.generate_private_key(
ec.SECP384R1(), default_backend()
)
return private_key
def generate_ECP256_private_key():
    private_key = ec.generate_private_key(
        ec.SECP256R1(), default_backend()  # P-256 curve, matching the function name
    )
    return private_key
def generate_pub_key(private_key):
public_key = private_key.public_key()
return public_key
def encode_private_key_pem_format(private_key):
pem = private_key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption()
)
return pem
def encode_public_key_pem_format(public_key):
pem = public_key.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo,
)
return pem
def encode_in_Base64(key_in_pem_format):
with open("base64.key", "rb") as f:
key = f.read()
f = Fernet(key)
token = f.encrypt(key_in_pem_format)
return token.decode()
def decode_Base64(encrypted_key_token):
with open("base64.key", "rb") as f:
key = f.read()
f = Fernet(key)
return f.decrypt(encrypted_key_token.encode())
def generate_CSR(country,state,locality,organization,common_name,domain,private_key):
csr = x509.CertificateSigningRequestBuilder().subject_name(x509.Name([
x509.NameAttribute(NameOID.COUNTRY_NAME, country),
x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, state),
x509.NameAttribute(NameOID.LOCALITY_NAME, locality),
x509.NameAttribute(NameOID.ORGANIZATION_NAME, organization),
x509.NameAttribute(NameOID.COMMON_NAME, common_name),
])).add_extension(
x509.SubjectAlternativeName([
# Describe what sites we want this certificate for.
x509.DNSName(domain),
]),
critical=False,
# Sign the CSR with our private key.
).sign(private_key, hashes.SHA256(), default_backend())
return csr
def encode_CSR_in_pem_format(temp_csr):
return temp_csr.public_bytes(serialization.Encoding.PEM).decode()
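# A minimal usage sketch (subject values are invented; the Fernet helpers above
# additionally require a local "base64.key" file, which this sketch skips):
#   key = generate_private_key("ECDSA_P256")
#   csr = generate_CSR("US", "CA", "San Francisco", "Example Org",
#                      "example.com", "example.com", key)
#   print(encode_CSR_in_pem_format(csr))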
| 34.722772
| 115
| 0.741374
| 430
| 3,507
| 5.776744
| 0.267442
| 0.128824
| 0.048309
| 0.030596
| 0.220209
| 0.192834
| 0.167874
| 0.10628
| 0.10628
| 0.10628
| 0
| 0.03459
| 0.175649
| 3,507
| 101
| 116
| 34.722772
| 0.824628
| 0.023952
| 0
| 0.206897
| 0
| 0
| 0.017539
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.126437
| false
| 0
| 0.16092
| 0.011494
| 0.413793
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
11df93a40b853400f38b4c489077ebc7674cd549
| 51,584
|
py
|
Python
|
uctp_ufabc/src/uctp.py
|
luizfmgarcia/uctp_ufabc
|
2342f5431e258a4feffdf4e7931344a9d03a8f9c
|
[
"MIT"
] | null | null | null |
uctp_ufabc/src/uctp.py
|
luizfmgarcia/uctp_ufabc
|
2342f5431e258a4feffdf4e7931344a9d03a8f9c
|
[
"MIT"
] | 6
|
2018-10-30T00:37:20.000Z
|
2019-07-23T00:23:18.000Z
|
uctp_ufabc/src/uctp.py
|
luizfmgarcia/uctp_ufabc
|
2342f5431e258a4feffdf4e7931344a9d03a8f9c
|
[
"MIT"
] | 1
|
2019-06-06T00:54:13.000Z
|
2019-06-06T00:54:13.000Z
|
# UCTP Main Methods
import objects
import ioData
import random
# Set to '1' to print some of the steps to the terminal during the run
printSteps = 0
#==============================================================================================================
# Create the first generation of solutions
def start(solutionsNoPop, subjList, profList, init):
if(printSteps == 1): print("Creating first generation...", end='')
for _ in range(init): solutionsNoPop.addCand(newCandRand(subjList, profList))
if(printSteps == 1): print("Created first generation!")
#-------------------------------------------------------
# Create new Candidate Full-Random
def newCandRand(subjList, profList):
candidate = objects.Candidate()
# Follow the subjects in 'subjList', in order, and for each one, choose a professor randomly
for sub in subjList: candidate.addRelation(sub, profList[random.randrange(len(profList))])
return candidate
#==============================================================================================================
# Extracts info about what Subj appears in which Prof PrefList
def extractSubjIsPref(subjList, profList):
    # One list per Prof: entry is '1' if the Subj at that index is on the Prof's preference
    # lists but for a different quadrimester, '2' if for the same quadrimester
subjIsPrefList = [[0 for _ in range(len(subjList))] for _ in range(len(profList))]
# Counting the occurrences, filling the vectors
for pIndex in range(len(profList)):
# Getting data of current Prof
prefSubjLists = [i for i in profList[pIndex].getPrefSubjLists()]
# All Relations of one Prof
for sIndex in range(len(subjList)):
# Getting data of current Subj
sName = subjList[sIndex].getName()
sQuadri = subjList[sIndex].getQuadri()
# For each quadri
for i in range(3):
# Finding the Subject 'sName' in "pPrefSubjQXList+pPrefSubjLimList" list
sumList = prefSubjLists[i] + prefSubjLists[3]
# Checking if the List is not empty
if(len(sumList) > 0):
try: index_value = sumList.index(sName)
except ValueError: index_value = -1
# If the Subj name appears in the list
if(index_value != -1):
# If current Subject in analysis is on current Quadri
if(str(i+1) in sQuadri):
# Informing that the Subj appears on respective Prof-QuadriPrefList
subjIsPrefList[pIndex][sIndex] = 2
# Informing that the Subj appears on other Prof-QuadriPrefList that is not same Quadri
else:
                            # Granting that a value already set to 2 is not decreased
if(subjIsPrefList[pIndex][sIndex] == 0): subjIsPrefList[pIndex][sIndex] = 1
return subjIsPrefList
#==============================================================================================================
# Separation of solutions into 2 populations
def twoPop(solutionsNoPop, infPool, feaPool, profList, subjList, weightsList, numInfWeights):
# Granting that the Lists will be empty to receive new Solutions
infPool.resetCandList()
feaPool.resetCandList()
for cand in solutionsNoPop.getCandList():
# Classification by checking feasibility
pop = checkFeasibility(cand, profList, subjList, weightsList, numInfWeights)
if(pop == "feasible"): feaPool.addCand(cand)
elif(pop == "infeasible"): infPool.addCand(cand)
# Granting that the List will be empty to next operations
solutionsNoPop.resetCandList()
if(printSteps == 1): print("Checked Feasibility (new Candidates)/", end='')
#==============================================================================================================
# Detect the violation of a Restriction into a candidate
def checkFeasibility(candidate, profList, subjList, weightsList, numInfWeights):
    # The Candidate's Prof-Subj relations (both Feasible and Infeasible) are traversed here to
    # check their Feasibility, so instead of re-scanning an entire Infeasible Candidate again in
    # 'calc_fitInfeas', its Fitness is already computed once here. Only the Feasible ones still
    # have to pass through 'calc_fitFeas' later.
fit = -1
fit = calc_fitInfeas(candidate, profList, subjList, weightsList[:numInfWeights])
if(fit < 0):
candidate.setFitness(fit)
return "infeasible"
return "feasible"
#==============================================================================================================
# Calculate the Fitness of the candidate
def calcFit(infeasibles, feasibles, profList, subjList, weightsList, numInfWeights, subjIsPrefList):
    # All Infeasible Candidates - this block is kept only to mirror the default/original algorithm's flow
# The Inf. Fitness calc was already done in 'checkFeasibility()' method
# Check if the Infeasible pop. is empty
if(len(infeasibles.getCandList()) != 0):
for cand in infeasibles.getCandList():
if(cand.getFitness() == 0.0):
# Setting the Fitness with the return of calc_fitInfeas() method
cand.setFitness(calc_fitInfeas(cand, profList, subjList, weightsList[:numInfWeights]))
if(printSteps == 1): print("Fitness of all Inf./", end='')
# All Feasible Candidates
# Check if the Feasible pop. is empty
if(len(feasibles.getCandList()) != 0):
for cand in feasibles.getCandList():
if(cand.getFitness() == 0.0):
# Setting the Fitness with the return of calc_fitFeas() method
cand.setFitness(calc_fitFeas(cand, profList, subjList, weightsList[numInfWeights:], subjIsPrefList))
if(printSteps == 1): print("Fitness of all Feas./", end='')
#==============================================================================================================
# Calculate Fitness of Infeasible Candidates
def calc_fitInfeas(candidate, profList, subjList, weightsList):
# Getting information about the Candidate
prof_relationsList = calc_i1(candidate, profList, subjList)
i2_conflictsList, i3_conflictsList = calc_i2_i3(prof_relationsList, subjList)
# Setting found variables
candidate.setInfVariables(prof_relationsList, i2_conflictsList, i3_conflictsList)
# Checking if occurred violations of restrictions on the Candidate
# If there are violated restrictions, this Candidate is Infeasible and then will calculate and return a negative Fitness,
# if not, is Feasible, will return 1.0 as Fitness
if(prof_relationsList.count([]) != 0 or i2_conflictsList.count([]) != len(i2_conflictsList) or i3_conflictsList.count([]) != len(i3_conflictsList)):
# Calculating main variables
i1 = float(prof_relationsList.count([]) / (len(profList) - 1.0))
i2 = float(sum([len(i) for i in i2_conflictsList]) / len(subjList))
i3 = float(sum([len(i) for i in i3_conflictsList]) / len(subjList))
i = [i1, i2, i3]
# Final Infeasible Function Fitness Calc
Fi = -1.0 * sum([i[j] * weightsList[j] for j in range(len(i))]) / sum([w for w in weightsList])
# Returning the calculated result
return Fi
    # If all Prof-Subj relations in this Candidate passed the restrictions
return 1.0
#-------------------------------------------------------
# i1: penalty to how many Professors does not have at least one relation with a Subject
def calc_i1(candidate, profList, subjList):
    # List of lists of Subjects related to each Professor, indexed by that professor's position in 'profList'
    # An empty inner list means the Professor (p) has no relation in the Candidate
prof_relationsList = [[] for _ in range(len(profList))]
# Filling the list according to the candidate
for s, p in candidate.getRelationsList():
indexp = profList.index(p)
indexs = subjList.index(s)
prof_relationsList[indexp].append(indexs)
return prof_relationsList
#-------------------------------------------------------
# i2: penalty to how many Subjects, related to the same Professor, are teach in the same day, hour and quadri
# i3: penalty to how many Subjects, related to the same Professor, are teach in the same day and quadri but in different campus
def calc_i2_i3(prof_relationsList, subjList):
# List of the subjects that have a conflict between them - always the two conflicts are added, that is,
# there can be repetitions of subjects
i2_conflictsList, i3_conflictsList = [[] for _ in range(len(prof_relationsList))], [[] for _ in range(len(prof_relationsList))]
# Searching, in each professor (one at a time), conflicts of schedules between subjects related to it
for list_subj in prof_relationsList:
# Current Prof in analysis
profIndex = prof_relationsList.index(list_subj)
# Check if the professor has more than 1 relation Prof-Subj to analyze
if(len(list_subj) > 1):
# Getting the data of all Subjects related to current Professor in analysis
timetableList_List = [subjList[i].getTimeTableList() for i in list_subj]
quadri_List = [subjList[i].getQuadri() for i in list_subj]
campus_List = [subjList[i].getCampus() for i in list_subj]
period_List = [subjList[i].getPeriod() for i in list_subj]
# Comparing the data of one Subject (i) with all next subjects listed, and do the same with next ones
i = 0
for timeTable in timetableList_List:
# all [day/hour/frequency] of the Timetable of the Subject (i) in 'timetableList_List'
i_day = [j[0] for j in timeTable]
i_hour = [j[1] for j in timeTable]
i_frequency = [j[2] for j in timeTable]
# Now, comparing current (i) subject data with next ones (k), one at a time
k = i + 1
rest = timetableList_List[k:]
# repeat this 'len(rest)' times
for nextK in rest:
# Already check if both Subj (i, k) is on same Quadri
if(quadri_List[i] == quadri_List[k]):
# Variables that flags if a conflict was already detected (do not count 2 or more times same 2 subjects in conflict)
verified_i2, verified_i3 = False, False
# all [day/hour/frequency] of the Timetable of the Subject (k) in 'timetableList_List'
inext_day = [j[0] for j in nextK]
inext_hour = [j[1] for j in nextK]
inext_frequency = [j[2] for j in nextK]
# Finally comparing one-to-one timetables - between i and k subjects
for a in i_day:
for b in inext_day:
if(a == b):
# There is, at least, two subjects teach in the same day and quadri, but in different campus
if(campus_List[i] != campus_List[k]):
if(verified_i3 == False):
i3_conflictsList[profIndex].append(list_subj[i])
i3_conflictsList[profIndex].append(list_subj[k])
verified_i3 = True
# There is, at least, two subjects teach in the same day, hour and quadri
# First check if they have the same Period
if(period_List[i] == period_List[k] and i_hour[i_day.index(a)] == inext_hour[inext_day.index(b)]):
# if one 'frequency' is "QUINZENAL I" and the other is "QUINZENAL II" then DO NOT count
if('SEMANAL' in i_frequency[i_day.index(a)] or 'SEMANAL' in inext_frequency[inext_day.index(b)]):
if(verified_i2 == False):
i2_conflictsList[profIndex].append(list_subj[i])
i2_conflictsList[profIndex].append(list_subj[k])
#print(subjList[list_subj[i]].get(), subjList[list_subj[k]].get(), '\n')
verified_i2 = True
elif('QUINZENAL I' in i_frequency[i_day.index(a)] and 'QUINZENAL I' in inext_frequency[inext_day.index(b)]):
if(verified_i2 == False):
i2_conflictsList[profIndex].append(list_subj[i])
i2_conflictsList[profIndex].append(list_subj[k])
#print(subjList[list_subj[i]].get(), subjList[list_subj[k]].get(), '\n')
verified_i2 = True
elif('QUINZENAL II' in i_frequency[i_day.index(a)] and 'QUINZENAL II' in inext_frequency[inext_day.index(b)]):
if(verified_i2 == False):
i2_conflictsList[profIndex].append(list_subj[i])
i2_conflictsList[profIndex].append(list_subj[k])
#print(subjList[list_subj[i]].get(), subjList[list_subj[k]].get(), '\n')
verified_i2 = True
# Going to the next Subject (k+1) to compare with the same, current, main, Subject (i)
k = k + 1
# Going to the next Subject (i+1) related to the same Professor
i = i + 1
# Removing from 'i2_conflictsList' and 'i3_conflictsList' duplicates
final_i2 = [[] for _ in range(len(prof_relationsList))]
final_i3 = [[] for _ in range(len(prof_relationsList))]
for i in range(len(prof_relationsList)):
for j in i2_conflictsList[i]:
if(final_i2[i].count(j) == 0): final_i2[i].append(j)
for j in i3_conflictsList[i]:
if(final_i3[i].count(j) == 0): final_i3[i].append(j)
return final_i2, final_i3
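# Illustrative example (hypothetical indexes): if professor 0 is related to subjects 4 and 7,
# both in the same quadri and taught on the same day at the same hour with 'SEMANAL' frequency,
# then 4 and 7 are both appended to i2_conflictsList[0]; if they only share the day but sit on
# different campuses, both indexes go to i3_conflictsList[0] instead.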
#==============================================================================================================
# Calculate Fitness of Feasible Candidates
def calc_fitFeas(candidate, profList, subjList, weightsList, subjIsPrefList):
prof_relationsList, _, _, _, _, _ = candidate.getFeaVariables()
# Looking for good Relations in the Candidate using "Quality Amplifiers"
# Getting information about the Candidate
sum_chargesRelative, difChargeList = calc_f1(subjList, profList, prof_relationsList)
sum_Satisfaction, numSubjPrefList = calc_f2(subjList, profList, prof_relationsList, subjIsPrefList)
sum_quadSabbNotPref, quadSabbNotPrefList = calc_f3(subjList, profList, prof_relationsList)
sum_periodPref, periodPrefList = calc_f4(subjList, profList, prof_relationsList)
sum_campusPref, campPrefList = calc_f5(subjList, profList, prof_relationsList)
sum_relationsRelative, _ = calc_f6(subjList, profList, prof_relationsList)
sum_qualityRelative, _ = calc_f7(subjList, profList, prof_relationsList, subjIsPrefList)
# Setting found variables
candidate.setFeaVariables(prof_relationsList, numSubjPrefList, periodPrefList, quadSabbNotPrefList, campPrefList, difChargeList)
# Calculating main variables
f1 = 1.0 - float(sum_chargesRelative / len(profList))
f2 = float(sum_Satisfaction / len(profList))
f3 = float(sum_quadSabbNotPref / len(subjList))
f4 = float(sum_periodPref / len(subjList))
f5 = float(sum_campusPref / len(subjList))
f6 = 1.0 - float(sum_relationsRelative / len(profList))
f7 = float(sum_qualityRelative / len(profList))
f = [f1, f2, f3, f4, f5, f6, f7]
# Final Feasible Function Fitness Calc
Ff = sum([f[j] * weightsList[j] for j in range(len(f))]) / sum(weightsList)
# Returning the result calculated
return Ff
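# Worked example (hypothetical values, only two criteria for brevity): with f = [0.8, 0.6]
# and weightsList = [2, 1], Ff = (0.8*2 + 0.6*1) / (2 + 1) = 2.2 / 3 ~= 0.733 - a weighted
# average, so Ff stays in [0, 1] whenever every f_j does.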
#-------------------------------------------------------
# f1: how balanced is the distribution of Subjects, considering the "Charge" of each Professor and its Subj related
def calc_f1(subjList, profList, prof_relationsList):
# List of all 'Effective Charges', that is, the sum of the charges of all the subjects related to the professor
charges_eachProfRelations = [0 for _ in range(len(profList))]
# List of requested charges of each professor
charges_EachProf = [profList[i].getCharge() for i in range(len(profList))]
# Counting the occurrences, filling the vectors
for i in range(len(prof_relationsList)):
# Summing all chargers of all relations of this Prof
charges_eachProfRelations[i] = sum([subjList[sIndex].getCharge() for sIndex in prof_relationsList[i]])
# Difference of Prof Charge and the sum of all of its Subj-Relations
difChargeList = [charges_EachProf[i] - charges_eachProfRelations[i] for i in range(len(profList))]
# Relative weight of excess or missing charge for each Prof - based on the absolute credit difference
# between the credits requested by the Prof and the sum of all Subj related to it
charges_relative = [float(abs(difChargeList[i]) / charges_EachProf[i]) for i in range(len(profList))]
# Clamping the value at 1.0
charges_relativeFinal = [charge if charge < 1.0 else 1.0 for charge in charges_relative]
# The sum of charge discrepancies of all professors
sum_chargesRelative = sum(charges_relativeFinal)
return sum_chargesRelative, difChargeList
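# Worked example (hypothetical values): a professor requesting 8 credits who receives
# subjects totalling 6 gets difChargeList[i] = 2 and a relative discrepancy of
# abs(2) / 8 = 0.25; a discrepancy equal to or larger than the request is capped at 1.0.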
#-------------------------------------------------------
# f2: how many and which Subjects are among the professor's preferences, considering the "prefSubj..." Lists
def calc_f2(subjList, profList, prof_relationsList, subjIsPrefList):
# These are Lists (one per quadri - 3) of Lists (one per professor) of Lists (each PrefList+LimList)
# Each innermost position holds 1 if the Subject at that index (from the same Quadri Pref List + Lim Pref List)
# is related to the same Prof, or 0 if it is not
qX_relations = [[[] for _ in range(len(profList))] for _ in range(3)]
# List with the number of subjects that are on respective Prof's List of Preferences
numSubjPrefList = [0 for _ in range(len(profList))]
# Counting the occurrences, filling the vectors
# Iterating with enumerate so that professors with identical relation lists still get the right index
for pIndex, relations in enumerate(prof_relationsList):
# Getting data of current Prof
prefSubjLists = [i for i in profList[pIndex].getPrefSubjLists()]
# For each Quadri - Filling QX Lists of current Prof
# each one concatenates "pPrefSubjQXList" with "pPrefSubjLimList", so the subList has the combined length
for i in range(3):
qX_relations[i][pIndex] = [0 for _ in range(len(prefSubjLists[i]) + len(prefSubjLists[3]))]
# All Relations of one Prof
for sIndex in relations:
# Getting data of current Subj
sName = subjList[sIndex].getName()
sQuadri = subjList[sIndex].getQuadri()
# For each quadri
for i in range(3):
# Looking for only in the list of respective quadri of current Subject in analysis
if(str(i+1) in sQuadri):
# Finding the Subject 'sName' in "pPrefSubjQXList+pPrefSubjLimList" list
sumList = prefSubjLists[i] + prefSubjLists[3]
# Checking if the List is not empty
if(len(sumList) > 0):
try: index_value = sumList.index(sName)
except ValueError: index_value = -1
# If the Subj name appears in the list
if(index_value != -1):
# Putting '1' at the position 'index_value' found in the subList (the subList itself sits at the Prof's position in profList)
qX_relations[i][pIndex][index_value] = 1
# Adding the Subj that is on Prof Pref List
numSubjPrefList[pIndex] = numSubjPrefList[pIndex] + 1
# Calculating intermediate variables
# Lists of the "satisfaction" calculation, based on the order of Subjects chosen by a Professor (index = 0 has more weight)
finalQX = [[0.0 for _ in range(len(profList))] for _ in range(3)]
# For each Quadri
for i in range(3):
# Calculating the Satisfaction from QX relations for each Professor
# Iterating with enumerate to keep the right professor index even when relation lists repeat
for prof_index, list_choice_relation in enumerate(qX_relations[i]):
len_current_list = len(list_choice_relation)
# Initializing current position and total weight that will be calculated next
total_weight = 0
# Checking if the Relations-Preference List is empty
if(len_current_list == 0): finalQX[i][prof_index] = 1.0
# Otherwise it needs to be calculated (the list is not empty)
else:
# QX Relations of each Professor - iterating with enumerate, since the items are only
# 0s and 1s and list.index(h) would always return the first occurrence of the value
for pref_index, h in enumerate(list_choice_relation):
# Summing the Total Weight of this list of preferences to normalize later (+1 because first index is 0)
total_weight = total_weight + pref_index + 1
# If the current Subj, in this specific position on the Preference List of current Prof, is related to it
if(h == 1):
# Summing the respective weight the Subj has in the Prof List of Preferences
finalQX[i][prof_index] = finalQX[i][prof_index] + (len_current_list - pref_index + 1)
# Calculate the final value of "Satisfaction" normalized, after obtained and summed all weights from Subjects related to current professor
finalQX[i][prof_index] = float(finalQX[i][prof_index] / total_weight)
# Calculate the final value of a Prof "satisfaction" summing all 3 values (from finalQ1, finalQ2 and finalQ3 lists) and normalizing it
final_Satisf = [float((finalQX[0][i] + finalQX[1][i] + finalQX[2][i]) / 3.0) for i in range(len(finalQX[0]))]
# Finally, calculating all Professors Satisfaction summing all final values
sum_Satisfaction = sum(final_Satisf)
return sum_Satisfaction, numSubjPrefList
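# Worked example (hypothetical values): for a preference list of length 3, total_weight =
# 1 + 2 + 3 = 6; a related subject at position 0 (the most wanted) contributes
# (3 - 0 + 1) = 4, so that professor's satisfaction for this quadri becomes 4 / 6 ~= 0.67.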
#-------------------------------------------------------
# f3: how many Subjects are taught in a "Quadri" different from the Professor's 'quadriSabbath'
def calc_f3(subjList, profList, prof_relationsList):
# List of Subjs related to a Prof that is on different Quadri of prof's QuadSabb
quadSabbNotPrefList = [[] for _ in range(len(profList))]
# Getting the occurrences, filling the vector
for i in range(len(prof_relationsList)):
# Getting data of current Prof
pQuadriSabbath = profList[i].getQuadriSabbath()
# All Relations of one Prof
for sIndex in prof_relationsList[i]:
# Getting data of current Subj
sQuadri = subjList[sIndex].getQuadri()
# Counting the Subj if it is not in the Prof's 'pQuadriSabbath' (a Prof who chose 'NENHUM' has no 'pQuadriSabbath')
if('NENHUM' in pQuadriSabbath or sQuadri != pQuadriSabbath): quadSabbNotPrefList[i].append(sIndex)
# Calculating intermediate variable
sum_quadSabbNotPref = sum([len(listSubj) for listSubj in quadSabbNotPrefList])
return sum_quadSabbNotPref, quadSabbNotPrefList
#-------------------------------------------------------
# f4: how many Subjects are taught in the same "Period" as the Professor's preference "pPeriod"
def calc_f4(subjList, profList, prof_relationsList):
# List of Subjs related to a Prof that is on same Period of prof's Period
periodPrefList = [[] for _ in range(len(profList))]
# Getting the occurrences, filling the vector
for i in range(len(prof_relationsList)):
# Getting data of current Prof
pPeriod = profList[i].getPeriod()
# All Relations of one Prof
for sIndex in prof_relationsList[i]:
# Getting data of current Subj
sPeriod = subjList[sIndex].getPeriod()
# Counting the Subj if it is in the same 'pPeriod' or if the Prof does not care ('pPeriod' equal to 'NEGOCIAVEL')
if('NEGOCI' in pPeriod or sPeriod == pPeriod): periodPrefList[i].append(sIndex)
# Calculating intermediate variable
sum_periodPref = sum([len(listSubj) for listSubj in periodPrefList])
return sum_periodPref, periodPrefList
#-------------------------------------------------------
# f5: how many Subjects are taught in the same "Campus" as the Professor's preference "prefCampus"
def calc_f5(subjList, profList, prof_relationsList):
# List of Subjs related to a Prof that is on same Campus of prof's Campus
campPrefList = [[] for _ in range(len(profList))]
# Getting the occurrences, filling the vector
for i in range(len(prof_relationsList)):
# Getting data of current Prof
pPrefCampus = profList[i].getPrefCampus()
# All Relations of one Prof
for sIndex in prof_relationsList[i]:
# Getting data of current Subj
sCampus = subjList[sIndex].getCampus()
# Adding to count if the Subj is in the same 'pPrefCampus'
if(sCampus == pPrefCampus): campPrefList[i].append(sIndex)
# Calculating intermediate variable
sum_campusPref = sum([len(listSubj) for listSubj in campPrefList])
return sum_campusPref, campPrefList
#-------------------------------------------------------
# f6: how evenly the relations are distributed among professors (deviation from the average)
def calc_f6(subjList, profList, prof_relationsList):
# Number of Subjs ideal for each professor
avgSubjperProf = float(len(subjList)/len(profList))
# Difference between num of relations of each prof and the average
difNumRel = [len(relations) - avgSubjperProf for relations in prof_relationsList]
# Relative weight of excess or missing relations for each Prof - based on the absolute difference in relations
relations_relative = [float(abs(difNumRel[i]) / avgSubjperProf) for i in range(len(prof_relationsList))]
# Clamping each value at 1.0
relations_relativeFinal = [value if value < 1.0 else 1.0 for value in relations_relative]
# The sum of relations discrepancies of all professors
sum_relationsRelative = sum(relations_relativeFinal)
return sum_relationsRelative, difNumRel
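# Worked example (hypothetical values): with 30 subjects and 10 professors the ideal is
# 3.0 relations per professor; one holding 5 relations scores abs(5 - 3) / 3 ~= 0.67,
# and a discrepancy equal to or larger than the ideal itself is capped at 1.0.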
#-------------------------------------------------------
# f7: quality of relations (the subj appears in some preference list and/or shares the Prof's quadri)
def calc_f7(subjList, profList, prof_relationsList, subjIsPrefList):
# Summing, for each professor, its relations qualities
sumRelationsQuality = [sum([subjIsPrefList[i][pos] for pos in prof_relationsList[i]]) for i in range(len(prof_relationsList))]
# Relative value of quality of all relations for each Prof (2 is the max value of quality - same quadri of pref list)
qualityRelative = [float(sumRelationsQuality[i] / (2 * len(prof_relationsList[i]))) for i in range(len(prof_relationsList))]
# The sum of relative qualities of all professors
sum_qualityRelative = sum(qualityRelative)
return sum_qualityRelative, qualityRelative
#==============================================================================================================
# Generate new solutions from the current Infeasible population
def offspringI(solutionsNoPop, solutionsI, profList, subjList, subjIsPrefList, mutWithRand):
# Check if the Infeasible pop. is empty
if(len(solutionsI.getCandList()) != 0):
# Make a Mutation for each candidate, trying to repair a restriction problem maker
for cand in solutionsI.getCandList():
newCand = mutationI(cand, profList, subjList, subjIsPrefList, mutWithRand)
# Adding the new Candidate generated by Mutation to 'solutionsNoPop'
solutionsNoPop.addCand(newCand)
if(printSteps == 1): print("Inf. Offspring/", end='')
#==============================================================================================================
# Generate new solutions from the current Feasible population
def offspringF(solutionsNoPop, solutionsF, profList, subjList, subjIsPrefList, maxNumCand_perPop, pctParentsCross, reposCross, twoPointsCross, mutWithRand):
# Check if the Feasible pop. is empty
if(len(solutionsF.getCandList()) != 0):
# 'objectiveNum': number of solutions to become parents - based on 'pctParentsCross'
objectiveNum = int(pctParentsCross * len(solutionsF.getCandList()) / 100)
# Turning 'objectiveNum' even if it is odd -> adding 1 only when the new value does not exceed len(solutionsF)
if(objectiveNum % 2 != 0):
if((objectiveNum + 1) <= len(solutionsF.getCandList())): objectiveNum = objectiveNum + 1
else: objectiveNum = objectiveNum - 1
# Ensuring there are enough solutions to become parents (at least 2)
if(objectiveNum < 2):
# With at most 1 solution (insufficient for any crossover), every solution generates a child through mutation
for cand in solutionsF.getCandList(): solutionsNoPop.addCand(mutationF(cand, profList, subjList, subjIsPrefList, mutWithRand))
# If we have at least 2 solutions
else:
# Roulette Wheel to choose solutions to become Parents
fitnessList = [cand.getFitness() for cand in solutionsF.getCandList()]
parentsSolFeas, notParents_objectsList, _ = rouletteWheel(solutionsF.getCandList(), fitnessList, objectiveNum, reposCross)
# Solutions 'children' created by crossover
childSolFeas = []
# Make a Crossover (creating two new candidates) for each randomly chosen pair of parent candidates
# Ensuring the number of children equals the number of parents
while(len(childSolFeas) != objectiveNum):
# If there are only 2 parents, make a crossover between them
if(len(parentsSolFeas) <= 2): parent1, parent2 = 0, 1
# If there are more than 2, choose the parents randomly
else:
parent1, parent2 = random.randrange(len(parentsSolFeas)), random.randrange(len(parentsSolFeas))
# Ensuring the second parent is not the same as the first one
while(parent1 == parent2): parent2 = random.randrange(len(parentsSolFeas))
# Making the Crossover with the selected parents
newCand1, newCand2 = crossover(parentsSolFeas[parent1], parentsSolFeas[parent2], twoPointsCross)
# Removing the used parents (capturing the parent2 object first, since removing parent1 shifts the list indexes)
parent2 = parentsSolFeas[parent2]
parentsSolFeas.remove(parentsSolFeas[parent1])
parentsSolFeas.remove(parent2)
# adding the new candidates generated to childSolFeas
childSolFeas.append(newCand1)
childSolFeas.append(newCand2)
# Adding the children generated by crossover to 'solutionsNoPop'
for cand in childSolFeas:
solutionsNoPop.addCand(cand)
# Make a Mutation on all the candidates that were not chosen to be parents above
for cand in notParents_objectsList:
# Making a mutation whose randomness follows 'mutWithRand'
newCand = mutationF(cand, profList, subjList, subjIsPrefList, mutWithRand)
# Adding the child not generated by crossover to 'solutionsNoPop'
solutionsNoPop.addCand(newCand)
if(printSteps == 1): print("Feas. Offspring/", end='')
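# Worked example (hypothetical values): with 7 feasible candidates and pctParentsCross = 50,
# objectiveNum = int(3.5) = 3; being odd it is bumped to 4 (since 4 <= 7), so 4 parents
# produce 4 children via crossover while the remaining 3 candidates each generate a mutant.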
#==============================================================================================================
# Make a mutation into a infeasible candidate
def mutationI(candidate, profList, subjList, subjIsPrefList, mutWithRand=1):
# Getting data to work with
relations = candidate.getRelationsList()[:]
prof_relationsList, i2_conflictsList, i3_conflictsList = candidate.getInfVariables()
# This while-loop runs until a randomly chosen 'restriction repair' is actually applied
flag_work_done = False
while(flag_work_done == False):
# Choosing one type of restriction to repair
if(mutWithRand == 0): problemType = random.randrange(1,4)
if(mutWithRand == 1): problemType = random.randrange(0,4)
if(mutWithRand == 2): problemType = 0
# (0) No repair -> Random Change
if(problemType == 0): flag_work_done, newCand = mutationRand(candidate, profList)
# (1) Prof without relations (with no Subjects) in 'prof_relationsList'
elif(problemType == 1):
# Ensuring this 'problemType' does not touch good relations when there is no restriction to repair
if(prof_relationsList.count([]) != 0):
flag_work_done, newCand = mutationDeterm(profList, prof_relationsList, relations, subjIsPrefList, prof_relationsList)
else:
# (2) 2 or more Subjects (related to the same Prof) with same 'quadri', 'day' and 'hour' in 'i2_conflictsList'
if(problemType == 2): iX_conflictsList = i2_conflictsList
# (3) 2 or more Subjects (related to the same Prof) with same 'day' and 'quadri' but different 'campus' in 'i3_conflictsList'
if(problemType == 3): iX_conflictsList = i3_conflictsList
# Ensuring this 'problemType' does not touch good relations when there is no restriction to repair
if(len(iX_conflictsList) != 0 and iX_conflictsList.count([]) != len(iX_conflictsList)):
flag_work_done, newCand = mutationDeterm(profList, prof_relationsList, relations, subjIsPrefList, iX_conflictsList)
return newCand
#==============================================================================================================
# Make a mutation into a feasible candidate
def mutationF(candidate, profList, subjList, subjIsPrefList, mutWithRand=1):
# Getting data to work with
relations = candidate.getRelationsList()[:]
prof_relationsList, _, periodPrefList, quadSabbNotPrefList, campPrefList, _ = candidate.getFeaVariables()
# This while-loop runs until a randomly chosen 'Improvement work' is actually applied
flag_work_done = False
while(flag_work_done == False):
# Choosing one type of 'Improvement work'
if(mutWithRand == 0): adjustType = random.randrange(1,6)
if(mutWithRand == 1): adjustType = random.randrange(0,6)
if(mutWithRand == 2): adjustType = 0
# (0) No 'Improvement work' -> Random Change
if(adjustType == 0): flag_work_done, newCand = mutationRand(candidate, profList)
# (1) Improving number of Relations
elif(adjustType == 1): flag_work_done, newCand = mutationDeterm(profList, prof_relationsList, relations, subjIsPrefList, prof_relationsList)
# (2) Improving number of Subj Preferences
elif(adjustType == 2):
# Building a list with the relations that are NOT preferred
notPrefList = [[subjIndex for subjIndex in prof_relationsList[i] if subjIsPrefList[i][subjIndex] == 0] for i in range(len(prof_relationsList))]
# Ensuring this 'adjustType' does not touch good relations when there is nothing to improve
if(notPrefList.count([]) != len(notPrefList)):
flag_work_done, newCand = mutationDeterm(profList, prof_relationsList, relations, subjIsPrefList, notPrefList)
else:
# (3) Improving number of Periods
if(adjustType == 3): XPref = periodPrefList
# (4) Improving number of QuadSabb
if(adjustType == 4): XPref = quadSabbNotPrefList
# (5) Improving number of Campus
if(adjustType == 5): XPref = campPrefList
if(len(XPref) != 0):
# Building a list with the relations that are NOT in the respective 'XPref' sublist
notPrefList = [[subjIndex for subjIndex in prof_relationsList[i] if XPref[i].count(subjIndex) == 0] for i in range(len(prof_relationsList))]
# Ensuring this 'adjustType' does not touch good relations when there is nothing to improve
if(notPrefList.count([]) != len(notPrefList)):
flag_work_done, newCand = mutationDeterm(profList, prof_relationsList, relations, subjIsPrefList, notPrefList)
return newCand
#==============================================================================================================
# Make a selection of the solutions from all Infeasible Pop.('infPool' and 'solutionsI')
def selectionI(infPool, solutionsI, maxNumCand_perPop, reposSelInf):
# Check if the Infeasible pop. is empty
if(len(solutionsI.getCandList()) != 0 or len(infPool.getCandList()) != 0):
# Gathering both lists (infPool and solutionsI)
infeasibles_List = solutionsI.getCandList() + infPool.getCandList()
# Check whether a selection process is needed
if(len(infeasibles_List) > maxNumCand_perPop):
# Roulette Wheel Selection
# Since the Fitness values lie in the range [-1, 0], they are shifted into the range [0, 1]
fitnessList = [1.0 + cand.getFitness() for cand in infeasibles_List]
infeasibles_List, _, _ = rouletteWheel(infeasibles_List, fitnessList, maxNumCand_perPop, reposSelInf)
# Updating the (new) 'solutionsI' list to the next generation
solutionsI.setCandList(infeasibles_List)
if(printSteps == 1): print("Inf. Selection/", end='')
#==============================================================================================================
# Make a Selection of the best solutions from Feasible Pop.
def selectionF(feaPool, solutionsF, maxNumCand_perPop, pctElitism, reposSelFea):
# Check if the Feasible pop. is empty
if(len(solutionsF.getCandList()) != 0 or len(feaPool.getCandList()) != 0):
# Gathering both lists (feaPool and solutions)
feasibles_List = solutionsF.getCandList() + feaPool.getCandList()
# Check whether a selection process is needed
if(len(feasibles_List) > maxNumCand_perPop):
# Defining the division of number of candidates between selections process
elitismNum = maxNumCand_perPop * pctElitism / 100.0
if(elitismNum > 0.0 and elitismNum < 1.0): elitismNum = 1
else: elitismNum = int(elitismNum)
roulNum = maxNumCand_perPop - elitismNum
# Elitism and Roulette Selection
listFit = [cand.getFitness() for cand in feasibles_List]
maxFeasibles_List, rest_objectsList, rest_valuesList = elitismSelection(feasibles_List, listFit, elitismNum)
selectedObj, _, _ = rouletteWheel(rest_objectsList, rest_valuesList, roulNum, reposSelFea)
feasibles_List = maxFeasibles_List + selectedObj
# Updating the (new) 'solutionsF' list to the next generation
solutionsF.setCandList(feasibles_List)
if(printSteps == 1): print("Feas. Selection/", end='')
#==============================================================================================================
# Make a rand mutation into a solution
def mutationRand(candidate, profList):
# Getting all relations from Candidate
relations = candidate.getRelationsList()[:]
# Choosing randomly a relation to be modified
original = random.randrange(len(relations))
# Recording the Original Relation
subj, oldProf = relations[original]
# Granting that the 'newProf' is different from the 'oldProf'
newProf = oldProf
while(oldProf == newProf):
# Finding randomly a new Prof
change = random.randrange(len(profList))
newProf = profList[change]
# Setting the new Relation modified, creating and setting a new Candidate
relations[original]=[subj,newProf]
newCand = objects.Candidate()
newCand.setRelationsList(relations)
# Setting the flag that tells the caller's while-loop the work is done
flag_work_done = True
# Returning the new Candidate generated
return flag_work_done, newCand
#==============================================================================================================
# Make some deterministic type of adjustment changing some 'bad' relation
def mutationDeterm(profList, prof_relationsList, relations, subjIsPrefList, problemList):
# Choosing a professor to lose a relation
# Roulette Wheel - more 'bad' relations -> more weight
weightList = [len(i) for i in problemList]
problemSubList_selected, _, _ = rouletteWheel(problemList, weightList, objectiveNum=1, repos=0)
profLost_Index = problemList.index(problemSubList_selected[0])
# Choosing the relation to be modified
# Roulette Wheel - less preference -> more weight
lessPrefValue = [2 - subjIsPrefList[profLost_Index][subjIndex] for subjIndex in problemList[profLost_Index]]
will_change_index, _, _ = rouletteWheel(problemSubList_selected[0], lessPrefValue, objectiveNum=1, repos=0)
relation_will_change_index = will_change_index[0]
# Recording the original relation that will be modified
subj, oldProf = relations[relation_will_change_index]
# Choosing new Prof to be in the selected relation
# Granting that the new Prof is different from the old one
newProf = oldProf
while(oldProf == newProf):
# Roulette Wheel - more preference AND less relations -> more weight
SubjPrefValuesList = [subjIsPref_subList[relation_will_change_index] for subjIsPref_subList in subjIsPrefList]
# Removing possible Zeros to make the division
prof_relations_final = [len(i) if len(i) != 0 else 0.5 for i in prof_relationsList]
# Getting the weights values
morePrefValueList = [float(SubjPrefValuesList[i] / prof_relations_final[i]) for i in range(len(profList))]
# If there is only one Prof with a value != 0.0
if(morePrefValueList.count(0.0) == len(morePrefValueList) - 1):
indexNotZero = [i for i in range(len(profList)) if morePrefValueList[i] != 0.0]
# If it is the same as the old one - fall back to a random choice
if(oldProf == profList[indexNotZero[0]]): newProf = profList[random.randrange(len(profList))]
# If not
else: newProf = profList[indexNotZero[0]]
# If there is more than one Prof to choose from
else:
newProf, _, _ = rouletteWheel(profList, morePrefValueList, objectiveNum=1, repos=0)
newProf = newProf[0]
# Setting the new relation
relations[relation_will_change_index] = [subj, newProf]
# Setting the flag that tells the caller's while-loop the work is done
flag_work_done = True
# Generating a new candidate
newCand = objects.Candidate()
newCand.setRelationsList(relations)
return flag_work_done, newCand
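# Note on the weighting (illustrative): 'lessPrefValue' maps a preference score of 0 to
# weight 2 and a score of 2 to weight 0, biasing the roulette towards replacing the
# least-preferred relation; 'morePrefValueList' divides the preference score by the
# professor's current number of relations, favouring under-loaded professors as receivers.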
#==============================================================================================================
# Make a crossover between two solutions
def crossover(cand1, cand2, twoPointsCross=-1):
# The crossover segment always has the same size on both parents, and never covers all of their relations
# twoPointsCross = False -> only one point is chosen, and the changes run from relation 0 up to the chosen point
# A value of '-1' means the crossover type is chosen randomly
if(twoPointsCross == -1): twoPointsCross = random.choice([True, False])
# Getting all relations from Candidates to work with
relations1 = cand1.getRelationsList()[:]
relations2 = cand2.getRelationsList()[:]
# OnePoint type:
if(not twoPointsCross):
point1 = 0 # Default initial point ('first half') - making the changes on the 'second half' would give an equivalent result
point2 = random.randrange(len(relations1)) # Randomly choosing other point that can be equal to 'point1'
# Ensuring the crossover does not merely copy the parents - the chosen point must not be the last relation
while(point2 == len(relations1)-1): point2 = random.randrange(len(relations1))
# twoPointsCross Type
else:
# Randomly generating two numbers to define a patch - it can be a single modification (when p1 == p2)
point1, point2 = random.randrange(len(relations1)), random.randrange(len(relations1))
# Ensuring 'point2' is not smaller than 'point1'
if(point2 < point1): point1, point2 = point2, point1
# Ensuring the crossover does not simply copy all relations from one Cand to the other
while(point2 - point1 == len(relations1) - 1):
# Randomly generating two numbers to define a patch - it can be a single modification (when p1 == p2)
point1, point2 = random.randrange(len(relations1)), random.randrange(len(relations1))
# Ensuring 'point2' is not smaller than 'point1'
if(point2 < point1): point1, point2 = point2, point1
# Passing through the relations between Parents making all changes
while (point1 <= point2):
# Recording the original relations
s1, p1 = relations1[point1]
s2, p2 = relations2[point1]
# Making the exchange of relations (changing only professors)
relations1[point1] = s1, p2
relations2[point1] = s2, p1
# Next relation
point1 = point1 + 1
# Creating and setting the two new Candidates
newCand1, newCand2 = objects.Candidate(), objects.Candidate()
newCand1.setRelationsList(relations1)
newCand2.setRelationsList(relations2)
# Returning the new Candidates
return newCand1, newCand2
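# Worked example (hypothetical values): with 6 relations and twoPointsCross = False,
# point1 = 0 and, say, point2 = 2: the professors of relations 0, 1 and 2 are swapped
# between the two parents, while relations 3-5 keep their original professors.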
#==============================================================================================================
# Selection by elitism
def elitismSelection(objectsList, valuesList, objectiveNum):
selectedObj = [] # List with the selected Objects
objectsList = objectsList[:]
valuesList = valuesList[:]
# Getting the maximal Value Solutions
while(len(selectedObj) < objectiveNum):
# Finding the maximal value in the list and its respective index
maxValue = max(valuesList)
maxIndex = valuesList.index(maxValue)
# Adding selected object to list
selectedObj.append(objectsList[maxIndex])
# Removing maximal Value/Object to next selection
valuesList.pop(maxIndex)
objectsList.pop(maxIndex)
return selectedObj, objectsList, valuesList
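# Worked example (hypothetical values): with valuesList = [0.9, 0.5, 0.7] and
# objectiveNum = 2, the objects at indexes 0 and 2 (fitness 0.9 and 0.7) are selected,
# leaving the 0.5 object in the remainder lists for a later roulette round.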
#==============================================================================================================
# Make selection of objects by Roulette Wheel
def rouletteWheel(objectsList, valuesList, objectiveNum, repos=0):
# objectiveNum: number of objects to be selected
# repos: type of wheel (with or without reposition)
# Making a copy of the original lists to work with
objectsList = objectsList[:]
valuesList = valuesList[:]
# List with the selected Objects
selectedObj = []
# Flag forcing the probability tables to be computed at least once; with reposition they are computed only once, without it they are recomputed every round
reCalc = True
while(len(selectedObj) < objectiveNum):
# Updating the data for the next roulette round, excluding the object selected in the previous round
if(reCalc):
# When the Roulette process does have reposition of objects
if(repos): reCalc = False
# Find the total Value of the Objects
totalValue = sum(valuesList)
# If all values are Zero
if(totalValue == 0.0):
valuesList = [1.0 for _ in valuesList]
totalValue = len(valuesList)
# Calculate the prob. of a selection for each object
probObj = [float(value / totalValue) for value in valuesList]
# Calculate a cumulative prob. for each object
cumulative = 0.0
cumulativeProbObj = []
for q in probObj:
qNew = q + cumulative
cumulativeProbObj.append(qNew)
cumulative = qNew
# MAIN Roulette Wheel Selection process (one round)
probPrev = 0.0
r = float(random.randrange(101) / 100.0)
for i in range(len(cumulativeProbObj)):
if(probPrev < r and r <= cumulativeProbObj[i]):
# Adding the selected Object to 'selectedObj'
selectedObj.append(objectsList[i])
if(not repos):
# Removing the selected object/value from 'valuesList' to do next roulette process
valuesList.pop(i)
objectsList.pop(i)
break
probPrev = cumulativeProbObj[i]
# Removing from 'objectsList' the selected objects (not removed before because of the reposition)
# If there are repeated objects, (objectsList + selectedObj) will be larger than the original objectsList size
if(repos):
for i in selectedObj:
try:
index = objectsList.index(i)
objectsList.pop(index)
valuesList.pop(index)
except ValueError: index = -1
return selectedObj, objectsList, valuesList
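# Worked example (hypothetical values): with valuesList = [1, 3, 6], totalValue = 10, the
# cumulative probabilities are [0.1, 0.4, 1.0]; a draw of r = 0.55 falls in the interval
# (0.4, 1.0], so the third object is selected on that round.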
#==============================================================================================================
# Detect the stop condition
def stop(asks, curr_Iter, maxNum_Iter, lastMaxFit_Iter, convergDetect, maxFitFea):
if(curr_Iter == maxNum_Iter): return (True if asks == 0 else ioData.askStop()) # Reached max num of iterations
if(convergDetect != 0 and curr_Iter - lastMaxFit_Iter == convergDetect): return (True if asks == 0 else ioData.askStop()) # Reached convergence num of iterations
return False # Continues the run with same num of iterations
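# Illustrative call (hypothetical values): with maxNum_Iter = 500 and convergDetect = 50,
# the run halts at iteration 500, or as soon as 50 iterations pass without the best
# feasible fitness improving - unless 'asks' is set and the user chooses to continue.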
#==============================================================================================================
11e388ebd565f092940b5ad2ddba87b868dac5de | 3,171 | py | Python | HyperV/WS2012R2/stress/StorVSCIOZoneTest.py | microsoft/FreeBSD-Test-Automation | e96a84054d771ece83908299d37e3c02a19f98b3 | ["Apache-2.0"] | 1 | 2020-01-16T08:45:59.000Z | 2020-01-16T08:45:59.000Z
HyperV/WS2012R2/stress/StorVSCIOZoneTest.py | LIS/FreeBSD-Test-Automation | e96a84054d771ece83908299d37e3c02a19f98b3 | ["Apache-2.0"] | null | null | null
HyperV/WS2012R2/stress/StorVSCIOZoneTest.py | LIS/FreeBSD-Test-Automation | e96a84054d771ece83908299d37e3c02a19f98b3 | ["Apache-2.0"] | 1 | 2021-08-03T00:22:40.000Z | 2021-08-03T00:22:40.000Z
#!/usr/bin/env python
import sys
import os
import time
import test_class
import subprocess
class StorVSCIOZoneTest(test_class.TestClass):
def _set_up_vm(self, vm_name, args):
# this piece of code will be executed first thing after the VM is
# booted up
args['working_dir'] = self._test_param(None)['working_dir']
test_class._run_on_vm(self, vm_name, "install_iozone", args)
test_class._run_on_vm(self, vm_name, "format_drive", args)
def _set_up_host(self, host_name, args):
# BEFORE the VM boots up, this function will be called to prepare
# the host.
# Tasks could include creating VM, configuring VM and install host
# software.
pass
def format_drive(self, args):
DEFAULT_SCSI_DRIVE = '/dev/da1'
if os.path.exists(DEFAULT_SCSI_DRIVE + 'p1'):
# delete the partition
subprocess.call(["gpart", "delete", "-i", "1", DEFAULT_SCSI_DRIVE])
subprocess.call(["gpart", "destroy", DEFAULT_SCSI_DRIVE])
time.sleep(2)
subprocess.call(["gpart", "create", "-s", "GPT", DEFAULT_SCSI_DRIVE])
subprocess.call(["gpart", "add", "-t", "freebsd-ufs", DEFAULT_SCSI_DRIVE])
subprocess.call(["newfs", DEFAULT_SCSI_DRIVE + "p1"])
time.sleep(5)
subprocess.call(["mount", DEFAULT_SCSI_DRIVE + "p1", args['working_dir']])
def install_iozone(self, args):
logfile = open('install-iozone.log', 'w')
p = subprocess.Popen(["pkg", "install", "-y" , "iozone"],
stdout = logfile,
stderr = logfile)
p.wait()
logfile.close()
def run_iozone(self, args):
# remember to copy the logs
logfile = open('iozone.log', 'w')
# make IOZone run on a separate drive
os.chdir(args['working_dir'])
p = subprocess.Popen(["iozone", "-a", "-z", "-g10g", "-Vshostc"],
stdout=logfile,
stderr=logfile)
p.wait()
logfile.close()
def _run(self, args):
# get a host...
# yes I know it's ugly
host_one = self._machines[0]['host']
# get a VM
vm_one = self._machines[0]['vms'][0]['name']
args['working_dir'] = self._test_param(None)['working_dir']
test_class._run_on_vm(self, vm_one, "run_iozone", args)
def _tear_down(self, args):
pass
def _request_machines(self):
# EXAMPLE: requesting machines from pool
# the size of the request array will be the number of hosts
# required, and each array element indicates how many VMs are
# required on that host.
# only 1 VM on 1 host is required
request = {'pool': 'stress',
'desc': 'storvsc_IOZone',
'req': [1]
}
return request
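# Illustrative reading (hypothetical variant): 'req': [1] asks the 'stress' pool for one
# host running one VM; a request like 'req': [2, 1] would ask for two hosts, with two VMs
# on the first and one on the second.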
def _test_param(self, args):
param = {
'multi-threaded': True,
'snapshot': 'ICABase',
'remote_path': '/root/',
'working_dir': '/mnt/test'
}
return param
11e3f9c5f47a0f678f4c4be381a8ca3e9eaec6d2 | 16,809 | py | Python | LDDMM_Python/lddmm_python/lib/plotly/colors.py | tt6746690/lddmm-ot | 98e45d44969221b0fc8206560d9b7a655ef7e137 | ["MIT"] | 48 | 2017-08-04T03:30:22.000Z | 2022-03-09T03:24:11.000Z
LDDMM_Python/lddmm_python/lib/plotly/colors.py | hushunbo/lddmm-ot | 5af26fe32ae440c598ed403ce2876e98d6e1c692 | ["MIT"] | null | null | null
LDDMM_Python/lddmm_python/lib/plotly/colors.py | hushunbo/lddmm-ot | 5af26fe32ae440c598ed403ce2876e98d6e1c692 | ["MIT"] | 15 | 2017-09-30T18:55:48.000Z | 2021-04-27T18:27:55.000Z
"""
colors
=====
Functions that manipulate colors and arrays of colors
There are three basic color types: rgb, hex and tuple:
rgb - An rgb color is a string of the form 'rgb(a,b,c)' where a, b and c are
floats between 0 and 255 inclusive.
hex - A hex color is a string of the form '#xxxxxx' where each x is a
character that belongs to the set [0,1,2,3,4,5,6,7,8,9,a,b,c,d,e,f]. This is
just the list of characters used in the hexadecimal numeric system.
tuple - A tuple color is a 3-tuple of the form (a,b,c) where a, b and c are
floats between 0 and 1 inclusive.
"""
from __future__ import absolute_import
from plotly import exceptions
from numbers import Number
DEFAULT_PLOTLY_COLORS = ['rgb(31, 119, 180)', 'rgb(255, 127, 14)',
'rgb(44, 160, 44)', 'rgb(214, 39, 40)',
'rgb(148, 103, 189)', 'rgb(140, 86, 75)',
'rgb(227, 119, 194)', 'rgb(127, 127, 127)',
'rgb(188, 189, 34)', 'rgb(23, 190, 207)']
PLOTLY_SCALES = {
'Greys': [
[0, 'rgb(0,0,0)'], [1, 'rgb(255,255,255)']
],
'YlGnBu': [
[0, 'rgb(8,29,88)'], [0.125, 'rgb(37,52,148)'],
[0.25, 'rgb(34,94,168)'], [0.375, 'rgb(29,145,192)'],
[0.5, 'rgb(65,182,196)'], [0.625, 'rgb(127,205,187)'],
[0.75, 'rgb(199,233,180)'], [0.875, 'rgb(237,248,217)'],
[1, 'rgb(255,255,217)']
],
'Greens': [
[0, 'rgb(0,68,27)'], [0.125, 'rgb(0,109,44)'],
[0.25, 'rgb(35,139,69)'], [0.375, 'rgb(65,171,93)'],
[0.5, 'rgb(116,196,118)'], [0.625, 'rgb(161,217,155)'],
[0.75, 'rgb(199,233,192)'], [0.875, 'rgb(229,245,224)'],
[1, 'rgb(247,252,245)']
],
'YlOrRd': [
[0, 'rgb(128,0,38)'], [0.125, 'rgb(189,0,38)'],
[0.25, 'rgb(227,26,28)'], [0.375, 'rgb(252,78,42)'],
[0.5, 'rgb(253,141,60)'], [0.625, 'rgb(254,178,76)'],
[0.75, 'rgb(254,217,118)'], [0.875, 'rgb(255,237,160)'],
[1, 'rgb(255,255,204)']
],
'Bluered': [
[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']
],
# modified RdBu based on
# www.sandia.gov/~kmorel/documents/ColorMaps/ColorMapsExpanded.pdf
'RdBu': [
[0, 'rgb(5,10,172)'], [0.35, 'rgb(106,137,247)'],
[0.5, 'rgb(190,190,190)'], [0.6, 'rgb(220,170,132)'],
[0.7, 'rgb(230,145,90)'], [1, 'rgb(178,10,28)']
],
# Scale for non-negative numeric values
'Reds': [
[0, 'rgb(220,220,220)'], [0.2, 'rgb(245,195,157)'],
[0.4, 'rgb(245,160,105)'], [1, 'rgb(178,10,28)']
],
# Scale for non-positive numeric values
'Blues': [
[0, 'rgb(5,10,172)'], [0.35, 'rgb(40,60,190)'],
[0.5, 'rgb(70,100,245)'], [0.6, 'rgb(90,120,245)'],
[0.7, 'rgb(106,137,247)'], [1, 'rgb(220,220,220)']
],
'Picnic': [
[0, 'rgb(0,0,255)'], [0.1, 'rgb(51,153,255)'],
[0.2, 'rgb(102,204,255)'], [0.3, 'rgb(153,204,255)'],
[0.4, 'rgb(204,204,255)'], [0.5, 'rgb(255,255,255)'],
[0.6, 'rgb(255,204,255)'], [0.7, 'rgb(255,153,255)'],
[0.8, 'rgb(255,102,204)'], [0.9, 'rgb(255,102,102)'],
[1, 'rgb(255,0,0)']
],
'Rainbow': [
[0, 'rgb(150,0,90)'], [0.125, 'rgb(0,0,200)'],
[0.25, 'rgb(0,25,255)'], [0.375, 'rgb(0,152,255)'],
[0.5, 'rgb(44,255,150)'], [0.625, 'rgb(151,255,0)'],
[0.75, 'rgb(255,234,0)'], [0.875, 'rgb(255,111,0)'],
[1, 'rgb(255,0,0)']
],
'Portland': [
[0, 'rgb(12,51,131)'], [0.25, 'rgb(10,136,186)'],
[0.5, 'rgb(242,211,56)'], [0.75, 'rgb(242,143,56)'],
[1, 'rgb(217,30,30)']
],
'Jet': [
[0, 'rgb(0,0,131)'], [0.125, 'rgb(0,60,170)'],
[0.375, 'rgb(5,255,255)'], [0.625, 'rgb(255,255,0)'],
[0.875, 'rgb(250,0,0)'], [1, 'rgb(128,0,0)']
],
'Hot': [
[0, 'rgb(0,0,0)'], [0.3, 'rgb(230,0,0)'],
[0.6, 'rgb(255,210,0)'], [1, 'rgb(255,255,255)']
],
'Blackbody': [
[0, 'rgb(0,0,0)'], [0.2, 'rgb(230,0,0)'],
[0.4, 'rgb(230,210,0)'], [0.7, 'rgb(255,255,255)'],
[1, 'rgb(160,200,255)']
],
'Earth': [
[0, 'rgb(0,0,130)'], [0.1, 'rgb(0,180,180)'],
[0.2, 'rgb(40,210,40)'], [0.4, 'rgb(230,230,50)'],
[0.6, 'rgb(120,70,20)'], [1, 'rgb(255,255,255)']
],
'Electric': [
[0, 'rgb(0,0,0)'], [0.15, 'rgb(30,0,100)'],
[0.4, 'rgb(120,0,100)'], [0.6, 'rgb(160,90,0)'],
[0.8, 'rgb(230,200,0)'], [1, 'rgb(255,250,220)']
],
'Viridis': [
[0, '#440154'], [0.06274509803921569, '#48186a'],
[0.12549019607843137, '#472d7b'], [0.18823529411764706, '#424086'],
[0.25098039215686274, '#3b528b'], [0.3137254901960784, '#33638d'],
[0.3764705882352941, '#2c728e'], [0.4392156862745098, '#26828e'],
[0.5019607843137255, '#21918c'], [0.5647058823529412, '#1fa088'],
[0.6274509803921569, '#28ae80'], [0.6901960784313725, '#3fbc73'],
[0.7529411764705882, '#5ec962'], [0.8156862745098039, '#84d44b'],
[0.8784313725490196, '#addc30'], [0.9411764705882353, '#d8e219'],
[1, '#fde725']
]
}
def color_parser(colors, function):
"""
Takes color(s) and a function and applies the function on the color(s)
In particular, this function identifies whether the given color object
is an iterable or not and applies the given color-parsing function to
the color or iterable of colors. If given an iterable, it will only be
able to work with it if all items in the iterable are of the same type
- rgb string, hex string or tuple
"""
if isinstance(colors, str):
return function(colors)
if isinstance(colors, tuple) and isinstance(colors[0], Number):
return function(colors)
if hasattr(colors, '__iter__'):
if isinstance(colors, tuple):
new_color_tuple = tuple(function(item) for item in colors)
return new_color_tuple
else:
new_color_list = [function(item) for item in colors]
return new_color_list
def validate_colors(colors):
"""
Validates color(s) and returns an error for invalid colors
"""
colors_list = []
if isinstance(colors, str):
if colors in PLOTLY_SCALES:
return
elif 'rgb' in colors or '#' in colors:
colors_list = [colors]
else:
raise exceptions.PlotlyError(
"If your colors variable is a string, it must be a "
"Plotly scale, an rgb color or a hex color."
)
elif isinstance(colors, tuple):
if isinstance(colors[0], Number):
colors_list = [colors]
else:
colors_list = list(colors)
if isinstance(colors, dict):
colors_list.extend(colors.values())
elif isinstance(colors, list):
colors_list = colors
# Validate colors in colors_list
for j, each_color in enumerate(colors_list):
if 'rgb' in each_color:
each_color = color_parser(
each_color, unlabel_rgb
)
for value in each_color:
if value > 255.0:
raise exceptions.PlotlyError(
"Whoops! The elements in your rgb colors "
"tuples cannot exceed 255.0."
)
elif '#' in each_color:
each_color = color_parser(
each_color, hex_to_rgb
)
elif isinstance(each_color, tuple):
for value in each_color:
if value > 1.0:
raise exceptions.PlotlyError(
"Whoops! The elements in your colors tuples "
"cannot exceed 1.0."
)
return colors
def convert_colors_to_same_type(colors, colortype='rgb'):
"""
Converts color(s) to the specified color type
Takes a single color or an iterable of colors and outputs a list of the
color(s) converted all to an rgb or tuple color type. If colors is a
Plotly Scale name then the corresponding colorscale will be outputted and
colortype will not be applicable
"""
colors_list = []
if isinstance(colors, str):
if colors in PLOTLY_SCALES:
return PLOTLY_SCALES[colors]
elif 'rgb' in colors or '#' in colors:
colors_list = [colors]
else:
raise exceptions.PlotlyError(
"If your colors variable is a string, it must be a Plotly "
"scale, an rgb color or a hex color.")
elif isinstance(colors, tuple):
if isinstance(colors[0], Number):
colors_list = [colors]
else:
colors_list = list(colors)
elif isinstance(colors, list):
colors_list = colors
# convert all colors to rgb
for j, each_color in enumerate(colors_list):
if '#' in each_color:
each_color = color_parser(
each_color, hex_to_rgb
)
each_color = color_parser(
each_color, label_rgb
)
colors_list[j] = each_color
elif isinstance(each_color, tuple):
each_color = color_parser(
each_color, convert_to_RGB_255
)
each_color = color_parser(
each_color, label_rgb
)
colors_list[j] = each_color
if colortype == 'rgb':
return colors_list
elif colortype == 'tuple':
for j, each_color in enumerate(colors_list):
each_color = color_parser(
each_color, unlabel_rgb
)
each_color = color_parser(
each_color, unconvert_from_RGB_255
)
colors_list[j] = each_color
return colors_list
else:
raise exceptions.PlotlyError("You must select either rgb or tuple "
"for your colortype variable.")
def convert_dict_colors_to_same_type(colors, colortype='rgb'):
"""
Converts color(s) to the specified color type
Takes a single color or an iterable of colors and outputs a list of the
color(s) converted all to an rgb or tuple color type. If colors is a
Plotly Scale name then the corresponding colorscale will be outputted
"""
for key in colors:
if '#' in colors[key]:
colors[key] = color_parser(
colors[key], hex_to_rgb
)
colors[key] = color_parser(
colors[key], label_rgb
)
elif isinstance(colors[key], tuple):
colors[key] = color_parser(
colors[key], convert_to_RGB_255
)
colors[key] = color_parser(
colors[key], label_rgb
)
if colortype == 'rgb':
return colors
elif colortype == 'tuple':
for key in colors:
colors[key] = color_parser(
colors[key], unlabel_rgb
)
colors[key] = color_parser(
colors[key], unconvert_from_RGB_255
)
return colors
else:
raise exceptions.PlotlyError("You must select either rgb or tuple "
"for your colortype variable.")
def make_colorscale(colors, scale=None):
"""
Makes a colorscale from a list of colors and a scale
Takes a list of colors and scales and constructs a colorscale based
on the colors in sequential order. If 'scale' is left empty, a linear-
interpolated colorscale will be generated. If 'scale' is a specified
list, it must be the same length as colors and must contain all floats
For documentation regarding to the form of the output, see
https://plot.ly/python/reference/#mesh3d-colorscale
"""
colorscale = []
# validate minimum colors length of 2
if len(colors) < 2:
raise exceptions.PlotlyError("You must input a list of colors that "
"has at least two colors.")
if not scale:
scale_incr = 1./(len(colors) - 1)
return [[i * scale_incr, color] for i, color in enumerate(colors)]
else:
# validate scale
if len(colors) != len(scale):
raise exceptions.PlotlyError("The length of colors and scale "
"must be the same.")
if (scale[0] != 0) or (scale[-1] != 1):
raise exceptions.PlotlyError(
"The first and last number in scale must be 0.0 and 1.0 "
"respectively."
)
for j in range(1, len(scale)):
if scale[j] <= scale[j-1]:
raise exceptions.PlotlyError(
"'scale' must be a list that contains an increasing "
"sequence of numbers where the first and last number are"
"0.0 and 1.0 respectively."
)
colorscale = [list(tup) for tup in zip(scale, colors)]
return colorscale
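# Worked example: make_colorscale(['rgb(0,0,0)', 'rgb(128,128,128)', 'rgb(255,255,255)'])
# uses scale_incr = 1./(3 - 1) = 0.5 and returns
# [[0.0, 'rgb(0,0,0)'], [0.5, 'rgb(128,128,128)'], [1.0, 'rgb(255,255,255)']].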
def find_intermediate_color(lowcolor, highcolor, intermed):
"""
Returns the color at a given distance between two colors
This function takes two color tuples, where each element is between 0
and 1, along with a value 0 < intermed < 1 and returns a color that is
intermed-percent from lowcolor to highcolor
"""
diff_0 = float(highcolor[0] - lowcolor[0])
diff_1 = float(highcolor[1] - lowcolor[1])
diff_2 = float(highcolor[2] - lowcolor[2])
inter_colors = (lowcolor[0] + intermed * diff_0,
lowcolor[1] + intermed * diff_1,
lowcolor[2] + intermed * diff_2)
return inter_colors
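# Worked example: find_intermediate_color((0, 0, 0), (1, 1, 1), 0.5) computes each diff
# as 1.0 and returns (0.5, 0.5, 0.5), the midpoint of the two colors in RGB space.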
def unconvert_from_RGB_255(colors):
"""
Return a tuple where each element gets divided by 255
Takes a (list of) color tuple(s) where each element is between 0 and
255. Returns the same tuples where each tuple element is normalized to
a value between 0 and 1
"""
un_rgb_color = (colors[0]/(255.0),
colors[1]/(255.0),
colors[2]/(255.0))
return un_rgb_color
def convert_to_RGB_255(colors):
"""
Multiplies each element of a triplet by 255
"""
return (colors[0]*255.0, colors[1]*255.0, colors[2]*255.0)
def n_colors(lowcolor, highcolor, n_colors):
"""
Splits the range between a low and a high color into a list of n_colors colors
Accepts two color tuples and returns a list of n_colors colors
which form the intermediate colors between lowcolor and highcolor
from linearly interpolating through RGB space
"""
diff_0 = float(highcolor[0] - lowcolor[0])
incr_0 = diff_0/(n_colors - 1)
diff_1 = float(highcolor[1] - lowcolor[1])
incr_1 = diff_1/(n_colors - 1)
diff_2 = float(highcolor[2] - lowcolor[2])
incr_2 = diff_2/(n_colors - 1)
color_tuples = []
for index in range(n_colors):
new_tuple = (lowcolor[0] + (index * incr_0),
lowcolor[1] + (index * incr_1),
lowcolor[2] + (index * incr_2))
color_tuples.append(new_tuple)
return color_tuples
def label_rgb(colors):
"""
Takes tuple (a, b, c) and returns an rgb color 'rgb(a, b, c)'
"""
return ('rgb(%s, %s, %s)' % (colors[0], colors[1], colors[2]))
def unlabel_rgb(colors):
"""
Takes rgb color(s) 'rgb(a, b, c)' and returns tuple(s) (a, b, c)
This function takes either an 'rgb(a, b, c)' color or a list of
such colors and returns the color tuples in tuple(s) (a, b, c)
"""
str_vals = ''
for index in range(len(colors)):
try:
float(colors[index])
str_vals = str_vals + colors[index]
except ValueError:
if colors[index] == ',' or colors[index] == '.':
str_vals = str_vals + colors[index]
str_vals = str_vals + ','
numbers = []
str_num = ''
for char in str_vals:
if char != ',':
str_num = str_num + char
else:
numbers.append(float(str_num))
str_num = ''
return (numbers[0], numbers[1], numbers[2])
def hex_to_rgb(value):
"""
Calculates rgb values from a hex color code.
:param (string) value: Hex color string
:rtype (tuple) (r_value, g_value, b_value): tuple of rgb values
"""
value = value.lstrip('#')
hex_total_length = len(value)
rgb_section_length = hex_total_length // 3
return tuple(int(value[i:i + rgb_section_length], 16)
for i in range(0, hex_total_length, rgb_section_length))
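# Worked example: hex_to_rgb('#ff0080') strips the '#', splits 'ff0080' into two-character
# sections and converts each in base 16, returning (255, 0, 128).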
def colorscale_to_colors(colorscale):
"""
Converts a colorscale into a list of colors
"""
color_list = []
for color in colorscale:
color_list.append(color[1])
return color_list
11e3feaa8eddda799c32e0dc2f9c36ee4b41ba9c | 420 | py | Python | nonebot/consts.py | he0119/nonebot2 | bd7ee0a1bafc0ea7a7501ba37541349d4a81b73e | ["MIT"] | 1 | 2022-01-26T12:52:33.000Z | 2022-01-26T12:52:33.000Z
nonebot/consts.py | he0119/nonebot2 | bd7ee0a1bafc0ea7a7501ba37541349d4a81b73e | ["MIT"] | null | null | null
nonebot/consts.py | he0119/nonebot2 | bd7ee0a1bafc0ea7a7501ba37541349d4a81b73e | ["MIT"] | null | null | null
# used by Matcher
RECEIVE_KEY = "_receive_{id}"
LAST_RECEIVE_KEY = "_last_receive"
ARG_KEY = "{key}"
REJECT_TARGET = "_current_target"
REJECT_CACHE_TARGET = "_next_target"
# used by Rule
PREFIX_KEY = "_prefix"
CMD_KEY = "command"
RAW_CMD_KEY = "raw_command"
CMD_ARG_KEY = "command_arg"
SHELL_ARGS = "_args"
SHELL_ARGV = "_argv"
REGEX_MATCHED = "_matched"
REGEX_GROUP = "_matched_groups"
REGEX_DICT = "_matched_dict"
11ed16385a989b7c743480e1ee477feb796f62cc | 9,845 | py | Python | iaso/tests/api/test_token.py | ekhalilbsq/iaso | e6400c52aeb4f67ce1ca83b03efa3cb11ef235ee | ["MIT"] | 29 | 2020-12-26T07:22:19.000Z | 2022-03-07T13:40:09.000Z
iaso/tests/api/test_token.py | ekhalilbsq/iaso | e6400c52aeb4f67ce1ca83b03efa3cb11ef235ee | ["MIT"] | 150 | 2020-11-09T15:03:27.000Z | 2022-03-07T15:36:07.000Z
iaso/tests/api/test_token.py | ekhalilbsq/iaso | e6400c52aeb4f67ce1ca83b03efa3cb11ef235ee | ["MIT"] | 4 | 2020-11-09T10:38:13.000Z | 2021-10-04T09:42:47.000Z
from django.test import tag
from django.core.files import File
from unittest import mock
from iaso import models as m
from iaso.test import APITestCase
class TokenAPITestCase(APITestCase):
@classmethod
def setUpTestData(cls):
data_source = m.DataSource.objects.create(name="counsil")
version = m.SourceVersion.objects.create(data_source=data_source, number=1)
star_wars = m.Account.objects.create(name="Star Wars", default_version=version)
cls.yoda = cls.create_user_with_profile(username="yoda", account=star_wars)
cls.yoda.set_password("IMomLove")
cls.yoda.save()
cls.jedi_council = m.OrgUnitType.objects.create(name="Jedi Council", short_name="Cnc")
cls.jedi_council_corruscant = m.OrgUnit.objects.create(name="Corruscant Jedi Council")
cls.project = m.Project.objects.create(
name="Hydroponic gardens",
app_id="stars.empire.agriculture.hydroponics",
account=star_wars,
needs_authentication=True,
)
cls.form_1 = m.Form.objects.create(name="Hydroponics study", period_type=m.MONTH, single_per_period=True)
cls.form_2 = m.Form.objects.create(
name="Hydroponic public survey",
form_id="sample2",
device_field="deviceid",
location_field="geoloc",
period_type="QUARTER",
single_per_period=True,
)
form_2_file_mock = mock.MagicMock(spec=File)
form_2_file_mock.name = "test.xml"
cls.form_2.form_versions.create(file=form_2_file_mock, version_id="2020022401")
cls.form_2.org_unit_types.add(cls.jedi_council)
cls.create_form_instance(form=cls.form_2, period="202001", org_unit=cls.jedi_council_corruscant)
cls.form_2.save()
cls.project.unit_types.add(cls.jedi_council)
cls.project.forms.add(cls.form_1)
cls.project.forms.add(cls.form_2)
cls.project.save()
def authenticate_using_token(self):
response = self.client.post(f"/api/token/", data={"username": "yoda", "password": "IMomLove"}, format="json")
self.assertJSONResponse(response, 200)
response_data = response.json()
access_token = response_data.get("access")
self.client.credentials(HTTP_AUTHORIZATION=f"Bearer {access_token}")
return response_data
def test_acquire_token_and_authenticate(self):
"""Test token authentication"""
self.authenticate_using_token()
response = self.client.get("/api/forms/?app_id=stars.empire.agriculture.hydroponics")
self.assertJSONResponse(response, 200)
response_data = response.json()
form_ids = [f["id"] for f in response_data["forms"]]
self.assertTrue(self.form_2.id in form_ids)
def test_acquire_token_and_post_instance(self):
"""Test upload to a project that requires authentication"""
# Unauthenticated case is already tested in test_api
self.authenticate_using_token()
uuid = "4b7c3954-f69a-4b99-83b1-df73957b32E1"
instance_body = [
{
"id": uuid,
"latitude": 4.4,
"created_at": 1565258153704,
"updated_at": 1565258153704,
"orgUnitId": self.jedi_council_corruscant.id,
"formId": self.form_2.id,
"longitude": 4.4,
"accuracy": 10,
"altitude": 100,
"file": "\/storage\/emulated\/0\/odk\/instances\/RDC Collecte Data DPS_2_2019-08-08_11-54-46\/RDC Collecte Data DPS_2_2019-08-08_11-54-46.xml",
"name": "the name",
}
]
response = self.client.post(
"/api/instances/?app_id=stars.empire.agriculture.hydroponics", data=instance_body, format="json"
)
self.assertEqual(response.status_code, 200)
self.assertTrue(m.Instance.objects.filter(uuid=uuid).first() is not None)
def test_unauthenticated_post_instance(self):
"""Test unauthenticated upload to a project that requires authentication"""
# Unauthenticated case is already tested in test_api
uuid = "4b7c3954-f69a-4b99-83b1-df73957b32E2"
instance_body = [
{
"id": uuid,
"latitude": 4.4,
"created_at": 1565258153704,
"updated_at": 1565258153704,
"orgUnitId": self.jedi_council_corruscant.id,
"formId": self.form_2.id,
"longitude": 4.4,
"accuracy": 10,
"altitude": 100,
"file": "\/storage\/emulated\/0\/odk\/instances\/RDC Collecte Data DPS_2_2019-08-08_11-54-46\/RDC Collecte Data DPS_2_2019-08-08_11-54-46.xml",
"name": "the name",
}
]
response = self.client.post(
"/api/instances/?app_id=stars.empire.agriculture.hydroponics", data=instance_body, format="json"
)
self.assertEqual(response.status_code, 200)
self.assertIsNone(m.Instance.objects.filter(uuid=uuid).first())
        # The result is that the instance is not created, even though the API sent back a 200.
        # This is normal: we want the API to accept all creation requests so that we can debug
        # on the server and not have data stuck on a mobile phone.
        # An APIImport record with has_problems set to True should be created
self.assertAPIImport(
"instance",
request_body=instance_body,
has_problems=True,
exception_contains_string="Could not find project for user",
)
def test_refresh(self):
"""Test refreshing authentication token"""
# Unauthenticated case is already tested in test_api
response_data = self.authenticate_using_token()
refresh_token = response_data.get("refresh")
        response = self.client.post("/api/token/refresh/", data={"refresh": refresh_token}, format="json")
self.assertJSONResponse(response, 200)
response_data = response.json()
access_token_2 = response_data.get("access")
self.client.credentials(HTTP_AUTHORIZATION=f"Bearer {access_token_2}")
# test an endpoint that requires authentication
response = self.client.get("/api/orgunits/?app_id=stars.empire.agriculture.hydroponics")
self.assertJSONResponse(response, 200)
def test_no_token(self):
"""Test invalid authentication tokens"""
# Unauthenticated case is already tested in test_api
self.client.credentials(HTTP_AUTHORIZATION=f"Bearer ")
# test an endpoint that requires authentication
response = self.client.get("/api/groups/?app_id=stars.empire.agriculture.hydroponics")
self.assertJSONResponse(response, 403)
self.client.credentials(HTTP_AUTHORIZATION=f"Bearer WRONG")
# test an endpoint that requires authentication
response = self.client.get("/api/groups/?app_id=stars.empire.agriculture.hydroponics")
self.assertJSONResponse(response, 403)
def test_acquire_token_and_post_org_unit(self):
"""Test upload to a project that requires authentication"""
# Unauthenticated case is already tested in test_api
self.authenticate_using_token()
uuid = "r5dx2671-bb59-4fb2-a4a0-4af80573e2de"
name = "Kashyyyk Wookies Council"
unit_body = [
{
"id": uuid,
"latitude": 0,
"created_at": 1565194077692,
"updated_at": 1565194077693,
"org_unit_type_id": self.jedi_council.id,
"parent_id": None,
"longitude": 0,
"accuracy": 0,
"altitude": 0,
"time": 0,
"name": name,
}
]
response = self.client.post(
"/api/orgunits/?app_id=stars.empire.agriculture.hydroponics", data=unit_body, format="json"
)
self.assertEqual(response.status_code, 200)
        self.assertIsNotNone(m.OrgUnit.objects.filter(uuid=uuid).first())
self.assertAPIImport("orgUnit", request_body=unit_body, has_problems=False, check_auth_header=True)
def test_unauthenticated_post_org_unit(self):
"""Test upload to a project that requires authentication without token"""
# Unauthenticated case is already tested in test_api
uuid = "s5dx2671-ac59-4fb2-a4a0-4af80573e2de"
name = "Antar 4 Council"
unit_body = [
{
"id": uuid,
"latitude": 0,
"created_at": 1565194077692,
"updated_at": 1565194077693,
"org_unit_type_id": self.jedi_council.id,
"parent_id": None,
"longitude": 0,
"accuracy": 0,
"altitude": 0,
"time": 0,
"name": name,
}
]
response = self.client.post(
"/api/orgunits/?app_id=stars.empire.agriculture.hydroponics", data=unit_body, format="json"
)
self.assertEqual(response.status_code, 200)
self.assertIsNone(m.OrgUnit.objects.filter(uuid=uuid).first())
        # The result is that the org unit is not created, even though the API sent back a 200.
        # This is normal: we want the API to accept all creation requests so that we can debug
        # on the server and not have data stuck on a mobile phone.
        # An APIImport record with has_problems set to True should be created
self.assertAPIImport(
"orgUnit",
request_body=unit_body,
has_problems=True,
exception_contains_string="Could not find project for user",
)
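# A minimal client-side sketch (not part of the test suite) of the same
# obtain/use/refresh JWT cycle exercised above; the base URL is a placeholder.
if __name__ == "__main__":
    import requests

    BASE = "https://iaso.example.invalid"  # hypothetical server, assumption only
    tokens = requests.post(f"{BASE}/api/token/", json={"username": "yoda", "password": "IMomLove"}).json()
    headers = {"Authorization": f"Bearer {tokens['access']}"}
    forms = requests.get(f"{BASE}/api/forms/?app_id=stars.empire.agriculture.hydroponics", headers=headers)
    # when the access token expires, trade the refresh token for a new access token
    tokens["access"] = requests.post(f"{BASE}/api/token/refresh/", json={"refresh": tokens["refresh"]}).json()["access"]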
| 40.020325
| 159
| 0.622854
| 1,154
| 9,845
| 5.146447
| 0.205373
| 0.023573
| 0.030308
| 0.024247
| 0.703991
| 0.676376
| 0.653982
| 0.614245
| 0.59707
| 0.558512
| 0
| 0.048238
| 0.27354
| 9,845
| 245
| 160
| 40.183673
| 0.782159
| 0.139563
| 0
| 0.451977
| 0
| 0.011299
| 0.205606
| 0.100131
| 0
| 0
| 0
| 0
| 0.101695
| 1
| 0.050847
| false
| 0.011299
| 0.045198
| 0
| 0.107345
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
11f229b9297d3ad1a65bef9c394df841a9ccc992
| 6,552
|
py
|
Python
|
interpro.py
|
TAMU-CPT/blast-db-download
|
53261f08d1f9193c4f538fa90983a465502190a9
|
[
"BSD-3-Clause"
] | null | null | null |
interpro.py
|
TAMU-CPT/blast-db-download
|
53261f08d1f9193c4f538fa90983a465502190a9
|
[
"BSD-3-Clause"
] | 3
|
2017-09-15T18:58:21.000Z
|
2020-03-24T19:11:16.000Z
|
interpro.py
|
TAMU-CPT/blast-db-download
|
53261f08d1f9193c4f538fa90983a465502190a9
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
import time
import datetime
import logging
import subprocess
logging.basicConfig(level=logging.INFO)
log = logging.getLogger('dl')
NOW = datetime.datetime.now()
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
DOWNLOAD_ROOT = os.getcwd()
VERSION = '5.22-61.0'
PANTHER_VERSION = '11.1'
class Timer:
def __enter__(self):
self.start = time.time()
return self
def __exit__(self, *args):
self.end = time.time()
self.interval = self.end - self.start
class XUnitReportBuilder(object):
XUNIT_TPL = """<?xml version="1.0" encoding="UTF-8"?>
<testsuite name="{suite_name}" tests="{total}" errors="{errors}" failures="{failures}" skip="{skips}">
{test_cases}
</testsuite>
"""
TESTCASE_TPL = """ <testcase classname="{classname}" name="{name}" {time}>
{error}
</testcase>"""
ERROR_TPL = """ <error type="{test_name}" message="{errorMessage}">{errorDetails}
</error>"""
def __init__(self, suite_name):
self.xunit_data = {
'total': 0, 'errors': 0, 'failures': 0, 'skips': 0
}
self.test_cases = []
self.suite_name = suite_name
def ok(self, classname, test_name, time=0):
log.info("OK: [%s] %s", classname, test_name)
self.xunit_data['total'] += 1
self.__add_test(test_name, classname, errors="", time=time)
def error(self, classname, test_name, errorMessage, errorDetails="", time=0):
log.info("ERROR: [%s] %s", classname, test_name)
self.xunit_data['total'] += 1
self.__add_test(test_name, classname, errors=self.ERROR_TPL.format(
errorMessage=errorMessage, errorDetails=errorDetails, test_name=test_name), time=time)
def failure(self, classname, test_name, errorMessage, errorDetails="", time=0):
log.info("FAIL: [%s] %s", classname, test_name)
self.xunit_data['total'] += 1
self.__add_test(test_name, classname, errors=self.ERROR_TPL.format(
errorMessage=errorMessage, errorDetails=errorDetails, test_name=test_name), time=time)
def skip(self, classname, test_name, time=0):
log.info("SKIP: [%s] %s", classname, test_name)
self.xunit_data['skips'] += 1
self.xunit_data['total'] += 1
self.__add_test(test_name, classname, errors=" <skipped />", time=time)
def __add_test(self, name, classname, errors, time=0):
t = 'time="%s"' % time
self.test_cases.append(
self.TESTCASE_TPL.format(name=name, error=errors, classname=classname, time=t))
def serialize(self):
self.xunit_data['test_cases'] = '\n'.join(self.test_cases)
self.xunit_data['suite_name'] = self.suite_name
return self.XUNIT_TPL.format(**self.xunit_data)
xunit = XUnitReportBuilder('interpro_installer')
def timedCommand(classname, testname, errormessage, test_file, command, shell=False, cwd=None):
if os.path.exists(test_file):
xunit.skip(classname, testname)
else:
try:
if not cwd:
cwd = DOWNLOAD_ROOT
with Timer() as t:
# If it's a shell command we automatically join things
# to make our timedCommand calls completely uniform
log.info('cd %s && ' % cwd + ' '.join(command))
if shell:
command = ' '.join(command)
subprocess.check_call(command, shell=shell, cwd=cwd)
xunit.ok(classname, testname, time=t.interval)
except subprocess.CalledProcessError as cpe:
xunit.failure(classname, testname, errormessage, errorDetails=str(cpe), time=t.interval)
raise Exception("Cannot continute")
def interpro():
classname = 'interpro'
extracted_dir = os.path.join(DOWNLOAD_ROOT, 'interproscan-' + VERSION)
data_dir = os.path.join(extracted_dir, 'data')
tarball = 'interproscan-%s-64-bit.tar.gz' % VERSION
panther_tarball = 'panther-data-%s.tar.gz' % PANTHER_VERSION
panther_tarball_md5 = panther_tarball + '.md5'
base_data_url = 'ftp://ftp.ebi.ac.uk/pub/software/unix/iprscan/5/data/'
# wget
md5sum = tarball + '.md5'
base_url = 'ftp://ftp.ebi.ac.uk/pub/software/unix/iprscan/5/%s/' % VERSION
timedCommand(classname, 'download.tarball', 'Download failed', tarball, [
'wget',
base_url + tarball,
'-O', tarball,
])
timedCommand(classname, 'download.md5sum', 'Download failed', md5sum, [
'wget',
base_url + md5sum,
'-O', md5sum,
])
timedCommand(classname, 'contents.verify', 'MD5SUM failed to validate', os.path.join(extracted_dir, 'interproscan.sh'), [
'md5sum', '-c', md5sum
])
timedCommand(classname, 'contents.extract', 'Failed to extract', os.path.join(extracted_dir, 'interproscan.sh'), [
'tar', 'xvfz', tarball
])
timedCommand(classname, 'setup.phobius', 'Failed to install phobius', os.path.join(extracted_dir, 'bin', 'phobius', '1.01', 'phobius.pl'), [
'tar', 'xvfz', os.path.join(os.path.pardir, 'phobius.tgz')
], cwd=extracted_dir)
timedCommand(classname, 'setup.signalp', 'Failed to install signalp', os.path.join(extracted_dir, 'bin', 'signalp', '4.1', 'signalp'), [
'tar', 'xvfz', os.path.join(os.path.pardir, 'signalp.tgz')
], cwd=extracted_dir)
timedCommand(classname, 'panther.download_tarball', 'Download failed', os.path.join(extracted_dir, 'data', panther_tarball), [
'wget',
base_data_url + panther_tarball,
'-O', os.path.join(extracted_dir, 'data', panther_tarball),
])
timedCommand(classname, 'panther.download_md5sum', 'Download failed', os.path.join(extracted_dir, 'data', panther_tarball_md5), [
'wget',
base_data_url + panther_tarball_md5,
'-O', os.path.join(extracted_dir, 'data', panther_tarball_md5),
])
timedCommand(classname, 'panther.verify', 'MD5SUM failed to validate', os.path.join(extracted_dir, 'data', 'panther'), [
'md5sum', '-c', panther_tarball_md5
], cwd=data_dir)
timedCommand(classname, 'panther.extract', 'Failed to extract', os.path.join(extracted_dir, 'data', 'panther'), [
'tar', 'xvfz', panther_tarball
], cwd=data_dir)
if __name__ == '__main__':
try:
interpro()
except Exception:
pass
finally:
# Write out the report
with open(sys.argv[1], 'w') as handle:
handle.write(xunit.serialize())
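# A minimal sketch, separate from the installer flow above, showing the shape
# of the report that XUnitReportBuilder emits (suite and test names are made up):
def _demo_report():
    demo = XUnitReportBuilder('demo_suite')
    demo.ok('demo', 'download.tarball', time=1.5)
    demo.skip('demo', 'contents.extract')
    print(demo.serialize())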
| 36.808989
| 144
| 0.6308
| 793
| 6,552
| 5.039092
| 0.218159
| 0.028529
| 0.035035
| 0.052302
| 0.368118
| 0.362613
| 0.30956
| 0.297047
| 0.258008
| 0.213964
| 0
| 0.010557
| 0.219322
| 6,552
| 177
| 145
| 37.016949
| 0.770674
| 0.022741
| 0
| 0.175182
| 0
| 0.021898
| 0.20944
| 0.044545
| 0
| 0
| 0
| 0
| 0
| 1
| 0.080292
| false
| 0.007299
| 0.043796
| 0
| 0.175182
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
11f3026c5b723ebaca4c3ade5e133a02d8fccef0
| 6,423
|
py
|
Python
|
Developing.../main01.py
|
MuhikaThomas/Pro-forma
|
da97d9a6581f4dfbd06fe4a0db1128ebb7472d81
|
[
"MIT"
] | null | null | null |
Developing.../main01.py
|
MuhikaThomas/Pro-forma
|
da97d9a6581f4dfbd06fe4a0db1128ebb7472d81
|
[
"MIT"
] | null | null | null |
Developing.../main01.py
|
MuhikaThomas/Pro-forma
|
da97d9a6581f4dfbd06fe4a0db1128ebb7472d81
|
[
"MIT"
] | null | null | null |
import kivy
from kivy.app import App
from kivy.uix.tabbedpanel import TabbedPanelHeader
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.scrollview import ScrollView
from kivy.uix.gridlayout import GridLayout
from kivy.uix.textinput import TextInput
from kivy.uix.slider import Slider
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.lang import Builder
Builder.load_string("""
""")
class Proforma(App):
def build(self):
#*******ROOTWIDGET*******
layout = GridLayout(rows=2)
#*******SUB-WIDGETS*******
layoutTop = GridLayout(cols=3,rows=1)#SUB-WIDGET-1
layoutTop.size_hint = (1, 0.1)
layoutMid = GridLayout(cols=1, size_hint_x=1)#SUB-WIDGET-2
#*******CONTENT-OF-SUB-WIDGET-1*******
backbtn = Button()
title = Label(text = 'Pro-Forma App', font_size = '20sp', pos = (0,300), size_hint_y = None,size_hint_x=None, width=200, halign ='right', valign='middle')
title.size_hint = (None, 0.1)
dropbtn = Button()
#*******CONTENT-OF-SUB-WIDGET-2*******
tp_panel = TabbedPanel()
tp_panel.default_tab_text = "Login Tab"
#*******TAB1*******
th_tab1 = TabbedPanelHeader(text = 'Pro-Forma')
#*******MAIN-LAYOUT-FOR-TAB1*******
mainlayout = GridLayout(cols=1, spacing=10)
#*******LAYOUT-FOR-PROPERTY-INFORMATION*******
layouttab1 = GridLayout(cols=2,rows=6, pos_hint ={'center_x': .5, 'center_y': .5},row_force_default=True, row_default_height=40)
#*******LAYOUT-FOR-UNIT-MIX*******
layoutmix = GridLayout(cols=4, pos_hint ={'center_x': .5, 'center_y': .5},row_force_default=True, row_default_height=40)
#*******LAYOUT-FOR-EXPENSES*******
layoutexpense = GridLayout(cols=2)
#*******LAYOUT-FOR-ACCOUNTS*******
#*******CONTENT1*******
mainlayout.add_widget(Label(text='Property Information',size_hint_y=None, height=50))
#*******CONTENT2*******
layouttab1.add_widget(Label(text= 'Property Name', size_hint_x=None, width=200,size_hint_y=None, height=50, font_size='20sp'))
layouttab1.add_widget(TextInput(text='input', font_size=15, halign ='left', valign='middle'))
layouttab1.add_widget(Label(text= 'Property Address', size_hint_x=None, width=200,size_hint_y=None, height=50, font_size='20sp'))
layouttab1.add_widget(TextInput(text='input', font_size=15, halign ='left', valign='middle'))
layouttab1.add_widget(Label(text= 'Town/City', size_hint_x=None, width=200,size_hint_y=None, height=50, font_size='20sp'))
layouttab1.add_widget(TextInput(text='input', font_size=15, halign ='left', valign='middle'))
layouttab1.add_widget(Label(text= 'Asking Price', size_hint_x=None, width=200,size_hint_y=None, height=50, font_size='20sp'))
layouttab1.add_widget(TextInput(text='input', font_size=15, halign ='left', valign='middle'))
layouttab1.add_widget(Label(text= 'Total Units', size_hint_x=None, width=200,size_hint_y=None, height=50, font_size='20sp'))
layouttab1.add_widget(TextInput(text='input', font_size=15, halign ='left', valign='middle'))
layouttab1.add_widget(Label(text= 'Square Footage', size_hint_x=None, width=200,size_hint_y=None, height=50, font_size='20sp'))
layouttab1.add_widget(TextInput(text='input', font_size=15, halign ='left', valign='middle'))
mainlayout.add_widget(layouttab1)
#*******CONTENT3*******
mainlayout.add_widget(Label(text='Unit Mix',size_hint_x=None, width=200, size_hint_y=None, height=50))
#*******CONTENT4*******
layoutmix.add_widget(Label(text='# of Units'))
layoutmix.add_widget(Label(text='Unit Type'))
layoutmix.add_widget(Label(text='SquareFeet'))
layoutmix.add_widget(Label(text='Monthly Rent'))
layoutmix.add_widget(TextInput(text='Input', font_size=15))
layoutmix.add_widget(TextInput(text='Input', font_size=15))
layoutmix.add_widget(TextInput(text='Input', font_size=15))
layoutmix.add_widget(TextInput(text='Input', font_size=15))
mainlayout.add_widget(layoutmix)
#*******CONTENT5*******
mainlayout.add_widget(Label(text='Expenses',size_hint_x=None, width=200, size_hint_y=None, height=50))
#*******CONTENT6*******
layoutexpense.add_widget(Label(text='Accounting'))
layoutexpense.add_widget(TextInput(text='Input', font_size=15))
layoutexpense.add_widget(Label(text='Advertising'))
layoutexpense.add_widget(TextInput(text='Input', font_size=15))
layoutexpense.add_widget(Label(text='Bank Charges'))
layoutexpense.add_widget(TextInput(text='Input', font_size=15))
layoutexpense.add_widget(Label(text='Electricity'))
layoutexpense.add_widget(TextInput(text='Input', font_size=15))
layoutexpense.add_widget(Label(text='Gas'))
layoutexpense.add_widget(TextInput(text='Input', font_size=15))
layoutexpense.add_widget(Label(text='Security'))
layoutexpense.add_widget(TextInput(text='Input', font_size=15))
layoutexpense.add_widget(Label(text='All insurance'))
layoutexpense.add_widget(TextInput(text='Input', font_size=15))
layoutexpense.add_widget(Label(text='Permits and fees'))
layoutexpense.add_widget(TextInput(text='Input', font_size=15))
layoutexpense.add_widget(Label(text='Maintenance'))
layoutexpense.add_widget(TextInput(text='Input', font_size=15))
layoutexpense.add_widget(Label(text='Trash Pick-up'))
layoutexpense.add_widget(TextInput(text='Input', font_size=15))
layoutexpense.add_widget(Label(text='All other'))
layoutexpense.add_widget(TextInput(text='Input', font_size=15))
mainlayout.add_widget(layoutexpense)
#*******CONTENT7*******
mainlayout.add_widget(Label(text='Accounts'))
#*******CONTENT7*******
        #*******SCROLLABILITY*******
#*******CALLING-MAINLAYOUT-IN-TAB1*******
th_tab1.content = mainlayout
#___*******TAB2*******___#
th_tab2 = TabbedPanelHeader(text = 'Info. Tab')
#___*******TAB3*******___#
        th_tab3 = TabbedPanelHeader(text = 'Due Diligence')
#___*******TAB4*******___#
th_tab4 = TabbedPanelHeader(text = 'Saved Reports')
#*******CALLING-TABS-TO-tp_panel*******
tp_panel.add_widget(th_tab1)
tp_panel.add_widget(th_tab2)
tp_panel.add_widget(th_tab3)
tp_panel.add_widget(th_tab4)
#*******ADDING-CONTENTS-OF-SUB-WIDGETS*******
layoutTop.add_widget(backbtn)
layoutTop.add_widget(title)
layoutTop.add_widget(dropbtn)
layoutMid.add_widget(tp_panel)
#*******ADDING-CONTENTS-OF-ROOT-WIDGET*******
layout.add_widget(layoutTop)
layout.add_widget(layoutMid)
#*******CALLING-THE-ROOT-WIDGET*******
return layout
if __name__ == '__main__':
Proforma().run()
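# A pared-down sketch of the TabbedPanel pattern used above, assuming only
# stock Kivy widgets: one header whose content is a single Label.
class MiniTabs(App):
    def build(self):
        panel = TabbedPanel()
        tab = TabbedPanelHeader(text='Demo')
        tab.content = Label(text='Hello from a tab')
        panel.add_widget(tab)
        return panel

# MiniTabs().run()  # uncomment to try the reduced example on its own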
| 47.932836
| 156
| 0.717422
| 867
| 6,423
| 5.096886
| 0.175317
| 0.120163
| 0.079203
| 0.101833
| 0.603983
| 0.514596
| 0.495135
| 0.495135
| 0.495135
| 0.492193
| 0
| 0.029226
| 0.089055
| 6,423
| 133
| 157
| 48.293233
| 0.72603
| 0.127978
| 0
| 0.216495
| 0
| 0
| 0.105603
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.010309
| false
| 0
| 0.123711
| 0
| 0.154639
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
11f30bdb0ea58245a57190b0de64ce5ae30b036d
| 1,943
|
py
|
Python
|
day8/day8.py
|
jwhitex/AdventOfCode2018
|
e552185f7d6413ccdad824911c66a6590e8de9bb
|
[
"MIT"
] | null | null | null |
day8/day8.py
|
jwhitex/AdventOfCode2018
|
e552185f7d6413ccdad824911c66a6590e8de9bb
|
[
"MIT"
] | null | null | null |
day8/day8.py
|
jwhitex/AdventOfCode2018
|
e552185f7d6413ccdad824911c66a6590e8de9bb
|
[
"MIT"
] | null | null | null |
import itertools
from io import StringIO
from queue import LifoQueue
inputs = "2 3 0 3 10 11 12 1 1 0 1 99 2 1 1 2"
#data = [int(v) for v in StringIO(inputs).read().split(' ')]
data = [int(v) for v in open("day8.input").read().split(' ')]
def parse_packet(idata, lifoq_children, tc_metadata):
if not lifoq_children.empty():
        c_childn_level = lifoq_children.get()
else: c_childn_level = 1
for i in range(0, c_childn_level):
c_childn = next(idata, None)
if c_childn is None: break
# know iter not empty
c_metad = next(idata) # know has value
if c_childn > 0:
lifoq_children.put(c_childn)
tc_metadata += parse_packet(idata, lifoq_children, 0)
for i in range(0, c_metad):
md = next(idata)
tc_metadata += md
return tc_metadata
# idata = iter(data)
# q = LifoQueue()
# tc_metadata = parse_packet(idata, q, 0)
# print(tc_metadata)
# pt2
def parse_packet_pt2(idata, lifoq_children):
level_values = []
if not lifoq_children.empty():
        c_childn_level = lifoq_children.get()
else: c_childn_level = 1
for i in range(0, c_childn_level):
child_values = []
tc_metadata = 0
c_childn = next(idata, None)
if c_childn is None: break
# know iter not empty
c_metad = next(idata)
if c_childn > 0:
lifoq_children.put(c_childn)
# list of values from children
child_values = parse_packet_pt2(idata, lifoq_children)
for i in range(0, c_metad):
md = next(idata)
if c_childn == 0:
tc_metadata += md
else:
if md == 0: continue
if len(child_values) >= md:
tc_metadata += child_values[md-1]
level_values += [tc_metadata]
return level_values
idata = iter(data)
q = LifoQueue()
tc_metadata = parse_packet_pt2(idata, q)[0]
print(tc_metadata)
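# Sanity check against the sample tree in `inputs` above; the puzzle's stated
# sample answers are 138 (part 1) and 66 (part 2).
sample = [int(v) for v in inputs.split(' ')]
assert parse_packet(iter(sample), LifoQueue(), 0) == 138
assert parse_packet_pt2(iter(sample), LifoQueue())[0] == 66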
| 30.359375
| 66
| 0.5965
| 284
| 1,943
| 3.866197
| 0.228873
| 0.095628
| 0.065574
| 0.040073
| 0.663024
| 0.601093
| 0.468124
| 0.468124
| 0.468124
| 0.327869
| 0
| 0.029608
| 0.304683
| 1,943
| 63
| 67
| 30.84127
| 0.783124
| 0.124035
| 0
| 0.5
| 0
| 0
| 0.027187
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.0625
| 0
| 0.145833
| 0.020833
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
11f3952caf0eac585e166a957bfe31975eafdc39
| 2,971
|
py
|
Python
|
dataset_utils/roi.py
|
kocurvik/retinanet_traffic_3D
|
592ceac767750c65bb3d6678b36e6880a7bb0403
|
[
"Apache-2.0"
] | 12
|
2021-04-06T00:50:41.000Z
|
2022-03-23T03:27:02.000Z
|
dataset_utils/roi.py
|
kocurvik/retinanet_traffic_3D
|
592ceac767750c65bb3d6678b36e6880a7bb0403
|
[
"Apache-2.0"
] | 7
|
2021-07-13T12:47:41.000Z
|
2022-03-05T15:08:51.000Z
|
dataset_utils/roi.py
|
kocurvik/retinanet_traffic_3D
|
592ceac767750c65bb3d6678b36e6880a7bb0403
|
[
"Apache-2.0"
] | 4
|
2021-07-15T12:22:06.000Z
|
2022-03-01T03:12:36.000Z
|
import json
import os
import cv2
import numpy as np
from dataset_utils.geometry import computeCameraCalibration
def line_to_point(p1, p2, p3):
return np.abs(np.cross(p2 - p1, p3 - p1, axis=2) / np.linalg.norm(p2 - p1, axis=2))
def get_pts(vid_dir, json_path):
video_path = os.path.join(vid_dir, 'video.avi')
mask_path = os.path.join(vid_dir, 'video_mask.png')
with open(json_path, 'r+') as file:
# with open(os.path.join(os.path.dirname(json_path), 'system_retinanet_first.json'), 'r+') as file:
structure = json.load(file)
camera_calibration = structure['camera_calibration']
vp0, vp1, vp2, _, _, _ = computeCameraCalibration(camera_calibration["vp1"], camera_calibration["vp2"],
camera_calibration["pp"])
vp0 = vp0[:-1] / vp0[-1]
vp1 = vp1[:-1] / vp1[-1]
vp2 = vp2[:-1] / vp2[-1]
cap = cv2.VideoCapture(video_path)
ret, frame = cap.read()
frame = cv2.resize(frame, (640, 360))
vp0 = vp0 / 3
prvs = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
hsv = np.zeros_like(frame)
accumulator = np.zeros([360, 640])
hsv[..., 1] = 255
y, x = np.mgrid[0:360, 0:640]
yx = np.stack([x, y], axis=2)
cnt = 0
    while cap.isOpened() and cnt < 10000:
        cnt += 1
        ret, frame2 = cap.read()
        if not ret:
            break
frame2 = cv2.resize(frame2, (640, 360))
next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 15, 3, 7, 1.5, 0)
d = line_to_point(yx, yx + flow, vp0)
# for y in range(360):
# for x in range(640):
# p1 = np.array([x,y])
# d[y,x] = line_to_point(p1, p1 + flow[y,x], vp0)
accepted = np.zeros_like(d)
accepted[d < 3] = 1
n = np.linalg.norm(flow, axis=2)
accepted[n < 1] = 0
accepted[flow[:, :, 1] < 0] = 0
accumulator = accumulator + accepted
# mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
# hsv[..., 0] = ang * 180 / np.pi / 2
# hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
# bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
# cv2.imshow('frame2', bgr)
cv2.imshow('accepted', accepted)
cv2.imshow('frame', frame2)
final = np.zeros_like(accumulator)
final[accumulator > 0.01 * np.max(accumulator)] = 1
cv2.imshow('accumulator', accumulator / np.max(accumulator))
cv2.imshow('norm', n / np.max(n))
cv2.imshow('final', final)
k = cv2.waitKey(30) & 0xff
if k == 27:
break
prvs = next
cv2.destroyAllWindows()
cap.release()
if __name__ == '__main__':
vid_dir = 'D:/Skola/PhD/data/2016-ITS-BrnoCompSpeed/dataset/session5_left'
result_path = 'D:/Skola/PhD/data/2016-ITS-BrnoCompSpeed/results/session5_left/system_SochorCVIU_Edgelets_BBScale_Reg.json'
get_pts(vid_dir, result_path)
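# Tiny numeric check of line_to_point (assumption: points are stacked on an
# (H, W, 2) grid so the cross product runs over axis=2, as in the loop above).
if __name__ == '__main__':
    # distance from (0, 1) to the line through (0, 0) and (1, 0) is 1
    p1 = np.zeros((1, 1, 2))
    p2 = np.array([[[1.0, 0.0]]])
    p3 = np.array([[[0.0, 1.0]]])
    print(line_to_point(p1, p2, p3))  # -> [[1.]]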
| 32.648352
| 126
| 0.582969
| 413
| 2,971
| 4.062954
| 0.324455
| 0.032181
| 0.019666
| 0.015495
| 0.06913
| 0.06913
| 0.06913
| 0
| 0
| 0
| 0
| 0.073838
| 0.261528
| 2,971
| 90
| 127
| 33.011111
| 0.690975
| 0.152137
| 0
| 0
| 0
| 0.017241
| 0.103668
| 0.066986
| 0
| 0
| 0.001595
| 0
| 0
| 1
| 0.034483
| false
| 0
| 0.086207
| 0.017241
| 0.137931
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
11f661d7ecc4156688dc11d7e9f3988ffd85ee03
| 1,292
|
py
|
Python
|
src/ansible_remote_checks/modules/check_process.py
|
davidvoit/ansible_remote_checks
|
491f31855c96297e5466b238e648fa57c1e646d0
|
[
"MIT"
] | null | null | null |
src/ansible_remote_checks/modules/check_process.py
|
davidvoit/ansible_remote_checks
|
491f31855c96297e5466b238e648fa57c1e646d0
|
[
"MIT"
] | null | null | null |
src/ansible_remote_checks/modules/check_process.py
|
davidvoit/ansible_remote_checks
|
491f31855c96297e5466b238e648fa57c1e646d0
|
[
"MIT"
] | 1
|
2019-08-20T13:19:16.000Z
|
2019-08-20T13:19:16.000Z
|
#!/usr/bin/python2
import re
import subprocess
from ansible.module_utils.basic import AnsibleModule
def get_procs(process_regex, cmdline_regex):
cmd=["ps","-hax","-o","comm pid args"]
process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    output, error = process.communicate()
    # Python 3 returns bytes, so decode before splitting into lines
    try:
        output = output.decode()
    except (UnicodeDecodeError, AttributeError):
        pass
    lines = output.splitlines()
    processes = []
for line in lines:
process = line.split()[0]
cmdline = ' '.join(line.split()[2:])
if (not process_regex or re.findall(process_regex, process)) and (not cmdline_regex or re.findall(cmdline_regex, cmdline)):
processes.append(line)
return {
"processes": processes
}
def main():
module_args= dict(
process = dict(type='str', required=True),
cmdline = dict(type='str')
)
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=True
)
process_regex = module.params['process']
cmdline_regex = module.params['cmdline']
procs = get_procs(process_regex, cmdline_regex)
try:
result = dict(procs = procs)
except Exception as ex:
module.fail_json(msg=str(ex))
module.exit_json(**result)
if __name__ == '__main__':
main()
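# Illustrative direct call, bypassing the Ansible plumbing (assumes a Unix host
# with `ps`; the regexes below are examples, not part of the module):
#   matches = get_procs(r'python', r'manage\.py')
#   print(matches["processes"])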
| 23.490909
| 127
| 0.69195
| 164
| 1,292
| 5.280488
| 0.493902
| 0.069284
| 0.034642
| 0.046189
| 0.073903
| 0.073903
| 0
| 0
| 0
| 0
| 0
| 0.003784
| 0.181889
| 1,292
| 54
| 128
| 23.925926
| 0.815516
| 0.053406
| 0
| 0.05
| 0
| 0
| 0.048321
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0.025
| 0.075
| 0
| 0.15
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
11f7ea214def9b4195dd57f26ec40b4d4be26bb2
| 972
|
py
|
Python
|
RESSPyLab/modified_cholesky.py
|
ioannis-vm/RESSPyLab
|
306fc24d5f8ece8f2f2de274b56b80ba2019f605
|
[
"MIT"
] | 7
|
2019-10-15T09:16:41.000Z
|
2021-09-24T11:28:45.000Z
|
RESSPyLab/modified_cholesky.py
|
ioannis-vm/RESSPyLab
|
306fc24d5f8ece8f2f2de274b56b80ba2019f605
|
[
"MIT"
] | 3
|
2020-10-22T14:27:22.000Z
|
2021-11-15T17:46:49.000Z
|
RESSPyLab/modified_cholesky.py
|
ioannis-vm/RESSPyLab
|
306fc24d5f8ece8f2f2de274b56b80ba2019f605
|
[
"MIT"
] | 6
|
2019-07-22T05:47:10.000Z
|
2021-10-24T02:06:26.000Z
|
"""@package modified_cholesky
Function to perform the modified Cholesky decomposition.
"""
import numpy as np
import numpy.linalg as la
def modified_cholesky(a):
""" Returns the matrix A if A is positive definite, or returns a modified A that is positive definite.
:param np.array a: (n, n) The symmetric matrix, A.
:return list: [np.array (n, n), float] Positive definite matrix, and the factor required to do so.
See Bierlaire (2015) Alg. 11.7, pg. 278.
"""
iteration = 0
maximum_iterations = 10
identity = np.identity(len(a))
a_mod = a * 1.0
identity_factor = 0.
successful = False
    while not successful and iteration < maximum_iterations:
        iteration += 1
        try:
la.cholesky(a_mod)
successful = True
except la.LinAlgError:
identity_factor = np.max([2 * identity_factor, 0.5 * la.norm(a, 'fro')])
a_mod = a + identity_factor * identity
return [a_mod, identity_factor]
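# Short usage sketch: an already positive definite matrix passes through with a
# zero shift, while an indefinite one is regularised along the diagonal.
if __name__ == '__main__':
    _, tau = modified_cholesky(np.eye(3))
    print(tau)  # 0.0: the identity is already positive definite
    _, tau = modified_cholesky(np.array([[0.0, 1.0], [1.0, 0.0]]))
    print(tau > 0)  # True: the indefinite matrix needed a diagonal shift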
| 31.354839
| 106
| 0.653292
| 137
| 972
| 4.540146
| 0.481752
| 0.11254
| 0.057878
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026171
| 0.253086
| 972
| 30
| 107
| 32.4
| 0.830579
| 0.385802
| 0
| 0
| 0
| 0
| 0.005291
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.117647
| 0
| 0.235294
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
11f9627891295b2fef341d114f820b8acfae0f4d
| 1,713
|
py
|
Python
|
estudo/bingo/bingo.py
|
PedroMoreira87/python
|
7f8ed2d17ba12a8089618477b2738e3b1c809e74
|
[
"MIT"
] | null | null | null |
estudo/bingo/bingo.py
|
PedroMoreira87/python
|
7f8ed2d17ba12a8089618477b2738e3b1c809e74
|
[
"MIT"
] | null | null | null |
estudo/bingo/bingo.py
|
PedroMoreira87/python
|
7f8ed2d17ba12a8089618477b2738e3b1c809e74
|
[
"MIT"
] | null | null | null |
# Submit a file with the code of the teste_cartela function
#
# Bingo card checker
#
# CREATE A FUNCTION OF THE FORM:
#
# def teste_cartela(numeros_bilhete, numeros_sorteados):  # numeros_bilhete and numeros_sorteados are lists of integers
#
# ...
#
# return([bingo, n_acertos, p_acertos, [numeros_acertados], [numeros_faltantes]])  # returns a list
#
# ps: the function must support lists of any size
#
# example 1:
#
# bilhete=[1,2,3,4,6]
#
# sorteados=[1,2,3,4,5,6,7,8,9,10]
#
# x=teste_cartela(bilhete,sorteados)
#
# print(x)
#
# [True,5,100.0,[1,2,3,4,6],[]]
#
# print(x[1])
#
# 5
#
# example 2:
# bilhete=[1,4,7,13,20,22]
#
# sorteados=[11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]
#
# x=teste_cartela(bilhete,sorteados)
#
# print(x)
#
# [False,3,50.0,[13,20,22],[1,4,7]]
#
# print(x[3])
#
# [13,20,22]
bilhete1 = [1, 2, 3, 4, 6]
sorteados1 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
bilhete2 = [1, 4, 7, 13, 20, 22]
sorteados2 = [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30]
def teste_cartela(numeros_bilhete, numeros_sorteados):
bingo = False
n_acertos = 0
numeros_acertados = []
    for element in numeros_sorteados:  # another way: list(set(sorteados).intersection(bilhete))
if element in numeros_bilhete:
numeros_acertados.append(element)
n_acertos += 1
numeros_faltantes = list(set(numeros_bilhete) - set(numeros_sorteados))
if numeros_bilhete == numeros_acertados:
bingo = True
p_acertos = len(numeros_acertados) * 100 / len(numeros_bilhete)
return [bingo, n_acertos, p_acertos, numeros_acertados, numeros_faltantes]
print(teste_cartela(bilhete1, sorteados1))
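# Quick checks mirroring the two worked examples in the header comment
# (numeros_faltantes is built from a set, so its order is not guaranteed):
assert teste_cartela(bilhete1, sorteados1) == [True, 5, 100.0, [1, 2, 3, 4, 6], []]
x = teste_cartela(bilhete2, sorteados2)
assert x[:3] == [False, 3, 50.0] and sorted(x[4]) == [1, 4, 7]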
| 23.148649
| 124
| 0.669002
| 273
| 1,713
| 4.07326
| 0.344322
| 0.08813
| 0.013489
| 0.017986
| 0.371403
| 0.357914
| 0.341727
| 0.197842
| 0.197842
| 0.197842
| 0
| 0.123153
| 0.170461
| 1,713
| 73
| 125
| 23.465753
| 0.659395
| 0.472271
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0
| 0
| 0.111111
| 0.055556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
11fc76302eb18d7762bad32d8a7fb8d4acc13c44
| 3,033
|
py
|
Python
|
word_breakdown.py
|
imjeffhi4/word-breakdown
|
7edf823fbc49ac56a5dc356067938d3828edc014
|
[
"MIT"
] | null | null | null |
word_breakdown.py
|
imjeffhi4/word-breakdown
|
7edf823fbc49ac56a5dc356067938d3828edc014
|
[
"MIT"
] | null | null | null |
word_breakdown.py
|
imjeffhi4/word-breakdown
|
7edf823fbc49ac56a5dc356067938d3828edc014
|
[
"MIT"
] | null | null | null |
from transformers import GPTNeoForCausalLM, GPT2Tokenizer
from fastapi import FastAPI
import re
import json
from pydantic import BaseModel
from typing import Optional
import torch
app = FastAPI()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
morph_path = './Model'
morph_tokenizer = GPT2Tokenizer.from_pretrained(morph_path)
special_tokens = {'bos_token': '<|startoftext|>', 'pad_token': '<PAD>', 'additional_special_tokens':['<DEF>', '<SYLLABLES>', '<NULL>', '<ETY>', '<MORPH>']}
morph_tokenizer.add_special_tokens(special_tokens)
morph_model = GPTNeoForCausalLM.from_pretrained(morph_path).to(device)
class UserInput(BaseModel):
word: str
definition: Optional[str] = None
# returning WikiMorph output
@app.post('/')
async def main(x: UserInput):
return get_morpheme_output(x.word, x.definition)
def get_etymology(ety_txt):
"""Parses text to return a list of dict containing the etymology compound and definitions"""
etys = re.findall('<ETY>.+?(?=<ETY>|$)', ety_txt)
for ety in etys:
compound = re.findall("<ETY>(.+?)(?=<DEF>|$)", ety)[0].strip()
if "<NULL>" not in compound:
ety_dict = {
"Etymology Compound": re.findall("<ETY>(.+?)(?=<DEF>)", ety)[0].strip(),
"Compound Meaning": re.findall("<DEF>(.+)", ety)[0].strip()
}
yield ety_dict
else:
yield {"Etymology Compound": None, "Compound Meaning": None}
def parse_morphemes(morph_txt):
"""Parses text to return a list of affixes and a definition for each affix"""
morphs = re.findall('<MORPH>.+?(?=<MORPH>|$)', morph_txt)
for morph in morphs:
yield {
"Morpheme": re.findall("<MORPH>(.+?)(?=<DEF>)", morph)[0].strip(),
"Definition": re.findall("<DEF>(.+?)(?=<ETY>)", morph)[0].strip(),
"Etymology Compounds": list(get_etymology(re.findall("(<ETY>.+?)$", morph)[0].strip()))
}
def to_dict(generated_txt):
"""Returns a dictionary containing desired items"""
return {
"Word": re.findall('<\|startoftext\|> (.+?)(?= \w )', generated_txt)[0].strip().replace(' ', ''),
"Definition": re.findall("<DEF>(.+?)(?=<SYLLABLES>)", generated_txt)[0].strip(),
"Syllables": re.findall("<SYLLABLES> (.+?)(?=<MORPH>)", generated_txt)[0].strip().split(),
"Morphemes": list(parse_morphemes(re.findall("(<MORPH>.+?)(?=<\|endoftext\|>)", generated_txt)[0].strip()))
}
def get_morpheme_output(word, definition):
"""Calls the GPT-based model to generated morphemes"""
split_word = ' '.join(word)
if definition:
word_def = f'<|startoftext|> {word} {split_word} <DEF> {definition} <SYLLABLES>'
else:
word_def = f'<|startoftext|> {word} {split_word} <DEF> '
tokenized_string = morph_tokenizer.encode(word_def, return_tensors='pt').to(device)
output = morph_model.generate(tokenized_string, max_length=400)
generated_txt = morph_tokenizer.decode(output[0])
return to_dict(generated_txt)
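# Hedged client-side sketch, assuming the service above is running locally
# behind uvicorn on its default port (the word is just an example):
#   import requests
#   r = requests.post("http://127.0.0.1:8000/", json={"word": "unbreakable"})
#   print(r.json())  # word, definition, syllables and morpheme breakdown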
| 40.986486
| 155
| 0.636334
| 362
| 3,033
| 5.185083
| 0.298343
| 0.057539
| 0.025573
| 0.038359
| 0.101225
| 0.101225
| 0.101225
| 0.101225
| 0
| 0
| 0
| 0.006811
| 0.177052
| 3,033
| 73
| 156
| 41.547945
| 0.745192
| 0.092648
| 0
| 0.035088
| 0
| 0
| 0.22922
| 0.05346
| 0
| 0
| 0
| 0
| 0
| 1
| 0.070175
| false
| 0
| 0.122807
| 0
| 0.298246
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f506803cc0725d8f77786e4264a390f804bf912b
| 447
|
py
|
Python
|
ping_pong.py
|
kpbochenek/codewarz
|
20f600623bddd269fb845d06b1826c9e50b49594
|
[
"Apache-2.0"
] | null | null | null |
ping_pong.py
|
kpbochenek/codewarz
|
20f600623bddd269fb845d06b1826c9e50b49594
|
[
"Apache-2.0"
] | null | null | null |
ping_pong.py
|
kpbochenek/codewarz
|
20f600623bddd269fb845d06b1826c9e50b49594
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import sys
import requests
ping = sys.argv[1]
pong = sys.argv[2]
word = sys.argv[3]
if not ping.startswith('http'):
ping = 'http://' + ping
if not pong.startswith('http'):
pong = 'http://' + pong
while True:
r = requests.post(ping, data={'food': word})
answer = r.text
if 'serving' not in answer:
print(answer, end='')
break
word = answer.split()[2]
ping, pong = pong, ping
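# Illustrative invocation (hostnames are placeholders for two cooperating
# services that answer with "... serving <word> ..." until one stops):
#   ./ping_pong.py server-a.example:8000 server-b.example:8000 apple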
| 17.88
| 48
| 0.592841
| 65
| 447
| 4.076923
| 0.492308
| 0.079245
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014706
| 0.239374
| 447
| 24
| 49
| 18.625
| 0.764706
| 0.04698
| 0
| 0
| 0
| 0
| 0.077647
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.117647
| 0
| 0.117647
| 0.058824
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f506a97a368ef7e32d2a9750ae1f1a3c19762e70
| 437
|
py
|
Python
|
fenixstroy/shop/forms.py
|
wiky-avis/fenixstroy_shop
|
9e5ed0425e8fc5bcd77b7a0a640484a87c2f888c
|
[
"MIT"
] | null | null | null |
fenixstroy/shop/forms.py
|
wiky-avis/fenixstroy_shop
|
9e5ed0425e8fc5bcd77b7a0a640484a87c2f888c
|
[
"MIT"
] | 3
|
2021-09-22T18:44:30.000Z
|
2022-03-12T00:58:02.000Z
|
fenixstroy/shop/forms.py
|
wiky-avis/fenixstroy_shop
|
9e5ed0425e8fc5bcd77b7a0a640484a87c2f888c
|
[
"MIT"
] | null | null | null |
from django import forms
from .models import Comment, Rating, RatingStar
class RatingForm(forms.ModelForm):
star = forms.ModelChoiceField(
queryset=RatingStar.objects.all(),
widget=forms.RadioSelect(),
empty_label=None
)
class Meta:
model = Rating
fields = ('star',)
class CommentForm(forms.ModelForm):
class Meta:
model = Comment
fields = ['author', 'text']
| 19.863636
| 47
| 0.631579
| 44
| 437
| 6.25
| 0.613636
| 0.101818
| 0.101818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.265446
| 437
| 21
| 48
| 20.809524
| 0.856698
| 0
| 0
| 0.133333
| 0
| 0
| 0.032037
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.133333
| 0
| 0.466667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ee921704bb61e5ef659b3c250a5774e67e1fc9fd
| 3,433
|
py
|
Python
|
lib/aquilon/consistency/checks/branch.py
|
ned21/aquilon
|
6562ea0f224cda33b72a6f7664f48d65f96bd41a
|
[
"Apache-2.0"
] | 7
|
2015-07-31T05:57:30.000Z
|
2021-09-07T15:18:56.000Z
|
lib/aquilon/consistency/checks/branch.py
|
ned21/aquilon
|
6562ea0f224cda33b72a6f7664f48d65f96bd41a
|
[
"Apache-2.0"
] | 115
|
2015-03-03T13:11:46.000Z
|
2021-09-20T12:42:24.000Z
|
lib/aquilon/consistency/checks/branch.py
|
ned21/aquilon
|
6562ea0f224cda33b72a6f7664f48d65f96bd41a
|
[
"Apache-2.0"
] | 13
|
2015-03-03T11:17:59.000Z
|
2021-09-09T09:16:41.000Z
|
#!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2013,2014,2017 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from aquilon.consistency.checker import ConsistencyChecker
from aquilon.aqdb.model.branch import Branch
from aquilon.worker.processes import run_git
from aquilon.worker.dbwrappers.branch import merge_into_trash
class BranchChecker(ConsistencyChecker):
"""
Branch Consistency Checker
This module performs validation that is common for all branches (both
domains and sandboxes) in template-king.
"""
def check(self, repair=False):
# Find all of the branches that are listed in the database
db_branches = {}
for branch in self.session.query(Branch):
db_branches[branch.name] = branch
        # Find all of the branches that are in the template king, this
        # includes both domains and sandboxes
kingdir = self.config.get("broker", "kingdir")
out = run_git(['for-each-ref', '--format=%(refname:short)',
'refs/heads'], path=kingdir, loglevel=logging.DEBUG)
git_branches = set(out.splitlines())
# The trash branch is special
if self.config.has_option("broker", "trash_branch"):
git_branches.remove(self.config.get("broker", "trash_branch"))
# Branches in the database and not in the template-king
for branch in set(db_branches.keys()).difference(git_branches):
self.failure(branch, format(db_branches[branch]),
"found in the database but not in template-king")
# No repair mode. We consider AQDB more canonical than
# template-king, so we should not delete the DB object, and we don't
# have any information how to restore the branch in template-king.
# Branches in the template-king and not in the database
for branch in git_branches.difference(db_branches.keys()):
if repair:
self.logger.info("Deleting branch %s", branch)
merge_msg = []
merge_msg.append("Delete orphaned branch %s" % branch)
merge_msg.append("")
merge_msg.append("The consistency checker found this branch to be ")
merge_msg.append("orphaned.")
if self.config.has_option("broker", "trash_branch"):
merge_into_trash(self.config, self.logger, branch,
"\n".join(merge_msg),
loglevel=logging.DEBUG)
run_git(['branch', '-D', branch], path=kingdir,
loglevel=logging.DEBUG)
else:
self.failure(branch, "Branch %s" % branch,
"found in template-king but not in the database")
| 42.9125
| 84
| 0.637635
| 437
| 3,433
| 4.947368
| 0.416476
| 0.044403
| 0.030065
| 0.023589
| 0.108233
| 0.06013
| 0.06013
| 0.035153
| 0
| 0
| 0
| 0.007603
| 0.272065
| 3,433
| 79
| 85
| 43.455696
| 0.857543
| 0.377221
| 0
| 0.111111
| 0
| 0
| 0.155577
| 0.011967
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027778
| false
| 0
| 0.138889
| 0
| 0.194444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ee92be80023074621572bda99d5be62e1b63d427
| 1,418
|
py
|
Python
|
server.py
|
aoii103/magicworld
|
cad0df6aa872cd5dcd4142f83ea9fde821652551
|
[
"MIT"
] | 7
|
2018-02-05T03:14:08.000Z
|
2019-07-28T18:49:41.000Z
|
server.py
|
aoii103/magicworld
|
cad0df6aa872cd5dcd4142f83ea9fde821652551
|
[
"MIT"
] | null | null | null |
server.py
|
aoii103/magicworld
|
cad0df6aa872cd5dcd4142f83ea9fde821652551
|
[
"MIT"
] | 3
|
2019-05-21T08:58:32.000Z
|
2019-12-26T17:03:07.000Z
|
import json
import os
from extra import MainStart
import threading
import moment
from jinja2 import Environment, PackageLoader
from sanic import Sanic, response
from sanic.log import logger
from termcolor import colored
from conf import config
from spider import bot
env = Environment(loader=PackageLoader(__name__, './template'))
app = Sanic(__name__)
app.static('static_path',config.static)
@app.route('/')
async def handle_request(request):
    idList = list(os.walk("./img"))[0][1]
    if idList:
        logger.info(colored(f'{max(idList)}', 'red'))
        return response.redirect(f"/{max(idList)}")
    return response.text('')
@app.route('/<docid>')
async def handle_docid(request, docid):
datapath = f"{config.static}/{docid}/data.json"
logger.info(colored(f'load {datapath}', 'yellow'))
if os.path.exists(datapath):
try:
with open(datapath, "r") as f:
template = env.get_template('index.html')
return response.html(template.render(data=json.loads(f.read())))
except Exception as e:
logger.error(e)
return response.html('',status=404)
def run_bot():
spider = bot()
spider.start()
if __name__ == '__main__':
SPY = threading.Thread(target=MainStart, args=(run_bot, None, config.delay))
SPY.start()
app.run(host=config.host,port=config.port)
| 26.754717
| 81
| 0.648096
| 180
| 1,418
| 4.983333
| 0.45
| 0.06243
| 0.031215
| 0.046823
| 0.06243
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005391
| 0.215092
| 1,418
| 52
| 82
| 27.269231
| 0.800539
| 0
| 0
| 0
| 0
| 0
| 0.101025
| 0.024158
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025
| false
| 0
| 0.275
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ee9c514425fe52fb6f66f62ee9d6108d08382363
| 5,332
|
py
|
Python
|
solutions/solution_14.py
|
claudiobierig/adventofcode19
|
40dabd7c780ab1cd8bad4292550cd9dd1d178365
|
[
"MIT"
] | null | null | null |
solutions/solution_14.py
|
claudiobierig/adventofcode19
|
40dabd7c780ab1cd8bad4292550cd9dd1d178365
|
[
"MIT"
] | null | null | null |
solutions/solution_14.py
|
claudiobierig/adventofcode19
|
40dabd7c780ab1cd8bad4292550cd9dd1d178365
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import math
def read_input(path):
with open(path) as file:
reactions = [line.strip().split('=>') for line in file.readlines()]
reactions2 = [[r[0].strip().split(","), r[1].strip()] for r in reactions]
result = {}
for reaction in reactions2:
goal = reaction[1].strip().split()
goal_resource = goal[1]
goal_amount = int(goal[0])
start = [r.strip().split() for r in reaction[0]]
start2 = [[int(s[0]), s[1]] for s in start]
result[goal_resource] = [goal_amount, start2]
return result
def get_amount(resource, amount_needed, reactions, leftovers):
reaction = reactions[resource]
leftover = leftovers.get(resource, 0)
number_of_reactions = math.ceil((amount_needed - leftover)/reaction[0])
leftovers[resource] = leftover + number_of_reactions*reaction[0] - amount_needed
return [[number_of_reactions*r[0], r[1]] for r in reaction[1]]
def need_reaction(required_resources):
for resource in required_resources:
if resource[1] != "ORE":
return True
return False
def reduce_leftovers(leftovers, reactions):
"""
>>> reactions = read_input("input/14.txt")
>>> leftovers = {"DWBL": 10}
>>> reduce_leftovers(leftovers, reactions)
>>> leftovers
{'DWBL': 1, 'ORE': 149}
>>> leftovers = {"ZKZHV": 9}
>>> reduce_leftovers(leftovers, reactions)
>>> leftovers
{'ZKZHV': 1, 'KFKWH': 0, 'DWBL': 2, 'ORE': 532}
>>> reduce_leftovers(leftovers, reactions)
>>> leftovers
{'ZKZHV': 1, 'KFKWH': 0, 'DWBL': 2, 'ORE': 532}
>>> leftovers = {'FUEL': 0, 'BRTX': 1, 'CFBP': 1, 'HJPD': 3, 'HDRMK': 1, 'LWGNJ': 2, 'JVGRC': 2, 'CVZLJ': 2, 'PZRSQ': 2, 'LQBJP': 1, 'DVRS': 4, 'TNRGW': 2, 'QGVJV': 0, 'NSWDH': 6, 'XMHN': 0, 'PDKZ': 1, 'NDNP': 3, 'DBKL': 1, 'RLKDF': 0, 'DQPX': 0, 'BWHKF': 0, 'QMQB': 0, 'QZMZ': 3, 'HJFV': 0, 'SLQN': 2, 'XHKG': 6, 'KXHQW': 3, 'GHNG': 1, 'CSNS': 1, 'JVRQ': 0, 'PHBMP': 6, 'LZWR': 1, 'JKRZH': 0, 'WKFTZ': 2, 'GFDP': 3, 'ZKZHV': 0, 'XJFQR': 3, 'JQFM': 0, 'WQCT': 0, 'QMTMN': 0, 'QDJD': 0, 'FRTK': 2, 'MLJN': 8, 'LHXN': 2, 'DWBL': 1, 'MCWF': 2, 'VCMPS': 0, 'SVTK': 7, 'XNGTQ': 2, 'MXQF': 2, 'XCMJ': 3, 'NHVQD': 6, 'WGLN': 1, 'KFKWH': 0, 'VMDSG': 2, 'BMSNV': 0, 'WCMV': 4, 'ZJKB': 2, 'TDPN': 0}
>>> reduce_leftovers(leftovers, reactions)
>>> leftovers
{'FUEL': 0, 'BRTX': 1, 'CFBP': 1, 'HJPD': 3, 'HDRMK': 1, 'LWGNJ': 2, 'JVGRC': 2, 'CVZLJ': 2, 'PZRSQ': 2, 'LQBJP': 1, 'DVRS': 4, 'TNRGW': 2, 'QGVJV': 0, 'NSWDH': 6, 'XMHN': 0, 'PDKZ': 1, 'NDNP': 3, 'DBKL': 1, 'RLKDF': 0, 'DQPX': 0, 'BWHKF': 0, 'QMQB': 0, 'QZMZ': 3, 'HJFV': 0, 'SLQN': 2, 'XHKG': 6, 'KXHQW': 3, 'GHNG': 1, 'CSNS': 1, 'JVRQ': 0, 'PHBMP': 6, 'LZWR': 1, 'JKRZH': 0, 'WKFTZ': 2, 'GFDP': 3, 'ZKZHV': 0, 'XJFQR': 3, 'JQFM': 0, 'WQCT': 0, 'QMTMN': 0, 'QDJD': 0, 'FRTK': 2, 'MLJN': 8, 'LHXN': 2, 'DWBL': 1, 'MCWF': 2, 'VCMPS': 0, 'SVTK': 7, 'XNGTQ': 2, 'MXQF': 2, 'XCMJ': 3, 'NHVQD': 6, 'WGLN': 1, 'KFKWH': 0, 'VMDSG': 2, 'BMSNV': 0, 'WCMV': 4, 'ZJKB': 2, 'TDPN': 0}
>>> leftovers = {"ZKZHV": 8, 'DWBL': 7}
>>> reduce_leftovers(leftovers, reactions)
>>> leftovers
{'ZKZHV': 0, 'DWBL': 0, 'KFKWH': 0, 'ORE': 681}
"""
can_reduce = True
while can_reduce:
can_reduce = False
to_add = {}
for key in leftovers.keys():
if key == "ORE":
continue
if reactions[key][0] <= leftovers[key]:
times = int(leftovers[key]/reactions[key][0])
can_reduce = True
leftovers[key] -= times*reactions[key][0]
for r in reactions[key][1]:
to_add[r[1]] = to_add.get(r[1], 0) + times*r[0]
for key, value in to_add.items():
leftovers[key] = leftovers.get(key, 0) + value
if __name__ == "__main__":
input = read_input("input/14.txt")
leftovers = {}
required_resources = get_amount("FUEL", 1, input, leftovers)
while need_reaction(required_resources):
i = 0
while required_resources[i][1] == "ORE":
i += 1
required_resources += get_amount(required_resources[i][1], required_resources[i][0], input, leftovers)
required_resources.pop(i)
required_ore = 0
for r in required_resources:
required_ore += r[0]
print("Solution1")
print(required_ore)
max_ore = 1000000000000
without_problems = int(max_ore/required_ore)
leftovers2 = {k:without_problems*leftovers[k] for k in leftovers.keys()}
ore = required_ore*without_problems
fuel = without_problems
reduce_leftovers(leftovers2, input)
ore -= leftovers2.get("ORE", 0)
leftovers2["ORE"] = 0
while without_problems > 0:
without_problems = int((max_ore-ore)/required_ore)
for key, value in leftovers.items():
leftovers2[key] = leftovers2.get(key, 0) + value*without_problems
ore += required_ore*without_problems
fuel += without_problems
reduce_leftovers(leftovers2, input)
ore -= leftovers2.get("ORE", 0)
leftovers2["ORE"] = 0
leftovers2["FUEL"] = 1
reduce_leftovers(leftovers2, input)
ore -= leftovers2.get("ORE", 0)
if ore<=max_ore:
fuel += 1
print("Solution 2")
print(fuel)
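# The doctests embedded in reduce_leftovers above can be run directly
# (assumption: input/14.txt sits next to this script, as they require):
#   python -m doctest solution_14.py -v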
| 45.186441
| 693
| 0.564891
| 715
| 5,332
| 4.106294
| 0.202797
| 0.057902
| 0.049046
| 0.067439
| 0.438011
| 0.396117
| 0.361035
| 0.361035
| 0.361035
| 0.344005
| 0
| 0.054568
| 0.240435
| 5,332
| 117
| 694
| 45.57265
| 0.67037
| 0.370968
| 0
| 0.125
| 0
| 0
| 0.022685
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.0125
| 0
| 0.1125
| 0.05
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ee9daa8c3f24ee0e5956c82c505b318b5493b1d6
| 471
|
py
|
Python
|
src/actions/action_sleep.py
|
JohnVillalovos/webhook-proxy
|
fbb2df31b10a0c3ffb9572a0abde4df7e1ad2ef3
|
[
"MIT"
] | null | null | null |
src/actions/action_sleep.py
|
JohnVillalovos/webhook-proxy
|
fbb2df31b10a0c3ffb9572a0abde4df7e1ad2ef3
|
[
"MIT"
] | null | null | null |
src/actions/action_sleep.py
|
JohnVillalovos/webhook-proxy
|
fbb2df31b10a0c3ffb9572a0abde4df7e1ad2ef3
|
[
"MIT"
] | null | null | null |
import time
from actions import Action, action
@action("sleep")
class SleepAction(Action):
def __init__(
self, seconds, output="Waiting {{ seconds }} seconds before continuing ..."
):
self.seconds = seconds
self.output_format = output
def _run(self):
seconds = float(self._render_with_template(str(self.seconds)))
print(self._render_with_template(self.output_format, seconds=seconds))
time.sleep(seconds)
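# Usage sketch (assumption: the Action base class supplies
# _render_with_template, as the methods above rely on):
#   action = SleepAction(seconds=2)
#   action._run()  # prints "Waiting 2.0 seconds before continuing ..." then sleeps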
| 23.55
| 83
| 0.673036
| 54
| 471
| 5.62963
| 0.444444
| 0.144737
| 0.105263
| 0.144737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.218684
| 471
| 19
| 84
| 24.789474
| 0.826087
| 0
| 0
| 0
| 0
| 0
| 0.118896
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0.153846
| 0
| 0.384615
| 0.076923
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ee9fab028e33102060e656a46df7bd6afed90358
| 1,262
|
py
|
Python
|
a1d05eba1/special_fields/choice_filter.py
|
dorey/a1d05eba1
|
eb6f66a946f3c417ab6bf9047ba9715be071967c
|
[
"0BSD"
] | null | null | null |
a1d05eba1/special_fields/choice_filter.py
|
dorey/a1d05eba1
|
eb6f66a946f3c417ab6bf9047ba9715be071967c
|
[
"0BSD"
] | 28
|
2020-06-23T19:00:58.000Z
|
2021-03-26T22:13:07.000Z
|
a1d05eba1/special_fields/choice_filter.py
|
dorey/a1d05eba1
|
eb6f66a946f3c417ab6bf9047ba9715be071967c
|
[
"0BSD"
] | null | null | null |
from ..utils.kfrozendict import kfrozendict
from ..utils.kfrozendict import kassertfrozen
class ChoiceFilter:
ROW_KEYS = {
'1': ['choice_filter'],
'2': ['choice_filter'],
}
EXPORT_KEY = 'choice_filter'
@classmethod
def in_row(kls, row, schema):
return 'choice_filter' in row
@classmethod
def pull_from_row(kls, row, content):
schema = content.schema_version
if schema == '2':
cfdata = row.get('choice_filter')
if not cfdata:
return
assert 'raw' in cfdata
yield ChoiceFilter(content=content, val=cfdata)
elif schema == '1':
cfdata = {'raw': row['choice_filter']}
yield ChoiceFilter(content=content, val=cfdata)
def __init__(self, content, val):
self.content = content
self.key = 'choice_filter'
self.val = val
self._string = val.get('raw')
def dict_key_vals_old(self, renames=None):
# print(('choice_filter', self._string,))
yield ('choice_filter', self._string,)
@kassertfrozen
def dict_key_vals_new(self, renames=None):
return (
'choice_filter',
kfrozendict(raw=self.val.get('raw')),
)
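# Small usage sketch with a stand-in content object (assumption: the real
# `content` exposes a schema_version attribute, as pull_from_row reads above):
if __name__ == '__main__':
    class _FakeContent:
        schema_version = '1'

    row = {'choice_filter': 'breed=${breed}'}
    cf = next(ChoiceFilter.pull_from_row(row, _FakeContent()))
    print(list(cf.dict_key_vals_old()))  # [('choice_filter', 'breed=${breed}')]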
| 28.044444
| 59
| 0.585578
| 140
| 1,262
| 5.071429
| 0.307143
| 0.169014
| 0.067606
| 0.073239
| 0.112676
| 0.112676
| 0
| 0
| 0
| 0
| 0
| 0.004515
| 0.29794
| 1,262
| 44
| 60
| 28.681818
| 0.79684
| 0.030903
| 0
| 0.111111
| 0
| 0
| 0.108927
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 1
| 0.138889
| false
| 0
| 0.055556
| 0.055556
| 0.361111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ee9ff38e8ac3eaab8a58f8de6b4ed70735c17d0f
| 3,878
|
py
|
Python
|
hamster_control_test_version.py
|
iamnotmarcel/HamsterModell
|
ce8391e8e120e2cf957f9d49e812be3c4f757f75
|
[
"MIT"
] | null | null | null |
hamster_control_test_version.py
|
iamnotmarcel/HamsterModell
|
ce8391e8e120e2cf957f9d49e812be3c4f757f75
|
[
"MIT"
] | 1
|
2022-03-26T17:27:30.000Z
|
2022-03-26T17:27:30.000Z
|
hamster_control_test_version.py
|
iamnotmarcel/HamsterModell
|
ce8391e8e120e2cf957f9d49e812be3c4f757f75
|
[
"MIT"
] | null | null | null |
'''
Author: Marcel Miljak
Class: 5aHEL - HTL Anichstraße
Diploma thesis: Development of a hamster robot
School year: 2021/22
'''
import time
from time import sleep
import RPi.GPIO as GPIO
DIR_2 = 18 # direction pin of the 2nd module
DIR_1 = 24 # direction pin of the 1st module
STEP_1 = 25 # step pin of the 1st module
STEP_2 = 23 # step pin of the 2nd module
CW = 1 # Clockwise Rotation
CCW = 0 # Counterclockwise Rotation
SENS_TRIG = 6 # trigger pin HC-SR04
SENS_ECHO = 5 # echo pin HC-SR04
whole_cycle = 300 # full revolution (360 / 7.5), which is actually wrong
cycle_left = 548 # quarter revolution
delay = 0.005
def setup():
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(DIR_2, GPIO.OUT)
GPIO.setup(STEP_1, GPIO.OUT)
GPIO.setup(STEP_2, GPIO.OUT)
GPIO.setup(DIR_1, GPIO.OUT)
GPIO.setup(SENS_TRIG, GPIO.OUT)
GPIO.setup(SENS_ECHO, GPIO.IN)
def vor():
    '''
    Drives the hamster forward by one full
    motor revolution (360°)
    '''
setup()
GPIO.output(DIR_1, CW)
GPIO.output(DIR_2, CW)
print("Vorwärts...")
for i in range(3):
dist = vornFrei()
if dist < 20.0:
print("Achtung - Hinderniss voraus!")
stop()
time.sleep(delay)
linksUm()
time.sleep(delay)
break
else:
for i in range (100):
GPIO.output(STEP_1, GPIO.HIGH)
GPIO.output(STEP_2, GPIO.HIGH)
sleep(delay)
GPIO.output(STEP_1, GPIO.LOW)
GPIO.output(STEP_2, GPIO.LOW)
sleep(delay)
def linksUm():
    '''
    Turns 90° to the left
    '''
setup()
GPIO.output(DIR_1, CW)
GPIO.output(DIR_2, CCW)
print("Ausrichtung nach links...")
for i in range(298):
GPIO.output(STEP_1, GPIO.HIGH)
GPIO.output(STEP_2, GPIO.LOW)
sleep(delay)
GPIO.output(STEP_1, GPIO.LOW)
GPIO.output(STEP_2, GPIO.HIGH)
sleep(delay)
def rechtsUm():
    '''
    Only intended as a test to check whether the hamster
    really turns to the right
    '''
setup()
print("Ausrichtung nach rechts...")
linksUm()
linksUm()
linksUm()
GPIO.cleanup()
def vornFrei():
    '''
    Returns true if there is no wall in front of
    the hamster.
    Used together with the obstacle avoidance
    sensor.
    '''
setup()
GPIO.output(SENS_TRIG,1)
time.sleep(0.00001)
GPIO.output(SENS_TRIG,0)
while GPIO.input(SENS_ECHO) == 0:
pass
start = time.time()
timer = 0
while (GPIO.input(SENS_ECHO) == 1 and timer <= 12):
timer +=1
time.sleep(0.0001)
stop = time.time()
return (stop-start) * 34300 / 2
def stop():
    '''
    If there is a wall in front of the hamster,
    this function should stop the motors.
    '''
setup()
print("Stop des Hamsters...")
GPIO.output(DIR_1, GPIO.LOW)
GPIO.output(DIR_2, GPIO.LOW)
GPIO.output(STEP_1, GPIO.LOW)
GPIO.output(STEP_2, GPIO.LOW)
'''
def kornDa():
    returns true if there is at least one grain on
    the tile the hamster is currently standing on.
    setup()
    print("Check ob Korn auf Feld vorhanden...")
    korn_indicator = GPIO.input(SENS_Korn)
    if korn_indicator == 0:
        print("Es befindet sich ein Korn auf dem Feld")
        return True
    else:
        return False
'''
def nimm():
'''
    picks up a grain from the tile it is currently standing on
'''
pass
def gib():
'''
    puts a grain from its mouth down on the tile
    it is currently standing on.
'''
pass
def maulLeer():
'''
    returns true if the hamster has no grains
    in its mouth.
'''
pass
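# Illustrative drive loop, assuming the wiring defined at the top of this file
# on a Raspberry Pi: trace a small square, then release the GPIO pins.
if __name__ == '__main__':
    try:
        for _ in range(4):
            vor()
            linksUm()
    finally:
        GPIO.cleanup()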
| 22.678363
| 70
| 0.581227
| 522
| 3,878
| 4.250958
| 0.350575
| 0.081118
| 0.063091
| 0.036052
| 0.298783
| 0.200991
| 0.180261
| 0.180261
| 0.166291
| 0.136548
| 0
| 0.039955
| 0.309438
| 3,878
| 171
| 71
| 22.678363
| 0.787901
| 0.24394
| 0
| 0.340659
| 0
| 0
| 0.04616
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.098901
| false
| 0.043956
| 0.032967
| 0
| 0.142857
| 0.054945
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eea2f57d28acf6796635f1259b4f5d6adad79071
| 7,980
|
py
|
Python
|
codeball/tests/test_models.py
|
metrica-sports/codeball
|
60bfe54b7898bed87cbbbae9dfc0f3bc49d31025
|
[
"MIT"
] | 54
|
2020-09-16T13:09:03.000Z
|
2022-03-28T12:32:19.000Z
|
codeball/tests/test_models.py
|
metrica-sports/codeball
|
60bfe54b7898bed87cbbbae9dfc0f3bc49d31025
|
[
"MIT"
] | null | null | null |
codeball/tests/test_models.py
|
metrica-sports/codeball
|
60bfe54b7898bed87cbbbae9dfc0f3bc49d31025
|
[
"MIT"
] | 9
|
2021-03-28T13:02:57.000Z
|
2022-03-24T11:19:06.000Z
|
import os
import pandas as pd
from kloppy import (
load_epts_tracking_data,
to_pandas,
load_metrica_json_event_data,
load_xml_code_data,
)
from codeball import (
GameDataset,
DataType,
TrackingFrame,
EventsFrame,
CodesFrame,
PossessionsFrame,
BaseFrame,
Zones,
Area,
PatternEvent,
Pattern,
PatternsSet,
)
import codeball.visualizations as vizs
class TestModels:
def test_pattern_event(self):
xy = [0.3, 0.6]
viz = vizs.Players(
start_time=500, end_time=700, players=[], options=[]
)
pattern_event = PatternEvent(
pattern_code="MET_001",
start_time=400,
event_time=500,
end_time=800,
coordinates=[xy, xy],
visualizations=[viz, viz],
tags=["T001"],
)
assert pattern_event.end_time == 800
assert pattern_event.coordinates[0][0] == 0.3
assert pattern_event.visualizations[0].start_time == 500
def test_pattern(self):
class pattern_class(Pattern):
def __init__(
self,
name: str,
code: str,
in_time: int = 0,
out_time: int = 0,
parameters: dict = None,
game_dataset: GameDataset = None,
):
super().__init__(
name, code, in_time, out_time, parameters, game_dataset
)
def run(self):
return True
def build_pattern_event(self):
pass
test_pattern = pattern_class(
name="Test Pattern",
code="MET_001",
in_time=3,
out_time=2,
parameters=None,
game_dataset=None,
)
assert test_pattern.in_time == 3
assert test_pattern.run() is True
def test_game_dataset(self):
base_dir = os.path.dirname(__file__)
game_dataset = GameDataset(
tracking_metadata_file=f"{base_dir}/files/metadata.xml",
tracking_data_file=f"{base_dir}/files/tracking.txt",
events_metadata_file=f"{base_dir}/files/metadata.xml",
events_data_file=f"{base_dir}/files/events.json",
)
assert game_dataset.tracking.data_type == DataType.TRACKING
assert game_dataset.events.data_type == DataType.EVENT
def test_tracking_game_dataset(self):
base_dir = os.path.dirname(__file__)
game_dataset = GameDataset(
tracking_metadata_file=f"{base_dir}/files/metadata.xml",
tracking_data_file=f"{base_dir}/files/tracking.txt",
)
assert game_dataset.tracking.data_type == DataType.TRACKING
assert game_dataset.has_event_data is False
def test_codes_only_game_dataset(self):
base_dir = os.path.dirname(__file__)
game_dataset = GameDataset(
codes_files=f"{base_dir}/files/code_xml.xml",
)
assert game_dataset.codes[0].data_type == DataType.CODE
assert game_dataset.has_event_data is False
def test_pattern_set(self):
base_dir = os.path.dirname(__file__)
game_dataset = GameDataset(
tracking_metadata_file=f"{base_dir}/files/metadata.xml",
tracking_data_file=f"{base_dir}/files/tracking.txt",
events_metadata_file=f"{base_dir}/files/metadata.xml",
events_data_file=f"{base_dir}/files/events.json",
)
class pattern_class(Pattern):
def __init__(
self,
name: str,
code: str,
in_time: int = 0,
out_time: int = 0,
parameters: dict = None,
game_dataset: GameDataset = None,
):
super().__init__(
name, code, in_time, out_time, parameters, game_dataset
)
def run(self):
return True
def build_pattern_event(self):
pass
test_pattern = pattern_class(
name="Test Pattern",
code="MET_001",
in_time=3,
out_time=2,
parameters=None,
game_dataset=game_dataset,
)
patterns_set = PatternsSet(game_dataset=game_dataset)
patterns_set.patterns = [test_pattern, test_pattern]
assert patterns_set.game_dataset.events.data_type == DataType.EVENT
assert len(patterns_set.patterns) == 2
def test_base_data_frame(self):
data = {
"player1_x": [1, 2, 3, 4],
"player2_x": [5, 6, 7, 8],
"player3_x": [9, 10, 11, 12],
}
base_df = BaseFrame(data)
base_df.metadata = "metadata"
base_df.records = [1, 2, 3, 4]
base_df.data_type = "test"
assert isinstance(base_df, BaseFrame)
assert hasattr(base_df, "metadata")
assert hasattr(base_df, "records")
assert isinstance(base_df[["player1_x", "player2_x"]], BaseFrame)
assert hasattr(base_df[["player1_x", "player2_x"]], "metadata")
assert not hasattr(base_df[["player1_x", "player2_x"]], "records")
def test_tracking_data_frame(self):
base_dir = os.path.dirname(__file__)
tracking_dataset = load_epts_tracking_data(
metadata_filename=f"{base_dir}/files/metadata.xml",
raw_data_filename=f"{base_dir}/files/tracking.txt",
)
tracking = TrackingFrame(to_pandas(tracking_dataset))
tracking.data_type = DataType.TRACKING
tracking.metadata = tracking_dataset.metadata
tracking.records = tracking_dataset.records
assert tracking.get_team_by_id("FIFATMA").team_id == "FIFATMA"
assert tracking.get_period_by_id(1).id == 1
assert tracking.get_other_team_id("FIFATMA") == "FIFATMB"
assert tracking.team("FIFATMA").shape[1] == 22
assert tracking.dimension("x").shape[1] == 23
assert tracking.players().shape[1] == 44
assert tracking.players("field").shape[1] == 40
assert sum(tracking.phase(defending_team_id="FIFATMA")) == 0
assert sum(tracking.team("FIFATMA").stretched(90)) == 863
def test_events_data_frame(self):
base_dir = os.path.dirname(__file__)
events_dataset = load_metrica_json_event_data(
metadata_filename=f"{base_dir}/files/metadata.xml",
raw_data_filename=f"{base_dir}/files/events.json",
)
events = EventsFrame(to_pandas(events_dataset))
events.data_type = DataType.EVENT
events.metadata = events_dataset.metadata
events.records = events_dataset.records
assert events.type("PASS").shape[0] == 26
assert events.result("COMPLETE").shape[0] == 45
assert events.into(Zones.OPPONENT_BOX).shape[0] == 1
assert events.starts_inside(Zones.OPPONENT_BOX).shape[0] == 2
assert events.ends_inside(Zones.OPPONENT_BOX).shape[0] == 2
assert events.ends_outside(Zones.OPPONENT_BOX).shape[0] == 43
# Test different ways to input Zones and areas
custom_area = Area((0.25, 0.2), (0.75, 0.8))
assert (
events.ends_outside(Zones.OPPONENT_BOX, Zones.OWN_BOX).shape[0]
== 45
)
assert (
events.ends_inside(Zones.OPPONENT_BOX, custom_area).shape[0] == 14
)
assert events.ends_inside(custom_area, custom_area).shape[0] == 12
def test_codes_data_frame(self):
base_dir = os.path.dirname(__file__)
codes_dataset = load_xml_code_data(
xml_filename=f"{base_dir}/files/code_xml.xml",
)
codes = CodesFrame(to_pandas(codes_dataset))
codes.data_type = DataType.CODE
codes.metadata = codes_dataset.metadata
codes.records = codes_dataset.records
assert len(codes.records) == 3
| 30.930233
| 78
| 0.595614
| 938
| 7,980
| 4.76226
| 0.168444
| 0.056638
| 0.028655
| 0.046564
| 0.533244
| 0.49944
| 0.46116
| 0.40094
| 0.40094
| 0.376091
| 0
| 0.02439
| 0.301253
| 7,980
| 257
| 79
| 31.050584
| 0.776722
| 0.005388
| 0
| 0.363184
| 0
| 0
| 0.088217
| 0.058097
| 0
| 0
| 0
| 0
| 0.189055
| 1
| 0.079602
| false
| 0.014925
| 0.024876
| 0.00995
| 0.129353
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eea44ef30a81ba67ad14a68694b3cdcb38fe067e
| 1,686
|
py
|
Python
|
cv_workshops/6-day/2-clazz.py
|
afterloe/opencv-practice
|
83d76132d004ebbc96d99d34a0fd3fc37a044f9f
|
[
"MIT"
] | 5
|
2020-03-13T07:34:30.000Z
|
2021-10-01T03:03:05.000Z
|
cv_workshops/6-day/2-clazz.py
|
afterloe/Opencv-practice
|
83d76132d004ebbc96d99d34a0fd3fc37a044f9f
|
[
"MIT"
] | null | null | null |
cv_workshops/6-day/2-clazz.py
|
afterloe/Opencv-practice
|
83d76132d004ebbc96d99d34a0fd3fc37a044f9f
|
[
"MIT"
] | 1
|
2020-03-01T13:21:43.000Z
|
2020-03-01T13:21:43.000Z
|
#!/usr/bin/env python3
# -*- coding=utf-8 -*-
import cv2 as cv
import numpy as np
"""
使用几何矩计算轮廓中心与横纵波比对过滤
对二值图像的各个轮廓进行计算获得对应的几何矩,根据几何矩计算轮廓点的中心位置。
cv.moments(contours, binaryImage)
- contours: 轮廓点集
- binaryImage: bool, default False;二值图返回
"""
def main():
src = cv.imread("../../pic/money.jpg")
cv.namedWindow("src", cv.WINDOW_KEEPRATIO)
cv.namedWindow("dst", cv.WINDOW_KEEPRATIO)
cv.imshow("src", src)
t = 80
binary = cv.Canny(src, t, t * 2)
k = np.ones((3, 3), dtype=np.uint8)
binary = cv.morphologyEx(binary, cv.MORPH_DILATE, k)
contours, _ = cv.findContours(binary, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
for index in range(len(contours)):
contour = contours[index]
rect = cv.minAreaRect(contour)
# cx, cy = rect[0]
ww, hh = rect[1]
ratio = np.minimum(ww, hh) / np.maximum(ww, hh)
print("ratio is ", ratio)
        mm = cv.moments(contour)
        m00 = mm["m00"]
        m10 = mm["m10"]
        m01 = mm["m01"]
        if m00 == 0:
            continue  # degenerate contour: avoid division by zero
        cx = int(m10 / m00)  # centroid x = m10 / m00
        cy = int(m01 / m00)  # centroid y = m01 / m00
        box = cv.boxPoints(rect).astype(np.int32)
if 0.9 < ratio:
cv.drawContours(src, [box], 0, (255, 0, 0), 2, cv.LINE_8)
cv.circle(src, (np.int32(cx), np.int32(cy)), 2, (0, 0, 255), 2, cv.LINE_8)
if 0.5 > ratio:
cv.drawContours(src, [box], 0, (255, 255, 0), 2, cv.LINE_8)
cv.circle(src, (np.int32(cx), np.int32(cy)), 2, (0, 255, 0), 2, cv.LINE_8)
cv.imshow("dst", src)
cv.waitKey(0)
cv.destroyAllWindows()
if "__main__" == __name__:
main()
| 31.811321
| 87
| 0.541518
| 231
| 1,686
| 3.87013
| 0.402597
| 0.035794
| 0.03132
| 0.035794
| 0.173378
| 0.173378
| 0.173378
| 0.089485
| 0.089485
| 0.089485
| 0
| 0.066554
| 0.295967
| 1,686
| 52
| 88
| 32.423077
| 0.686605
| 0.034994
| 0
| 0
| 0
| 0
| 0.041697
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027778
| false
| 0
| 0.055556
| 0
| 0.083333
| 0.027778
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eea747f6a5f58fa9f7cb6e82312ed9dadca75ac3
| 1,967
|
py
|
Python
|
war.py
|
Eduardojvr/Space_Atack_Game
|
f37e1891bf00af71f3c1758a0288a6b0b830bb9e
|
[
"MIT"
] | null | null | null |
war.py
|
Eduardojvr/Space_Atack_Game
|
f37e1891bf00af71f3c1758a0288a6b0b830bb9e
|
[
"MIT"
] | null | null | null |
war.py
|
Eduardojvr/Space_Atack_Game
|
f37e1891bf00af71f3c1758a0288a6b0b830bb9e
|
[
"MIT"
] | null | null | null |
from settings import Settings
from ship import Ship
import pygame
import sys
from trap import Trap
from random import randint
def run_game():
tela1 = Settings()
screen = pygame.display.set_mode((tela1.altura, tela1.largura))
background = Settings()
pygame.display.set_caption("Space War")
nave = Ship(screen)
#pygame.mouse.set_visible(0)
trap = [Trap(screen,randint(0,1200)), Trap(screen,randint(0,1200)), Trap(screen,randint(0,1200))]
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
elif event.type == pygame.KEYDOWN or event.type == pygame.KEYUP:
if event.key == pygame.K_RIGHT:
nave.rect.centerx +=30
elif event.key == pygame.K_LEFT:
nave.rect.centerx -=30
elif event.key == pygame.K_UP:
nave.rect.bottom -=30
elif event.key == pygame.K_DOWN:
nave.rect.bottom +=30
elif event.key == pygame.K_SPACE:
nave.moveMissile()
for i in trap:
i.rect.bottom += 30
if (i.rect.colliderect(nave.rect)):
nave.vida = nave.vida-1
if (nave.vida < 1):
background.bg_image = pygame.image.load('imagens/gameover.bmp')
screen.fill(tela1.bg_color)
screen.blit(background.bg_image, (0,0))
nave.blitme()
nave.blitmemissile()
for i in trap:
i.blitme()
for i in trap:
if i.rect.centery > Settings().altura:
i.rect.centery = 0
i.rect.centerx = randint(0,1200)
i.rect.centery = randint(0,200)
pygame.display.flip()
################################ Main ################################
run_game()
| 32.245902
| 101
| 0.516523
| 229
| 1,967
| 4.379913
| 0.323144
| 0.02991
| 0.069791
| 0.074776
| 0.229312
| 0.207378
| 0.207378
| 0.207378
| 0.207378
| 0.065803
| 0
| 0.034215
| 0.346213
| 1,967
| 60
| 102
| 32.783333
| 0.745723
| 0.016268
| 0
| 0.0625
| 0
| 0
| 0.015533
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020833
| false
| 0
| 0.145833
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eea8fc748971275806d47350049795a3a98b474a
| 1,463
|
py
|
Python
|
Project-2/doc/contingency_mat_parser.py
|
TooSchoolForCool/EE219-Larger-Scale-Data-Mining
|
9a42c88169ace88f9b652d0e174c7f641fcc522e
|
[
"Apache-2.0"
] | null | null | null |
Project-2/doc/contingency_mat_parser.py
|
TooSchoolForCool/EE219-Larger-Scale-Data-Mining
|
9a42c88169ace88f9b652d0e174c7f641fcc522e
|
[
"Apache-2.0"
] | 12
|
2020-01-28T22:09:15.000Z
|
2022-03-11T23:16:26.000Z
|
Project-2/doc/contingency_mat_parser.py
|
TooSchoolForCool/EE219-Larger-Scale-Data-Mining
|
9a42c88169ace88f9b652d0e174c7f641fcc522e
|
[
"Apache-2.0"
] | null | null | null |
import sys
import argparse
def read_in(file_path):
    try:
        file = open(file_path, 'r')
    except OSError:
        sys.stderr.write("[ERROR] read_in(): Cannot open file '%s'\n" % file_path)
        exit(1)
file_content = []
for line in file:
file_content.append(line)
i = 0
while i < len(file_content):
line = file_content[i]
title = line[5:-6]
print("\t%s" % title)
line = file_content[i + 7].strip('\n').strip(' ').strip('\t')
line = [l.strip('\t') for l in line.split(' ') if l]
        for item in line:
            print("\t%s" % item.replace("cluster_", "c"), end="")  # stay on one line
        print("")
for j in range(8, 28):
line = file_content[i + j].strip('\n').strip(' ').strip('\t')
line = [l.strip('\t') for l in line.split(' ') if l]
            for item in line:
                print("%s\t" % item, end="")  # stay on one line
            print("")
i += 28
return file_content
def add_parser():
parser = argparse.ArgumentParser(prog='Compare Evaluation Result')
parser.add_argument("-s", "--src",
dest = "src",
help = "source file path",
required = True
)
parser.add_argument("-d", "--dest",
dest = "dest",
help = "destination file path",
)
return parser
def main():
parser = add_parser()
args = parser.parse_args()
src_file = read_in(args.src)
if __name__ == '__main__':
main()
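# Hedged usage sketch (invocation not shown in the original): assuming the
# evaluation output lives in a text file, the parser would be run as, e.g.,
#   python contingency_mat_parser.py -s eval_result.txt
# where "eval_result.txt" is a hypothetical file name.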
| 21.835821
| 82
| 0.514012
| 189
| 1,463
| 3.830688
| 0.354497
| 0.106354
| 0.062155
| 0.066298
| 0.176796
| 0.176796
| 0.176796
| 0.176796
| 0.176796
| 0.176796
| 0
| 0.010101
| 0.323308
| 1,463
| 67
| 83
| 21.835821
| 0.721212
| 0
| 0
| 0.12766
| 0
| 0
| 0.117486
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.06383
| false
| 0
| 0.042553
| 0
| 0.148936
| 0.106383
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eea9326c5e16b9ddd8185aff0917cab86602e465
| 5,426
|
py
|
Python
|
voldemort_client/helper.py
|
mirko-lelansky/voldemort-client
|
a2839a0cc50ca4fdc5bdb36b2df3a3cf7f7d9db9
|
[
"Apache-2.0"
] | null | null | null |
voldemort_client/helper.py
|
mirko-lelansky/voldemort-client
|
a2839a0cc50ca4fdc5bdb36b2df3a3cf7f7d9db9
|
[
"Apache-2.0"
] | null | null | null |
voldemort_client/helper.py
|
mirko-lelansky/voldemort-client
|
a2839a0cc50ca4fdc5bdb36b2df3a3cf7f7d9db9
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Mirko Lelansky <mlelansky@mail.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains some helper methods for building parts of http requests.
"""
from datetime import datetime
import simplejson as json
from voldemort_client.exception import VoldemortError
def create_vector_clock(node_id, timeout):
"""This method builds the initial vector clock for a new key.
Parameters
----------
node_id : int
the id of one node in the cluster
timeout : int
the expire timeout of the key
Returns
-------
dict
        the vector clock as a dictionary
"""
if node_id is not None and timeout is not None:
return {
"versions": [{"nodeId": node_id, "version": 1}],
"timestamp": timeout
}
else:
raise ValueError("You must gave the node id and the timeout.")
def merge_vector_clock(vector_clock, node_id, timeout=None):
"""This method merges an existing vector clock with the new values.
Parameters
----------
vector_clock : dict
the vector clock which should be updated
node_id : int
the node id to use
timeout : int
the expire timeout of the key
Returns
-------
dict
        the updated vector clock as a dictionary
"""
if vector_clock is not None and node_id is not None:
versions = vector_clock["versions"]
version_map_list_node = [version_map for version_map in versions
if version_map["nodeId"] == node_id]
if version_map_list_node == []:
versions.append({"nodeId": node_id, "version": 1})
elif len(version_map_list_node) == 1:
old_map = version_map_list_node[0]
new_map = old_map
new_map["version"] = new_map["version"] + 1
versions.remove(old_map)
versions.append(new_map)
else:
raise VoldemortError("Only one version map per node is allowed.")
vector_clock["versions"] = versions
if timeout is not None:
vector_clock["timestamp"] = timeout
return vector_clock
else:
raise ValueError("You need the vector clock, timeout and the node id.")
def build_get_headers(request_timeout):
"""This method builds the request headers for get requests like receving keys.
Parameters
----------
request_timeout : int
        the timeout for the request, in milliseconds
Returns
-------
dict
        the headers as a dictionary
"""
timestamp = datetime.now().timestamp()
return {
"X-VOLD-Request-Timeout-ms": str(int(request_timeout)),
"X-VOLD-Request-Origin-Time-ms": str(int(timestamp))
}
def build_delete_headers(request_timeout, vector_clock):
"""This method builds the request headers for the delete requests.
Parameters
----------
request_timeout : int
        the timeout for the request, in milliseconds
    vector_clock : dict
        the vector clock representing the version that should be deleted
Returns
-------
dict
the headers as dictionary
"""
delete_headers = build_get_headers(request_timeout)
delete_headers["X-VOLD-Vector-Clock"] = json.dumps(vector_clock)
return delete_headers
def build_set_headers(request_timeout, vector_clock, content_type="text/plain"):
"""This method builds the request headers for the set requests.
Parameters
----------
request_timeout : int
        the timeout for the request, in milliseconds
    vector_clock : dict
        the vector clock representing the version that should be created or
        updated
content_type : str
the content type of the value
Returns
-------
dict
the headers as dictionary
"""
set_headers = build_delete_headers(request_timeout, vector_clock)
set_headers["Content-Type"] = content_type
return set_headers
def build_version_headers(request_timeout):
"""This method builds the request headers for the version requests.
Parameters
----------
request_timeout : int
        the timeout for the request, in milliseconds
    Returns
    -------
dict
the headers as dictionary
"""
version_headers = build_get_headers(request_timeout)
version_headers["X-VOLD-Get-Version"] = ""
return version_headers
def build_url(url, store_name, key):
"""This method combine the different parts of the urls to build the url to
acces the REST-API.
Parameters
----------
url : str
the base url
store_name : str
the name of the voldemort store
key : str
the url part which represents the key or keys
Returns
-------
str
the combined url of the REST-API
"""
return "%s/%s/%s" % (url, store_name, key)
| 29.32973
| 82
| 0.651493
| 712
| 5,426
| 4.837079
| 0.247191
| 0.076655
| 0.042683
| 0.027584
| 0.360046
| 0.308653
| 0.265679
| 0.230836
| 0.207317
| 0.207317
| 0
| 0.003255
| 0.263914
| 5,426
| 184
| 83
| 29.48913
| 0.859039
| 0.490232
| 0
| 0.096154
| 0
| 0
| 0.144118
| 0.022689
| 0
| 0
| 0
| 0
| 0
| 1
| 0.134615
| false
| 0
| 0.057692
| 0
| 0.326923
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eea9c161475ffd63195c5ca94c42455b4deb9625
| 1,581
|
py
|
Python
|
src/reddack/exceptions.py
|
diatomicDisaster/Reddit-Slackbot
|
4f22af110e72eab19d9162a4428800a1895303f3
|
[
"MIT"
] | null | null | null |
src/reddack/exceptions.py
|
diatomicDisaster/Reddit-Slackbot
|
4f22af110e72eab19d9162a4428800a1895303f3
|
[
"MIT"
] | 10
|
2022-02-21T01:11:20.000Z
|
2022-02-22T18:13:00.000Z
|
src/reddack/exceptions.py
|
diatomicDisaster/redack
|
4f22af110e72eab19d9162a4428800a1895303f3
|
[
"MIT"
] | null | null | null |
from __future__ import (
annotations,
)
class ModFromSlackError(Exception):
"""Base class for modfromslack errors"""
def __init__(
self,
message: str,
*,
preamble: str | None = None,
afterword: str | None = None
) -> None:
if preamble is not None:
message = f"{preamble} {message}"
if afterword is not None:
message = f"{message}\n\n{afterword}"
super().__init__(message)
class MsgSendError(ModFromSlackError):
"""Failed to send Slack message."""
class SequenceError(ModFromSlackError):
"""Something has happened in the wrong order."""
def __init__(
self,
should_be_first,
should_be_second,
*,
preamble: str | None = None,
afterword: str | None = None
) -> None:
message = f"Expected {should_be_first} before {should_be_second}"
super().__init__(
message,
preamble = preamble,
afterword = afterword
)
class ActionSequenceError(SequenceError):
"""App thinks action came before its parent message."""
def __init__(
self,
parentmsg_ts,
action_ts,
*,
afterword=None
) -> None:
_preamble=f"'message_ts' {parentmsg_ts} is later than 'action_ts' {action_ts}"
super().__init__(
"parent message",
"action",
preamble=_preamble,
afterword=afterword
)
class ConfigError(ModFromSlackError):
"""Error in config file format."""
| 26.79661
| 86
| 0.573688
| 153
| 1,581
| 5.640523
| 0.392157
| 0.06489
| 0.050985
| 0.044032
| 0.229432
| 0.099652
| 0.099652
| 0.099652
| 0.099652
| 0
| 0
| 0
| 0.324478
| 1,581
| 58
| 87
| 27.258621
| 0.808052
| 0.117647
| 0
| 0.416667
| 0
| 0
| 0.132213
| 0.017531
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.020833
| 0
| 0.1875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eeaa2be76b33b3286d73455fcb963e240ddf8af4
| 7,276
|
py
|
Python
|
cid/cli/cli_generator.py
|
zeljko-bal/CID
|
52ecc445c441ec63386c9f092b226090588a3789
|
[
"MIT"
] | 1
|
2017-09-15T06:14:54.000Z
|
2017-09-15T06:14:54.000Z
|
cid/cli/cli_generator.py
|
zeljko-bal/CID
|
52ecc445c441ec63386c9f092b226090588a3789
|
[
"MIT"
] | null | null | null |
cid/cli/cli_generator.py
|
zeljko-bal/CID
|
52ecc445c441ec63386c9f092b226090588a3789
|
[
"MIT"
] | null | null | null |
from collections import defaultdict
from os import makedirs
from os.path import realpath, join, dirname, isdir, exists
from shutil import copy
from jinja2 import Environment, FileSystemLoader
from cid.cli.cli_model_specs import CliModelSpecs
from cid.cli import cli_post_processing
from cid.parser.cid_parser import parse
from cid.common.cid_model_processor import CidModelProcessor
from cid.common.utils import *
_cli_templates_path = join(dirname(realpath(__file__)), 'templates')
_cli_framework_path = join(dirname(realpath(__file__)), 'framework')
# ------------------------------- JINJA FILTERS -------------------------------
def parameter_model_filter(parameter):
def print_list(lst):
return str(lst) if len(lst) > 1 else "'{}'".format(lst[0])
if parameter.type == 'Bool':
positives = [p.positive for p in parameter.all_patterns if p.positive]
negatives = [p.negative for p in parameter.all_patterns if p.negative]
positives_str = ", positives={prefixes}".format(prefixes=print_list(positives)) if positives else ''
negatives_str = ", negatives={prefixes}".format(prefixes=print_list(negatives)) if negatives else ''
return "BooleanNonpositional('{name}'{positives}{negatives})".format(
name=parameter.name, positives=positives_str, negatives=negatives_str)
else:
ret = []
classified = defaultdict(lambda: defaultdict(set))
for pattern in parameter.all_patterns:
if pattern.white_space:
if pattern.count:
count_str = ", count={count}".format(count=pattern.count)
elif pattern.count_many:
count_str = ", count='*'"
else:
count_str = ''
classified['MultiArgNonpositional'][count_str].add(pattern)
else:
if pattern.count_many:
if pattern.separator:
separator_str = ", '{}'".format(pattern.separator)
else:
separator_str = ''
classified['SeparatedNonpositional'][separator_str].add(pattern)
elif pattern.count_char:
classified['CounterNonpositional'][pattern.count_char].add(pattern)
else:
classified['BasicNonpositional']['_'].add(pattern)
if classified['MultiArgNonpositional']:
for count_str, patterns in classified['MultiArgNonpositional'].items():
prefixes = [p.prefix for p in patterns]
ret.append("MultiArgNonpositional('{name}', {prefixes}{count_str})".format(name=parameter.name, prefixes=print_list(prefixes), count_str=count_str))
if classified['SeparatedNonpositional']:
for separator_str, patterns in classified['SeparatedNonpositional'].items():
prefixes = [p.prefix for p in patterns]
ret.append("SeparatedNonpositional('{name}', {prefixes}{separator_str})".format(name=parameter.name, prefixes=print_list(prefixes), separator_str=separator_str))
if classified['CounterNonpositional']:
for count_char, patterns in classified['CounterNonpositional'].items():
prefixes = [p.prefix for p in patterns]
ret.append("CounterNonpositional('{name}', {prefixes}, '{count_char}')".format(name=parameter.name, prefixes=print_list(prefixes), count_char=count_char))
if classified['BasicNonpositional']:
for _, patterns in classified['BasicNonpositional'].items():
prefixes = [p.prefix for p in patterns]
ret.append("BasicNonpositional('{name}', {prefixes})".format(name=parameter.name, prefixes=print_list(prefixes)))
return ', '.join(ret)
def have_sub_commands_filter(commands):
return any([c.sub_commands for c in commands])
# ------------------------------- GENERATOR FUNCTIONS -------------------------------
def process_model(model):
for visitor in cli_post_processing.model_visitors:
CidModelProcessor(visitor).process_model(model)
CidModelProcessor(CliModelSpecs().visitor).process_model(model)
def render_cli_code(model, root_command_name, cli_app_path):
# EXTRACT DATA ---------------------
model_extractor = ElementExtractor()
CidModelProcessor(model_extractor.visitor).process_model(model)
all_commands = model_extractor.all_commands
all_parameters = model_extractor.all_parameters
# RENDER CLI PARSER ---------------------
env = Environment(loader=FileSystemLoader(_cli_templates_path))
env.filters['parameter_model'] = parameter_model_filter
env.filters['element_type'] = element_type
env.filters['tab_indent'] = tab_indent_filter
env.filters['stringify'] = stringify_filter
env.filters['have_sub_commands'] = have_sub_commands_filter
env.globals['raise'] = raise_exception_helper
parser_template = env.get_template('cli_parser.template')
parser_rendered = parser_template.render(root_command_name=root_command_name, root_command_id=element_id(root_command_name),
commands=all_commands, parameters=all_parameters)
with open(join(cli_app_path, root_command_name + "_cli_parser.py"), "w") as text_file:
text_file.write(parser_rendered)
# RENDER CLI COMMAND ---------------------
command_file_path = join(cli_app_path, root_command_name + '_cli.py')
if not exists(command_file_path):
command_template = env.get_template('cli_command.template')
command_rendered = command_template.render(root_command_name=root_command_name)
with open(command_file_path, "w") as text_file:
text_file.write(command_rendered)
def copy_framework(cli_app_path):
if not isdir(cli_app_path):
makedirs(cli_app_path)
copy(join(_cli_framework_path, "generic_cli_parser.py"), cli_app_path)
copy(join(_cli_framework_path, "js_date.py"), cli_app_path)
def render_runner_script(root_command_name, dest_path):
env = Environment(loader=FileSystemLoader(_cli_templates_path))
template = env.get_template('windows_cli_py_runner.template')
rendered = template.render(command_path=join(root_command_name + '_cli', root_command_name + "_cli.py"))
with open(join(dest_path, root_command_name + ".bat"), "w") as text_file:
text_file.write(rendered)
def is_root_command_defined(model, root_command_name):
model_extractor = ElementExtractor()
CidModelProcessor(model_extractor.visitor).process_model(model)
return root_command_name in [command.name for command in model_extractor.all_commands]
def generate_cli(cid_file, root_command_name, dest_path):
cli_app_path = join(dest_path, root_command_name + "_cli")
model = parse(cid_file)
if not is_root_command_defined(model, root_command_name):
print("Error: The specified root command is not defined.")
return
process_model(model)
copy_framework(cli_app_path)
render_cli_code(model, root_command_name, cli_app_path)
render_runner_script(root_command_name, dest_path)
print("Generated cli successfully.")
| 42.8
| 177
| 0.67276
| 827
| 7,276
| 5.625151
| 0.170496
| 0.054385
| 0.061264
| 0.027085
| 0.344153
| 0.280739
| 0.269132
| 0.231298
| 0.126397
| 0.092003
| 0
| 0.000518
| 0.204233
| 7,276
| 169
| 178
| 43.053254
| 0.802936
| 0.03807
| 0
| 0.128205
| 0
| 0
| 0.128414
| 0.057343
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.08547
| 0.017094
| 0.213675
| 0.076923
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eeaa72a12bf7e9c9d8b1d3537dc9a129425ee115
| 2,037
|
py
|
Python
|
container/sample-inf1/inf1_mx.py
|
yunma10/neo-ai-dlr
|
1f5c65d9bf7155c016e5d2f78d273755760a4f2a
|
[
"Apache-2.0"
] | 446
|
2019-01-24T02:04:17.000Z
|
2022-03-16T13:45:32.000Z
|
container/sample-inf1/inf1_mx.py
|
yunma10/neo-ai-dlr
|
1f5c65d9bf7155c016e5d2f78d273755760a4f2a
|
[
"Apache-2.0"
] | 179
|
2019-01-24T10:03:34.000Z
|
2022-03-19T02:06:56.000Z
|
container/sample-inf1/inf1_mx.py
|
yunma10/neo-ai-dlr
|
1f5c65d9bf7155c016e5d2f78d273755760a4f2a
|
[
"Apache-2.0"
] | 111
|
2019-01-24T20:51:45.000Z
|
2022-02-18T06:22:40.000Z
|
import mxnet as mx
#import neomxnet
import os
import json
import numpy as np
from collections import namedtuple
dtype='float32'
Batch = namedtuple('Batch', ['data'])
ctx = mx.neuron()
is_gpu = False
def model_fn(model_dir):
print("param {}".format(os.environ.get('MODEL_NAME_CUSTOM')))
print("ctx {}".format(ctx))
sym, arg_params, aux_params = mx.model.load_checkpoint(os.path.join(model_dir, os.environ.get('MODEL_NAME_CUSTOM')), 0)
mod = mx.mod.Module(symbol=sym, context=ctx, label_names=None)
for arg in arg_params:
arg_params[arg] = arg_params[arg].astype(dtype)
for arg in aux_params:
aux_params[arg] = aux_params[arg].astype(dtype)
    mod.bind(for_training=False,
             data_shapes=[('data', (1, 3, 224, 224))],
             label_shapes=mod._label_shapes)  # bind happens in place; the return value was unused
mod.set_params(arg_params, aux_params, allow_missing=True)
return mod
def transform_fn(mod, img, input_content_type, output_content_type):
'''
stream = os.popen('/opt/aws/neuron/bin/neuron-cli list-model')
output = stream.read()
print(output)
stream = os.popen('/opt/aws/neuron/bin/neuron-cli list-ncg')
output = stream.read()
print(output)
'''
image = mx.image.imdecode(img)
resized = mx.image.resize_short(image, 224) # minimum 224x224 images
cropped, crop_info = mx.image.center_crop(resized, (224, 224))
normalized = mx.image.color_normalize(cropped.astype(np.float32) / 255,
mean=mx.nd.array([0.485, 0.456, 0.406]),
std=mx.nd.array([0.229, 0.224, 0.225]))
# the network expect batches of the form (N,3,224,224)
transposed = normalized.transpose((2, 0, 1)) # Transposing from (224, 224, 3) to (3, 224, 224)
batchified = transposed.expand_dims(axis=0) # change the shape from (3, 224, 224) to (1, 3, 224, 224)
image = batchified.astype(dtype='float32')
mod.forward(Batch([image]))
prob = mod.get_outputs()[0].asnumpy().tolist()
prob_json = json.dumps(prob)
return prob_json, output_content_type
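# Hedged local check (not in the original file): exercises only the image
# preprocessing from transform_fn, without the Neuron runtime; "test.jpg"
# is a hypothetical path.
def _preprocess_demo(path="test.jpg"):
    with open(path, "rb") as f:
        raw = f.read()
    image = mx.image.imdecode(raw)
    resized = mx.image.resize_short(image, 224)
    cropped, _ = mx.image.center_crop(resized, (224, 224))
    return cropped.shape  # (224, 224, 3) before transpose and batching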
| 37.722222
| 121
| 0.675994
| 307
| 2,037
| 4.348534
| 0.413681
| 0.031461
| 0.026217
| 0.025468
| 0.142322
| 0.101873
| 0.061423
| 0.061423
| 0.061423
| 0.061423
| 0
| 0.058507
| 0.177712
| 2,037
| 53
| 122
| 38.433962
| 0.738507
| 0.192931
| 0
| 0.052632
| 0
| 0
| 0.046354
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.157895
| 0
| 0.263158
| 0.052632
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eeab90972c87f9c41713b77c4809b4a9c645a33d
| 4,040
|
py
|
Python
|
data/process_data.py
|
KCKhoo/disaster_response_dashboard
|
ee337125121664503675bfb5bf01af85c7c1a8ca
|
[
"FTL",
"CNRI-Python",
"blessing"
] | null | null | null |
data/process_data.py
|
KCKhoo/disaster_response_dashboard
|
ee337125121664503675bfb5bf01af85c7c1a8ca
|
[
"FTL",
"CNRI-Python",
"blessing"
] | null | null | null |
data/process_data.py
|
KCKhoo/disaster_response_dashboard
|
ee337125121664503675bfb5bf01af85c7c1a8ca
|
[
"FTL",
"CNRI-Python",
"blessing"
] | null | null | null |
import sys
import pandas as pd
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
''' Load and merge two CSV files - one containing messages and the other containing categories
Args:
messages_filepath (str): Path to the CSV file containing messages
categories_filepath (str): Path to the CSV file containing categories of each message
Returns:
df (DataFrame): A merged DataFrame containing messages and categories
'''
# Load messages dataset
messages = pd.read_csv(messages_filepath)
# load categories dataset
categories = pd.read_csv(categories_filepath)
# Merge datasets
df = messages.merge(categories, on='id')
return df
def clean_data(df):
'''
Clean the data for machine learning model. Cleaning processes include:
1) Split 'categories' column in the dataframe into separate category columns.
2) Convert category values to just numbers 0 or 1 by removing the texts.
3) Replace 'categories' column in df with new category columns created in Step 1.
4) Remove duplicates.
5) Remove rows with 2 in 'related' category column.
Args:
df (DataFrame): A DataFrame
Returns:
df_clean (DataFrame): clean DataFrame
'''
# Make a copy of df
df_clean = df.copy()
# Create a dataframe of the 36 individual category columns
categories = df_clean['categories'].str.strip().str.split(';', expand=True)
# Select the first row of the categories dataframe
row = categories.iloc[0, :]
# Use this row to extract a list of new column names for categories.
category_colnames = row.apply(lambda x: x[:-2])
# Rename the columns of `categories`
categories.columns = category_colnames
# Convert category values to just numbers 0 or 1.
for column in categories:
# Set each value to be the last character of the string
categories[column] = categories[column].str.split('-').str[-1]
# Convert column from string to numeric
categories[column] = pd.to_numeric(categories[column])
# Drop the original categories column from 'df'
df_clean = df_clean.drop(columns=['categories'])
# Concatenate the original dataframe with the new 'categories' dataframe
df_clean = pd.concat([df_clean, categories], axis=1)
# Drop duplicates
df_clean = df_clean.drop_duplicates()
# Drop rows with 2 in 'related' column
df_clean = df_clean[df_clean['related'] != 2].reset_index(drop=True)
return df_clean
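# Hedged worked example (not in the original file) of the Step-2 conversion
# inside clean_data: the trailing digit of each category token becomes the
# numeric value.
def _category_split_demo():
    s = pd.Series(["related-1", "request-0"])
    return pd.to_numeric(s.str.split('-').str[-1])  # -> [1, 0]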
def save_data(df, database_filename):
''' Save clean dataset to a SQLite database
Args:
df (DataFrame): Clean dataframe
database_filename (string): Path at which database will be stored
Returns:
None
'''
engine = create_engine('sqlite:///' + database_filename)
df.to_sql('DisasterMessages', engine, index=False)
def main():
if len(sys.argv) == 4:
messages_filepath, categories_filepath, database_filepath = sys.argv[1:]
print('Loading data...\n MESSAGES: {}\n CATEGORIES: {}'
.format(messages_filepath, categories_filepath))
df = load_data(messages_filepath, categories_filepath)
print('Cleaning data...')
df = clean_data(df)
print('Saving data...\n DATABASE: {}'.format(database_filepath))
save_data(df, database_filepath)
print('Cleaned data saved to database!')
else:
print('Please provide the filepaths of the messages and categories '\
'datasets as the first and second argument respectively, as '\
'well as the filepath of the database to save the cleaned data '\
'to as the third argument. \n\nExample: python process_data.py '\
'disaster_messages.csv disaster_categories.csv '\
'DisasterResponse.db')
if __name__ == '__main__':
main()
| 33.94958
| 98
| 0.658168
| 506
| 4,040
| 5.140316
| 0.3083
| 0.040369
| 0.017301
| 0.052288
| 0.117647
| 0.089965
| 0.05767
| 0.05767
| 0.02922
| 0
| 0
| 0.007002
| 0.257673
| 4,040
| 119
| 99
| 33.94958
| 0.860287
| 0.392822
| 0
| 0
| 0
| 0
| 0.218709
| 0.019056
| 0
| 0
| 0
| 0
| 0
| 1
| 0.088889
| false
| 0
| 0.066667
| 0
| 0.2
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eeacff18635731300c340b2e253ce1bf7ee2b4e0
| 3,432
|
py
|
Python
|
pycle/bicycle-scrapes/bike-data-scrape/scraperMulti.py
|
fusuyfusuy/School-Projects
|
8e38f19da90f63ac9c9ec91e550fc5aaab3d0234
|
[
"MIT"
] | null | null | null |
pycle/bicycle-scrapes/bike-data-scrape/scraperMulti.py
|
fusuyfusuy/School-Projects
|
8e38f19da90f63ac9c9ec91e550fc5aaab3d0234
|
[
"MIT"
] | null | null | null |
pycle/bicycle-scrapes/bike-data-scrape/scraperMulti.py
|
fusuyfusuy/School-Projects
|
8e38f19da90f63ac9c9ec91e550fc5aaab3d0234
|
[
"MIT"
] | null | null | null |
from bs4 import BeautifulSoup
import os
import csv
bicycles = []
basepath = 'HTMLFiles/'
outputFile = open('scraped.py','a')
outputFile.write("list=[")
len1 = len(os.listdir(basepath))
counter1 = 0
for entry in os.listdir(basepath):
counter2 = 0
len2 = len(os.listdir(basepath+'/'+entry))
for folder in os.listdir(basepath+'/'+entry):
listFile = open(basepath+entry+'/'+folder,"r")
try:
parsed = BeautifulSoup(listFile, "html.parser")
    except Exception:
print('bs4 error in '+basepath+entry+'/'+folder)
break
bicycle = {
'Brand': '-',
'Model': '-',
'Weight': '-',
'Released on the market': '-',
'For women': '-',
'For kids': '-',
'Frame material': '-',
'Frame type': '-',
'Collapsible frame': '-',
'Color': '-',
'Fork type': '-',
'Shock absorber type': '-',
'Shock absorber pressure': '-',
'Fork name': '-',
'Wheel drive': '-',
'Drive type': '-',
'Transmission type': '-',
'Number of speeds': '-',
'System name': '-',
'Cassette name': '-',
'Front derailleur gears name': '-',
'Rear derailleur gears name': '-',
'Shifters type': '-',
'Shifters name': '-',
'Front brakes': '-',
'Front brakes name': '-',
'Rear brakes': '-',
'Number of wheels': '-',
'Wheels diameter': '-',
'Double rim': '-',
'Rim material': '-',
'Rims name': '-',
'Tyres pattern': '-',
'Tyres name': '-',
'Handlebar type': '-',
'Handlebar name': '-',
'Seat type': '-',
'Seat suspension': '-',
'Seat name': '-',
'Pedals type': '-',
'Pedals name': '-',
'Front panel': '-',
'Rear panel panel': '-',
'Trunk': '-',
'Rearview mirror': '-',
'Horn': '-',
'Basket': '-'
}
tableRows = parsed.findAll('tr')
for row in tableRows:
tableData = row.findAll('td')
try:
key = tableData[0].text.strip()
value = tableData[1].text.strip()
            except Exception:
print('error in '+basepath+entry+'/'+folder)
break
else:
bicycle[key] = value
if(bicycle['Brand']!='-'):
bicycles.append(bicycle)
outputFile.write(str(bicycle)+',\n')
counter2+=1
print("parsing "+str(counter2)+" of "+str(len2)+" ", end='\r')
counter1+=1
print("\nFOLDER parsing "+str(counter1)+" of "+str(len1)+" \n", end='\r')
# keys = bicycles[0].keys()
# with open('bicycles.csv', 'w', newline='') as output_file:
# dict_writer = csv.DictWriter(output_file, keys)
# dict_writer.writeheader()
# dict_writer.writerows(bicycles)
outputFile.write(']')
toWrite = """
import csv
keys = list[0].keys()
with open('bicycles.csv', 'w', newline='') as output_file:
dict_writer = csv.DictWriter(output_file, keys)
dict_writer.writeheader()
dict_writer.writerows(list)
"""
outputFile.write(toWrite)
| 28.840336
| 106
| 0.456002
| 300
| 3,432
| 5.183333
| 0.4
| 0.038585
| 0.04373
| 0.025723
| 0.195498
| 0.195498
| 0.155627
| 0.155627
| 0.155627
| 0.155627
| 0
| 0.00917
| 0.36451
| 3,432
| 119
| 107
| 28.840336
| 0.703806
| 0.059149
| 0
| 0.083333
| 0
| 0
| 0.315447
| 0.024504
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.041667
| 0
| 0.041667
| 0.041667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eeb69df1582f775092e1af736d2173a50d2365bb
| 484
|
py
|
Python
|
tests/test_lines_count.py
|
MacHu-GWU/single_file_module-project
|
01f7a6b250853bebfd73de275895bf274325cfc1
|
[
"MIT"
] | 3
|
2017-02-27T05:07:46.000Z
|
2022-01-17T06:46:20.000Z
|
tests/test_lines_count.py
|
MacHu-GWU/single_file_module-project
|
01f7a6b250853bebfd73de275895bf274325cfc1
|
[
"MIT"
] | null | null | null |
tests/test_lines_count.py
|
MacHu-GWU/single_file_module-project
|
01f7a6b250853bebfd73de275895bf274325cfc1
|
[
"MIT"
] | 1
|
2017-09-05T14:05:55.000Z
|
2017-09-05T14:05:55.000Z
|
# -*- coding: utf-8 -*-
import os
import pytest
from sfm import lines_count
def test_lines_count():
assert lines_count.count_lines(__file__) >= 22
def test_lines_stats():
n_files, n_lines = lines_count.lines_stats(
os.path.dirname(__file__), lines_count.filter_python_script)
assert n_files >= 17
assert n_lines >= 1096
if __name__ == "__main__":
basename = os.path.basename(__file__)
pytest.main([basename, "-s", "--tb=native"])
| 20.166667
| 68
| 0.688017
| 68
| 484
| 4.367647
| 0.470588
| 0.16835
| 0.080808
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022901
| 0.188017
| 484
| 23
| 69
| 21.043478
| 0.732824
| 0.043388
| 0
| 0.142857
| 0
| 0
| 0.045553
| 0
| 0
| 0
| 0
| 0
| 0.214286
| 1
| 0.142857
| false
| 0
| 0.285714
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eebdcac25970fd8db9e1b4ca1a89af16a4e7a240
| 803
|
py
|
Python
|
slushtools/string/__init__.py
|
ZackPaceCoder/slushtools
|
32bfee028d30fd8fd88e332bdd744a71e51d6dcc
|
[
"MIT"
] | null | null | null |
slushtools/string/__init__.py
|
ZackPaceCoder/slushtools
|
32bfee028d30fd8fd88e332bdd744a71e51d6dcc
|
[
"MIT"
] | null | null | null |
slushtools/string/__init__.py
|
ZackPaceCoder/slushtools
|
32bfee028d30fd8fd88e332bdd744a71e51d6dcc
|
[
"MIT"
] | null | null | null |
# Slush Tools STRING Module
class String:
    def __init__(self, value):
        if value is None:
            raise ValueError("String argument required.")
        self.bu = value   # backup of the initial value, used by reset()
        self.dat = value  # current working value
    def reset(self):
        self.dat = self.bu
        return self.dat
    def format(self, type="custom", args=None):
        args = args or {}
        if type == "custom":
            # replace "$key" placeholders with the corresponding values
            for key, value in args.items():
                self.dat = self.dat.replace("$" + key, value)
            return self.dat
        elif type == "py":
            self.dat = self.dat.format(*args)
            return self.dat
        else:
            raise ValueError("Unknown format type.")
    def append(self, tail):
        self.dat = self.dat + tail
        return self.dat
    def endswith(self, suffix):
        return self.dat.endswith(suffix)
    def simple(self, delimiter):
        return self.dat.split(delimiter)
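# Hedged usage sketch (not in the original module):
#   s = String("hello $name")
#   s.format(args={"name": "world"})  # -> "hello world"
#   s.reset()                         # -> "hello $name"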
| 20.075
| 48
| 0.523039
| 106
| 803
| 3.924528
| 0.377358
| 0.129808
| 0.086538
| 0.072115
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.354919
| 803
| 39
| 49
| 20.589744
| 0.803089
| 0.031133
| 0
| 0.27027
| 0
| 0
| 0.07732
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.162162
| false
| 0
| 0
| 0.027027
| 0.459459
| 0.054054
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eebe3ee2689c486643e9c66684f0834e67a050c1
| 2,001
|
py
|
Python
|
lib/gams/general_utils.py
|
zzzace2000/nodegam
|
79c8675e65d75237f2e853ae55bbc40ae7124ee9
|
[
"MIT"
] | 7
|
2021-11-06T14:26:07.000Z
|
2022-03-17T10:27:17.000Z
|
lib/gams/general_utils.py
|
zzzace2000/node
|
4501233177173ee9b246a5a5e462afd3b1d51bbb
|
[
"MIT"
] | 1
|
2022-03-22T01:08:27.000Z
|
2022-03-22T17:19:50.000Z
|
lib/gams/general_utils.py
|
zzzace2000/node
|
4501233177173ee9b246a5a5e462afd3b1d51bbb
|
[
"MIT"
] | 1
|
2021-11-06T14:27:05.000Z
|
2021-11-06T14:27:05.000Z
|
import time, os
import numpy as np
import json
class Timer:
def __init__(self, name, remove_start_msg=True):
self.name = name
self.remove_start_msg = remove_start_msg
def __enter__(self):
self.start_time = time.time()
print('Run "%s".........' % self.name, end='\r' if self.remove_start_msg else '\n')
def __exit__(self, exc_type, exc_val, exc_tb):
time_diff = float(time.time() - self.start_time)
time_str = '{:.1f}s'.format(time_diff) if time_diff >= 1 else '{:.0f}ms'.format(time_diff * 1000)
print('Finish "{}" in {}'.format(self.name, time_str))
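# Hedged usage sketch (not in the original module): Timer is a context
# manager, so timing any block is one "with" statement.
def _timer_demo():
    with Timer('sleep'):
        time.sleep(0.2)  # prints roughly: Finish "sleep" in 200ms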
def output_csv(the_path, data_dict, order=None, delimiter=','):
if the_path.endswith('.tsv'):
delimiter = '\t'
is_file_exists = os.path.exists(the_path)
with open(the_path, 'a+') as op:
keys = list(data_dict.keys())
if order is not None:
keys = order + [k for k in keys if k not in order]
col_title = delimiter.join([str(k) for k in keys])
if not is_file_exists:
print(col_title, file=op)
else:
old_col_title = open(the_path, 'r').readline().strip()
if col_title != old_col_title:
old_order = old_col_title.split(delimiter)
additional_keys = [k for k in keys if k not in old_order]
if len(additional_keys) > 0:
print('WARNING! The data_dict has following additional keys %s' % (str(additional_keys)))
no_key = [k for k in old_order if k not in keys]
if len(no_key) > 0:
raise(RuntimeError('The data_dict does not have the following old keys: %s' % str(no_key)))
keys = old_order + additional_keys
print(delimiter.join([str(data_dict[k]) for k in keys]), file=op)
def vector_in(vec, names):
is_kept = (vec == names[0])
for m_name in names[1:]:
is_kept = (is_kept | (vec == m_name))
return is_kept
| 35.105263
| 111
| 0.592704
| 300
| 2,001
| 3.71
| 0.303333
| 0.043127
| 0.022462
| 0.031447
| 0.055705
| 0.045822
| 0.034142
| 0.034142
| 0.034142
| 0
| 0
| 0.00766
| 0.282359
| 2,001
| 56
| 112
| 35.732143
| 0.767409
| 0
| 0
| 0
| 0
| 0
| 0.085957
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.119048
| false
| 0
| 0.071429
| 0
| 0.238095
| 0.119048
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eec036acad92775b225df98eed2eda788c78e178
| 32,553
|
py
|
Python
|
mindaffectBCI/decoder/utils.py
|
rohitvk1/pymindaffectBCI
|
0348784d9b0fbd9d595e31ae46d2e74632399507
|
[
"MIT"
] | 44
|
2020-02-07T15:01:47.000Z
|
2022-03-21T14:36:15.000Z
|
mindaffectBCI/decoder/utils.py
|
CkiChen/pymindaffectBCI
|
0119145a8b280c776f4c4e6cd776fed0f0156404
|
[
"MIT"
] | 17
|
2020-02-07T17:11:23.000Z
|
2022-02-20T18:01:42.000Z
|
mindaffectBCI/decoder/utils.py
|
CkiChen/pymindaffectBCI
|
0119145a8b280c776f4c4e6cd776fed0f0156404
|
[
"MIT"
] | 19
|
2020-02-07T17:13:22.000Z
|
2022-03-17T01:22:35.000Z
|
# Copyright (c) 2019 MindAffect B.V.
# Author: Jason Farquhar <jason@mindaffect.nl>
# This file is part of pymindaffectBCI <https://github.com/mindaffect/pymindaffectBCI>.
#
# pymindaffectBCI is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pymindaffectBCI is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pymindaffectBCI. If not, see <http://www.gnu.org/licenses/>
import numpy as np
# time-series tests
def window_axis(a, winsz, axis=0, step=1, prependwindowdim=False):
''' efficient view-based slicing of equal-sized equally-spaced windows along a selected axis of a numpy nd-array '''
if axis < 0: # no negative axis indices
axis = len(a.shape)+axis
# compute the shape/strides for the windowed view of a
if prependwindowdim: # window dim before axis
shape = a.shape[:axis] + (winsz, int((a.shape[axis]-winsz)/step)+1) + a.shape[(axis+1):]
strides = a.strides[:axis] + (a.strides[axis], a.strides[axis]*step) + a.strides[(axis+1):]
else: # window dim after axis
shape = a.shape[:axis] + (int((a.shape[axis]-winsz)/step)+1, winsz) + a.shape[(axis+1):]
strides = a.strides[:axis] + (a.strides[axis]*step, a.strides[axis]) + a.strides[(axis+1):]
#print("a={}".format(a.shape))
#print("shape={} stride={}".format(shape,strides))
# return the computed view
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
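# Hedged worked example (not in the original module): a 1-d signal of length
# 6 with winsz=3, step=1 yields four overlapping views sharing one buffer.
def _window_axis_demo():
    a = np.arange(6)                     # [0 1 2 3 4 5]
    w = window_axis(a, winsz=3, step=1)  # shape (4, 3), no copy
    return w  # [[0 1 2], [1 2 3], [2 3 4], [3 4 5]]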
def equals_subarray(a, pat, axis=-1, match=-1):
''' efficiently find matches of a 1-d sub-array along axis within an nd-array '''
if axis < 0: # no negative dims
axis = a.ndim+axis
# reshape to match dims of a
if not isinstance(pat, np.ndarray): pat = np.array(pat) # ensure is numpy
pshape = np.ones(a.ndim+1, dtype=int); pshape[axis+1] = pat.size
pat = np.array(pat.ravel(),dtype=a.dtype).reshape(pshape) # [ ... x l x...]
# window a into pat-len pieces
aw = window_axis(a, pat.size, axis=axis, step=1) # [ ... x t-l x l x ...]
# do the match
F = np.all(np.equal(aw, pat), axis=axis+1) # [... x t-l x ...]
# pad to make the same shape as input
padshape = list(a.shape); padshape[axis] = a.shape[axis]-F.shape[axis]
if match == -1: # match at end of pattern -> pad before
F = np.append(np.zeros(padshape, dtype=F.dtype), F, axis)
else: # match at start of pattern -> pad after
F = np.append(F, np.zeros(padshape, dtype=F.dtype), axis)
return F
class RingBuffer:
    ''' time-efficient linear ring-buffer for storing packed data, e.g. contiguous np-arrays '''
def __init__(self, maxsize, shape, dtype=np.float32):
self.elementshape = shape
self.bufshape = (int(maxsize), )+shape
self.buffer = np.zeros((2*int(maxsize), np.prod(shape)), dtype=dtype) # store as 2d
# position for the -1 element. N.B. start maxsize so pos-maxsize is always valid
self.pos = int(maxsize)
self.n = 0 # count of total number elements added to the buffer
self.copypos = 0 # position of the last element copied to the 1st half
self.copysize = 0 # number entries to copy as a block
def clear(self):
'''empty the ring-buffer and reset to empty'''
self.pos=int(self.bufshape[0])
self.n =0
self.copypos=0
self.copysize=0
def append(self, x):
'''add single element to the ring buffer'''
return self.extend(x[np.newaxis, ...])
def extend(self, x):
'''add a group of elements to the ring buffer'''
# TODO[] : incremental copy to the 1st half, to spread the copy cost?
nx = x.shape[0]
if self.pos+nx >= self.buffer.shape[0]:
flippos = self.buffer.shape[0]//2
# flippos-nx to 1st half
self.buffer[:(flippos-nx), :] = self.buffer[(self.pos-(flippos-nx)):self.pos, :]
# move cursor to end 1st half
self.pos = flippos-nx
# insert in the buffer
self.buffer[self.pos:self.pos+nx, :] = x.reshape((nx, self.buffer.shape[1]))
# move the cursor
self.pos = self.pos+nx
# update the count
self.n = self.n + nx
return self
@property
def shape(self):
return (min(self.n,self.bufshape[0]),)+self.bufshape[1:]
def unwrap(self):
'''get a view on the valid portion of the ring buffer'''
return self.buffer[self.pos-min(self.n,self.bufshape[0]):self.pos, :].reshape(self.shape)
def __getitem__(self, item):
return self.unwrap()[item]
def __iter__(self):
return iter(self.unwrap())
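# Hedged usage sketch (not in the original module): keep only the most recent
# maxsize samples; note extend() assumes each chunk is no larger than maxsize.
def _ringbuffer_demo():
    rb = RingBuffer(maxsize=4, shape=(2,))
    data = np.arange(12, dtype=np.float32).reshape(6, 2)
    rb.extend(data[:3])
    rb.extend(data[3:])
    return rb.unwrap()  # last 4 samples: [[4 5] [6 7] [8 9] [10 11]]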
def extract_ringbuffer_segment(rb, bgn_ts, end_ts=None):
    ''' extract the data between start/end time-stamps, using the time-stamps stored in the last channel of an nd matrix '''
# get the data / msgs from the ringbuffers
X = rb.unwrap() # (nsamp,nch+1)
X_ts = X[:, -1] # last channel is timestamps
# TODO: binary-search to make these searches more efficient!
# search backwards for trial-start time-stamp
    # TODO[X] : use a bracketing test (better with wrap-around)
bgn_samp = np.flatnonzero(np.logical_and(X_ts[:-1] < bgn_ts, bgn_ts <= X_ts[1:]))
# get the index of this timestamp, guarding for after last sample
if len(bgn_samp) == 0 :
bgn_samp = 0 if bgn_ts <= X_ts[0] else len(X_ts)+1
else:
bgn_samp = bgn_samp[0]
# and just to be sure the trial-end timestamp
if end_ts is not None:
end_samp = np.flatnonzero(np.logical_and(X_ts[:-1] < end_ts, end_ts <= X_ts[1:]))
# get index of this timestamp, guarding for after last data sample
end_samp = end_samp[-1] if len(end_samp) > 0 else len(X_ts)
else: # until now
end_samp = len(X_ts)
# extract the trial data, and make copy (just to be sure)
X = X[bgn_samp:end_samp+1, :].copy()
return X
def unwrap(x,range=None):
''' unwrap a list of numbers to correct for truncation due to limited bit-resolution, e.g. time-stamps stored in 24bit integers'''
if range is None:
range = 1<< int(np.ceil(np.log2(max(x))))
wrap_ind = np.diff(x) < -range/2
unwrap = np.zeros(x.shape)
unwrap[np.flatnonzero(wrap_ind)+1]=range
unwrap=np.cumsum(unwrap)
x = x + unwrap
return x
def unwrap_test():
x = np.cumsum(np.random.rand(6000,1))
xw = x%(1<<10)
xuw = unwrap(x)
import matplotlib.pyplot as plt
plt.plot(x,label='x')
plt.plot(xw,label='x (wrapped)')
plt.plot(xuw,label='x (unwrapped')
plt.legend()
def search_directories_for_file(f,*args):
"""search a given set of directories for given filename, return 1st match
Args:
f (str): filename to search for (or a pattern)
*args (): set for directory names to look in
Returns:
f (str): the *first* full path to where f is found, or f if not found.
"""
import os
import glob
f = os.path.expanduser(f)
if os.path.exists(f) or len(glob.glob(f))>0:
return f
for d in args:
#print('Searching dir: {}'.format(d))
df = os.path.join(d,f)
if os.path.exists(df) or len(glob.glob(df))>0:
f = df
break
return f
# toy data generation
#@function
def randomSummaryStats(d=10, nE=2, tau=10, nY=1):
import numpy as np
# pure random test-case
Cxx = np.random.standard_normal((d, d))
Cxy = np.random.standard_normal((nY, nE, tau, d))
Cyy = np.random.standard_normal((nY, nE, tau, nE, tau))
return (Cxx, Cxy, Cyy)
def testNoSignal(d=10, nE=2, nY=1, isi=5, tau=None, nSamp=10000, nTrl=1):
# Simple test-problem -- no real signal
if tau is None:
tau = 10*isi
X = np.random.standard_normal((nTrl, nSamp, d))
stimTimes_samp = np.arange(0, X.shape[-2] - tau, isi)
Me = np.random.standard_normal((nTrl, len(stimTimes_samp), nY, nE))>1
Y = np.zeros((nTrl, X.shape[-2], nY, nE))
Y[:, stimTimes_samp, :, :] = Me
return (X, Y, stimTimes_samp)
def testSignal(nTrl=1, d=5, nE=2, nY=30, isi=5, tau=None, offset=0, nSamp=10000, stimthresh=.6, noise2signal=1, irf=None):
#simple test problem, with overlapping response
import numpy as np
if tau is None:
tau = 10 if irf is None else len(irf)
nEp = int((nSamp-tau)/isi)
cb = np.random.standard_normal((nEp, nY, nE)) > stimthresh # codebook = per-epoch stimulus activity
E = cb # (nEp, nY, nE) # per-epoch stimulus activity
# up-sample to sample rate
stimTimes_samp = np.arange(0, nSamp-tau, isi) # (nEp)
Y = np.zeros((nSamp, nY, E.shape[-1]))
Y[stimTimes_samp, :, :] = E[:len(stimTimes_samp), :, :] #per-sample stimulus activity (nSamp, nY, nE) [nE x nY x nSamp]
Y = np.tile(Y,(nTrl,1,1,1)) # replicate for the trials
# generate the brain source
A = np.random.standard_normal((nE, d)) # spatial-pattern for the source signal
if irf is None:
B = np.zeros((tau), dtype=np.float32)
B[-3] = 1; # true response filter (shift by 10 samples)
else:
B = np.array(irf, dtype=np.float32)
Ytrue = Y[..., 0, :] # (nTrl, nSamp, nE)
if True:
# convolve with the impulse response - manually using window_axis
# zero pad before for the sliding window
Ys = np.zeros(Ytrue.shape[:-2]+(Ytrue.shape[-2]+tau-1,)+Ytrue.shape[-1:])
Ys[..., tau-1+offset:Ytrue.shape[-2]+tau-1+offset, :] = Ytrue # zero-pad at front + include the offset.
Yse = window_axis(Ys, winsz=len(B), axis=-2) # (nTr,nSamp,tau,nE)
YtruecB = np.einsum("Tste,t->Tse", Yse, B[::-1]) # N.B. time-reverse irf (nTr,nSamp,nE)
else:
# use the np convolve function, N.B. implicitly time reverses B (like we want)
YtruecB = np.array([np.convolve(Ytrue[:, ei], B, 'full') for ei in range(Ytrue.shape[-1])]).T #(nSamp+pad, nE) [nE x nSamp]
YtruecB = YtruecB[:Ytrue.shape[0], :] # trim the padding
#import matplotlib.pyplot as plt; plt.clf(); plt.plot(Ytrue[:100,0],'b*',label='Y'); plt.plot(YtruecB[:100,0],'g*',label='Y*B'); plt.plot(B,'k',label='B'); plt.legend()
#print("Ytrue={}".format(Ytrue.shape))
#print("YtruecB={}".format(YtruecB.shape))
S = YtruecB # (nTr, nSamp, nE) true response, i.e. filtered Y
N = np.random.standard_normal(S.shape[:-1]+(d,)) # EEG noise (nTr, nSamp, d)
X = np.einsum("tse,ed->tsd", S, A) + noise2signal*N # simulated data.. true source mapped through spatial pattern (nSamp, d) #[d x nSamp]
return (X, Y, stimTimes_samp, A, B)
def testtestSignal():
import matplotlib.pyplot as plt
plt.clf()
# shift by 5
offset=0; irf=(0,0,0,0,0,1,0,0,0,0)
X,Y,st,W,R = testSignal(nTrl=1,nSamp=500,d=1,nE=1,nY=1,isi=10,tau=10,offset=offset,irf=irf,noise2signal=0)
plt.subplot(311);plt.plot(X[0,:,0],label='X');plt.plot(Y[0,:,0,0],label='Y');plt.title("offset={}, irf={}".format(offset,irf));plt.legend()
# back-shift-by-5 -> 0 shift
offset=-5
X,Y,st,W,R = testSignal(nTrl=1,nSamp=500,d=1,nE=1,nY=1,isi=10,tau=10,offset=offset,irf=(0,0,0,0,0,1,0,0,0,0),noise2signal=0)
plt.subplot(312);plt.plot(X[0,:,0],label='X');plt.plot(Y[0,:,0,0],label='Y');plt.title("offset={}, irf={}".format(offset,irf));plt.legend()
# back-shift-by-10 -> -5 shift
offset=-9
X,Y,st,W,R = testSignal(nTrl=1,nSamp=500,d=1,nE=1,nY=1,isi=10,tau=10,offset=offset,irf=(0,0,0,0,0,1,0,0,0,0),noise2signal=0)
plt.subplot(313);plt.plot(X[0,:,0],label='X');plt.plot(Y[0,:,0,0],label='Y');plt.title("offset={}, irf={}".format(offset,irf));plt.legend()
def sliceData(X, stimTimes_samp, tau=10):
# make a sliced version
dst = np.diff(stimTimes_samp)
    if np.all(dst == dst[0]) and stimTimes_samp[0] == 0: # fast path for equally spaced stimTimes
Xe = window_axis(X, winsz=tau, axis=-2, step=int(dst[0]), prependwindowdim=False) # (nTrl, nEp, tau, d) #d x tau x ep x trl
else:
Xe = np.zeros(X.shape[:-2] + (len(stimTimes_samp), tau, X.shape[-1])) # (nTrl, nEp, tau, d) [ d x tau x nEp x nTrl ]
for ei, si in enumerate(stimTimes_samp):
idx = range(si, si+tau)
Xe[:, ei, :, :] = X[:, idx, :] if X.ndim > 2 else X[idx, :]
return Xe
def sliceY(Y, stimTimes_samp, featdim=True):
'''
Y = (nTrl, nSamp, nY, nE) if featdim=True
OR
Y=(nTrl, nSamp, nY) if featdim=False #(nE x nY x nSamp x nTrl)
'''
# make a sliced version
si = np.array(stimTimes_samp, dtype=int)
if featdim:
return Y[:, si, :, :] if Y.ndim > 3 else Y[si, :, :]
else:
return Y[:, si, :] if Y.ndim > 2 else Y[si, :]
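# Usage sketch tying the toy-data generator and the slicing helpers together
# (the shapes in the comments are the intended layouts; the data is random):
def _demo_slicing():
    X, Y, stimTimes_samp = testNoSignal(d=4, nE=2, nY=1)
    Xe = sliceData(X, stimTimes_samp, tau=10)  # (nTrl, nEp, tau, d) sliced data
    Ye = sliceY(Y, stimTimes_samp)             # (nTrl, nEp, nY, nE) sliced stimulus
    print(Xe.shape, Ye.shape)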
def block_randomize(true_target, npermute, axis=-3, block_size=None):
    ''' make a block-random permutation of the input array
    Inputs:
        true_target: (..., nEp, nY, e): true target value for nTrl trials of length nEp flashes
        npermute: int - number of permutations to make
        axis: int - the axis along which to permute true_target'''
if true_target.ndim < 3:
raise ValueError("true target info must be at least 3d")
if not (axis == -3 or axis == true_target.ndim-2):
raise NotImplementedError("Only implementated for axis=-2 currently")
# estimate the number of blocks to use
if block_size is None:
block_size = max(1, true_target.shape[axis]/2/npermute)
nblk = int(np.ceil(true_target.shape[axis]/block_size))
blk_lims = np.linspace(0, true_target.shape[axis], nblk, dtype=int)
# convert to start/end index for each block
blk_lims = [(blk_lims[i], blk_lims[i+1]) for i in range(len(blk_lims)-1)]
cb = np.zeros(true_target.shape[:axis+1] + (npermute, true_target.shape[-1]))
for ti in range(cb.shape[axis+1]):
for di, dest_blk in enumerate(blk_lims):
yi = np.random.randint(true_target.shape[axis+1])
si = np.random.randint(len(blk_lims))
# ensure can't be the same block
if si == di:
si = si+1 if si < len(blk_lims)-1 else si-1
src_blk = blk_lims[si]
# guard for different lengths for source/dest blocks
dest_len = dest_blk[1] - dest_blk[0]
if dest_len > src_blk[1]-src_blk[0]:
if src_blk[0]+dest_len < true_target.shape[axis]:
# enlarge the src
src_blk = (src_blk[0], src_blk[0]+dest_len)
elif src_blk[1]-dest_len > 0:
src_blk = (src_blk[1]-dest_len, src_blk[1])
else:
raise ValueError("can't fit source and dest")
elif dest_len < src_blk[1]-src_blk[0]:
src_blk = (src_blk[0], src_blk[0]+dest_len)
cb[..., dest_blk[0]:dest_blk[1], ti, :] = true_target[..., src_blk[0]:src_blk[1], yi, :]
return cb
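# Illustrative sketch of block_randomize: build npermute surrogate target
# sequences by re-drawing blocks of the true target (random inputs, shapes only):
def _demo_block_randomize():
    import numpy as np
    Ytrue = np.random.standard_normal((2, 100, 1, 2)) > 1  # (nTrl, nEp, nY, nE)
    Yperm = block_randomize(Ytrue, npermute=5)             # (nTrl, nEp, npermute, nE)
    print(Yperm.shape)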
def upsample_codebook(trlen, cb, ep_idx, stim_dur_samp, offset_samp=(0, 0)):
''' upsample a codebook definition to sample rate
Inputs:
trlen : (int) length after up-sampling
cb : (nTr, nEp, ...) the codebook
ep_idx : (nTr, nEp) the indices of the codebook entries
stim_dur_samp: (int) the amount of time the cb entry is held for
offset_samp : (2,):int the offset for the stimulus in the upsampled trlen data
Outputs:
Y : ( nTrl, trlen, ...) the up-sampled codebook '''
if ep_idx is not None:
if not np.all(cb.shape[:ep_idx.ndim] == ep_idx.shape):
raise ValueError("codebook and epoch indices must has same shape")
trl_idx = ep_idx[:, 0] # start each trial
    else: # make dummy ep_idx with 0 for every trial!
        ep_idx = np.zeros((cb.shape[0], 1), dtype=int)
        trl_idx = ep_idx[:, 0]
Y = np.zeros((cb.shape[0], trlen)+ cb.shape[2:], dtype='float32') # (nTr, nSamp, ...)
for ti, trl_start_idx in enumerate(trl_idx):
for ei, epidx in enumerate(ep_idx[ti, :]):
if ei > 0 and epidx == 0: # zero indicates end of variable length trials
break
# start index for this epoch in this *trial*, including the 0-offset
ep_start_idx = -int(offset_samp[0])+int(epidx-trl_start_idx)
Y[ti, ep_start_idx:(ep_start_idx+int(stim_dur_samp)), ...] = cb[ti, ei, ...]
return Y
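# Minimal sketch of upsample_codebook: hold each codebook entry for
# stim_dur_samp samples starting at its epoch index (illustrative values only):
def _demo_upsample_codebook():
    import numpy as np
    cb = np.random.standard_normal((2, 5, 3)) > .5  # (nTrl, nEp, nE) codebook
    ep_idx = np.tile(np.arange(5)*10, (2, 1))       # epochs every 10 samples
    Y = upsample_codebook(60, cb, ep_idx, stim_dur_samp=10)
    print(Y.shape)  # (2, 60, 3)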
def lab2ind(lab,lab2class=None):
''' convert a list of labels (as integers) to a class indicator matrix'''
if lab2class is None:
lab2class = [ (l,) for l in set(lab) ] # N.B. list of lists
if not isinstance(lab,np.ndarray):
lab=np.array(lab)
Y = np.zeros(lab.shape+(len(lab2class),),dtype=bool)
for li,ls in enumerate(lab2class):
for l in ls:
Y[lab == l, li]=True
return (Y,lab2class)
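# Quick sketch of lab2ind: integer labels -> one-hot indicator matrix
# (the label-to-class ordering comes from set(), so it may vary):
def _demo_lab2ind():
    Y, lab2class = lab2ind([0, 1, 2, 1, 0])
    print(Y.shape, lab2class)  # (5, 3) and e.g. [(0,), (1,), (2,)]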
def zero_outliers(X, Y, badEpThresh=4, badEpChThresh=None, verbosity=0):
'''identify and zero-out bad/outlying data
Inputs:
X = (nTrl, nSamp, d)
Y = (nTrl, nSamp, nY, nE) OR (nTrl, nSamp, nE)
nE=#event-types nY=#possible-outputs nEpoch=#stimulus events to process
'''
# remove whole bad epochs first
if badEpThresh > 0:
bad_ep, _ = idOutliers(X, badEpThresh, axis=(-2, -1)) # ave over time,ch
if np.any(bad_ep):
if verbosity > 0:
print("{} badEp".format(np.sum(bad_ep.ravel())))
# copy X,Y so don't modify in place!
X = X.copy()
Y = Y.copy()
X[bad_ep[..., 0, 0], ...] = 0
#print("Y={}, Ybad={}".format(Y.shape, Y[bad_ep[..., 0, 0], ...].shape))
# zero out Y also, so don't try to 'fit' the bad zeroed data
Y[bad_ep[..., 0, 0], ...] = 0
# Remove bad individual channels next
if badEpChThresh is None: badEpChThresh = badEpThresh*2
if badEpChThresh > 0:
bad_epch, _ = idOutliers(X, badEpChThresh, axis=-2) # ave over time
if np.any(bad_epch):
if verbosity > 0:
print("{} badEpCh".format(np.sum(bad_epch.ravel())))
# make index expression to zero out the bad entries
badidx = list(np.nonzero(bad_epch)) # convert to linear indices
badidx[-2] = slice(X.shape[-2]) # broadcast over the accumulated dimensions
if not np.any(bad_ep): # copy so don't update in place
X = X.copy()
X[tuple(badidx)] = 0
return (X, Y)
def idOutliers(X, thresh=4, axis=-2, verbosity=0):
''' identify outliers with excessively high power in the input data
Inputs:
X:float the data to identify outliers in
axis:int (-2) axis of X to sum to get power
thresh(float): threshold standard deviation for outlier detection
verbosity(int): verbosity level
Returns:
badEp:bool (X.shape axis==1) indicator for outlying elements
epPower:float (X.shape axis==1) power used to identify bad
'''
#print("X={} ax={}".format(X.shape,axis))
power = np.sqrt(np.sum(X**2, axis=axis, keepdims=True))
#print("power={}".format(power.shape))
good = np.ones(power.shape, dtype=bool)
for _ in range(4):
mu = np.mean(power[good])
sigma = np.sqrt(np.mean((power[good] - mu) ** 2))
badThresh = mu + thresh*sigma
good[power > badThresh] = False
good = good.reshape(power.shape) # (nTrl, nEp)
#print("good={}".format(good.shape))
bad = ~good
if verbosity > 1:
print("%d bad" % (np.sum(bad.ravel())))
return (bad, power)
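# Sketch of idOutliers on toy data: inject one huge epoch and check it is
# flagged (shapes follow the (nTrl, nEp, nSamp, d) convention used above):
def _demo_idOutliers():
    import numpy as np
    X = np.random.standard_normal((1, 50, 100, 4))
    X[0, 3, :, :] *= 50  # make epoch 3 an obvious outlier
    bad, power = idOutliers(X, thresh=3.5, axis=(-2, -1))
    print(np.nonzero(bad[..., 0, 0]))  # should include epoch 3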
def robust_mean(X,thresh=(3,3)):
"""Compute robust mean of values in X, using gaussian outlier criteria
Args:
X (the data): the data
thresh (2,): lower and upper threshold in standard deviations
Returns:
mu (): the robust mean
good (): the indices of the 'good' data in X
"""
good = np.ones(X.shape, dtype=bool)
for _ in range(4):
mu = np.mean(X[good])
sigma = np.sqrt(np.mean((X[good] - mu) ** 2))
# re-compute outlier list
good[:]=True
if thresh[0] is not None:
badThresh = mu + thresh[0]*sigma
good[X > badThresh] = False
        if thresh[1] is not None:
            badThresh = mu - thresh[1]*sigma
            good[X < badThresh] = False
mu = np.mean(X[good])
return (mu, good)
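# Sketch of robust_mean: the gross outliers should be excluded from the
# estimate (values are illustrative):
def _demo_robust_mean():
    import numpy as np
    X = np.concatenate((np.random.standard_normal(1000), [50.0, 60.0]))
    mu, good = robust_mean(X, thresh=(3, 3))
    print(mu, int(np.sum(~good)))  # mean near 0, with the outliers marked bad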
try:
from scipy.signal import butter, bessel, sosfilt, sosfilt_zi
except ImportError:
    # scipy unavailable: use the pure-python fallbacks defined below
    def sosfilt(sos, X, axis=-1, zi=None):
        return sosfilt_2d_py(sos, X, axis=axis, zi=zi)
def sosfilt_zi(sos):
return sosfilt_zi_py(sos)
def butter(order,freq,btype,output):
return butter_py(order,freq,btype,output)
def sosfilt_zi_warmup(zi, X, axis=-1, sos=None):
    '''Use some initial data to "warm up" a second-order-sections filter to reduce startup artifacts.
    Args:
        zi (np.ndarray): the sos filter state
        X ([type]): the warmup data
        axis (int, optional): The filter axis in X. Defaults to -1.
        sos ([type], optional): the sos filter coefficients. Defaults to None.
    Returns:
        [np.ndarray]: the warmed-up filter state
    '''
if axis < 0: # no neg axis
axis = X.ndim+axis
# zi => (order,...,2,...)
zi = np.reshape(zi, (zi.shape[0],) + (1,)*(axis) + (zi.shape[1],) + (1,)*(X.ndim-axis-1))
    # make a programmatic index expression to support an arbitrary axis
idx = [slice(None)]*X.ndim
# get the index to start the warmup
warmupidx = 0 if sos is None else min(sos.size*3,X.shape[axis]-1)
# center on 1st warmup value
idx[axis] = slice(warmupidx,warmupidx+1)
zi = zi * X[tuple(idx)]
# run the filter on the rest of the warmup values
    if sos is not None and warmupidx > 3:
idx[axis] = slice(warmupidx,1,-1)
_, zi = sosfilt(sos, X[tuple(idx)], axis=axis, zi=zi)
return zi
def iir_sosfilt_sos(stopband, fs, order=4, ftype='butter', passband=None, verb=0):
''' given a set of filter cutoffs return butterworth or bessel sos coefficients '''
    # convert to normalized frequency. Note: not too close to 0/1
if stopband is None:
return np.array(())
if not hasattr(stopband[0],'__iter__'):
stopband=(stopband,)
sos=[]
for sb in stopband:
btype = None
if type(sb[-1]) is str:
btype = sb[-1]
sb = sb[:-1]
# convert to normalize frequency
sb = np.array(sb,dtype=np.float32)
sb[sb<0] = (fs/2)+sb[sb<0]+1 # neg freq count back from nyquist
Wn = sb/(fs/2)
if Wn[1] < .0001 or .9999 < Wn[0]: # no filter
continue
        # identify type from the frequencies used, clipping at the ends of the frequency range
if Wn[0] < .0001:
Wn = Wn[1]
btype = 'highpass' if btype is None or btype == 'bandstop' else 'lowpass'
elif .9999 < Wn[1]:
Wn = Wn[0]
btype = 'lowpass' if btype is None or btype == 'bandstop' else 'highpass'
elif btype is None: # .001 < Wn[0] and Wn[1] < .999:
btype = 'bandstop'
if verb>0: print("{}={}={}".format(btype,sb,Wn))
if ftype == 'butter':
sosi = butter(order, Wn, btype=btype, output='sos')
elif ftype == 'bessel':
sosi = bessel(order, Wn, btype=btype, output='sos', norm='phase')
else:
raise ValueError("Unrecognised filter type")
sos.append(sosi)
# single big filter cascade
sos = np.concatenate(sos,axis=0)
return sos
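# Design sketch for iir_sosfilt_sos at fs=200 Hz: a 45-55 Hz notch, a 1 Hz
# high-pass (from the (0, 1) stop band) and a 40 Hz low-pass (from (40, -1),
# where a negative frequency counts back from the Nyquist frequency):
def _demo_iir_sosfilt_sos():
    sos = iir_sosfilt_sos(((45, 55), (0, 1), (40, -1)), fs=200, order=4)
    print(sos.shape)  # (n_sections, 6) cascade of second-order sections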
def butter_sosfilt(X, stopband, fs:float, order:int=6, axis:int=-2, zi=None, verb=True, ftype='butter'):
"""use a (cascade of) butterworth SOS filter(s) filter X along axis
Args:
X (np.ndarray): the data to be filtered
stopband ([type]): the filter band specifications in Hz, as a list of lists of stopbands (given as (low-pass,high-pass)) or pass bands (given as (low-cut,high-cut,'bandpass'))
fs (float): the sampling rate of X
order (int, optional): the desired filter order. Defaults to 6.
axis (int, optional): the axis of X to filter along. Defaults to -2.
        zi ([type], optional): the internal filter state -- propagate between calls for incremental filtering. Defaults to None.
verb (bool, optional): Verbosity level for logging. Defaults to True.
ftype (str, optional): The type of filter to make, one-of: 'butter', 'bessel'. Defaults to 'butter'.
Returns:
X [np.ndarray]: the filtered version of X
sos (np.ndarray): the designed filter coefficients
        zi (np.ndarray): the filter state for propagation between calls
    """
if stopband is None: # deal with no filter case
return (X,None,None)
if axis < 0: # no neg axis
axis = X.ndim+axis
# TODO []: auto-order determination?
sos = iir_sosfilt_sos(stopband, fs, order, ftype=ftype)
sos = sos.astype(X.dtype) # keep as single precision
if axis == X.ndim-2 and zi is None:
zi = sosfilt_zi(sos) # (order,2)
zi = zi.astype(X.dtype)
zi = sosfilt_zi_warmup(zi, X, axis, sos)
else:
zi = None
print("Warning: not warming up...")
# Apply the warmed up filter to the input data
#print("zi={}".format(zi.shape))
    if zi is not None:
#print("filt:zi X{} axis={}".format(X.shape,axis))
X, zi = sosfilt(sos, X, axis=axis, zi=zi)
else:
print("filt:no-zi")
        X = sosfilt(sos, X, axis=axis)
# return filtered data, filter-coefficients, filter-state
return (X, sos, zi)
def save_butter_sosfilt_coeff(filename=None, stopband=((45,65),(5.5,25,'bandpass')), fs=200, order=6, ftype='butter'):
''' design a butterworth sos filter cascade and save the coefficients '''
import pickle
sos = iir_sosfilt_sos(stopband, fs, order, passband=None, ftype=ftype)
zi = sosfilt_zi(sos)
if filename is None:
# auto-generate descriptive filename
filename = "{}_stopband{}_fs{}.pk".format(ftype,stopband,fs)
print("Saving to: {}\n".format(filename))
    with open(filename, 'wb') as f:
        pickle.dump(sos, f)
        pickle.dump(zi, f)
def test_butter_sosfilt():
fs= 100
X = np.random.randn(fs*10,2)
X = np.cumsum(X,0)
X = X + np.random.randn(1,X.shape[1])*100 # include start shift
import matplotlib.pyplot as plt
plt.clf();plt.subplot(511);plt.plot(X);
pbs=(((0,1),(40,-1)),(10,-1),((5,10),(15,20),(45,50)))
for i,pb in enumerate(pbs):
Xf,_,_ = butter_sosfilt(X,pb,fs)
plt.subplot(5,1,i+2);plt.plot(Xf);plt.title("{}".format(pb))
# test incremental application
pb=pbs[0]
sos=None
zi =None
Xf=[]
for i in range(0,X.shape[0],fs):
if sos is None:
# init filter and do 1st block
Xfi,sos,zi = butter_sosfilt(X[i:i+fs,:],pb,fs,axis=-2)
else: # incremenally apply
Xfi,zi = sosfilt(sos,X[i:i+fs,:],axis=-2,zi=zi)
Xf.append(Xfi)
Xf = np.concatenate(Xf,axis=0)
plt.subplot(5,1,5);plt.plot(Xf);plt.title("{} - incremental".format(pb))
plt.show()
# test diff specifications
pb = ((0,1),(40,-1)) # pair stops
Xf0,_,_ = butter_sosfilt(X,pb,fs,axis=-2)
plt.subplot(3,1,1);plt.plot(Xf0);plt.title("{}".format(pb))
pb = (1,40,'bandpass') # single pass
Xfi,_,_ = butter_sosfilt(X,pb,fs,axis=-2)
plt.subplot(3,1,2);plt.plot(Xfi);plt.title("{}".format(pb))
pb = (1,40,'bandpass') # single pass
Xfi,_,_ = butter_sosfilt(X,pb,fs,axis=-2,ftype='bessel')
plt.subplot(3,1,3);plt.plot(Xfi);plt.title("{} - bessel".format(pb))
plt.show()
# TODO[] : cythonize?
# TODO[X] : vectorize over d? ---- NO. 2.5x *slower*
def sosfilt_2d_py(sos,X,axis=-2,zi=None):
''' pure python fallback for second-order-sections filter in case scipy isn't available '''
X = np.asarray(X)
sos = np.asarray(sos)
if zi is None:
returnzi = False
zi = np.zeros((sos.shape[0],2,X.shape[-1]),dtype=X.dtype)
else:
returnzi = True
zi = np.asarray(zi)
Xshape = X.shape
    if X.ndim != 2:
        print("Warning: X > 2d ... treating as 2d")
        X = X.reshape((-1, Xshape[-1]))
if axis < 0:
axis = X.ndim + axis
    if axis != X.ndim-2:
raise ValueError("Only for time in dim 0/-2")
if sos.ndim != 2 or sos.shape[1] != 6:
raise ValueError('sos must be shape (n_sections, 6)')
if zi.ndim != 3 or zi.shape[1] != 2 or zi.shape[2] != X.shape[1]:
raise ValueError('zi must be shape (n_sections, 2, dim)')
# pre-normalize sos if needed
for j in range(sos.shape[0]):
if sos[j,3] != 1.0:
sos[j,:] = sos[j,:]/sos[j,3]
n_signals = X.shape[1]
n_samples = X.shape[0]
n_sections = sos.shape[0]
# extract the a/b
b = sos[:,:3]
a = sos[:,4:]
# loop over outputs
x_n = 0
for i in range(n_signals):
for n in range(n_samples):
for s in range(n_sections):
x_n = X[n, i]
# use direct II transposed structure
X[n, i] = b[s, 0] * x_n + zi[s, 0, i]
zi[s, 0, i] = b[s, 1] * x_n - a[s, 0] * X[n, i] + zi[s, 1, i]
zi[s, 1, i] = b[s, 2] * x_n - a[s, 1] * X[n, i]
# back to input shape
    if len(Xshape) != 2:
X = X.reshape(Xshape)
# match sosfilt, only return zi if given zi
    if returnzi:
return X, zi
else:
return X
def sosfilt_zi_py(sos):
''' compute an initial state for a second-order section filter '''
sos = np.asarray(sos)
if sos.ndim != 2 or sos.shape[1] != 6:
raise ValueError('sos must be shape (n_sections, 6)')
n_sections = sos.shape[0]
zi = np.empty((n_sections, 2))
scale = 1.0
for section in range(n_sections):
b = sos[section, :3]
a = sos[section, 3:]
if a[0] != 1.0:
# Normalize the coefficients so a[0] == 1.
b = b / a[0]
a = a / a[0]
        # companion matrix of a (a[0] == 1 after normalization): [[-a[1], -a[2]], [1, 0]]
        companion_a = np.array([[-a[1], -a[2]], [1.0, 0.0]])
        IminusA = np.eye(2) - companion_a.T
B = b[1:] - a[1:] * b[0]
# Solve zi = A*zi + B
lfilter_zi = np.linalg.solve(IminusA, B)
zi[section] = scale * lfilter_zi
scale *= b.sum() / a.sum()
return zi
def test_sosfilt_py():
import pickle
with open('butter_stopband((0, 5), (25, -1))_fs200.pk','rb') as f:
sos = pickle.load(f)
zi = pickle.load(f)
X = np.random.randn(10000,3)
print("X={} sos={}".format(X.shape,sos.shape))
Xsci = sosfilt(sos,X.copy(),-2)
Xpy = sosfilt_2d_py(sos,X.copy(),-2)
import matplotlib.pyplot as plt
plt.clf()
plt.subplot(411);plt.plot(X[:500,:]);plt.title('X')
plt.subplot(412);plt.plot(Xsci[:500,:]);plt.title('Xscipy')
plt.subplot(413);plt.plot(Xpy[:500,:]);plt.title('Xpy')
plt.subplot(414);plt.plot(Xsci-Xpy);plt.title('Xsci - Xpy')
# def butter_py(order, fc, fs, btype, output):
#     ''' pure python butterworth (lowpass) filter synthesis -- unfinished sketch '''
#     if fc >= fs/2:
#         raise ValueError('fc must be less than fs/2')
#     # I. Find poles of the analog prototype filter
#     k = np.arange(1, order+1)
#     theta = (2*k - 1)*np.pi/(2*order)
#     pa = -np.sin(theta) + 1j*np.cos(theta)  # poles of filter with cutoff = 1 rad/s
#     # II. scale poles in frequency
#     Fc = fs/np.pi * np.tan(np.pi*fc/fs)  # continuous pre-warped frequency
#     pa = pa*2*np.pi*Fc  # scale poles by 2*pi*Fc
#     # III. Find coeffs of the digital filter:
#     # poles and zeros in the z plane
#     p = (1 + pa/(2*fs))/(1 - pa/(2*fs))  # poles by bilinear transform
#     q = -np.ones(order)  # zeros
#     # convert poles and zeros to polynomial coeffs
#     a = np.real(np.poly(p))  # denominator coeffs from the poles
#     b = np.poly(q)  # numerator coeffs from the zeros
#     K = np.sum(a)/np.sum(b)  # amplitude scale factor (unit gain at DC)
#     b = K*b
#     return b, a
if __name__=='__main__':
save_butter_sosfilt_coeff("sos_filter_coeff.pk")
#test_butter_sosfilt()
| 39.458182
| 183
| 0.595183
| 5,087
| 32,553
| 3.753489
| 0.155691
| 0.003876
| 0.003142
| 0.001885
| 0.170472
| 0.125327
| 0.103017
| 0.085053
| 0.071646
| 0.063685
| 0
| 0.026501
| 0.256966
| 32,553
| 824
| 184
| 39.506068
| 0.762899
| 0.342795
| 0
| 0.167702
| 0
| 0
| 0.039633
| 0.00101
| 0
| 0
| 0
| 0.002427
| 0
| 1
| 0.076605
| false
| 0.014493
| 0.024845
| 0.012422
| 0.175983
| 0.018634
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eec118b9402f1ab3d9a333bb53d8180c1858ff75
| 2,100
|
py
|
Python
|
model/test.py
|
yacoubb/lang-classifier
|
d39a342cf8ad64b191ea235f9af3f833033f254a
|
[
"MIT"
] | 1
|
2019-07-03T11:28:55.000Z
|
2019-07-03T11:28:55.000Z
|
model/test.py
|
yacoubb/lang-classifier
|
d39a342cf8ad64b191ea235f9af3f833033f254a
|
[
"MIT"
] | null | null | null |
model/test.py
|
yacoubb/lang-classifier
|
d39a342cf8ad64b191ea235f9af3f833033f254a
|
[
"MIT"
] | null | null | null |
from tensorflow import keras
import os
import numpy as np
import sys
import json
sys.path.append("/".join(os.path.abspath(__file__).split("/")[:-2]))
from model.dataset import utils, test_sampler
def estimate_model_accuracy(model):
def predict(word):
word = utils.total_conversion(word)
word = word[: utils.max_word_length]
vector_word = utils.vectorize_word_2d(word)
vector_word = np.array([vector_word])
result = model.predict(vector_word)
return utils.vector_to_language(result, languages)
languages = []
with open("./RMS_model/metadata.json", "r") as metadata_file:
metadata = json.load(metadata_file)
languages = metadata["languages"]
print("starting sampler worker...")
test_sampler.get_sample(1000, languages)
test_words = {}
with open("./dataset/test_words.json", "r") as test_word_file:
test_words = json.load(test_word_file)
print("=" * 20 + " doing predictions " + "=" * 20)
results = []
word_predictions = []
for key in test_words:
print(key)
correct = 0.0
total = 0.0
for word in test_words[key]:
total += 1.0
            prediction = predict(word)
            word_predictions.append((word, prediction))
            if prediction == key:
                correct += 1.0
results.append((key, correct * 100.0 / total))
from tabulate import tabulate
summary = ""
summary += tabulate(results, headers=["language", "accuracy"])
summary += "\n"
summary += "overall accuracy: {:2f}".format(
sum(map(lambda x: x[1], results))
/ len(list(filter(lambda x: x[1] > 0, results)))
)
summary += "\n"
return summary, word_predictions
summary, all_predictions = estimate_model_accuracy(
keras.models.load_model("./RMS_model/model.h5")
)
print(summary)
with open("./RMS_model/testing.txt", "w+") as test_file:
test_file.write(summary)
test_file.write("=" * 20 + "\n")
for word, pred in all_predictions:
test_file.write(word + ", " + pred + "\n")
| 28.378378
| 68
| 0.623333
| 263
| 2,100
| 4.802281
| 0.334601
| 0.035629
| 0.030879
| 0.025337
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01817
| 0.24
| 2,100
| 73
| 69
| 28.767123
| 0.773183
| 0
| 0
| 0.035088
| 0
| 0
| 0.097666
| 0.034778
| 0.017544
| 0
| 0
| 0
| 0
| 1
| 0.035088
| false
| 0
| 0.122807
| 0
| 0.192982
| 0.070175
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eec22817edf6f5ff4caafda2c75d1273cb9edbb8
| 2,102
|
py
|
Python
|
crawler/crawler2.py
|
labcontext/image-inpainting-oldpaper
|
da4683a2c58d662e443ea24ab93fd9d8fcb96bda
|
[
"Apache-2.0"
] | null | null | null |
crawler/crawler2.py
|
labcontext/image-inpainting-oldpaper
|
da4683a2c58d662e443ea24ab93fd9d8fcb96bda
|
[
"Apache-2.0"
] | 3
|
2021-03-19T11:16:57.000Z
|
2022-01-13T02:18:17.000Z
|
crawler/crawler2.py
|
labcontext/image-inpainting-oldpaper
|
da4683a2c58d662e443ea24ab93fd9d8fcb96bda
|
[
"Apache-2.0"
] | null | null | null |
import requests
import urllib.request
import os
import pickle
import argparse
# URL template for images in the ITKC book image database
path = 'http://db.itkc.or.kr//data/imagedb/BOOK/ITKC_{0}/ITKC_{0}_{1}A/ITKC_{0}_{1}A_{2}{5}_{3}{4}.JPG'
# Manual
label = ['BT', 'MO']
middle = 1400
last = ['A', 'V'] # A ~400 V ~009
num = 10
num1 = 400
fin = ['A', 'B', 'H', 'L']
# file path, save path
# pad for number
def pad(num, width):
return '%0{}d'.format(width) % num
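# Quick sanity check of pad() -- zero-pads a number to a fixed width
# (runs at import time; purely illustrative):
assert pad(7, 3) == '007' and pad(42, 4) == '0042'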
def save_picture(file_name, save_dir):
return urllib.request.urlretrieve(file_name, save_dir)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-l', '--label', default='BT', type=str, help='BT, MO')
parser.add_argument('-f', '--fin', default='A', type=str, help='A,B,H,L')
opt = parser.parse_args()
# make directory
if not os.path.exists('oldDB'):
os.mkdir('oldDB')
if opt.label == 'BT':
for i in range(0, middle+1):
for k in range(num + 1):
for j in range(num1 + 1):
try:
p = path.format(opt.label, pad(i, 4),
'V', pad(j, 3), opt.fin, pad(k, 3))
print(p)
save_picture(p, './oldDB/{0}_{1}_{2}_{3}_{4}.jpg'.format(
opt.label, i, 'V', j, opt.fin))
except Exception as e:
print(str(e))
continue
elif opt.label == 'MO':
for i in range(0, middle+1):
for k in range(num1 + 1):
for j in range(num1 + 1):
try:
p = path.format(opt.label, pad(i, 4),
'A', pad(j, 3), opt.fin, pad(k, 3))
print(p)
save_picture(p, './oldDB/{0}_{1}_{2}_{3}_{4}.jpg'.format(
opt.label, i, 'A', j, opt.fin))
except Exception as e:
print(str(e))
continue
if __name__ == '__main__':
main()
| 26.275
| 103
| 0.460038
| 280
| 2,102
| 3.335714
| 0.325
| 0.051392
| 0.059957
| 0.038544
| 0.381156
| 0.381156
| 0.381156
| 0.381156
| 0.381156
| 0.381156
| 0
| 0.040738
| 0.381066
| 2,102
| 79
| 104
| 26.607595
| 0.677171
| 0.041865
| 0
| 0.352941
| 0
| 0.058824
| 0.114101
| 0.030892
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.098039
| 0.039216
| 0.196078
| 0.078431
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eeca3c40e6643d64e2cc7861e9484fa8ec9bd6f8
| 9,415
|
py
|
Python
|
main.py
|
Arnav-Ghatti/Tkinter-Money-Tracker
|
365dcafc78522d03062a8f062fa8167b9c015583
|
[
"MIT"
] | null | null | null |
main.py
|
Arnav-Ghatti/Tkinter-Money-Tracker
|
365dcafc78522d03062a8f062fa8167b9c015583
|
[
"MIT"
] | null | null | null |
main.py
|
Arnav-Ghatti/Tkinter-Money-Tracker
|
365dcafc78522d03062a8f062fa8167b9c015583
|
[
"MIT"
] | null | null | null |
import tkinter as tk
from tkinter import messagebox
import json
# Constants
FONT_NAME = "Open Sans"
BG_COLOR = "#f9f7f7"
FONT_COLOR = "#112d4e"
ACCENT = "#dbe2ef"
root = tk.Tk()
root.title("Money Tracker")
root.config(bg=BG_COLOR)
root.resizable(0, 0)
root.iconbitmap("C:\\Users\\ASUA\\Desktop\\Tests\\MoneyTransactionsOriginal\\money.ico")
transactions_history = {}
transactions = []
def set_listbox():
"""Refreshes the listbox"""
global listbox
listbox.delete(0, tk.END)
for item in transactions:
listbox.insert(tk.END, f"{item[0]} to {item[1]}, {clicked.get()}{item[2]}, {item[3]}")
def save_json(data):
"""Saves the date to C:\\Users\\ASUA\\Desktop\\Tests\\MoneyTransactionsOriginal\\history.json file"""
with open("C:\\Users\\ASUA\\Desktop\\Tests\\MoneyTransactionsOriginal\\history.json", "w") as file:
json.dump(transactions_history, file, indent=4)
def check_fields():
    # N.B. a Text widget always returns at least a trailing newline, so strip before testing
    if sender_input.get() == "" or reciever_input.get() == "" or desc_input.get("1.0", tk.END).strip() == "":
        return False
    return True
def clear_fields():
sender_input.delete(0, tk.END)
reciever_input.delete(0, tk.END)
amount_input.delete(0, tk.END)
desc_input.delete("1.0", tk.END)
def add_transactions():
"""Adds transactios to the listbox"""
try:
        int(amount_input.get())  # validate that the amount is numeric
except ValueError:
messagebox.showwarning(title="❌ Error ❌", message="Please enter only numbers in amount field")
return
if check_fields():
transactions.append([sender_input.get(), reciever_input.get(), amount_input.get(), desc_input.get("1.0", tk.END)])
transactions_history["Transactions"] = transactions
clear_fields()
save_json(transactions_history)
set_listbox()
else:
messagebox.showwarning(title="❌ Error ❌", message="Please do not leave any fields empty")
def delete_transaction():
"""Deletes transactions from the listbox"""
try:
del transactions[listbox.curselection()[0]]
except IndexError:
messagebox.showwarning(title="❌ Error ❌", message="Please select any item")
else:
transactions_history["Transactions"] = transactions
save_json(transactions_history)
set_listbox()
def load_transactions():
"""Loads data of transactions from the selected item in the listbox"""
try:
selected_idx = listbox.curselection()[0]
selected_item = transactions[selected_idx]
except IndexError:
messagebox.showwarning(title="❌ Error ❌", message="Please select any item")
else:
sender_var.set(selected_item[0])
reciever_var.set(selected_item[1])
amount_var.set(selected_item[2])
desc_input.delete("1.0", tk.END)
desc_input.insert(tk.END, selected_item[3])
def update_transactions():
"""Updates selected transaction to the details newly entered"""
if check_fields():
try:
transactions[listbox.curselection()[0]] = [sender_var.get(), reciever_var.get(), amount_var.get(), desc_input.get("1.0", tk.END)]
except IndexError:
messagebox.showwarning(title="❌ Error ❌", message="Please select any item")
else:
transactions_history["Transactions"] = transactions
save_json(transactions_history)
set_listbox()
else:
messagebox.showwarning(title="❌ Error ❌", message="Please do not leave any fields empty")
# Title
title = tk.Label(root, text="Money Tracker", font=(FONT_NAME, 15, "bold"), bg=BG_COLOR, highlightthickness=0, fg=FONT_COLOR)
title.grid(row=0, column=0, columnspan=2, pady=3)
# ---------------------------- ENTRIES AND LABELS ------------------------------- #
input_frame = tk.Frame(root, bg=BG_COLOR, highlightthickness=0)
input_frame.grid(row=1, column=0, sticky="N", padx=5)
# Sender
sender_label = tk.Label(input_frame, text="Sender: ", font=(FONT_NAME, 12, "normal"), bg=BG_COLOR, fg=FONT_COLOR, highlightthickness=0)
sender_label.grid(row=0, column=0, sticky="W", pady=5)
sender_var = tk.StringVar()
sender_input = tk.Entry(input_frame, textvariable=sender_var, width=36, font=(FONT_NAME, 12, "normal"), bg=ACCENT, fg=FONT_COLOR, highlightthickness=0, bd=0)
sender_input.focus()
sender_input.grid(row=0, column=1, sticky="W", pady=5, padx=10, columnspan=2)
# Reciever
reciever_label = tk.Label(input_frame, text="Reciever: ", font=(FONT_NAME, 12, "normal"), bg=BG_COLOR, fg=FONT_COLOR, highlightthickness=0)
reciever_label.grid(row=1, column=0, sticky="W", pady=5)
reciever_var = tk.StringVar()
reciever_input = tk.Entry(input_frame, textvariable=reciever_var, width=36, font=(FONT_NAME, 12, "normal"), bg=ACCENT, fg=FONT_COLOR, highlightthickness=0, bd=0)
reciever_input.grid(row=1, column=1, sticky="W", pady=5, padx=10, columnspan=2)
# Amount
amount_label = tk.Label(input_frame, text="Amount: ", font=(FONT_NAME, 12, "normal"), bg=BG_COLOR, fg=FONT_COLOR, highlightthickness=0)
amount_label.grid(row=2, column=0, sticky="W", pady=5)
amount_var = tk.StringVar()
amount_input = tk.Entry(input_frame, textvariable=amount_var, width=27, font=(FONT_NAME, 12, "normal"), bg=ACCENT, fg=FONT_COLOR, highlightthickness=0, bd=0)
amount_input.grid(row=2, column=1, sticky="W", pady=5, padx=10)
# Description
desc_label = tk.Label(input_frame, text="Description: ", font=(FONT_NAME, 12, "normal"), bg=BG_COLOR, fg=FONT_COLOR, highlightthickness=0, bd=0)
desc_label.grid(row=3, column=0, sticky="N", pady=5)
desc_input = tk.Text(input_frame, width=36, height=12, font=(FONT_NAME, 12, "normal"), bg=ACCENT, fg=FONT_COLOR, highlightthickness=0, bd=0)
desc_input.grid(row=3, column=1, sticky="W", pady=5, padx=10, columnspan=2)
currencies = [
"$",
"₹",
"€",
"£",
"¥"
]
clicked = tk.StringVar()
clicked.set("$")
currency = tk.OptionMenu(input_frame, clicked, *currencies)
currency.config(bg=ACCENT, fg=FONT_COLOR, bd=0, highlightthickness=0, font=(FONT_NAME, 10, "normal"))
currency["menu"].config(bg=ACCENT, fg=FONT_COLOR, bd=0, font=(FONT_NAME, 10, "normal"))
currency.grid(row=2, column=2)
# ---------------------------- BUTTONS ------------------------------- #
btn_frame = tk.Frame(root, bg=BG_COLOR, highlightthickness=0)
btn_frame.grid(row=2, column=0, padx=5, pady=5, sticky="N")
# Add
add_btn= tk.Button(btn_frame, text=" Add ", command=add_transactions, font=(FONT_NAME, 11, "normal"), bg=ACCENT, fg=FONT_COLOR, highlightthickness=0, bd=0)
add_btn.pack(side=tk.LEFT, padx=5, pady=5)
# Update
update_btn = tk.Button(btn_frame, text=" Update ", command=update_transactions, font=(FONT_NAME, 11, "normal"), bg=ACCENT, fg=FONT_COLOR, highlightthickness=0, bd=0)
update_btn.pack(side=tk.LEFT, padx=5, pady=5)
# Delete
del_btn = tk.Button(btn_frame, text=" Delete ", command=delete_transaction, font=(FONT_NAME, 11, "normal"), bg=ACCENT, fg=FONT_COLOR, highlightthickness=0, bd=0)
del_btn.pack(side=tk.LEFT, padx=5, pady=5)
# Load
load_btn = tk.Button(btn_frame, text=" Load ", command=load_transactions, font=(FONT_NAME, 11, "normal"), bg=ACCENT, fg=FONT_COLOR, highlightthickness=0, bd=0)
load_btn.pack(side=tk.LEFT, padx=5, pady=5)
# Refresh
refresh_btn = tk.Button(btn_frame, text=" Refresh ", command=set_listbox, font=(FONT_NAME, 11, "normal"), bg=ACCENT, fg=FONT_COLOR, highlightthickness=0, bd=0)
refresh_btn.pack(side=tk.LEFT, padx=5, pady=5)
# ---------------------------- LISTBOX ------------------------------- #
data_frame = tk.Frame(root, bg=ACCENT, highlightthickness=0)
data_frame.grid(row=1, column=1, rowspan=2)
# Scroll Bars
scroll_bar_y = tk.Scrollbar(data_frame, orient=tk.VERTICAL)
scroll_bar_x = tk.Scrollbar(data_frame, orient=tk.HORIZONTAL)
# Listbox
listbox = tk.Listbox(data_frame, height=18, width=50, yscrollcommand=scroll_bar_y.set, xscrollcommand=scroll_bar_x.set, font=(FONT_NAME, 12, "normal"), bg=ACCENT, fg=FONT_COLOR, highlightthickness=0, bd=0)
# Scroll Bars
scroll_bar_y.config(command=listbox.yview)
scroll_bar_y.pack(side=tk.RIGHT, fill=tk.Y)
scroll_bar_x.config(command=listbox.xview)
scroll_bar_x.pack(side=tk.BOTTOM, fill=tk.X)
listbox.pack(side=tk.LEFT, fill=tk.BOTH, expand=1)
# ---------------------------- STATUS BAR ------------------------------- #
status_frame = tk.LabelFrame(root, bd=0, relief=tk.SUNKEN, bg="#3f72af", highlightthickness=0)
status_frame.grid(sticky=tk.N+tk.S+tk.E+tk.W, columnspan=2)
# Made By
made_by = tk.Label(status_frame, text="Made By Arnav Ghatti", anchor=tk.E, font=(FONT_NAME, 9, "normal"), bg="#3f72af", highlightthickness=0, fg=BG_COLOR)
made_by.pack(side=tk.RIGHT, fill=tk.BOTH, expand=1)
# Version
version_label = tk.Label(status_frame, text="Version: 2.5.3", anchor=tk.W, font=(FONT_NAME, 9, "normal"), bg="#3f72af", highlightthickness=0, fg=BG_COLOR)
version_label.pack(side=tk.LEFT, fill=tk.BOTH, expand=1)
def load_data():
"""Loads data from the C:\\Users\\ASUA\\Desktop\\Tests\\MoneyTransactionsOriginal\\history.json file to the listbox"""
global transactions, listbox
with open("C:\\Users\\ASUA\\Desktop\\Tests\\MoneyTransactionsOriginal\\history.json", "r") as file:
transaction_history = json.load(file)
transactions = transaction_history["Transactions"]
listbox.delete(0, tk.END)
for item in transactions:
listbox.insert(tk.END, f"{item[0]} to {item[1]}, ${item[2]}, {item[3]}")
load_data()
root.mainloop()
| 40.235043
| 205
| 0.683696
| 1,355
| 9,415
| 4.620664
| 0.149816
| 0.066763
| 0.036416
| 0.064846
| 0.562051
| 0.51733
| 0.429165
| 0.409998
| 0.38093
| 0.315605
| 0
| 0.025148
| 0.138396
| 9,415
| 233
| 206
| 40.407725
| 0.744699
| 0.088794
| 0
| 0.222222
| 0
| 0.013072
| 0.111542
| 0.027974
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.019608
| 0
| 0.098039
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eeca73f0a33396739525615f94801665b147bf27
| 12,725
|
py
|
Python
|
empire_cellular_automaton/dataset_processing.py
|
ThomasMiller01/ProofOfConcept
|
021bf29743309224628682d0f82b0be80ae83c95
|
[
"MIT"
] | 1
|
2019-12-18T13:49:22.000Z
|
2019-12-18T13:49:22.000Z
|
empire_cellular_automaton/dataset_processing.py
|
ThomasMiller01/Experiments
|
021bf29743309224628682d0f82b0be80ae83c95
|
[
"MIT"
] | null | null | null |
empire_cellular_automaton/dataset_processing.py
|
ThomasMiller01/Experiments
|
021bf29743309224628682d0f82b0be80ae83c95
|
[
"MIT"
] | 1
|
2021-08-29T09:22:52.000Z
|
2021-08-29T09:22:52.000Z
|
import json
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import os
import time
def people_distribution_map(data, file):
unique, indices, counts = np.unique(
data[:, 0], return_index=True, return_counts=True)
generations = list(zip(unique, indices, counts))
plt_size_x = int(np.ceil(np.sqrt(len(generations))))
plt_size_y = int(np.ceil(np.sqrt(len(generations)) - 0.5))
fig, axs = plt.subplots(plt_size_x, plt_size_y, figsize=(10, 10))
fig.suptitle("people distribution", fontsize=10)
fig.tight_layout(pad=3.0)
i = 0
s_all = ()
s_mapped_all = None
for ax_s in axs:
for ax in ax_s:
if i < len(generations):
gen = generations[i]
minified_data = data[gen[1]:gen[1] + gen[2]]
all_people = np.zeros((0, 2)).astype('int')
for day in minified_data[:, 2]:
for person in day:
if person[5] != 0:
all_people = np.append(all_people, np.asarray(
[[person[6], person[7]]]), axis=0)
unique, counts = np.unique(
all_people, return_counts=True, axis=0)
x, y = zip(*unique)
if not s_all:
s_all = (counts.min(), counts.max())
s_mapped_all = np.interp(
counts, (s_all[0], s_all[1]), (0, 100))
s_mapped = np.interp(
counts, (counts.min(), counts.max()), (0, 100))
color_palett = [
'#d3ae1b', '#de6e3b', '#b54d47', '#8e321e', '#522a1a']
color_ranges = np.arange(
s_mapped_all.min(), s_mapped_all.max(), (s_mapped_all.max() - s_mapped_all.min()) / len(color_palett))
color_indices = [np.where(n < color_ranges)[0]
for n in s_mapped]
colors = [color_palett[c[0]] if c.size != 0 else color_palett[len(
color_palett) - 1] for c in color_indices]
img = plt.imread("map.jpg")
ax.scatter(x, y, s=s_mapped, c=colors)
ax_xlim = ax.get_xlim()
ax_ylim = ax.get_ylim()
ax.imshow(img, origin="lower")
ax.set_xlim(ax_xlim)
ax.set_ylim(ax_ylim[::-1])
ax.set(title="gen " + str(gen[0]))
i += 1
plt.savefig(file)
plt.close(fig=fig)
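# The scatter sizes above come from rescaling raw location counts into a 0-100
# marker-size range with np.interp; a standalone sketch of that mapping
# (illustrative values only):
def _demo_size_mapping():
    counts = np.array([1, 4, 7, 10])
    sizes = np.interp(counts, (counts.min(), counts.max()), (0, 100))
    print(sizes)  # [0., 33.33..., 66.66..., 100.]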
def kind_of_disease_per_generation(data, file):
unique, indices, counts = np.unique(
data[:, 0], return_index=True, return_counts=True)
generations = list(zip(unique, indices, counts))
plt_size_x = int(np.ceil(np.sqrt(len(generations))))
plt_size_y = int(np.ceil(np.sqrt(len(generations)) - 0.5))
fig, axs = plt.subplots(plt_size_x, plt_size_y, figsize=(10, 10))
fig.suptitle("kind of disease", fontsize=16)
fig.tight_layout(pad=3.0)
i = 0
for ax_s in axs:
for ax in ax_s:
if i < len(generations):
gen = generations[i]
minified_data = data[gen[1]:gen[1] + gen[2]]
all_diseased_people = np.zeros((0, 2)).astype('int')
for day in minified_data[:, 2]:
for person in day:
if person[5] != 0:
all_diseased_people = np.append(all_diseased_people, np.asarray(
[[person[0], person[5]]]), axis=0)
disease_all = np.zeros((0, 2)).astype('int')
for disease_kind in np.unique(all_diseased_people[:, 1]):
people_disease_kind = all_diseased_people[np.where(
all_diseased_people[:, 1] == disease_kind)[0]]
                    unique_disease_kind, counts_disease_kind = np.unique(
                        people_disease_kind[:, 0], return_counts=True)
disease_all = np.append(disease_all, np.asarray(
[[disease_kind, len(unique_disease_kind)]]), axis=0)
x = np.arange(0, len(disease_all))
y = disease_all[:, 1]
ax.bar(x, y)
ax.set_xticks(x)
ax.set_yticks(y)
ax.set_xticklabels(disease_all[:, 0])
ax.set(title="gen " + str(gen[0]))
i += 1
plt.savefig(file)
plt.close(fig=fig)
def strength_distribution_per_generation(data, file):
unique, indices, counts = np.unique(
data[:, 0], return_index=True, return_counts=True)
generations = list(zip(unique, indices, counts))
plt_size_x = int(np.ceil(np.sqrt(len(generations))))
plt_size_y = int(np.ceil(np.sqrt(len(generations)) - 0.5))
fig, axs = plt.subplots(plt_size_x, plt_size_y, figsize=(10, 10))
fig.tight_layout(pad=3.0)
fig.suptitle("strength distribution", fontsize=12)
i = 0
for ax_s in axs:
for ax in ax_s:
if i < len(generations):
gen = generations[i]
minified_data = data[gen[1]:gen[1] + gen[2]]
x = np.arange(0, 100)
y = np.zeros(100)
for strength in minified_data[:, 2][len(minified_data) - 1][:, 3]:
y[int(np.ceil(strength)) - 1] += 1
coeffs = np.polyfit(x, y, 3)
poly_eqn = np.poly1d(coeffs)
y_hat = poly_eqn(x)
ax.plot(x, y)
ax.plot(x, y_hat, label="average", c='r')
ax.set(xlabel='strength', ylabel='people',
title="gen " + str(gen[0]))
ax.grid()
i += 1
plt.savefig(file)
plt.close(fig=fig)
def age_distribution_per_generation(data, file):
unique, indices, counts = np.unique(
data[:, 0], return_index=True, return_counts=True)
generations = list(zip(unique, indices, counts))
plt_size_x = int(np.ceil(np.sqrt(len(generations))))
plt_size_y = int(np.ceil(np.sqrt(len(generations)) - 0.5))
fig, axs = plt.subplots(plt_size_x, plt_size_y, figsize=(10, 10))
fig.tight_layout(pad=3.0)
fig.suptitle("age distribution", fontsize=16)
i = 0
for ax_s in axs:
for ax in ax_s:
if i < len(unique):
gen = generations[i]
minified_data = data[gen[1]:gen[1] + gen[2]]
x = np.arange(0, 100)
y = np.zeros(100)
for age in minified_data[:, 2][len(minified_data) - 1][:, 2]:
if age > 100:
age = 100
y[int(np.ceil(age)) - 1] += 1
coeffs = np.polyfit(x, y, 3)
poly_eqn = np.poly1d(coeffs)
y_hat = poly_eqn(x)
ax.plot(x, y)
ax.plot(x, y_hat, label="average", c='r')
ax.set(xlabel='age', ylabel='people',
title="gen " + str(gen[0]))
ax.grid()
i += 1
plt.savefig(file)
plt.close(fig=fig)
def disease_over_time(data, file):
fig, ax = plt.subplots()
unique, indices, counts = np.unique(
data[:, 0], return_index=True, return_counts=True)
for gen in zip(unique, indices, counts):
minified_data = data[gen[1]:gen[1] + gen[2]]
x = np.arange(0, len(minified_data))
y = np.asarray([np.sum(x) for x in [a[:, 5]
for a in minified_data[:, 2]]])
ax.plot(x, y, label=gen[0])
ax.set(xlabel='days', ylabel='disease',
title='disease over time')
ax.grid()
plt.legend(loc="best", title="generation")
plt.savefig(file)
plt.close(fig=fig)
def avg_reproductionValue_over_time(data, file, settings):
fig, ax = plt.subplots()
unique, indices, counts = np.unique(
data[:, 0], return_index=True, return_counts=True)
for gen in zip(unique, indices, counts):
minified_data = data[gen[1]:gen[1] + gen[2]]
x = np.arange(0, len(minified_data))
y = np.asarray(np.asarray([np.average(a[:, 4])
for a in minified_data[:, 2]]))
ax.plot(x, y, label=gen[0])
ax.axhline(settings['p_reproductionThreshold'],
c='r', linestyle=':', label='rT')
ax.set(xlabel='days', ylabel='reproductionValue',
title='avg reproductionValue over time')
ax.grid()
plt.legend(loc="best", title="generation")
plt.savefig(file)
plt.close(fig=fig)
def avg_age_over_time(data, file):
fig, ax = plt.subplots()
unique, indices, counts = np.unique(
data[:, 0], return_index=True, return_counts=True)
for gen in zip(unique, indices, counts):
minified_data = data[gen[1]:gen[1] + gen[2]]
x = np.arange(0, len(minified_data))
y = np.asarray(np.asarray([np.average(a[:, 2])
for a in minified_data[:, 2]]))
ax.plot(x, y, label=gen[0])
ax.set(xlabel='days', ylabel='age',
title='avg age over time')
ax.grid()
plt.legend(loc="best", title="generation")
plt.savefig(file)
plt.close(fig=fig)
def population_over_time(data, file):
fig, ax = plt.subplots()
unique, indices, counts = np.unique(
data[:, 0], return_index=True, return_counts=True)
for gen in zip(unique, indices, counts):
minified_data = data[gen[1]:gen[1] + gen[2]]
x = np.arange(0, len(minified_data))
y = np.asarray([len(a) for a in minified_data[:, 2]])
ax.plot(x, y, label=gen[0])
ax.set(xlabel='days', ylabel='population',
title='population over time')
ax.grid()
plt.legend(loc="best", title="generation")
plt.savefig(file)
plt.close(fig=fig)
def save_figs(dataset_name):
start_all = time.time()
print("------")
print("saving " + dataset_name)
file_name = './datasets/' + dataset_name
print("loading data ...")
start = time.time()
# load data
with open(file_name + '/' + dataset_name + '_settings.json') as json_file:
settings = json.load(json_file)
data = np.load(file_name + '/' + dataset_name +
'_data.npy', allow_pickle=True)
end = time.time()
print("data loaded in " + str(round(end - start, 2)) + "s")
print("***")
start = time.time()
print("saving pdfs ...")
# save as pdf
    os.makedirs(file_name + "/pdf", exist_ok=True)
population_over_time(data, file_name + "/pdf/population_over_time.pdf")
avg_age_over_time(data, file_name + "/pdf/avg_age_over_time.pdf")
avg_reproductionValue_over_time(
data, file_name + "/pdf/avg_reproductionValue_over_time.pdf", settings)
disease_over_time(data, file_name + "/pdf/disease_over_time.pdf")
age_distribution_per_generation(
data, file_name + "/pdf/age_distribution_per_generation.pdf")
strength_distribution_per_generation(
data, file_name + "/pdf/strength_distribution_per_generation.pdf")
kind_of_disease_per_generation(
data, file_name + "/pdf/kind_of_disease.pdf")
people_distribution_map(
data, file_name + "/pdf/people_distribution_map.pdf")
end = time.time()
print("pdfs saved in " + str(round(end - start, 2)) + "s")
print("***")
print("saving pngs ...")
start = time.time()
# save as png
    os.makedirs(file_name + "/png", exist_ok=True)
population_over_time(data, file_name + "/png/population_over_time.png")
avg_age_over_time(data, file_name + "/png/avg_age_over_time.png")
avg_reproductionValue_over_time(
data, file_name + "/png/avg_reproductionValue_over_time.png", settings)
disease_over_time(data, file_name + "/png/disease_over_time.png")
age_distribution_per_generation(
data, file_name + "/png/age_distribution_per_generation.png")
strength_distribution_per_generation(
data, file_name + "/png/strength_distribution_per_generation.png")
kind_of_disease_per_generation(
data, file_name + "/png/kind_of_disease.png")
people_distribution_map(
data, file_name + "/png/people_distribution_map.png")
end = time.time()
print("pngs saved in " + str(round(end - start, 2)) + "s")
print("***")
end_all = time.time()
print("- " + dataset_name + " saved")
print("- time elapsed: " + str(round(end_all - start_all, 2)) + "s")
print("------")
if __name__ == "__main__":
for directory in os.listdir('./datasets'):
if "example" not in directory:
save_figs(directory)
print("creating statistics done")
| 37.985075
| 122
| 0.558428
| 1,697
| 12,725
| 3.998821
| 0.10607
| 0.028294
| 0.044798
| 0.028294
| 0.683319
| 0.62791
| 0.608606
| 0.537872
| 0.496169
| 0.487327
| 0
| 0.021815
| 0.301139
| 12,725
| 334
| 123
| 38.098802
| 0.741257
| 0.002593
| 0
| 0.558621
| 0
| 0
| 0.091819
| 0.043112
| 0
| 0
| 0
| 0
| 0
| 1
| 0.031034
| false
| 0.006897
| 0.02069
| 0
| 0.051724
| 0.051724
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eed48753201aaf2076987680b987b0334df7af1f
| 4,653
|
py
|
Python
|
cliff/lister.py
|
tivaliy/cliff
|
a04a48f4f7dc72b1bcc95a5c6a550c7650e35ab3
|
[
"Apache-2.0"
] | 187
|
2015-01-13T04:07:41.000Z
|
2022-03-10T14:12:27.000Z
|
cliff/lister.py
|
tivaliy/cliff
|
a04a48f4f7dc72b1bcc95a5c6a550c7650e35ab3
|
[
"Apache-2.0"
] | 3
|
2016-01-05T20:52:55.000Z
|
2020-10-01T06:16:58.000Z
|
cliff/lister.py
|
tivaliy/cliff
|
a04a48f4f7dc72b1bcc95a5c6a550c7650e35ab3
|
[
"Apache-2.0"
] | 69
|
2015-02-01T01:28:37.000Z
|
2021-11-15T08:28:53.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Application base class for providing a list of data as output."""
import abc
import logging
from . import display
class Lister(display.DisplayCommandBase, metaclass=abc.ABCMeta):
"""Command base class for providing a list of data as output."""
log = logging.getLogger(__name__)
@property
def formatter_namespace(self):
return 'cliff.formatter.list'
@property
def formatter_default(self):
return 'table'
@property
def need_sort_by_cliff(self):
"""Whether sort procedure is performed by cliff itself.
Should be overridden (return False) when there is a need to implement
custom sorting procedure or data is already sorted.
"""
return True
@abc.abstractmethod
def take_action(self, parsed_args):
"""Run command.
Return a tuple containing the column names and an iterable containing
the data to be listed.
"""
def get_parser(self, prog_name):
parser = super(Lister, self).get_parser(prog_name)
group = self._formatter_group
group.add_argument(
'--sort-column',
action='append',
default=[],
dest='sort_columns',
metavar='SORT_COLUMN',
help=(
'specify the column(s) to sort the data (columns specified '
'first have a priority, non-existing columns are ignored), '
'can be repeated'
),
)
sort_dir_group = group.add_mutually_exclusive_group()
sort_dir_group.add_argument(
'--sort-ascending',
action='store_const',
dest='sort_direction',
const='asc',
help=('sort the column(s) in ascending order'),
)
sort_dir_group.add_argument(
'--sort-descending',
action='store_const',
dest='sort_direction',
const='desc',
help=('sort the column(s) in descending order'),
)
return parser
def produce_output(self, parsed_args, column_names, data):
if parsed_args.sort_columns and self.need_sort_by_cliff:
indexes = [
column_names.index(c) for c in parsed_args.sort_columns
if c in column_names
]
reverse = parsed_args.sort_direction == 'desc'
for index in indexes[::-1]:
try:
# We need to handle unset values (i.e. None) so we sort on
# multiple conditions: the first comparing the results of
# an 'is None' type check and the second comparing the
# actual value. The second condition will only be checked
# if the first returns True, which only happens if the
# returns from the 'is None' check on the two values are
# the same, i.e. both None or both not-None
data = sorted(
data, key=lambda k: (k[index] is None, k[index]),
reverse=reverse,
)
except TypeError:
# Simply log and then ignore this; sorting is best effort
self.log.warning(
"Could not sort on field '%s'; unsortable types",
parsed_args.sort_columns[index],
)
columns_to_include, selector = self._generate_columns_and_selector(
parsed_args, column_names,
)
if selector:
# Generator expression to only return the parts of a row
# of data that the user has expressed interest in
# seeing. We have to convert the compress() output to a
# list so the table formatter can ask for its length.
data = (
list(self._compress_iterable(row, selector)) for row in data
)
self.formatter.emit_list(
columns_to_include, data, self.app.stdout, parsed_args,
)
return 0
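# A standalone sketch of the None-safe sort key used in produce_output above:
# sorting on the tuple (value is None, value) groups unset values after real
# ones in ascending order, without ever comparing None against another type
# (illustrative only):
def _demo_none_safe_sort():
    rows = [('b',), (None,), ('a',)]
    print(sorted(rows, key=lambda k: (k[0] is None, k[0])))
    # -> [('a',), ('b',), (None,)]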
| 36.637795
| 78
| 0.585214
| 558
| 4,653
| 4.763441
| 0.397849
| 0.030098
| 0.021068
| 0.022573
| 0.094056
| 0.094056
| 0.058691
| 0.030098
| 0.030098
| 0.030098
| 0
| 0.001959
| 0.341715
| 4,653
| 126
| 79
| 36.928571
| 0.865818
| 0.338276
| 0
| 0.118421
| 0
| 0
| 0.138312
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.078947
| false
| 0
| 0.039474
| 0.026316
| 0.210526
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eed5699e06d3cac61b4a945b53a1004046c608f3
| 1,026
|
py
|
Python
|
task3/task3.py
|
ksmirenko/ml-homework
|
a5e558352ffc332ad5e40526dda21f205718a203
|
[
"MIT"
] | 1
|
2020-08-05T08:06:33.000Z
|
2020-08-05T08:06:33.000Z
|
task3/task3.py
|
ksmirenko/ml-homework
|
a5e558352ffc332ad5e40526dda21f205718a203
|
[
"MIT"
] | null | null | null |
task3/task3.py
|
ksmirenko/ml-homework
|
a5e558352ffc332ad5e40526dda21f205718a203
|
[
"MIT"
] | null | null | null |
from PIL import Image
import numpy as np
# Works when launched from terminal
# noinspection PyUnresolvedReferences
from k_means import k_means
input_image_file = 'lena.jpg'
output_image_prefix = 'out_lena'
n_clusters = [2, 3, 5]
max_iterations = 100
launch_count = 3
def main():
# Read input image
image = np.array(Image.open(input_image_file))
X = image.reshape((image.shape[0] * image.shape[1], image.shape[2]))
for k in n_clusters:
print(f"{k} clusters")
# 'Compress' image using K-means
centroids, clustered = k_means(X, k=k, max_iterations=max_iterations, launch_count=launch_count)
new_X = np.array([centroids[cluster_index] for cluster_index in clustered])
new_X = new_X.astype(np.uint8)
# Write output image
new_image = new_X.reshape(image.shape)
output_image_name = f"{output_image_prefix}_{k}.jpg"
Image.fromarray(new_image).save(output_image_name)
print(f"Saved {output_image_name}")
print("Done.")
main()
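# Standalone sketch of the reshape/quantize round-trip used above, with
# hand-picked centroids standing in for the local k_means module
# (illustrative only):
def _demo_quantize_roundtrip():
    img = np.random.randint(0, 256, size=(4, 4, 3), dtype=np.uint8)
    pixels = img.reshape(-1, 3).astype(float)
    centroids = np.array([[0, 0, 0], [255, 255, 255]], dtype=float)
    # assign each pixel to its nearest centroid (squared Euclidean distance)
    labels = ((pixels[:, None, :] - centroids[None, :, :]) ** 2).sum(-1).argmin(axis=1)
    out = centroids[labels].astype(np.uint8).reshape(img.shape)
    print(out.shape)  # (4, 4, 3), every pixel snapped to a centroid colour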
| 27.72973
| 104
| 0.692008
| 151
| 1,026
| 4.470199
| 0.403974
| 0.097778
| 0.066667
| 0.059259
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013398
| 0.199805
| 1,026
| 36
| 105
| 28.5
| 0.80877
| 0.132554
| 0
| 0
| 0
| 0
| 0.098416
| 0.032805
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.136364
| 0
| 0.181818
| 0.136364
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eed63ef06321c79002e85fdaeb08205c4299ea39
| 3,389
|
py
|
Python
|
dcrnn_train.py
|
syin3/cs224w-traffic
|
284836b49404bfd38ae23b31f89f8e617548e286
|
[
"MIT"
] | 9
|
2019-03-20T01:02:07.000Z
|
2020-11-25T06:45:30.000Z
|
dcrnn_train.py
|
syin3/cs224w-traffic
|
284836b49404bfd38ae23b31f89f8e617548e286
|
[
"MIT"
] | null | null | null |
dcrnn_train.py
|
syin3/cs224w-traffic
|
284836b49404bfd38ae23b31f89f8e617548e286
|
[
"MIT"
] | 2
|
2020-09-24T07:03:58.000Z
|
2020-11-09T04:43:03.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import tensorflow as tf
import yaml
from model.dcrnn_supervisor import DCRNNSupervisor
def main(args):
    with open(args.config_filename) as f:
        supervisor_config = yaml.load(f, Loader=yaml.FullLoader)
tf_config = tf.ConfigProto()
# if args.use_cpu_only:
# tf_config = tf.ConfigProto(device_count={'GPU': 0})
tf_config.gpu_options.allow_growth = True
with tf.Session(config=tf_config) as sess:
supervisor = DCRNNSupervisor(**supervisor_config)
supervisor.train(sess=sess)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--config_filename', required=True, default=None, type=str, help='Configuration filename for restoring the model.')
parser.add_argument('--use_cpu_only', default=False, type=bool, help='Set true to only use cpu.')
# adjacent and distance-weighted
parser.add_argument('--weightType', required=True, choices=['a', 'd'], help='w/ or w/o distance pre-processing')
parser.add_argument('--att', dest='attention', action='store_true', help='Call this command to raise attention mechanism in the training.')
parser.add_argument('--no-att', dest='attention', action='store_false', help='Call this command not to raise attention mechanism in the training.')
parser.set_defaults(attention=False)
subparsers = parser.add_subparsers()
fullyConnectParser = subparsers.add_parser('fc', help='In fully connect mode, choose embed file')
fullyConnectParser.add_argument('--gEmbedFile', required=True, default='LA-n2v-14-0.1-1', help='Embedding file for n2v, should add up-directory when calling')
fullyConnectParser.add_argument('--network', nargs='?', const='fc', default='fc', help='To store the choice of fully connected')
graphConvParser = subparsers.add_parser('graphConv', help='In graph conv mode, choose W matrix form')
graphConvParser.add_argument('--hop', required=True, type=int, default=2,
help='k-hop neighbors, default is 2 for distance-processed matrix; but must be one for binary matrix')
graphConvParser.add_argument('--network', nargs='?', const='gconv', default='gconv', help='To store the choice of gconv')
args = parser.parse_args()
    with open(args.config_filename) as f:
        doc = yaml.load(f, Loader=yaml.FullLoader)
# default batch sizes to 64, in training, validation and in testing
doc['data']['batch_size'] = 64
doc['data']['test_batch_size'] = 64
doc['data']['val_batch_size'] = 64
# set matrix to adjacency or distance-weighted
if args.weightType == 'd':
doc['data']['graph_pkl_filename'] = "data/sensor_graph/adj_mx_la.pkl"
else:
doc['data']['graph_pkl_filename'] = "data/sensor_graph/adj_bin_la.pkl"
# record necessary info to log
doc['model']['weightMatrix'] = args.weightType
doc['model']['attention'] = args.attention
doc['model']['network'] = args.network
if 'gEmbedFile' in vars(args):
doc['model']['graphEmbedFile'] = args.gEmbedFile
doc['model']['max_diffusion_step'] = 0
if 'hop' in vars(args):
doc['model']['max_diffusion_step'] = args.hop
# save the info
with open(args.config_filename, 'w') as f:
yaml.dump(doc, f)
main(args)
| 42.3625
| 162
| 0.689584
| 452
| 3,389
| 5.004425
| 0.360619
| 0.043767
| 0.037577
| 0.023873
| 0.233422
| 0.123784
| 0.104332
| 0.104332
| 0.036251
| 0
| 0
| 0.006832
| 0.179404
| 3,389
| 79
| 163
| 42.898734
| 0.806544
| 0.077309
| 0
| 0.038462
| 0
| 0.019231
| 0.319974
| 0.020199
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019231
| false
| 0
| 0.134615
| 0
| 0.153846
| 0.019231
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eed698cee32da7af7d7cb366130b591986c4feae
| 1,035
|
py
|
Python
|
train.py
|
k2sebeom/DeepLOLCourt
|
630f1eee1729c06f686abc7c2a7ecbdfe66803b3
|
[
"MIT"
] | null | null | null |
train.py
|
k2sebeom/DeepLOLCourt
|
630f1eee1729c06f686abc7c2a7ecbdfe66803b3
|
[
"MIT"
] | null | null | null |
train.py
|
k2sebeom/DeepLOLCourt
|
630f1eee1729c06f686abc7c2a7ecbdfe66803b3
|
[
"MIT"
] | null | null | null |
import torch.optim as optim
from torch import nn
from data.match_dataset import MatchDataset
from torch.utils.data import DataLoader
from models.lol_result_model import LOLResultModel
import torch
if __name__ == '__main__':
EPOCH = 50
BATCH_SIZE = 32
loader = DataLoader(MatchDataset('dataset/train_data.csv'), BATCH_SIZE)
print("Dataset Loaded")
loss_criterion = nn.BCELoss()
device = torch.device('cuda:0')
model = LOLResultModel(190)
print("Model created")
optimizer = optim.Adam(model.parameters(), lr=0.0001)
model.to(device)
for epoch in range(EPOCH):
loss_data = 0
for i, data in enumerate(loader):
output = model(data['x'].to(device))
loss = loss_criterion(output, data['y'].unsqueeze(1).float().to(device))
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_data = loss.data
print(f'Epoch {epoch}: {loss_data}')
torch.save(model.state_dict(), 'checkpoints/model.pth')
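# Hedged inference sketch: reload the checkpoint saved above and score one
# (dummy) match; this assumes LOLResultModel's forward takes a float feature
# tensor of width 190, matching the training loop:
def _demo_load_checkpoint():
    model = LOLResultModel(190)
    model.load_state_dict(torch.load('checkpoints/model.pth', map_location='cpu'))
    model.eval()
    with torch.no_grad():
        prob = model(torch.zeros(1, 190))
    print(prob)  # predicted win probability from the BCE-trained head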
| 30.441176
| 84
| 0.656039
| 132
| 1,035
| 4.984848
| 0.492424
| 0.048632
| 0.039514
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018703
| 0.225121
| 1,035
| 33
| 85
| 31.363636
| 0.801746
| 0
| 0
| 0
| 0
| 0
| 0.108213
| 0.041546
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.214286
| 0
| 0.214286
| 0.107143
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|