| Column | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
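The records that follow list their values in this column order, with the `content` field holding the full source file. As a minimal sketch of how such records might be consumed — assuming they are exposed through the Hugging Face `datasets` library; the dataset path and the filter thresholds below are hypothetical, not values taken from this table — a few of the quality-signal columns can be used to screen rows:

```python
# Minimal sketch: screen records with the schema above by a few quality signals.
# "user/python-code-quality" is a placeholder dataset path, and the thresholds
# are illustrative only.
from datasets import load_dataset

ds = load_dataset("user/python-code-quality", split="train", streaming=True)

def looks_clean(row):
    # Keep files that are mostly alphanumeric, have no duplicated 10-grams,
    # and are not categorized as auto-generated.
    return (
        row["alphanum_fraction"] > 0.5
        and row["qsc_code_frac_chars_dupe_10grams_quality_signal"] == 0.0
        and row["qsc_code_cate_autogen_quality_signal"] == 0.0
    )

for row in ds:
    if looks_clean(row):
        print(row["max_stars_repo_name"], row["size"], row["avg_line_length"])
        break
```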
2346b7d4b689aedf70be90e22366c7d461f0ff5d
| 1,479
|
py
|
Python
|
mupub/tests/test_utils.py
|
MutopiaProject/mupub
|
8c59ae15ea13af14139570fcccfef850e1363548
|
[
"MIT"
] | null | null | null |
mupub/tests/test_utils.py
|
MutopiaProject/mupub
|
8c59ae15ea13af14139570fcccfef850e1363548
|
[
"MIT"
] | 1
|
2017-02-22T17:33:23.000Z
|
2017-02-23T10:02:48.000Z
|
mupub/tests/test_utils.py
|
MutopiaProject/mupub
|
8c59ae15ea13af14139570fcccfef850e1363548
|
[
"MIT"
] | null | null | null |
"""Util module tests
"""
import os.path
from unittest import TestCase
import mupub
from clint.textui.validators import ValidationError
from .tutils import PREFIX
_SIMPLE_PATH = os.path.join(PREFIX, 'SorF', 'O77', 'sorf-o77-01',)
_LYS_PATH = os.path.join(PREFIX, 'PaganiniN', 'O1', 'Caprice_1',)
class UtilsTest(TestCase):
"""Utils testing"""
def test_find(self):
"""Find files (for zipping ly files)"""
here = os.getcwd()
try:
os.chdir(_SIMPLE_PATH)
flist = mupub.utils.find_files('.')
self.assertEqual(len(flist), 2)
finally:
os.chdir(here)
def test_resolve(self):
"""Resolving file input"""
here = os.getcwd()
try:
for test_path in [_SIMPLE_PATH, _LYS_PATH,]:
os.chdir(test_path)
base,infile = mupub.utils.resolve_input()
self.assertEqual(base, os.path.basename(test_path))
self.assertIsNotNone(infile)
finally:
os.chdir(here)
def test_bools(self):
boolv = mupub.utils.BooleanValidator('some message')
boolv_nom = mupub.utils.BooleanValidator()
self.assertTrue(boolv('y'), 'y is True')
self.assertFalse(boolv('n'), 'n is False')
self.assertTrue(not boolv_nom('N'), 'not N is True')
with self.assertRaises(ValidationError):
if boolv('x'):
self.fail('should not be here!')
| 30.183673
| 67
| 0.593644
| 176
| 1,479
| 4.869318
| 0.4375
| 0.028005
| 0.023337
| 0.032672
| 0.105018
| 0.058343
| 0
| 0
| 0
| 0
| 0
| 0.008419
| 0.277214
| 1,479
| 48
| 68
| 30.8125
| 0.793265
| 0.058147
| 0
| 0.228571
| 0
| 0
| 0.077316
| 0
| 0
| 0
| 0
| 0
| 0.228571
| 1
| 0.085714
| false
| 0
| 0.142857
| 0
| 0.257143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
23479c6aeea396d6cdcce0a007d798ea7a728144
| 2,736
|
py
|
Python
|
routemaster/cli.py
|
thread/routemaster
|
1fd997a3bcee5e6760e9f7a60cb54323c3dfdc41
|
[
"MIT"
] | 13
|
2018-01-16T14:26:27.000Z
|
2022-03-19T12:43:17.000Z
|
routemaster/cli.py
|
thread/routemaster
|
1fd997a3bcee5e6760e9f7a60cb54323c3dfdc41
|
[
"MIT"
] | 86
|
2018-01-03T17:00:56.000Z
|
2021-12-06T12:58:06.000Z
|
routemaster/cli.py
|
thread/routemaster
|
1fd997a3bcee5e6760e9f7a60cb54323c3dfdc41
|
[
"MIT"
] | 3
|
2018-02-21T23:13:45.000Z
|
2022-03-19T12:43:23.000Z
|
"""CLI handling for `routemaster`."""
import logging
import yaml
import click
import layer_loader
from routemaster.app import App
from routemaster.cron import CronThread
from routemaster.config import ConfigError, load_config
from routemaster.server import server
from routemaster.middleware import wrap_application
from routemaster.validation import ValidationError, validate_config
from routemaster.gunicorn_application import GunicornWSGIApplication
logger = logging.getLogger(__name__)
@click.group()
@click.option(
'-c',
'--config-file',
'config_files',
help="Path to the service config file.",
type=click.File(encoding='utf-8'),
required=True,
multiple=True,
)
@click.pass_context
def main(ctx, config_files):
"""Shared entrypoint configuration."""
logging.getLogger('schedule').setLevel(logging.CRITICAL)
config_data = layer_loader.load_files(
config_files,
loader=yaml.load,
)
try:
config = load_config(config_data)
except ConfigError:
logger.exception("Configuration Error")
click.get_current_context().exit(1)
ctx.obj = App(config)
_validate_config(ctx.obj)
@main.command()
@click.pass_context
def validate(ctx):
"""
Entrypoint for validation of configuration files.
Validation is done by the main handler in order to cover all code paths,
so this function is a stub so that `serve` does not have to be called.
"""
pass
@main.command()
@click.option(
'-b',
'--bind',
help="Bind address and port.",
type=str,
default='[::]:2017',
)
@click.option(
'--debug/--no-debug',
help="Enable debugging mode.",
default=False,
)
@click.option(
'--workers',
help="Number of gunicorn workers to run.",
type=int,
default=1,
)
@click.pass_context
def serve(ctx, bind, debug, workers): # pragma: no cover
"""Entrypoint for serving the Routemaster HTTP service."""
app = ctx.obj
server.config.app = app
if debug:
server.config['DEBUG'] = True
cron_thread = CronThread(app)
cron_thread.start()
wrapped_server = wrap_application(app, server)
def post_fork():
app.initialise()
app.logger.init_flask(server)
try:
instance = GunicornWSGIApplication(
wrapped_server,
bind=bind,
debug=debug,
workers=workers,
post_fork=post_fork,
)
instance.run()
finally:
cron_thread.stop()
def _validate_config(app: App):
try:
validate_config(app, app.config)
except ValidationError as e:
msg = f"Validation Error: {e}"
logger.exception(msg)
click.get_current_context().exit(1)
| 23.186441
| 76
| 0.665205
| 327
| 2,736
| 5.446483
| 0.391437
| 0.058956
| 0.026951
| 0.032004
| 0.03032
| 0.03032
| 0
| 0
| 0
| 0
| 0
| 0.003775
| 0.225512
| 2,736
| 117
| 77
| 23.384615
| 0.836715
| 0.120614
| 0
| 0.157303
| 0
| 0
| 0.100972
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05618
| false
| 0.044944
| 0.123596
| 0
| 0.179775
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2348e1dd77f2ba0e869197de55900d212aa3c556
| 965
|
py
|
Python
|
grid_sticky_example_3.py
|
crazcalm/learn_tkinter_canvas
|
b798a6f2217a478e9222bb6eaa2afec3d28a2758
|
[
"MIT"
] | null | null | null |
grid_sticky_example_3.py
|
crazcalm/learn_tkinter_canvas
|
b798a6f2217a478e9222bb6eaa2afec3d28a2758
|
[
"MIT"
] | 2
|
2020-02-14T02:14:26.000Z
|
2020-02-14T02:15:58.000Z
|
grid_sticky_example_3.py
|
crazcalm/learn_tkinter_canvas
|
b798a6f2217a478e9222bb6eaa2afec3d28a2758
|
[
"MIT"
] | 1
|
2021-11-24T13:00:34.000Z
|
2021-11-24T13:00:34.000Z
|
"""
When a widget is positioned with sticky,
the size of the widget itself is just big
enough to contain any text and other
contents inside of it. It won’t fill the
entire grid cell. In order to fill the
grid, you can specify "ns" to force the
widget to fill the cell in the vertical
direction, or "ew" to fill the cell in the
vertical direction. To fill the entire
cell, set sticky to "nsew". The following
example illustrates each of these options:
"""
import tkinter as tk
window = tk.Tk()
window.rowconfigure(0, minsize=50)
window.columnconfigure([0, 1, 2, 3], minsize=50)
label1 = tk.Label(text="1", bg="black", fg="white")
label2 = tk.Label(text="2", bg="black", fg="white")
label3 = tk.Label(text="3", bg="black", fg="white")
label4 = tk.Label(text="4", bg="black", fg="white")
label1.grid(row=0, column=0)
label2.grid(row=0, column=1, sticky="ew")
label3.grid(row=0, column=2, sticky="ns")
label4.grid(row=0, column=3, sticky="nsew")
window.mainloop()
| 30.15625
| 51
| 0.71399
| 172
| 965
| 4.005814
| 0.430233
| 0.050798
| 0.05225
| 0.081277
| 0.101597
| 0.101597
| 0.101597
| 0.101597
| 0
| 0
| 0
| 0.034982
| 0.140933
| 965
| 32
| 52
| 30.15625
| 0.79614
| 0.462176
| 0
| 0
| 0
| 0
| 0.101563
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.076923
| 0
| 0.076923
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
234e920fdc139ffec693a188e6071590ea84ef74
| 20,151
|
py
|
Python
|
praatio/pitch_and_intensity.py
|
timmahrt/praatIO
|
000d0477fffb033b63d54311fac5c913157a59a6
|
[
"MIT"
] | 208
|
2016-04-20T12:42:05.000Z
|
2022-03-25T13:44:03.000Z
|
praatio/pitch_and_intensity.py
|
timmahrt/praatIO
|
000d0477fffb033b63d54311fac5c913157a59a6
|
[
"MIT"
] | 37
|
2017-10-31T15:22:59.000Z
|
2022-01-02T02:55:46.000Z
|
praatio/pitch_and_intensity.py
|
timmahrt/praatIO
|
000d0477fffb033b63d54311fac5c913157a59a6
|
[
"MIT"
] | 33
|
2016-05-09T07:34:22.000Z
|
2022-03-30T09:00:58.000Z
|
# coding: utf-8
"""
Functions for working with pitch data
This file depends on the praat script get_pitch_and_intensity.praat
(which depends on praat) to extract pitch and intensity values from
audio data. Once the data is extracted, there are functions for
data normalization and calculating various measures from the time
stamped output of the praat script (ie **generatePIMeasures()**)
For brevity, 'pitch_and_intensity' is referred to as 'PI'
see **examples/get_pitch_and_formants.py**
"""
import os
from os.path import join
import io
import math
from typing import List, Tuple, Optional, cast
from praatio import data_points
from praatio import praatio_scripts
from praatio import textgrid
from praatio.utilities import errors
from praatio.utilities import my_math
from praatio.utilities import utils
from praatio.utilities.constants import Point
HERTZ = "Hertz"
UNSPECIFIED = "unspecified"
_PITCH_ERROR_TIER_NAME = "pitch errors"
def _extractPIPiecewise(
inputFN: str,
outputFN: str,
praatEXE: str,
minPitch: float,
maxPitch: float,
tgFN: str,
tierName: str,
tmpOutputPath: str,
sampleStep: float = 0.01,
silenceThreshold: float = 0.03,
pitchUnit: str = HERTZ,
forceRegenerate: bool = True,
undefinedValue: float = None,
medianFilterWindowSize: int = 0,
pitchQuadInterp: bool = False,
) -> List[Tuple[float, ...]]:
"""
Extracts pitch and int from each labeled interval in a textgrid
This has the benefit of being faster than using _extractPIFile if only
labeled regions need to have their pitch values sampled, particularly
for longer files.
Returns the result as a list. Will load the serialized result
if this has already been called on the appropriate files before
"""
outputPath = os.path.split(outputFN)[0]
utils.makeDir(outputPath)
windowSize = medianFilterWindowSize
if not os.path.exists(inputFN):
raise errors.ArgumentError(f"Required folder does not exist: f{inputFN}")
firstTime = not os.path.exists(outputFN)
if firstTime or forceRegenerate is True:
utils.makeDir(tmpOutputPath)
splitAudioList = praatio_scripts.splitAudioOnTier(
inputFN, tgFN, tierName, tmpOutputPath, False
)
allPIList: List[Tuple[str, str, str]] = []
for start, _, fn in splitAudioList:
tmpTrackName = os.path.splitext(fn)[0] + ".txt"
piList = _extractPIFile(
join(tmpOutputPath, fn),
join(tmpOutputPath, tmpTrackName),
praatEXE,
minPitch,
maxPitch,
sampleStep,
silenceThreshold,
pitchUnit,
forceRegenerate=True,
medianFilterWindowSize=windowSize,
pitchQuadInterp=pitchQuadInterp,
)
convertedPiList = [
("%0.3f" % (float(time) + start), str(pV), str(iV))
for time, pV, iV in piList
]
allPIList.extend(convertedPiList)
outputData = [",".join(row) for row in allPIList]
with open(outputFN, "w") as fd:
fd.write("\n".join(outputData) + "\n")
return loadTimeSeriesData(outputFN, undefinedValue=undefinedValue)
def _extractPIFile(
inputFN: str,
outputFN: str,
praatEXE: str,
minPitch: float,
maxPitch: float,
sampleStep: float = 0.01,
silenceThreshold: float = 0.03,
pitchUnit: str = HERTZ,
forceRegenerate: bool = True,
undefinedValue: float = None,
medianFilterWindowSize: int = 0,
pitchQuadInterp: bool = False,
) -> List[Tuple[float, ...]]:
"""
Extracts pitch and intensity values from an audio file
Returns the result as a list. Will load the serialized result
if this has already been called on the appropriate files before
"""
outputPath = os.path.split(outputFN)[0]
utils.makeDir(outputPath)
if not os.path.exists(inputFN):
raise errors.ArgumentError(f"Required folder does not exist: f{inputFN}")
firstTime = not os.path.exists(outputFN)
if firstTime or forceRegenerate is True:
# The praat script uses append mode, so we need to clear any prior
# result
if os.path.exists(outputFN):
os.remove(outputFN)
if pitchQuadInterp is True:
doInterpolation = 1
else:
doInterpolation = 0
argList = [
inputFN,
outputFN,
sampleStep,
minPitch,
maxPitch,
silenceThreshold,
pitchUnit,
-1,
-1,
medianFilterWindowSize,
doInterpolation,
]
scriptName = "get_pitch_and_intensity.praat"
scriptFN = join(utils.scriptsPath, scriptName)
utils.runPraatScript(praatEXE, scriptFN, argList)
return loadTimeSeriesData(outputFN, undefinedValue=undefinedValue)
def extractIntensity(
inputFN: str,
outputFN: str,
praatEXE: str,
minPitch: float,
sampleStep: float = 0.01,
forceRegenerate: bool = True,
undefinedValue: float = None,
) -> List[Tuple[float, ...]]:
"""
Extract the intensity for an audio file
Calculates intensity using the following praat command:
https://www.fon.hum.uva.nl/praat/manual/Sound__To_Intensity___.html
"""
outputPath = os.path.split(outputFN)[0]
utils.makeDir(outputPath)
if not os.path.exists(inputFN):
raise errors.ArgumentError(f"Required folder does not exist: f{inputFN}")
firstTime = not os.path.exists(outputFN)
if firstTime or forceRegenerate is True:
# The praat script uses append mode, so we need to clear any prior
# result
if os.path.exists(outputFN):
os.remove(outputFN)
argList = [inputFN, outputFN, sampleStep, minPitch, -1, -1]
scriptName = "get_intensity.praat"
scriptFN = join(utils.scriptsPath, scriptName)
utils.runPraatScript(praatEXE, scriptFN, argList)
return loadTimeSeriesData(outputFN, undefinedValue=undefinedValue)
def extractPitchTier(
wavFN: str,
outputFN: str,
praatEXE: str,
minPitch: float,
maxPitch: float,
sampleStep: float = 0.01,
silenceThreshold: float = 0.03,
forceRegenerate: bool = True,
medianFilterWindowSize: int = 0,
pitchQuadInterp: bool = False,
) -> data_points.PointObject2D:
"""
Extract pitch at regular intervals from the input wav file
Data is output to a text file and then returned in a list in the form
[(timeV1, pitchV1), (timeV2, pitchV2), ...]
sampleStep - the frequency to sample pitch at
silenceThreshold - segments with lower intensity won't be analyzed
for pitch
forceRegenerate - if running this function for the same file, if False
just read in the existing pitch file
pitchQuadInterp - if True, quadratically interpolate pitch
Calculates pitch using the following praat command:
https://www.fon.hum.uva.nl/praat/manual/Sound__To_Pitch___.html
"""
outputPath = os.path.split(outputFN)[0]
utils.makeDir(outputPath)
if pitchQuadInterp is True:
doInterpolation = 1
else:
doInterpolation = 0
if not os.path.exists(wavFN):
raise errors.ArgumentError(f"Required file does not exist: f{wavFN}")
firstTime = not os.path.exists(outputFN)
if firstTime or forceRegenerate is True:
if os.path.exists(outputFN):
os.remove(outputFN)
argList = [
wavFN,
outputFN,
sampleStep,
minPitch,
maxPitch,
silenceThreshold,
medianFilterWindowSize,
doInterpolation,
]
scriptName = "get_pitchtier.praat"
scriptFN = join(utils.scriptsPath, scriptName)
utils.runPraatScript(praatEXE, scriptFN, argList)
return data_points.open2DPointObject(outputFN)
def extractPitch(
wavFN: str,
outputFN: str,
praatEXE: str,
minPitch: float,
maxPitch: float,
sampleStep: float = 0.01,
silenceThreshold: float = 0.03,
forceRegenerate: bool = True,
undefinedValue: float = None,
medianFilterWindowSize: int = 0,
pitchQuadInterp: bool = False,
) -> List[Tuple[float, ...]]:
"""
Extract pitch at regular intervals from the input wav file
Data is output to a text file and then returned in a list in the form
[(timeV1, pitchV1), (timeV2, pitchV2), ...]
sampleStep - the frequency to sample pitch at
silenceThreshold - segments with lower intensity won't be analyzed
for pitch
forceRegenerate - if running this function for the same file, if False
just read in the existing pitch file
undefinedValue - if None remove from the dataset, otherwise set to
undefinedValue
pitchQuadInterp - if True, quadratically interpolate pitch
Calculates pitch using the following praat command:
https://www.fon.hum.uva.nl/praat/manual/Sound__To_Pitch___.html
"""
outputPath = os.path.split(outputFN)[0]
utils.makeDir(outputPath)
if pitchQuadInterp is True:
doInterpolation = 1
else:
doInterpolation = 0
if not os.path.exists(wavFN):
raise errors.ArgumentError(f"Required file does not exist: f{wavFN}")
firstTime = not os.path.exists(outputFN)
if firstTime or forceRegenerate is True:
if os.path.exists(outputFN):
os.remove(outputFN)
argList = [
wavFN,
outputFN,
sampleStep,
minPitch,
maxPitch,
silenceThreshold,
-1,
-1,
medianFilterWindowSize,
doInterpolation,
]
scriptName = "get_pitch.praat"
scriptFN = join(utils.scriptsPath, scriptName)
utils.runPraatScript(praatEXE, scriptFN, argList)
return loadTimeSeriesData(outputFN, undefinedValue=undefinedValue)
def extractPI(
inputFN: str,
outputFN: str,
praatEXE: str,
minPitch: float,
maxPitch: float,
sampleStep: float = 0.01,
silenceThreshold: float = 0.03,
pitchUnit: str = HERTZ,
forceRegenerate: bool = True,
tgFN: str = None,
tierName: str = None,
tmpOutputPath: str = None,
undefinedValue: float = None,
medianFilterWindowSize: int = 0,
pitchQuadInterp: bool = False,
) -> List[Tuple[float, ...]]:
"""
Extracts pitch and intensity from a file wholesale or piecewise
If the parameters for a tg are passed in, this will only extract labeled
segments in a tier of the tg. Otherwise, pitch will be extracted from
the entire file.
male: minPitch=50; maxPitch=350
female: minPitch=75; maxPitch=450
pitchUnit: "Hertz", "semitones re 100 Hz", etc
Calculates pitch and intensity using the following praat command:
https://www.fon.hum.uva.nl/praat/manual/Sound__To_Pitch___.html
https://www.fon.hum.uva.nl/praat/manual/Sound__To_Intensity___.html
"""
outputPath = os.path.split(outputFN)[0]
windowSize = medianFilterWindowSize
if tgFN is None or tierName is None:
piList = _extractPIFile(
inputFN,
outputFN,
praatEXE,
minPitch,
maxPitch,
sampleStep,
silenceThreshold,
pitchUnit,
forceRegenerate,
undefinedValue=undefinedValue,
medianFilterWindowSize=windowSize,
pitchQuadInterp=pitchQuadInterp,
)
else:
if tmpOutputPath is None:
tmpOutputPath = join(outputPath, "piecewise_output")
piList = _extractPIPiecewise(
inputFN,
outputFN,
praatEXE,
minPitch,
maxPitch,
tgFN,
tierName,
tmpOutputPath,
sampleStep,
silenceThreshold,
pitchUnit,
forceRegenerate,
undefinedValue=undefinedValue,
medianFilterWindowSize=windowSize,
pitchQuadInterp=pitchQuadInterp,
)
return piList
def loadTimeSeriesData(
fn: str, undefinedValue: float = None
) -> List[Tuple[float, ...]]:
"""
For reading the output of get_pitch_and_intensity or get_intensity
Data should be of the form
[(time1, value1a, value1b, ...),
(time2, value2a, value2b, ...), ]
"""
name = os.path.splitext(os.path.split(fn)[1])[0]
try:
with io.open(fn, "r", encoding="utf-8") as fd:
data = fd.read()
except IOError:
print(f"No pitch track for: {name}")
raise
dataList = [row.split(",") for row in data.splitlines() if row != ""]
# The new praat script includes a header
if dataList[0][0] == "time":
dataList = dataList[1:]
newDataList = []
for row in dataList:
time = float(row.pop(0))
entry = [
time,
]
doSkip = False
for value in row:
if "--" in value:
if undefinedValue is not None:
appendValue = undefinedValue
else:
doSkip = True
break
else:
appendValue = float(value)
entry.append(appendValue)
if doSkip is True:
continue
newDataList.append(tuple(entry))
return newDataList
def generatePIMeasures(
dataList: List[Tuple[float, float, float]],
tgFN: str,
tierName: str,
doPitch: bool,
medianFilterWindowSize: int = None,
globalZNormalization: bool = False,
localZNormalizationWindowSize: int = 0,
) -> List[Tuple[float, ...]]:
"""
Generates processed values for the labeled intervals in a textgrid
nullLabelList - labels to ignore in the textgrid. Defaults to ["",]
if doPitch is True, get pitch measures; if False, get rms intensity
medianFilterWindowSize: if none, no filtering is done
globalZNormalization: if True, values are normalized with the mean
and stdDev of the data in dataList
localZNormalization: if greater than 1, values are normalized with the mean
and stdDev of the local context (for a window of 5, it
would consider the current value, 2 values before and 2
values after)
"""
# Warn user that normalizing a second time nullifies the first normalization
if globalZNormalization is True and localZNormalizationWindowSize > 0:
raise errors.NormalizationException()
castDataList = cast(List[Tuple[float, ...]], dataList)
if globalZNormalization is True:
if doPitch:
castDataList = my_math.znormalizeSpeakerData(castDataList, 1, True)
else:
castDataList = my_math.znormalizeSpeakerData(castDataList, 2, True)
# Raw values should have 0 filtered; normalized values are centered around 0, so don't filter
filterZeroFlag = not globalZNormalization
tg = textgrid.openTextgrid(tgFN, False)
if not isinstance(tg.tierDict[tierName], textgrid.IntervalTier):
raise errors.IncompatibleTierError(tg.tierDict[tierName])
tier = cast(textgrid.IntervalTier, tg.tierDict[tierName])
piData = tier.getValuesInIntervals(castDataList)
outputList: List[List[float]] = []
for interval, entryList in piData:
label = interval[0]
if doPitch:
tmpValList = [f0Val for _, f0Val, _ in entryList]
f0Measures = getPitchMeasures(
tmpValList, tgFN, label, medianFilterWindowSize, filterZeroFlag
)
outputList.append(list(f0Measures))
else:
tmpValList = [intensityVal for _, _, intensityVal in entryList]
if filterZeroFlag:
tmpValList = [
intensityVal for intensityVal in tmpValList if intensityVal != 0.0
]
rmsIntensity = 0.0
if len(tmpValList) != 0:
rmsIntensity = my_math.rms(tmpValList)
outputList.append(
[
rmsIntensity,
]
)
# Locally normalize the output
if localZNormalizationWindowSize > 0 and len(outputList) > 0:
for colI in range(len(outputList[0])):
featValList = [row[colI] for row in outputList]
featValList = my_math.znormWindowFilter(
featValList, localZNormalizationWindowSize, True, True
)
if len(featValList) != len(outputList): # This should hopefully not happen
raise errors.UnexpectedError(
"Lists must be of the same length but are not: "
f"({len(featValList)}), ({len(outputList)})"
)
for i, val in enumerate(featValList):
outputList[i][colI] = val
return [tuple(row) for row in outputList]
def getPitchMeasures(
f0Values: List[float],
name: str = None,
label: str = None,
medianFilterWindowSize: int = None,
filterZeroFlag: bool = False,
) -> Tuple[float, float, float, float, float, float]:
"""
Get various measures (min, max, etc) for the passed in list of pitch values
name is the name of the file. Label is the label of the current interval.
Both of these labels are only used for debugging and can be ignored if desired.
medianFilterWindowSize: None -> no median filtering
filterZeroFlag:True -> zero values are removed
"""
if name is None:
name = UNSPECIFIED
if label is None:
label = UNSPECIFIED
if medianFilterWindowSize is not None:
f0Values = my_math.medianFilter(
f0Values, medianFilterWindowSize, useEdgePadding=True
)
if filterZeroFlag:
f0Values = [f0Val for f0Val in f0Values if int(f0Val) != 0]
if len(f0Values) == 0:
myStr = f"No pitch data for file: {name}, label: {label}"
print(myStr.encode("ascii", "replace"))
counts = 0.0
meanF0 = 0.0
maxF0 = 0.0
minF0 = 0.0
rangeF0 = 0.0
variance = 0.0
std = 0.0
else:
counts = float(len(f0Values))
meanF0 = sum(f0Values) / counts
maxF0 = max(f0Values)
minF0 = min(f0Values)
rangeF0 = maxF0 - minF0
variance = sum([(val - meanF0) ** 2 for val in f0Values]) / counts
std = math.sqrt(variance)
return (meanF0, maxF0, minF0, rangeF0, variance, std)
def detectPitchErrors(
pitchList: List[Tuple[float, float]],
maxJumpThreshold: float = 0.70,
tgToMark: Optional[textgrid.Textgrid] = None,
) -> Tuple[List[Point], Optional[textgrid.Textgrid]]:
"""
Detect pitch halving and doubling errors.
If a textgrid is passed in, it adds the markings to the textgrid
"""
if maxJumpThreshold < 0 or maxJumpThreshold > 1:
raise errors.ArgumentError(
f"'maxJumpThreshold' must be between 0 and 1. Was given ({maxJumpThreshold})"
)
tierName = _PITCH_ERROR_TIER_NAME
if tgToMark is not None and tierName in tgToMark.tierNameList:
raise errors.ArgumentError(
f"Tier name '{tierName}' is already in provided textgrid"
)
errorList = []
for i in range(1, len(pitchList)):
lastPitch = pitchList[i - 1][1]
currentPitch = pitchList[i][1]
ceilingCutoff = currentPitch / maxJumpThreshold
floorCutoff = currentPitch * maxJumpThreshold
if (lastPitch <= floorCutoff) or (lastPitch >= ceilingCutoff):
currentTime = pitchList[i][0]
errorList.append(Point(currentTime, str(currentPitch / lastPitch)))
if tgToMark is not None:
pointTier = textgrid.PointTier(
tierName, errorList, tgToMark.minTimestamp, tgToMark.maxTimestamp
)
tgToMark.addTier(pointTier)
return errorList, tgToMark
| 31.193498
| 97
| 0.626966
| 2,151
| 20,151
| 5.83682
| 0.194793
| 0.01147
| 0.013381
| 0.011947
| 0.472003
| 0.430346
| 0.41362
| 0.396734
| 0.39315
| 0.387893
| 0
| 0.012397
| 0.29145
| 20,151
| 645
| 98
| 31.24186
| 0.866928
| 0.234182
| 0
| 0.516432
| 0
| 0
| 0.043626
| 0.003325
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023474
| false
| 0
| 0.028169
| 0
| 0.075117
| 0.004695
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
234efbd93d84cd1c579cc2b9b03be2e426d9604e
| 1,488
|
py
|
Python
|
keras_classifier.py
|
03pie/SMPCUP2017
|
956f97fce8620b3b0c35e6b3757347ede30c64ba
|
[
"MIT"
] | 25
|
2017-11-08T08:56:45.000Z
|
2021-11-24T20:24:37.000Z
|
keras_classifier.py
|
03pie/SMPCUP2017
|
956f97fce8620b3b0c35e6b3757347ede30c64ba
|
[
"MIT"
] | null | null | null |
keras_classifier.py
|
03pie/SMPCUP2017
|
956f97fce8620b3b0c35e6b3757347ede30c64ba
|
[
"MIT"
] | 13
|
2017-12-11T05:47:52.000Z
|
2021-03-04T13:53:41.000Z
|
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.wrappers.scikit_learn import KerasClassifier
from keras.utils import np_utils
# return the best three results
def top_n(matrix_prob, label_map):
ans = []
for line in matrix_prob:
rank = [label_map[item[0]] for item in sorted(enumerate(line), key=lambda v:v[1], reverse=True)]
ans.append(rank[:3])
return ans
# basic neural network model
def basic_model():
model = Sequential()
model.add(Dense(output_dim=500, input_dim=100, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(output_dim=42, input_dim=500, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
if __name__ == '__main__':
X = pd.read_csv('./data/triple_train_x_mean.txt', header=None, encoding='utf-8')
Y = pd.read_csv('./data/triple_train_y.txt', header=None, encoding='utf-8')
X_test = pd.read_csv('./data/triple_test_x_mean.txt', header=None, encoding='utf-8')
matrix_y = np_utils.to_categorical(Y,42)
# KerasClassifier analysis
classifier = KerasClassifier(build_fn=basic_model, nb_epoch=10, batch_size=500)
classifier.fit(X, Y)
pred_prob = classifier.predict_proba(X_test)
with open('./model/task2_label_space.txt', encoding='utf-8') as flabel:
label_map = flabel.read().split()
pd.DataFrame(top_n(pred_prob, label_map)).to_csv('./data/task2_ans_int_index.txt', index=None, header=None, encoding='utf-8')
| 40.216216
| 126
| 0.755376
| 238
| 1,488
| 4.5
| 0.462185
| 0.051354
| 0.056022
| 0.078431
| 0.203548
| 0.124183
| 0.056022
| 0.056022
| 0
| 0
| 0
| 0.022405
| 0.100134
| 1,488
| 36
| 127
| 41.333333
| 0.777446
| 0.054435
| 0
| 0
| 0
| 0
| 0.158945
| 0.119031
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068966
| false
| 0
| 0.172414
| 0
| 0.310345
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
234f3d49dc75338604b163336e34c3247e009fb7
| 2,012
|
py
|
Python
|
greening/get_tiles_from_google_maps.py
|
uchr/Hackathon-Urbaton
|
83362fec9777054050c858eda87905c8b512372a
|
[
"MIT"
] | null | null | null |
greening/get_tiles_from_google_maps.py
|
uchr/Hackathon-Urbaton
|
83362fec9777054050c858eda87905c8b512372a
|
[
"MIT"
] | null | null | null |
greening/get_tiles_from_google_maps.py
|
uchr/Hackathon-Urbaton
|
83362fec9777054050c858eda87905c8b512372a
|
[
"MIT"
] | null | null | null |
import numpy as np
import cv2
import os
import time
import requests
import shutil
def get_route_tile(x, y, out_file):
#http://mt1.google.com/vt/lyrs=y&x=5975&y=2598&z=13
url = 'http://mt1.google.com/vt/lyrs=y&x={}&y={}&z=13'.format(x, y)
response = requests.get(url, stream=True)
with open(out_file, 'wb') as file:
shutil.copyfileobj(response.raw, file)
del response
def union(all_x, all_y, path):
x_layers = []
for x_index in range(all_x):
file_path = os.path.join(path, "_".join(map(str, [x_index, 0])) + '.png')
print(file_path)
img = cv2.imread(file_path)
for y_index in range(1, all_y):
file_path = os.path.join(path, "_".join(map(str, [x_index, y_index])) + '.png')
print(file_path)
if os.path.exists(file_path) and os.path.isfile(file_path):
print(img.shape)
img = np.concatenate((img, cv2.imread(file_path)), axis=0)
else:
print("fail")
break
x_layers.append(img)
final_image = x_layers[0]
for layer in range(1, all_x):
final_image = np.concatenate((final_image, x_layers[layer]), axis=1)
cv2.imwrite(os.path.join(path, 'map.png'), final_image)
return final_image
def main():
"""
https://api.openstreetmap.org/api/0.6/map?bbox=82.54715,54.839455,83.182984,55.103517
https://sat02.maps.yandex.net/tiles?l=sat&v=3.465.0&x=2989&y=1297&z=12&lang=ru_RU
"""
city_min_x = 5975
city_max_x = 5989
city_min_y = 2582
city_max_y = 2597
all_x = city_max_x - city_min_x + 1
all_y = city_max_y - city_min_y + 1
path = './google_tiles_' + str(13) + '/'
for x_index in range(5975, 5990):
for y_index in range(2582, 2598):
file_name = os.path.join(path, "_".join(map(str, [x_index, y_index])) + '.png')
get_route_tile(x_index, y_index, file_name)
time.sleep(0.1)
final_image = union(all_x, all_y, path)
if __name__ == '__main__':
main()
| 30.029851
| 89
| 0.614811
| 331
| 2,012
| 3.507553
| 0.332326
| 0.055125
| 0.041344
| 0.048234
| 0.261843
| 0.172265
| 0.14298
| 0.14298
| 0.101637
| 0.101637
| 0
| 0.072869
| 0.236084
| 2,012
| 67
| 90
| 30.029851
| 0.682498
| 0.10835
| 0
| 0.040816
| 0
| 0.020408
| 0.050704
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061224
| false
| 0
| 0.122449
| 0
| 0.204082
| 0.081633
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
23549ec1228d9e42823643453e7b9895b370ca45
| 1,933
|
py
|
Python
|
reVX/utilities/cluster_methods.py
|
NREL/reVX
|
4d62eb2c003c3b53b959f7a58bdc342d18098884
|
[
"BSD-3-Clause"
] | 7
|
2020-04-06T00:29:55.000Z
|
2022-01-23T20:00:14.000Z
|
reVX/utilities/cluster_methods.py
|
NREL/reVX
|
4d62eb2c003c3b53b959f7a58bdc342d18098884
|
[
"BSD-3-Clause"
] | 67
|
2020-02-28T20:15:35.000Z
|
2022-03-31T21:34:52.000Z
|
reVX/utilities/cluster_methods.py
|
NREL/reVX
|
4d62eb2c003c3b53b959f7a58bdc342d18098884
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Clustering Methods
"""
import numpy as np
from sklearn.cluster import KMeans
from sklearn.preprocessing import normalize
class ClusteringMethods:
""" Base class of clustering methods """
@staticmethod
def _normalize_values(arr, norm=None, axis=None):
"""
Normalize values in array by column
Parameters
----------
arr : ndarray
ndarray of values extracted from meta
shape (n samples, with m features)
norm : str
Normalization method to use (see sklearn.preprocessing.normalize)
if None range normalize
axis : int
Axis to normalize along
Returns
---------
arr : ndarray
array with values normalized by column
shape (n samples, with m features)
"""
if norm:
arr = normalize(arr, norm=norm, axis=axis)
else:
if np.issubdtype(arr.dtype, np.integer):
arr = arr.astype(float)
min_all = arr.min(axis=axis)
max_all = arr.max(axis=axis)
range_all = max_all - min_all
if axis is not None:
pos = range_all == 0
range_all[pos] = 1
arr -= min_all
arr /= range_all
return arr
@staticmethod
def kmeans(data, **kwargs):
""" Cluster based on kmeans methodology """
kmeans = KMeans(random_state=0, **kwargs)
results = kmeans.fit(data)
labels = results.labels_
# Create deterministic cluster labels based on size
label_n, l_size = np.unique(labels, return_counts=True)
idx = np.argsort(l_size)
l_mapping = dict(zip(label_n[idx], label_n))
sorted_labels = labels.copy()
for k, v in l_mapping.items():
sorted_labels[labels == k] = v
return sorted_labels
| 27.225352
| 77
| 0.562338
| 224
| 1,933
| 4.745536
| 0.428571
| 0.030103
| 0.024459
| 0.031985
| 0.048918
| 0.048918
| 0
| 0
| 0
| 0
| 0
| 0.003172
| 0.347646
| 1,933
| 70
| 78
| 27.614286
| 0.83981
| 0.299534
| 0
| 0.0625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.09375
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
23585aa3fd91ad92d3f8755c7797b9e71281a6bc
| 918
|
py
|
Python
|
Unit3/Lesson7.py
|
szhua/PythonLearn
|
12eaf7cc74a0310bb23e21773f3c83deb91d0362
|
[
"Apache-2.0"
] | null | null | null |
Unit3/Lesson7.py
|
szhua/PythonLearn
|
12eaf7cc74a0310bb23e21773f3c83deb91d0362
|
[
"Apache-2.0"
] | null | null | null |
Unit3/Lesson7.py
|
szhua/PythonLearn
|
12eaf7cc74a0310bb23e21773f3c83deb91d0362
|
[
"Apache-2.0"
] | null | null | null |
# Python's built-in itertools module provides very useful functions for operating on iterable objects.
import itertools
# Count natural numbers starting from 10
naturals =itertools.count(10)
from collections.abc import Iterator
# Check the type of naturals
print(isinstance(naturals,Iterator))
for x in naturals:
if x>70:
break
print(x)
# cycle() repeats the passed-in sequence indefinitely:
cycles =itertools.cycle("szhualeilei")
print(isinstance(cycles,Iterator))
n =0
for x in cycles :
#print(x)
n+=1
if n >100:
break
# repeat: repeat a value
repeats =itertools.repeat("szhua",10)
for x in repeats:
print(x)
inter =(x**2 for x in range(100) if x%2==0 and x%3==0)
# Use takewhile to filter the iterator:
ns =itertools.takewhile(lambda x :x<1000,inter)
print(list(ns))
#chain()
# chain() concatenates a group of iterables into one larger iterator:
print(list(itertools.chain("fjksjdfk","abcdefghijklmn")))
#groupby()
# groupby() picks out adjacent repeated elements of the iterator and groups them together:
for key ,value in itertools.groupby("aaajjjfdsfkkkfffff"):
print(str(key).upper(),list(value))
| 14.123077
| 58
| 0.704793
| 118
| 918
| 5.483051
| 0.466102
| 0.02473
| 0.037094
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.032468
| 0.16122
| 918
| 64
| 59
| 14.34375
| 0.807792
| 0.224401
| 0
| 0.166667
| 0
| 0
| 0.081871
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.083333
| 0
| 0.083333
| 0.291667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
23593360ab941b0e68d201d7be4b82afc1cc2f9c
| 8,536
|
py
|
Python
|
flaskr/databaseCURD.py
|
Ln-Yangzl/yukiyu-webpage
|
f9aaf71dca18067ecbe43faccb74a7f8d4cf56b7
|
[
"Apache-2.0"
] | null | null | null |
flaskr/databaseCURD.py
|
Ln-Yangzl/yukiyu-webpage
|
f9aaf71dca18067ecbe43faccb74a7f8d4cf56b7
|
[
"Apache-2.0"
] | null | null | null |
flaskr/databaseCURD.py
|
Ln-Yangzl/yukiyu-webpage
|
f9aaf71dca18067ecbe43faccb74a7f8d4cf56b7
|
[
"Apache-2.0"
] | 2
|
2021-03-23T12:22:04.000Z
|
2021-05-24T13:56:26.000Z
|
# This module provides a generic CRUD interface for the database.
# Through this interface, insert, delete, query, and update operations can be performed quickly.
# The module also provides interfaces for fetching all table names in the database and each table's header.
import traceback
import pymysql
from userManage import commmitChangeToUserlist, privilegeOfUser, ifManage
global db
# TODO: improve the robustness
def checkValibleTableName(targetTable, user):
if user != None and targetTable == 'user_list':
return user in getSuperUser()
return targetTable != None
def commitChangeToDatabase(oldInfo, newInfo, targetTable, user = None):
returnStatu = changeProcess(oldInfo, newInfo, targetTable, user)
if returnStatu == 0:
info = '错误的数据格式!'
elif returnStatu == -1:
info = '该表不存在!'
elif returnStatu == -2:
info = '非法访问:未经过用户认证'
elif returnStatu == -3:
info = '非法访问:用户无该权限'
elif returnStatu == -4:
info = '错误的数据格式:管理员用户拥有增删查改所有权限'
elif returnStatu == -5:
info = '用户名重复'
elif returnStatu == 1:
info = '运行成功!'
else:
info = '未知错误!'
return {'statu': returnStatu, 'info': info}
# this function calls updateItem, insertItem, deleteItem
# according to the oldInfo and newInfo
# if oldInfo is None, call insert
# if newInfo is None, call delete
# else, call update
#
# OK code: return 1
# error code:
# 0 : sql run time error
# -1 : invalid target table
# -2 : user is None
# -3 : user has not target privilege
# -4 : manager's privilege is not 'YYYY'
# -5 : duplicate user name
def changeProcess(oldInfo, newInfo, targetTable, user = None):
if user == None:
return -2
userPrivilege = privilegeOfUser(user).get('privilege')
global db
db = pymysql.connect(host="localhost", port=3306, db="yukiyu", user="jhchen", password="123456",charset='utf8')
if oldInfo == None and newInfo == None or not checkValibleTableName(targetTable, user):
print('error ! invalid change!')
print('oldInfo:', oldInfo)
print('newInfo:', newInfo)
print('targetTable:', targetTable)
return -1
returnStatus = 0
if targetTable == 'user_list':
if ifManage(user) == 'Y':
return commmitChangeToUserlist(oldInfo, newInfo)
else:
return -3
if oldInfo == None:
if userPrivilege[1] == 'Y':
returnStatus = insertItem(newInfo, targetTable)
else:
returnStatus = -3
elif newInfo == None:
if userPrivilege[3] == 'Y':
returnStatus = deleteItem(oldInfo, targetTable)
else:
returnStatus = -3
else:
if userPrivilege[1] == 'Y':
returnStatus = updateItem(oldInfo, newInfo, targetTable)
else:
returnStatus = -3
return returnStatus
# shuffle : ((a,),(b,),(c,)) --> (a, b, c)
def signColumnsShuffle(input):
res = []
for i in input:
res.append(i[0])
return res
# shuffle datetime.date to str: 2021-02-20
def datetimeShffle(input):
res = []
for i in input:
temp = []
for k in i:
temp.append(str(k))
res.append(temp)
return res
def getTableHead(tableName):
print('start to get table head from ' + tableName)
cursor = db.cursor()
sql = "select column_name from information_schema.columns as col where col.table_name='%s'"%tableName
print('start to execute:')
print(sql)
cursor.execute(sql)
res = cursor.fetchall()
res = signColumnsShuffle(res)
print('success ! \nget result: ')
print(res)
cursor.close()
return res
def getTableData(tableName):
cursor = db.cursor()
print('start to get table data from ' + tableName)
sql = "select * from %s"%tableName
# print('start to execute:')
# print(sql)
cursor.execute(sql)
res = cursor.fetchall()
res = datetimeShffle(res)
print(res)
cursor.close()
return res
def getSuperUser():
cursor = db.cursor()
sql = "select name from user_list where if_manager = 'Y'"
print('start to execute:')
print(sql)
cursor.execute(sql)
res = cursor.fetchall()
res = signColumnsShuffle(res)
print('execute success!')
print('result:' ,res)
cursor.close()
return res
def getTableNames(user):
cursor = db.cursor()
print('start to get table names from yukiyu')
sql = "select table_name from information_schema.tables as tb where tb.table_schema = 'yukiyu'"
cursor.execute(sql)
res = cursor.fetchall()
res = signColumnsShuffle(res)
print('success ! \nget result: ')
print(res)
cursor.close()
# Non-superusers are not allowed to view the user list
if user not in getSuperUser():
res.remove('user_list')
# Put the main table first
res.remove('bangumi_list')
res.insert(0, 'bangumi_list')
return res
# get all tables, including table names and data
def getDatabase(target, user):
global db
db = pymysql.connect(host="localhost", port=3306, db="yukiyu", user="jhchen", password="123456",charset='utf8')
print('get url args:')
print(target)
res = {}
selectPriv = privilegeOfUser(user).get('privilege')[0]
for key in target:
if target[key] != 'tables':
# Get the header of the data table
res[target[key]+'Header'] = getTableHead(target[key])
# Get all data in the data table
if selectPriv == 'Y':
res[target[key]] = getTableData(target[key])
else:
res[target[key]] = None
else:
# Get all table names in the database
res['tableList'] = getTableNames(user)
return res
# return the string: key1=value1 seperate key2=value2...
def getKeyValueString(name, data, seperate=','):
res = ''
seperate = ' ' + seperate + ' '
length = len(name)
for i in range(length):
res += (name[i] + '=' + "'" + str(data[i]) + "'")
if i != length - 1:
res += seperate
return res
# return the string: value1 seperate value2...
# if strlization is True, when the data[i] is str, the value will be: 'value'
def getValueString(data, seperate=',', strlization = False):
seperate = ' ' + seperate + ' '
res = ''
strlize = ''
if strlization == True:
strlize = "'"
length = len(data)
for i in range(length):
res += (strlize + str(data[i]) + strlize)
if i != length - 1:
res += seperate
return res
def updateItem(oldInfo, newInfo, targetTable):
tableHead = getTableHead(targetTable)
setField = getKeyValueString(tableHead, newInfo, ',')
whereField = getKeyValueString(tableHead, oldInfo, 'and')
cursor = db.cursor()
returnStatus = 0
sql = """
update %s
set %s
where %s
"""%(targetTable, setField, whereField)
try:
print('start to execute:')
print(sql)
cursor.execute(sql)
db.commit()
print('success !')
returnStatus = 1
except:
print('updata error !')
db.rollback()
traceback.print_exc()
returnStatus = 0
db.close()
return returnStatus
def insertItem(newInfo, targetTable):
tableHeadStr = getValueString(getTableHead(targetTable))
valueStr = getValueString(newInfo,strlization=True)
cursor = db.cursor()
sql = """
insert into %s
(%s)
values
(%s)
"""%(targetTable, tableHeadStr, valueStr)
returnStatus = 0
try:
print('start to execute:')
print(sql)
cursor.execute(sql)
db.commit()
print('success !')
returnStatus = 1
except:
print('insert error !')
db.rollback()
traceback.print_exc()
returnStatus = 0
db.close()
return returnStatus
def deleteItem(oldInfo, targetTable):
tableHead = getTableHead(targetTable)
whereField = getKeyValueString(tableHead, oldInfo, 'and')
cursor = db.cursor()
sql = """
delete from %s
where %s
"""%(targetTable, whereField)
returnStatus = 0
try:
print('start to execute:')
print(sql)
cursor.execute(sql)
db.commit()
print('success !')
returnStatus = 1
except:
print('delete error !')
db.rollback()
traceback.print_exc()
returnStatus = 0
db.close()
return returnStatus
def getUserList():
db = pymysql.connect(host="localhost", port=3306, db="yukiyu", user="jhchen", password="123456",charset='utf8')
cursor = db.cursor()
sql = 'select name, password, user_id from user_list'
cursor.execute(sql)
res = cursor.fetchall()
return res
| 28.740741
| 115
| 0.600633
| 925
| 8,536
| 5.523243
| 0.214054
| 0.017616
| 0.021139
| 0.022314
| 0.386768
| 0.316305
| 0.27892
| 0.262478
| 0.213936
| 0.213936
| 0
| 0.013799
| 0.278351
| 8,536
| 297
| 116
| 28.740741
| 0.815584
| 0.102156
| 0
| 0.541322
| 0
| 0
| 0.153413
| 0.009695
| 0
| 0
| 0
| 0.003367
| 0
| 1
| 0.066116
| false
| 0.016529
| 0.012397
| 0
| 0.165289
| 0.144628
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
236087aea9a609e4effde96065112e3417f806cd
| 3,864
|
py
|
Python
|
src/imreg_dft/show.py
|
GCBallesteros/imreg_dft
|
3eb7137403dd0689711ff1dae78200b0fbdcedfb
|
[
"BSD-3-Clause"
] | 167
|
2015-02-28T19:14:52.000Z
|
2022-03-30T03:42:33.000Z
|
src/imreg_dft/show.py
|
GCBallesteros/imreg_dft
|
3eb7137403dd0689711ff1dae78200b0fbdcedfb
|
[
"BSD-3-Clause"
] | 40
|
2015-01-18T23:58:41.000Z
|
2021-08-02T13:36:48.000Z
|
src/imreg_dft/show.py
|
GCBallesteros/imreg_dft
|
3eb7137403dd0689711ff1dae78200b0fbdcedfb
|
[
"BSD-3-Clause"
] | 51
|
2015-02-27T21:19:55.000Z
|
2022-03-24T12:28:45.000Z
|
# -*- coding: utf-8 -*-
# show.py
# Copyright (c) 2016-?, Matěj Týč
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import argparse as ap
from imreg_dft import cli
from imreg_dft import reporting
TOSHOW = (
"filtered input (I)mages",
"filtered input images (S)pectra",
"spectra (L)ogpolar transform",
"(1) angle-scale phase correlation",
"angle-scale transform (A)pplied",
"(2) translation phase correlation",
"(T)ile info",
)
TOSHOW_ABBR = "isl1a2t"
def create_parser():
parser = ap.ArgumentParser()
cli.update_parser_imreg(parser)
parser.add_argument("--prefix", default="reports")
parser.add_argument("--ftype", choices=("png", "pdf"), default="png")
parser.add_argument("--dpi", default=150, type=float)
parser.add_argument("--terse", default=False, action="store_true",
help="Don't show every smallest thing.")
parser.add_argument("--tex", default=False, action="store_true",
help="Use TeX to typeset labels (if applicable).")
parser.add_argument("--size", default=5, type=float,
help="Base image element size [in]")
parser.add_argument(
"--display", type=_show_valid, default=TOSHOW_ABBR,
help="String composing of '{}', meaning respectively: {}."
.format(TOSHOW_ABBR, ", ".join(TOSHOW)))
return parser
def _show_valid(stri):
stripped = stri.rstrip(TOSHOW_ABBR)
if len(stripped) > 0:
raise ap.ArgumentError("Argument contains invalid characters: {}"
.format(stripped))
return stri
def main():
parser = create_parser()
args = parser.parse_args()
opts = cli.args2dict(args)
reports = reporting.ReportsWrapper(args.display)
usetex = args.ftype == "pdf" and args.tex
from matplotlib import rc
if usetex:
rc("text", usetex=True)
rc("text.latex", unicode=True)
reporting.TEXT_MODE = "tex"
reports.set_global("dpi", args.dpi)
reports.set_global("ftype", args.ftype)
reports.set_global("size", args.size)
reports.set_global("usetex", usetex)
reports.set_global("terse", args.terse)
opts["show"] = False
opts["reports"] = reports
opts["prefix"] = args.prefix
cli.run(args.template, args.subject, opts)
reporting.report_tile(reports, args.prefix)
if __name__ == "__main__":
main()
| 35.449541
| 77
| 0.694358
| 503
| 3,864
| 5.256461
| 0.467197
| 0.023828
| 0.045008
| 0.017398
| 0.093041
| 0.074887
| 0.051437
| 0.051437
| 0.051437
| 0.051437
| 0
| 0.004864
| 0.201863
| 3,864
| 108
| 78
| 35.777778
| 0.852464
| 0.391822
| 0
| 0
| 0
| 0
| 0.233951
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.050847
| false
| 0
| 0.067797
| 0
| 0.152542
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
236634d05aadb9d36762574305057814f7a3b99e
| 3,939
|
py
|
Python
|
tests/unit/transport/pecan/models/response/test_health.py
|
jqxin2006/poppy
|
10636e6255c7370172422afece4a5c3d95c1e937
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/transport/pecan/models/response/test_health.py
|
jqxin2006/poppy
|
10636e6255c7370172422afece4a5c3d95c1e937
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/transport/pecan/models/response/test_health.py
|
jqxin2006/poppy
|
10636e6255c7370172422afece4a5c3d95c1e937
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ddt
from poppy.common import util
from poppy.transport.pecan.models.response import health
from tests.unit import base
class TestDNSModel(base.TestCase):
def setUp(self):
super(TestDNSModel, self).setUp()
def test_dns_is_alive(self):
dns_model = health.DNSModel(True)
self.assertEqual('true', dns_model['online'])
def test_dns_is_not_alive(self):
dns_model = health.DNSModel(False)
self.assertEqual('false', dns_model['online'])
class TestStorageModel(base.TestCase):
def setUp(self):
super(TestStorageModel, self).setUp()
def test_storage_is_alive(self):
storage_model = health.StorageModel(True)
self.assertEqual('true', storage_model['online'])
def test_storage_is_not_alive(self):
storage_model = health.StorageModel(False)
self.assertEqual('false', storage_model['online'])
class TestProviderModel(base.TestCase):
def setUp(self):
super(TestProviderModel, self).setUp()
def test_provider_is_alive(self):
provider_model = health.ProviderModel(True)
self.assertEqual('true', provider_model['online'])
def test_provider_is_not_alive(self):
provider_model = health.ProviderModel(False)
self.assertEqual('false', provider_model['online'])
@ddt.ddt
class TestHealthModel(base.TestCase):
def setUp(self):
super(TestHealthModel, self).setUp()
self.mock_controller = util.dict2obj(
{'base_url': 'https://www.poppycdn.io/'})
@ddt.file_data('health_map.json')
def test_health(self, health_map):
health_model = health.HealthModel(self.mock_controller, health_map)
storage_name = health_map['storage']['storage_name']
self.assertEqual('true',
health_model['storage'][storage_name]['online'])
dns_name = health_map['dns']['dns_name']
self.assertEqual('true',
health_model['dns'][dns_name]['online'])
@ddt.file_data('health_map_dns_not_available.json')
def test_health_dns_not_available(self, health_map):
health_model = health.HealthModel(self.mock_controller, health_map)
dns_name = health_map['dns']['dns_name']
self.assertEqual('false',
health_model['dns'][dns_name]['online'])
@ddt.file_data('health_map_storage_not_available.json')
def test_health_storage_not_available(self, health_map):
health_model = health.HealthModel(self.mock_controller, health_map)
storage_name = health_map['storage']['storage_name']
self.assertEqual('false',
health_model['storage'][storage_name]['online'])
@ddt.file_data('health_map_provider_not_available.json')
def test_health_provider_not_available(self, health_map):
health_model = health.HealthModel(self.mock_controller, health_map)
providers = health_map['providers']
for provider in providers:
provider_name = provider['provider_name']
provider_is_alive = provider['is_alive']
provider_model = health_model['providers'][provider_name]
if provider_is_alive:
self.assertEqual('true', provider_model['online'])
else:
self.assertEqual('false', provider_model['online'])
| 35.809091
| 75
| 0.68393
| 482
| 3,939
| 5.352697
| 0.242739
| 0.059302
| 0.044186
| 0.031008
| 0.502713
| 0.494961
| 0.24031
| 0.228682
| 0.228682
| 0.196899
| 0
| 0.002876
| 0.205636
| 3,939
| 109
| 76
| 36.137615
| 0.821668
| 0.140899
| 0
| 0.342857
| 0
| 0
| 0.11873
| 0.032057
| 0
| 0
| 0
| 0
| 0.171429
| 1
| 0.2
| false
| 0
| 0.057143
| 0
| 0.314286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
236931ea9461223fe34c99e295340ff93405cc67
| 229
|
py
|
Python
|
Src/Squar-root/squar-root.py
|
MadushikaPerera/Python
|
b7919b252c02b5e1017273a65dd022ac9d13f3e4
|
[
"MIT"
] | null | null | null |
Src/Squar-root/squar-root.py
|
MadushikaPerera/Python
|
b7919b252c02b5e1017273a65dd022ac9d13f3e4
|
[
"MIT"
] | null | null | null |
Src/Squar-root/squar-root.py
|
MadushikaPerera/Python
|
b7919b252c02b5e1017273a65dd022ac9d13f3e4
|
[
"MIT"
] | null | null | null |
# Read the number whose square root should be computed
number = int(input("Enter a number to find the square root : "))

# Square roots of negative numbers are not handled here
if number < 0:
    print("Please enter a valid number.")
else:
    # Exponentiation by 0.5 gives the square root
    sq_root = number ** 0.5
    print("Square root of {} is {}".format(number, sq_root))
| 20.818182
| 64
| 0.624454
| 39
| 229
| 3.615385
| 0.641026
| 0.085106
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.039106
| 0.218341
| 229
| 10
| 65
| 22.9
| 0.748603
| 0.017467
| 0
| 0
| 0
| 0
| 0.420814
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.333333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2369a4c986708b3067b08b2725a7bdc63e4b378b
| 12,141
|
py
|
Python
|
Tools/resultsdbpy/resultsdbpy/model/mock_model_factory.py
|
jacadcaps/webkitty
|
9aebd2081349f9a7b5d168673c6f676a1450a66d
|
[
"BSD-2-Clause"
] | 6
|
2021-07-05T16:09:39.000Z
|
2022-03-06T22:44:42.000Z
|
Tools/resultsdbpy/resultsdbpy/model/mock_model_factory.py
|
jacadcaps/webkitty
|
9aebd2081349f9a7b5d168673c6f676a1450a66d
|
[
"BSD-2-Clause"
] | 7
|
2022-03-15T13:25:39.000Z
|
2022-03-15T13:25:44.000Z
|
Tools/resultsdbpy/resultsdbpy/model/mock_model_factory.py
|
jacadcaps/webkitty
|
9aebd2081349f9a7b5d168673c6f676a1450a66d
|
[
"BSD-2-Clause"
] | null | null | null |
# Copyright (C) 2019 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import base64
import io
import time
import calendar
from resultsdbpy.controller.configuration import Configuration
from resultsdbpy.model.configuration_context_unittest import ConfigurationContextTest
from resultsdbpy.model.mock_repository import MockStashRepository, MockSVNRepository
from resultsdbpy.model.model import Model
class MockModelFactory(object):
ARCHIVE_ZIP = """UEsDBAoAAAAAAAtSBU8AAAAAAAAAAAAAAAAIABAAYXJjaGl2ZS9VWAwAZ2RIXWZkSF31ARQAUEsDBBQACAAIAA9SBU8AAAAAAAAAAAAAAAAQABAAYXJjaGl2ZS9maWxlLnR4dFVYDABovU1d
bmRIXfUBFABLSSxJBABQSwcIY/PzrQYAAAAEAAAAUEsDBAoAAAAAABRdCU8AAAAAAAAAAAAAAAAJABAAX19NQUNPU1gvVVgMACi+TV0ovk1d9QEUAFBLAwQKAAAAAAAUXQlPAAAAAAAA
AAAAAAAAEQAQAF9fTUFDT1NYL2FyY2hpdmUvVVgMACi+TV0ovk1d9QEUAFBLAwQUAAgACAAPUgVPAAAAAAAAAAAAAAAAGwAQAF9fTUFDT1NYL2FyY2hpdmUvLl9maWxlLnR4dFVYDABo
vU1dbmRIXfUBFABjYBVjZ2BiYPBNTFbwD1aIUIACkBgDJxAbMTAwegFpIJ+xhoEo4BgSEgRhgXXcAeIFaEqYoeICDAxSyfm5eokFBTmpejmJxSWlxakpKYklqcoBwVC1b4DYg4GBH6Eu
NzE5B2K+CUROFCFXWJpYlJhXkpmXypCd4hELUsUaKK4AVs0w95H9l352x+37375yVmg4n0+cf9BBob6BgYWxtWmKSUpSipGxtWNRckZmWWpMhZFBaElmTmZJpbWBs6GzkbOzpa6FpamF
romRm6Wuk7mFi66FqZuxiamLhauriSsDAFBLBwjEE3dr4AAAAHwBAABQSwMEFAAIAAgAzFwJTwAAAAAAAAAAAAAAABIAEABhcmNoaXZlL2luZGV4Lmh0bWxVWAwAor1NXaC9TV31ARQA
tVNdb+IwEHz3r9j2qZUCvfbt7hCSSQxYCnHOdsrxmBK3tRRilJj2+u9vbajKfejeDgliMruzM7PJ5GI0IpC6/Vtvn549XKXXcPfp9jPQ/b41wLvtGGjbQkQH6M1g+hfTjAkBaRo7+N4+
HLx1HdRdA4fBgO1gcId+a+KdB9vV/Rs8un43JPBq/TO4Pl7dwRPYucY+2m0dGBKoewN70++s96aBfe9ebIMH/1x7/DHI0rbu1XZPsHVdY0PTQGLXzvgvBG7Hv4kawD2+q9m6BusOg0cT
vkaVgbF+cC8BOtkngJ/Oebs1CeJ2gBbZAsnHwGjrVzU4ctvWdmf6MYG7P0XgsLMc3kWgv+aAwv6HDjj6izyN2x52pvP1+5pucAMO0R52tTe9rdvhI+y4okB7biGsWy+5AiXmek0lAzyX
UtzzjGUw2wAtyxxvFukYLqlC9BJokeF3Q4B9LyVTCoQEvipzjh1IIWmhOVNJaMqrjBeLBGaVhkJoyPmKayzTIsGxjPylD8QcVkymS/xLZzznehMnzrkuwrA5TqNQUql5WuVUEigrWQrF
IKjPuEpzylcsGwMKwKHA7lmhQS1pnp+7EdiZikJLjuKEVDBjKI/OEI8jig2SSZbqYOTjlGIwKCxPQJUs5XgIOTC0QeUmCVEgqWLfKqxCFDK6ogt0dfXvNEgIPa0kWwWxGIGqZkpzXWkG
CyGyGLJi8p6nTH2FXKgYVKVYgiM0TaIf5MCYEMfiWaV4DIwXmklZlZqL4hqWYo2BoEqKvVlMVhTRLe5DSNwq0oYcYvIJrJcMARmyjGnREIPC1FJ9XoYDMURNznxCwRY5X7AiZQEWgWbN
FbvGRXEVCvhx8JpuQFTRNdYET+R4Pnssk7hG4HOg2T0Pyk/VuHnFT49JjC1dnjIfk9FoSsjk2e/aKV5M3Zh+OvHWt2Zqu8b8GAdocnO8M7k5VZDJg2vepvENWxp8A+HV9W1zQSY3RwAr
A+VPUEsHCPbdMMviAgAAYQUAAFBLAwQUAAgACADMXAlPAAAAAAAAAAAAAAAAHQAQAF9fTUFDT1NYL2FyY2hpdmUvLl9pbmRleC5odG1sVVgMAKK9TV2gvU1d9QEUAGNgFWNnYGJg8E1M
VvAPVohQgAKQGAMnEBsxMDB6AWkgn7GGgSjgGBISBGGBddwB4gVoSpih4gIMDFLJ+bl6iQUFOal6OYnFJaXFqSkpiSWpygHBULVvgNiDgYEfoS43MTkHYr4JRE4UIVdYmliUmFeSmZfK
UL/XNxak6qLfEiGwaoa5j+y/9LM7bt//9pWzQsP5fOL8gw4K9Q0MLIytTVNMUpJSjIytHYuSMzLLUmMqjAxCSzJzMksqrQ2cDZ2NnJ0tdS0sTS10TYzcLHWdzC1cdC1M3YxNTF0sXF1N
XBkAUEsHCLRBGwrgAAAAfAEAAFBLAwQUAAgACAALUgVPAAAAAAAAAAAAAAAAEgAQAF9fTUFDT1NYLy5fYXJjaGl2ZVVYDABnZEhdZmRIXfUBFABjYBVjZ2BiYPBNTFbwD1aIUIACkBgD
JxAbMTAwCgFpIJ/RhYEo4BgSEgRhgXVsAeIJaEqYoOIeDAz8yfm5eokFBTmpermJyTkQ+T8QOVGEXGFpYlFiXklmXioDI0Ntye3fifMcHKZ8fXTEZauLLSPD3Ef2X/rZHbfvf/vKWaHh
fD4x7izUNzCwMLY2gAJrx6LkjMyy1JgKI4PQksyczJJKawNnQ2cjZ2dLXQtLUwtdEyM3S10ncwsXXQtTN2MTUxcLV1cTVwYAUEsHCAAolTbHAAAARAEAAFBLAQIVAwoAAAAAAAtSBU8A
AAAAAAAAAAAAAAAIAAwAAAAAAAAAAEDtQQAAAABhcmNoaXZlL1VYCABnZEhdZmRIXVBLAQIVAxQACAAIAA9SBU9j8/OtBgAAAAQAAAAQAAwAAAAAAAAAAECkgTYAAABhcmNoaXZlL2Zp
bGUudHh0VVgIAGi9TV1uZEhdUEsBAhUDCgAAAAAAFF0JTwAAAAAAAAAAAAAAAAkADAAAAAAAAAAAQP1BigAAAF9fTUFDT1NYL1VYCAAovk1dKL5NXVBLAQIVAwoAAAAAABRdCU8AAAAA
AAAAAAAAAAARAAwAAAAAAAAAAED9QcEAAABfX01BQ09TWC9hcmNoaXZlL1VYCAAovk1dKL5NXVBLAQIVAxQACAAIAA9SBU/EE3dr4AAAAHwBAAAbAAwAAAAAAAAAAECkgQABAABfX01B
Q09TWC9hcmNoaXZlLy5fZmlsZS50eHRVWAgAaL1NXW5kSF1QSwECFQMUAAgACADMXAlP9t0wy+ICAABhBQAAEgAMAAAAAAAAAABApIE5AgAAYXJjaGl2ZS9pbmRleC5odG1sVVgIAKK9
TV2gvU1dUEsBAhUDFAAIAAgAzFwJT7RBGwrgAAAAfAEAAB0ADAAAAAAAAAAAQKSBawUAAF9fTUFDT1NYL2FyY2hpdmUvLl9pbmRleC5odG1sVVgIAKK9TV2gvU1dUEsBAhUDFAAIAAgA
C1IFTwAolTbHAAAARAEAABIADAAAAAAAAAAAQKSBpgYAAF9fTUFDT1NYLy5fYXJjaGl2ZVVYCABnZEhdZmRIXVBLBQYAAAAACAAIAF4CAAC9BwAAAAA="""
THREE_WEEKS = 60 * 60 * 24 * 21
@classmethod
def create(cls, redis, cassandra, async_processing=False):
oldest_commit = time.time()
for repo in [MockStashRepository.safari(), MockSVNRepository.webkit()]:
for commits in repo.commits.values():
for commit in commits:
oldest_commit = min(oldest_commit, calendar.timegm(commit.timestamp.timetuple()))
model = Model(
redis=redis,
cassandra=cassandra,
repositories=[
MockStashRepository.safari(redis=redis),
MockSVNRepository.webkit(redis=redis),
],
default_ttl_seconds=time.time() - oldest_commit + Model.TTL_WEEK,
archive_ttl_seconds=time.time() - oldest_commit + Model.TTL_WEEK,
async_processing=async_processing,
)
with model.commit_context, model.commit_context.cassandra.batch_query_context():
for repository in model.commit_context.repositories.values():
for branch_commits in repository.commits.values():
for commit in branch_commits:
model.commit_context.register_commit(commit)
return model
@classmethod
def layout_test_results(cls):
default_result = {'expected': 'PASS', 'modifiers': '', 'actual': 'PASS', 'time': 1.2}
return dict(
details=dict(link='dummy-link'),
run_stats=dict(tests_skipped=0),
results={
'fast': {
'encoding': {
'css-cached-bom.html': default_result,
'css-charset-default.xhtml': default_result,
'css-charset.html': default_result,
'css-link-charset.html': default_result,
}
}
},
)
@classmethod
def iterate_all_commits(cls, model, callback):
repos = ('webkit', 'safari')
branches = (None, 'safari-606-branch')
for branch in branches:
commit_index = {repo: 0 for repo in repos}
commits_for_repo = {repo: sorted(model.commit_context.find_commits_in_range(repo, branch)) for repo in repos}
for repo in repos:
while max([commits_for_repo[r][commit_index[r]] for r in repos]) > commits_for_repo[repo][commit_index[repo]]:
if commit_index[repo] + 1 >= len(commits_for_repo[repo]):
break
commit_index[repo] += 1
while True:
commits = []
for repo in repos:
commits.append(commits_for_repo[repo][commit_index[repo]])
callback(commits)
youngest_next_repo = None
for repo in repos:
if commit_index[repo] + 1 >= len(commits_for_repo[repo]):
continue
if not youngest_next_repo:
youngest_next_repo = repo
continue
if commits_for_repo[youngest_next_repo][commit_index[youngest_next_repo] + 1] > commits_for_repo[repo][commit_index[repo] + 1]:
youngest_next_repo = repo
if not youngest_next_repo:
break
commit_index[youngest_next_repo] += 1
@classmethod
def add_mock_results(cls, model, configuration=Configuration(), suite='layout-tests', test_results=None):
if test_results is None:
test_results = cls.layout_test_results()
configurations = [configuration] if configuration.is_complete() else ConfigurationContextTest.CONFIGURATIONS
with model.upload_context:
current = time.time()
old = current - cls.THREE_WEEKS
for complete_configuration in configurations:
if complete_configuration != configuration:
continue
timestamp_to_use = current
if (complete_configuration.platform == 'Mac' and complete_configuration.version <= Configuration.version_to_integer('10.13')) \
or (complete_configuration.platform == 'iOS' and complete_configuration.version <= Configuration.version_to_integer('11')):
timestamp_to_use = old
cls.iterate_all_commits(model, lambda commits: model.upload_context.upload_test_results(complete_configuration, commits, suite=suite, test_results=test_results, timestamp=timestamp_to_use))
@classmethod
def process_results(cls, model, configuration=Configuration(), suite='layout-tests'):
configurations = [configuration] if configuration.is_complete() else ConfigurationContextTest.CONFIGURATIONS
with model.upload_context:
for complete_configuration in configurations:
if complete_configuration != configuration:
continue
for branch in (None, 'safari-606-branch'):
results_dict = model.upload_context.find_test_results(
configurations=[complete_configuration], suite=suite,
branch=branch, recent=False,
)
for config, results in results_dict.items():
for result in results:
model.upload_context.process_test_results(
configuration=config, commits=result['commits'], suite=suite,
test_results=result['test_results'], timestamp=result['timestamp'],
)
@classmethod
def add_mock_archives(cls, model, configuration=Configuration(), suite='layout-tests', archive=None):
archive = archive or io.BytesIO(base64.b64decode(cls.ARCHIVE_ZIP))
configurations = [configuration] if configuration.is_complete() else ConfigurationContextTest.CONFIGURATIONS
with model.upload_context:
current = time.time()
old = current - cls.THREE_WEEKS
for complete_configuration in configurations:
if complete_configuration != configuration:
continue
timestamp_to_use = current
if (complete_configuration.platform == 'Mac' and complete_configuration.version <= Configuration.version_to_integer('10.13')) \
or (complete_configuration.platform == 'iOS' and complete_configuration.version <= Configuration.version_to_integer('11')):
timestamp_to_use = old
cls.iterate_all_commits(model, lambda commits: model.archive_context.register(archive, complete_configuration, commits, suite=suite, timestamp=timestamp_to_use))
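Taken together, the classmethods above let a test bootstrap a fully populated mock model. A rough sketch of that flow follows; FakeRedis and FakeCassandra are hypothetical stand-ins for whatever redis/cassandra doubles the surrounding test suite actually provides:

# Sketch only: the redis/cassandra arguments are placeholders, not part of resultsdbpy.
model = MockModelFactory.create(redis=FakeRedis(), cassandra=FakeCassandra())
MockModelFactory.add_mock_results(model)   # uploads layout_test_results() for each mock configuration
MockModelFactory.process_results(model)    # processes the uploaded results on both branches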
| 61.318182
| 205
| 0.730335
| 980
| 12,141
| 8.883673
| 0.312245
| 0.041006
| 0.014473
| 0.012405
| 0.247645
| 0.215369
| 0.205261
| 0.188146
| 0.175052
| 0.165403
| 0
| 0.041006
| 0.210609
| 12,141
| 197
| 206
| 61.629442
| 0.867383
| 0.105757
| 0
| 0.298701
| 0
| 0
| 0.357545
| 0.325427
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038961
| false
| 0.006494
| 0.051948
| 0
| 0.123377
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
236f461f8b6d07d3beef17a23e616ee5fd033b61
| 3,488
|
py
|
Python
|
02_Flask_REST/04_MongoDB_REST/app/main.py
|
CrispenGari/python-flask
|
3e7896f401920b8dd045d807212ec24b8353a75a
|
[
"Apache-2.0"
] | 2
|
2021-11-08T07:37:18.000Z
|
2021-11-13T09:23:46.000Z
|
02_Flask_REST/04_MongoDB_REST/app/main.py
|
CrispenGari/Flask
|
3e7896f401920b8dd045d807212ec24b8353a75a
|
[
"Apache-2.0"
] | null | null | null |
02_Flask_REST/04_MongoDB_REST/app/main.py
|
CrispenGari/Flask
|
3e7896f401920b8dd045d807212ec24b8353a75a
|
[
"Apache-2.0"
] | null | null | null |
from keys.keys import pwd
import pymongo
from flask import Flask, request, abort
from flask_restful import Resource, Api, reqparse, marshal_with, fields
"""
DATABASE CONFIGURATION
"""
databaseName = "students"
connection_url = f'mongodb+srv://crispen:{pwd}@cluster0.3zay8.mongodb.net/{databaseName}?retryWrites=true&w=majority'
client = pymongo.MongoClient(connection_url)
cursor = client.list_database_names()
db = client.blob
"""
Student post args
"""
student_post_args = reqparse.RequestParser()
student_post_args.add_argument("name", type=str, help="name required", required=True)
student_post_args.add_argument("surname", type=str, help="surname required", required=True)
student_post_args.add_argument("student_number", type=int, help="student number required", required=True)
student_post_args.add_argument("course", type=str, help="name required", required=True)
student_post_args.add_argument("mark", type=int, help="surname required", required=True)
"""
Student patch args
* We want to be able only to update student course and mark
"""
"""
Resource Fields
"""
resource_fields = {
'_id': fields.String,
'name': fields.String,
'surname': fields.String,
'course': fields.String,
'mark': fields.Integer,
"student_number":fields.Integer,
}
app = Flask(__name__)
app.config["ENV"] = "development"
api = Api(app)
class GetPatchDeleteStudent(Resource):
@marshal_with(resource_fields)
def get(self, id):
cursor = db.students.find_one({"student_number": id})
if cursor is None:
abort(404, f"Student with student number {id} not found.")
return cursor, 200
def delete(self, id):
cursor = db.students.find_one({"student_number": id})
if cursor is None:
abort(404, f"Student with student number {id} not found.")
db.students.delete_one({"student_number": id})
return "", 204
@marshal_with(resource_fields)
def patch(self, id):
args = student_post_args.parse_args()
cursor = db.students.find_one({"student_number": id})
if cursor is None:
abort(404, f"Student with student number {id} not found.")
if args["mark"]:
db.students.update_one({"student_number": id}, {"$set":
{"mark": args["mark"]}
})
if args["course"]:
db.students.update_one({"student_number": id}, {
"$set": {"course": args["course"]}
})
return db.students.find_one({"student_number": id}), 204
class PostStudent(Resource):
@marshal_with(resource_fields)
def post(self):
args = student_post_args.parse_args()
cursor = db.students.find_one({"student_number": args["student_number"]})
if cursor is None:
"""
Insert the students to the database.
"""
res = db.students.insert_one({
"name": args["name"],
"surname": args["surname"],
"student_number": args["student_number"],
"course": args["course"],
"mark": args["mark"]
})
print(res, type(res))
else:
abort(409, "Student number taken by another student")
return db.students.find_one({"student_number": args["student_number"]}), 201
api.add_resource(PostStudent, '/student')
api.add_resource(GetPatchDeleteStudent, '/student/<int:id>')
if __name__ == "__main__":
app.run(debug=True)
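With the app running locally (app.run above uses Flask's default host and port), the two resources can be exercised from Python; a minimal sketch, assuming the default http://127.0.0.1:5000 base URL:

import requests

base = 'http://127.0.0.1:5000'

# PostStudent -> POST /student
requests.post(f'{base}/student', data={
    'name': 'Ada', 'surname': 'Lovelace',
    'student_number': 1, 'course': 'CS', 'mark': 90,
})

# GetPatchDeleteStudent -> GET/PATCH/DELETE /student/<int:id>
print(requests.get(f'{base}/student/1').json())
requests.patch(f'{base}/student/1', data={
    'name': 'Ada', 'surname': 'Lovelace',
    'student_number': 1, 'course': 'CS', 'mark': 95,
})
requests.delete(f'{base}/student/1')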
| 33.219048
| 117
| 0.641628
| 424
| 3,488
| 5.099057
| 0.245283
| 0.120259
| 0.06938
| 0.058279
| 0.457909
| 0.419056
| 0.36309
| 0.342738
| 0.265957
| 0.236355
| 0
| 0.009934
| 0.220757
| 3,488
| 105
| 118
| 33.219048
| 0.785504
| 0
| 0
| 0.24
| 0
| 0.013333
| 0.226253
| 0.029819
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053333
| false
| 0
| 0.053333
| 0
| 0.186667
| 0.013333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
236fe878b484e34a105ad050281a3bd06899f1d7
| 4,703
|
py
|
Python
|
data/validate_possession.py
|
lpraat/scep2019
|
f120ee20397648e708cce41a7949c70b523b6e56
|
[
"MIT"
] | 1
|
2021-11-02T20:34:22.000Z
|
2021-11-02T20:34:22.000Z
|
data/validate_possession.py
|
lpraat/scep2019
|
f120ee20397648e708cce41a7949c70b523b6e56
|
[
"MIT"
] | null | null | null |
data/validate_possession.py
|
lpraat/scep2019
|
f120ee20397648e708cce41a7949c70b523b6e56
|
[
"MIT"
] | 1
|
2021-11-02T20:34:29.000Z
|
2021-11-02T20:34:29.000Z
|
import csv
import math
import datetime
def build_target_possession(player_file, till):
possessions = []
to_skip = 1 # first line
with open(player_file) as csv_file:
reader = csv.reader(csv_file, delimiter=';')
for row in reader:
if to_skip:
to_skip -= 1
continue
if not row:
continue
if row[0] == 'Statistic:' or row[0] == '':
break
t = datetime.datetime.strptime(row[2], "%H:%M:%S.%f")
t = float(t.minute * 60 + t.hour * 60 * 60 + t.second) * math.pow(10, 12) + t.microsecond * math.pow(10, 6)
if t <= till:
possessions.append(t)
possession_time = 0
    # possessions must come in begin/end pairs; drop the unmatched trailing timestamp
    if len(possessions) % 2 != 0:
        possessions = possessions[:-1]
for i in range(0, len(possessions) - 1, 2):
possession_time += possessions[i + 1] - possessions[i]
return possession_time * 10 ** -12
def build_target_possessions_first_half():
players = (
"Nick Gertje",
"Dennis Dotterweich",
"Willi Sommer",
"Philipp Harlass",
"Roman Hartleb",
"Erik Engelhardt",
"Sandro Schneider",
"Leon Krapf",
"Kevin Baer",
"Luca Ziegler",
"Ben Mueller",
"Vale Reitstetter",
"Christopher Lee",
"Leon Heinze",
"Leo Langhans",
)
possessions = {}
for player in players:
file_name = f"oracle/Ball Possession/1st Half/{player}.csv"
# [(12397999951273772 - 10753295594424116L) * 10 ** -12 + 3.092 + 0.9885] * 10**12
player_possession = build_target_possession(file_name, 1648784856849656)
possessions[player] = player_possession
return possessions
def build_target_possessions_second_half():
players = (
"Nick Gertje",
"Dennis Dotterweich",
"Niklas Welzlein",
"Willi Sommer",
"Philipp Harlass",
"Roman Hartleb",
"Erik Engelhardt",
"Sandro Schneider",
"Leon Krapf",
"Kevin Baer",
"Luca Ziegler",
"Ben Mueller",
"Vale Reitstetter",
"Christopher Lee",
"Leon Heinze",
"Leo Langhans",
)
possessions = {}
for player in players:
file_name = f"oracle/Ball Possession/2nd Half/{player}.csv"
# [(14879639049922641 - 13086639146403495) * 10 ** -12 + 0.455 + 0.84795] * 10**12
player_possession = build_target_possession(file_name, 1794302853519146)
possessions[player] = player_possession
return possessions
def compute_errors_first_half():
target_posssessions = build_target_possessions_first_half()
predicted_possessions = {}
with open('../results/to_validate/first_half/ball_possession.txt') as f:
possessions = []
for row in f:
possessions.append(row)
possessions = possessions[::-1]
already_checked = set()
for event in possessions:
event_split = event.split(",")
player = event_split[1]
time = int(event_split[2])
if player not in already_checked:
predicted_possessions[player] = time * 10**-12
already_checked.add(player)
errors = {}
for player, possession in target_posssessions.items():
# I'm too lazy to rename where needed
if player == 'Willi Sommer':
player = 'Wili Sommer'
if player not in predicted_possessions:
continue
errors[player] = abs(possession - predicted_possessions[player])
return errors
def compute_errors_second_half():
target_posssessions = build_target_possessions_second_half()
predicted_possessions = {}
with open('../results/to_validate/second_half/ball_possession.txt') as f:
possessions = []
for row in f:
possessions.append(row)
possessions = possessions[::-1]
already_checked = set()
for event in possessions:
event_split = event.split(",")
player = event_split[1]
time = int(event_split[2])
if player not in already_checked:
predicted_possessions[player] = time * 10**-12
already_checked.add(player)
errors = {}
for player, possession in target_posssessions.items():
# I'm too lazy to rename where needed
if player == 'Willi Sommer':
player = 'Wili Sommer'
if player not in predicted_possessions:
continue
errors[player] = abs(possession - predicted_possessions[player])
return errors
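Both compute_errors_* helpers return a dict of per-player absolute possession errors in seconds, so summarising a run is straightforward; a small sketch (it assumes the result files referenced above exist on disk):

# Sketch: aggregate the per-player errors into a single mean figure per half.
for label, errors in (('1st half', compute_errors_first_half()),
                      ('2nd half', compute_errors_second_half())):
    mean_error = sum(errors.values()) / len(errors)
    print(f"{label}: mean absolute possession error = {mean_error:.3f} s over {len(errors)} players")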
| 26.874286
| 119
| 0.584095
| 506
| 4,703
| 5.278656
| 0.256917
| 0.011981
| 0.032946
| 0.019468
| 0.718832
| 0.694122
| 0.632722
| 0.593036
| 0.556346
| 0.519656
| 0
| 0.056089
| 0.313842
| 4,703
| 174
| 120
| 27.028736
| 0.771615
| 0.056772
| 0
| 0.691057
| 0
| 0
| 0.152179
| 0.024159
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04065
| false
| 0
| 0.02439
| 0
| 0.105691
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2370cb70aa4ccbe33c76c9f8fc510ffbcf707f15
| 6,065
|
py
|
Python
|
directory_components/context_processors.py
|
uktrade/directory-components
|
f5f52ceeecd2975bff07d1bd3afa7a84046fdd50
|
[
"MIT"
] | 2
|
2019-06-24T20:22:23.000Z
|
2019-07-26T12:51:31.000Z
|
directory_components/context_processors.py
|
uktrade/directory-components
|
f5f52ceeecd2975bff07d1bd3afa7a84046fdd50
|
[
"MIT"
] | 278
|
2018-02-21T11:49:46.000Z
|
2021-09-16T08:27:54.000Z
|
directory_components/context_processors.py
|
uktrade/directory-components
|
f5f52ceeecd2975bff07d1bd3afa7a84046fdd50
|
[
"MIT"
] | 3
|
2019-05-02T15:26:26.000Z
|
2020-02-18T17:47:57.000Z
|
from directory_constants import urls
from django.conf import settings
from django.utils import translation
from directory_components import helpers
def ga360(request):
user = helpers.get_user(request)
is_logged_in = helpers.get_is_authenticated(request)
context = {'ga360': {'site_language': translation.get_language()}}
if is_logged_in and hasattr(user, 'hashed_uuid'):
context['ga360']['user_id'] = user.hashed_uuid
else:
context['ga360']['user_id'] = None
context['ga360']['login_status'] = is_logged_in
if hasattr(settings, 'GA360_BUSINESS_UNIT'):
context['ga360']['business_unit'] = settings.GA360_BUSINESS_UNIT
return context
def sso_processor(request):
url = request.build_absolute_uri()
sso_register_url = helpers.add_next(settings.SSO_PROXY_SIGNUP_URL, url)
return {
'sso_user': helpers.get_user(request),
'sso_is_logged_in': helpers.get_is_authenticated(request),
'sso_login_url': helpers.add_next(settings.SSO_PROXY_LOGIN_URL, url),
'sso_register_url': sso_register_url,
'sso_logout_url': helpers.add_next(settings.SSO_PROXY_LOGOUT_URL, url),
'sso_profile_url': settings.SSO_PROFILE_URL,
}
def analytics(request):
return {
'directory_components_analytics': {
'GOOGLE_TAG_MANAGER_ID': settings.GOOGLE_TAG_MANAGER_ID,
'GOOGLE_TAG_MANAGER_ENV': settings.GOOGLE_TAG_MANAGER_ENV,
'UTM_COOKIE_DOMAIN': settings.UTM_COOKIE_DOMAIN,
}
}
def cookie_notice(request):
return {
'directory_components_cookie_notice': {
'PRIVACY_COOKIE_DOMAIN': settings.PRIVACY_COOKIE_DOMAIN
}
}
def header_footer_processor(request):
magna_header = settings.MAGNA_HEADER or False
magna_urls = {
'magna_home': urls.magna.HOME,
'magna_where_to_export': urls.magna.WHERE_TO_EXPORT,
'magna_learn_to_export': urls.magna.LEARN_TO_EXPORT,
'magna_exportplan_dashboard': urls.magna.EXPORT_PLAN_DASHBOARD,
'magna_search': urls.magna.SEARCH,
'magna_privacy_and_cookies': urls.magna.PRIVACY_AND_COOKIES,
'magna_terms_and_conditions': urls.magna.TERMS_AND_CONDITIONS,
'magna_accessibility': urls.magna.ACCESSIBILITY,
'magna_cookie_preference_settings': urls.magna.COOKIE_PREFERENCE_SETTINGS,
'magna_contact_us': urls.magna.CONTACT_US,
'magna_performance': urls.magna.PERFORMANCE_DASHBOARD,
'magna_account': urls.magna.ACCOUNT,
'magna_advice': urls.magna.ADVICE,
'magna_markets': urls.magna.MARKETS,
'magna_services': urls.magna.SERVICES,
'magna_international': urls.magna.INTERNATIONAL,
}
advice_urls = {
'create_an_export_plan': urls.domestic.ADVICE_CREATE_AN_EXPORT_PLAN,
'find_an_export_market': urls.domestic.ADVICE_FIND_AN_EXPORT_MARKET,
'define_route_to_market': urls.domestic.ADVICE_DEFINE_ROUTE_TO_MARKET,
'get_export_finance_and_funding': urls.domestic.ADVICE_GET_EXPORT_FINANCE_AND_FUNDING,
'manage_payment_for_export_orders': urls.domestic.ADVICE_MANAGE_PAYMENT_FOR_EXPORT_ORDERS,
'prepare_to_do_business_in_a_foreign_country': urls.domestic.ADVICE_PREPARE_TO_DO_BUSINESS_IN_A_FOREIGN_COUNTRY,
'manage_legal_and_ethical_compliance': urls.domestic.ADVICE_MANAGE_LEGAL_AND_ETHICAL_COMPLIANCE,
'prepare_for_export_procedures_and_logistics': urls.domestic.ADVICE_PREPARE_FOR_EXPORT_PROCEDURES_AND_LOGISTICS,
}
header_footer_urls = {
'about': urls.domestic.ABOUT,
'dit': urls.domestic.DIT,
'get_finance': urls.domestic.GET_FINANCE,
'ukef': urls.domestic.GET_FINANCE,
'performance': urls.domestic.PERFORMANCE_DASHBOARD,
'privacy_and_cookies': urls.domestic.PRIVACY_AND_COOKIES,
'terms_and_conditions': urls.domestic.TERMS_AND_CONDITIONS,
'accessibility': urls.domestic.ACCESSIBILITY,
'cookie_preference_settings': urls.domestic.COOKIE_PREFERENCE_SETTINGS,
'fas': urls.international.TRADE_FAS,
'advice': urls.domestic.ADVICE,
'markets': urls.domestic.MARKETS,
'search': urls.domestic.SEARCH,
'services': urls.domestic.SERVICES,
'domestic_news': urls.domestic.GREAT_DOMESTIC_NEWS,
'international_news': urls.international.NEWS,
'how_to_do_business_with_the_uk': urls.international.EXPAND_HOW_TO_DO_BUSINESS,
'industries': urls.international.ABOUT_UK_INDUSTRIES,
'market_access': urls.domestic.HOME / 'report-trade-barrier'
}
header_footer_urls = {**header_footer_urls, **advice_urls, **magna_urls}
return {'magna_header': magna_header, 'header_footer_urls': header_footer_urls}
def invest_header_footer_processor(request):
invest_header_footer_urls = {
'industries': urls.international.ABOUT_UK_INDUSTRIES,
'uk_setup_guide': urls.international.EXPAND_HOW_TO_SETUP,
}
return {'invest_header_footer_urls': invest_header_footer_urls}
def urls_processor(request):
return {
'services_urls': {
'contact_us': urls.domestic.CONTACT_US,
'contact_us_international': urls.international.CONTACT_US,
'events': urls.domestic.EVENTS,
'exopps': urls.domestic.EXPORT_OPPORTUNITIES,
'exred': urls.domestic.HOME,
'great_domestic': urls.domestic.HOME,
'great_international': urls.international.HOME,
'fab': urls.domestic.FIND_A_BUYER,
'fas': urls.international.TRADE_FAS,
'feedback': urls.domestic.FEEDBACK,
'office_finder': urls.domestic.OFFICE_FINDER,
'invest': urls.international.EXPAND_HOME,
'soo': urls.domestic.SELLING_OVERSEAS,
'sso': urls.domestic.SINGLE_SIGN_ON,
'uk_setup_guide': urls.international.EXPAND_HOW_TO_SETUP,
'isd': urls.international.EXPAND_ISD_HOME,
}
}
def feature_flags(request):
return {'features': settings.FEATURE_FLAGS}
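These are ordinary Django context processors, so a consuming project enables them through the TEMPLATES setting; a minimal sketch of the relevant settings.py fragment (which processors a project lists depends on what its templates actually use):

# settings.py (sketch)
TEMPLATES = [{
    'BACKEND': 'django.template.backends.django.DjangoTemplates',
    'APP_DIRS': True,
    'OPTIONS': {
        'context_processors': [
            'django.template.context_processors.request',
            'directory_components.context_processors.urls_processor',
            'directory_components.context_processors.header_footer_processor',
            'directory_components.context_processors.analytics',
            'directory_components.context_processors.ga360',
        ],
    },
}]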
| 41.541096
| 120
| 0.711459
| 713
| 6,065
| 5.628331
| 0.207574
| 0.098679
| 0.040369
| 0.012709
| 0.227012
| 0.142786
| 0.085971
| 0.061301
| 0.022427
| 0
| 0
| 0.004876
| 0.188458
| 6,065
| 145
| 121
| 41.827586
| 0.810443
| 0
| 0
| 0.081301
| 0
| 0
| 0.224732
| 0.10404
| 0
| 0
| 0
| 0
| 0
| 1
| 0.065041
| false
| 0
| 0.03252
| 0.03252
| 0.162602
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
237138c111b7235bbb0b60fb326edee46f57fa80
| 1,962
|
py
|
Python
|
src/leetcodepython/string/remove_duplicate_letters_316.py
|
zhangyu345293721/leetcode
|
1aa5bcb984fd250b54dcfe6da4be3c1c67d14162
|
[
"MIT"
] | 90
|
2018-12-25T06:01:30.000Z
|
2022-01-03T14:01:26.000Z
|
src/leetcodepython/string/remove_duplicate_letters_316.py
|
zhangyu345293721/leetcode
|
1aa5bcb984fd250b54dcfe6da4be3c1c67d14162
|
[
"MIT"
] | 1
|
2020-08-27T09:53:49.000Z
|
2020-08-28T08:57:49.000Z
|
src/leetcodepython/string/remove_duplicate_letters_316.py
|
zhangyu345293721/leetcode
|
1aa5bcb984fd250b54dcfe6da4be3c1c67d14162
|
[
"MIT"
] | 27
|
2019-01-02T01:41:32.000Z
|
2022-01-03T14:01:30.000Z
|
# encoding='utf-8'
'''
/**
 * This is the solution to problem No. 316 on LeetCode.
 * The problem page is:
 * https://leetcode-cn.com/problems/smallest-subsequence-of-distinct-characters
 * <p>
 * The problem statement is as follows:
 * ==========================================================================================================
 * Return the lexicographically smallest subsequence of the string text that contains each distinct
 * character of text exactly once.
 * <p>
 * Example 1:
 * <p>
 * Input: "cdadabcc"
 * Output: "adbc"
 * Example 2:
 * <p>
 * Input: "abcd"
 * Output: "abcd"
 * <p>
 * Source: LeetCode
 * Link: https://leetcode-cn.com/problems/smallest-subsequence-of-distinct-characters
 * Copyright belongs to LeetCode. Commercial reprints require official authorization;
 * non-commercial reprints must cite the source.
* ==========================================================================================================
*
* @author zhangyu (zhangyuyu417@gmail.com)
*/
'''
class Solution:
def remove_duplicate_letters(self, s: str) -> str:
        '''
        Remove duplicate letters.
        Args:
            s: input string
        Returns:
            the lexicographically smallest subsequence containing each distinct character once
        '''
nums_map = self.get_num_map(s)
in_stack_map = {}
stack = []
for ch in s:
nums_map[ch] -= 1
if ch in in_stack_map and in_stack_map[ch]:
continue
while len(stack) > 0 and ord(ch) < ord(stack[-1]) and nums_map[ch] > 0:
in_stack_map[stack[-1]] = False
stack.pop()
stack.append(ch)
in_stack_map[ch] = True
return ''.join(stack)
def get_num_map(self, s: str):
        '''
        Count the occurrences of each character.
        Args:
            s: input string
        Returns:
            dict mapping each character to its count
        '''
num_map = {}
for ch in s:
if ch in num_map:
num_map[ch] += 1
else:
num_map[ch] = 1
return num_map
if __name__ == '__main__':
s = 'cdadabcc'
solution = Solution()
result = solution.remove_duplicate_letters(s)
assert result == 'adbc'
| 25.480519
| 109
| 0.469929
| 217
| 1,962
| 4.092166
| 0.40553
| 0.047297
| 0.056306
| 0.038288
| 0.146396
| 0.146396
| 0.146396
| 0.146396
| 0.146396
| 0.146396
| 0
| 0.011826
| 0.310398
| 1,962
| 76
| 110
| 25.815789
| 0.644494
| 0.463303
| 0
| 0.071429
| 0
| 0
| 0.021482
| 0
| 0
| 0
| 0
| 0
| 0.035714
| 1
| 0.071429
| false
| 0
| 0
| 0
| 0.178571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
23723b37428721d547ab23434d036479e7a2836c
| 1,055
|
py
|
Python
|
setup.py
|
julienvaslet/interactive-shell
|
9ae800f2d9bb3365b5e68b2beef577fb39264f10
|
[
"MIT"
] | null | null | null |
setup.py
|
julienvaslet/interactive-shell
|
9ae800f2d9bb3365b5e68b2beef577fb39264f10
|
[
"MIT"
] | null | null | null |
setup.py
|
julienvaslet/interactive-shell
|
9ae800f2d9bb3365b5e68b2beef577fb39264f10
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
from setuptools import setup
current_directory = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(current_directory, "VERSION"), "r", encoding="utf-8") as f:
version = f.read()
with open(os.path.join(current_directory, "README.rst"), "r", encoding="utf-8") as f:
long_description = f.read()
setup(
name="interactive-shell",
version=version,
description="Interactive shell classes to easily integrate a terminal in application.",
long_description=long_description,
license="MIT License",
author="Julien Vaslet",
author_email="julien.vaslet@gmail.com",
url="https://github.com/julienvaslet/interactive-shell",
packages=["interactive_shell"],
install_requires=[],
scripts=[],
classifiers=[
"Development Status :: 1 - Planning",
"Environment :: Console",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.7",
"Topic :: Software Development",
"Topic :: Terminals"
]
)
| 31.029412
| 91
| 0.660664
| 122
| 1,055
| 5.606557
| 0.581967
| 0.035088
| 0.02924
| 0.040936
| 0.146199
| 0.146199
| 0.099415
| 0
| 0
| 0
| 0
| 0.007026
| 0.190521
| 1,055
| 33
| 92
| 31.969697
| 0.793911
| 0.019905
| 0
| 0
| 0
| 0
| 0.396318
| 0.022287
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.071429
| 0
| 0.071429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2381759676c1a13a9190cbf2cbe7006518dd9448
| 1,093
|
py
|
Python
|
behaviour/models.py
|
red-and-black/friendly
|
f453344ad1e9173ad3545e4ea0c825b65190b3c5
|
[
"Apache-2.0"
] | 2
|
2020-01-28T12:56:56.000Z
|
2021-07-02T03:07:39.000Z
|
behaviour/models.py
|
red-and-black/friendly
|
f453344ad1e9173ad3545e4ea0c825b65190b3c5
|
[
"Apache-2.0"
] | 5
|
2021-03-18T23:02:11.000Z
|
2021-09-17T11:02:08.000Z
|
behaviour/models.py
|
red-and-black/goodchat
|
1a391a04d4edfbcefaf87663f08308dd58578634
|
[
"Apache-2.0"
] | null | null | null |
from django.db import models
class BehaviourReport(models.Model):
NOT_REVIEWED = 'not_reviewed'
UNDER_REVIEW = 'under_review'
COMPLETED = 'completed'
STATUS_CHOICES = (
(NOT_REVIEWED, 'Not reviewed'),
(UNDER_REVIEW, 'Under review'),
(COMPLETED, 'Completed')
)
# Automatic timestamping fields.
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
# Report
reporter = models.ForeignKey(
'auth.User',
on_delete=models.CASCADE,
related_name='reporter'
)
reportee = models.ForeignKey(
'auth.User',
on_delete=models.CASCADE,
related_name='reportee'
)
report = models.TextField(max_length=2000)
# Outcome/handling
public_outcome = models.CharField(max_length=255, blank=True)
private_outcome = models.CharField(max_length=255, blank=True)
status = models.CharField(
max_length=50,
choices=STATUS_CHOICES,
default=NOT_REVIEWED
)
class Meta:
ordering = ['-modified']
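Since BehaviourReport is a plain Django model, reports are created and moved through the review workflow with the usual ORM calls; a short sketch (reporter_user and reportee_user stand in for existing auth.User instances):

# Sketch: reporter_user / reportee_user are placeholders for real auth.User objects.
report = BehaviourReport.objects.create(
    reporter=reporter_user,
    reportee=reportee_user,
    report="Repeatedly sent abusive messages.",
)

# later, during moderation
report.status = BehaviourReport.UNDER_REVIEW
report.public_outcome = "Warning issued."
report.save()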
| 25.418605
| 66
| 0.651418
| 114
| 1,093
| 6.052632
| 0.447368
| 0.07971
| 0.078261
| 0.104348
| 0.466667
| 0.466667
| 0.466667
| 0.466667
| 0.342029
| 0.342029
| 0
| 0.014563
| 0.246112
| 1,093
| 42
| 67
| 26.02381
| 0.822816
| 0.049405
| 0
| 0.125
| 0
| 0
| 0.105314
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.03125
| 0
| 0.46875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88bb3f59329f873d5176f1525a62f453fd0b978d
| 2,879
|
py
|
Python
|
dockermap/map/runner/cmd.py
|
merll/docker-map
|
54e325595fc0b6b9d154dacc790a222f957895da
|
[
"MIT"
] | 85
|
2015-01-02T01:05:14.000Z
|
2022-03-23T22:23:12.000Z
|
dockermap/map/runner/cmd.py
|
merll/docker-map
|
54e325595fc0b6b9d154dacc790a222f957895da
|
[
"MIT"
] | 21
|
2015-02-10T18:25:03.000Z
|
2020-10-28T08:38:39.000Z
|
dockermap/map/runner/cmd.py
|
merll/docker-map
|
54e325595fc0b6b9d154dacc790a222f957895da
|
[
"MIT"
] | 15
|
2015-02-27T12:19:35.000Z
|
2021-09-29T06:20:14.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from ..action import ContainerUtilAction
from ..input import ItemType
log = logging.getLogger(__name__)
class ExecMixin(object):
"""
Utility mixin for executing configured commands inside containers.
"""
action_method_names = [
(ItemType.CONTAINER, ContainerUtilAction.EXEC_COMMANDS, 'exec_commands'),
(ItemType.CONTAINER, ContainerUtilAction.EXEC_ALL, 'exec_container_commands'),
]
def exec_commands(self, action, c_name, run_cmds, **kwargs):
"""
Runs a single command inside a container.
:param action: Action configuration.
:type action: dockermap.map.runner.ActionConfig
:param c_name: Container name.
:type c_name: unicode | str
:param run_cmds: Commands to run.
:type run_cmds: list[dockermap.map.input.ExecCommand]
:return: List of exec command return values (e.g. containing the command id), if applicable, or ``None``
if either no commands have been run or no values have been returned from the API.
:rtype: list[dict] | NoneType
"""
client = action.client
exec_results = []
for run_cmd in run_cmds:
cmd = run_cmd.cmd
cmd_user = run_cmd.user
log.debug("Creating exec command in container %s with user %s: %s.", c_name, cmd_user, cmd)
ec_kwargs = self.get_exec_create_kwargs(action, c_name, cmd, cmd_user)
create_result = client.exec_create(**ec_kwargs)
if create_result:
e_id = create_result['Id']
log.debug("Starting exec command with id %s.", e_id)
es_kwargs = self.get_exec_start_kwargs(action, c_name, e_id)
client.exec_start(**es_kwargs)
exec_results.append(create_result)
else:
log.debug("Exec command was created, but did not return an id. Assuming that it has been started.")
if exec_results:
return exec_results
return None
def exec_container_commands(self, action, c_name, **kwargs):
"""
Runs all configured commands of a container configuration inside the container instance.
:param action: Action configuration.
:type action: dockermap.map.runner.ActionConfig
:param c_name: Container name.
:type c_name: unicode | str
:return: List of exec command return values (e.g. containing the command id), if applicable, or ``None``
if either no commands have been run or no values have been returned from the API.
:rtype: list[dict] | NoneType
"""
config_cmds = action.config.exec_commands
if not config_cmds:
return None
return self.exec_commands(action, c_name, run_cmds=config_cmds)
| 40.549296
| 115
| 0.64571
| 364
| 2,879
| 4.92033
| 0.288462
| 0.027917
| 0.030709
| 0.044668
| 0.346734
| 0.307091
| 0.307091
| 0.307091
| 0.307091
| 0.307091
| 0
| 0.000477
| 0.271969
| 2,879
| 70
| 116
| 41.128571
| 0.854008
| 0.359847
| 0
| 0.057143
| 0
| 0
| 0.128252
| 0.013914
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057143
| false
| 0
| 0.114286
| 0
| 0.342857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88bc403d25f54bbc912895c21b6786cdfc90a30c
| 3,678
|
py
|
Python
|
main.py
|
PWN0N/Working-Time-lapse
|
1ebe4cb1a669a1b77528b4f2583e27fdd4e5953b
|
[
"MIT"
] | null | null | null |
main.py
|
PWN0N/Working-Time-lapse
|
1ebe4cb1a669a1b77528b4f2583e27fdd4e5953b
|
[
"MIT"
] | null | null | null |
main.py
|
PWN0N/Working-Time-lapse
|
1ebe4cb1a669a1b77528b4f2583e27fdd4e5953b
|
[
"MIT"
] | null | null | null |
import signal
import numpy as np
from PIL import ImageGrab
import cv2
import time
import sys
import os
flips_time_mins = 30
interval = 5 # seconds
num_frames = flips_time_mins*60/interval
num_frames = int(num_frames)
year = -1
month = -1
day = -1
out_fps = 24
cammode = 0
shutdown_msg = False
def signal_handler(signal,frame):
    print('You pressed Ctrl+C, the program will shut down')
global shutdown_msg
shutdown_msg = True
print('Saving Videos')
def add_timestamp(img):
time_str= time.strftime("%Y-%m-%d %H:%M:%S")
color=(255,255,255)
if np.mean( img[700:780,900:950])>128:
color=(0,0,0)
cv2.putText(img, time_str, (900, 700) ,cv2.FONT_HERSHEY_SIMPLEX ,0.8, color ,2)
return img
capture = cv2.VideoCapture(0)
capture1 = cv2.VideoCapture(1)
cam, _ = capture.read()
cam1, _ = capture1.read()
if(cam and cam1):
print('Dual Camera Mode')
cammode = 1
elif(cam):
print('Single Camera Mode')
cammode = 2
else:
    print('No Camera Detected!')
    sys.exit(0)
signal.signal(signal.SIGINT,signal_handler)
# capture frames to video
while True:
if(day != time.strftime("%d")):
year = time.strftime("%Y")
month = time.strftime("%m")
day = time.strftime("%d")
hour = time.strftime("%H")
save_dir = "{0}/{1}/{2}".format(year, month, day)
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
    # inner camera init
size = (int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)),
int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))
codec = cv2.VideoWriter.fourcc('M', 'J', 'P', 'G')
cam_filename = save_dir+"/cam_{:4}.avi".format(time.strftime("%H%M"))
video = cv2.VideoWriter(cam_filename, codec, out_fps, size)
# for low quality webcams, discard the starting unstable frames
for i in range(20):
capture.read()
# desktop screen init
desktopim = np.array(ImageGrab.grab().convert('RGB'))
# desktopFrame =np.array(desktopim.getdata(),dtype='uint8')\
# .reshape((desktopim.size[1],desktopim.size[0],3))
sp = desktopim.shape
sz1 = sp[0] # height(rows) of image
sz2 = sp[1] # width(colums) of image
desktopsize = (int(sz2),int(sz1))
codec = cv2.VideoWriter.fourcc('M', 'J', 'P', 'G')
desktop_filename = save_dir+"/desktop_{:4}.avi".format(time.strftime("%H%M"))
desktopvideo = cv2.VideoWriter(desktop_filename, codec, out_fps, desktopsize)
    # outer camera init
if (cammode == 1):
size1 = (int(capture1.get(cv2.CAP_PROP_FRAME_WIDTH)),
int(capture1.get(cv2.CAP_PROP_FRAME_HEIGHT)))
cam1_filename = save_dir+"/cam1_{:4}.avi".format(time.strftime("%H%M"))
video1 = cv2.VideoWriter(cam1_filename, codec, out_fps, size1)
# for low quality webcams, discard the starting unstable frames
for i in range(20):
capture1.read()
for i in range(num_frames):
if (shutdown_msg):
break
_, frame = capture.read()
video.write(add_timestamp(frame))
desktopim = np.array(ImageGrab.grab().convert('RGB'))
# ImageGrab and OpenCV have different color space
desktopFrame = cv2.cvtColor(desktopim, cv2.COLOR_BGR2RGB)
desktopvideo.write(add_timestamp(desktopFrame))
if (cammode == 1):
_, frame1 = capture1.read()
video1.write(add_timestamp(frame1))
time.sleep(interval)
video.release()
desktopvideo.release()
if (cammode == 1):
video1.release()
if (shutdown_msg):
break
capture.release()
if(cammode ==1):
capture1.release()
print('Done!')
print('Exit The Program')
sys.exit(0)
| 27.244444
| 83
| 0.637575
| 504
| 3,678
| 4.535714
| 0.337302
| 0.047244
| 0.022747
| 0.022747
| 0.207349
| 0.207349
| 0.2021
| 0.08224
| 0.056868
| 0.056868
| 0
| 0.039387
| 0.219957
| 3,678
| 134
| 84
| 27.447761
| 0.757407
| 0.115552
| 0
| 0.163265
| 0
| 0
| 0.074383
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020408
| false
| 0
| 0.071429
| 0
| 0.102041
| 0.071429
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88bd61d6346e9f097545fab6de60f3909f62dcdf
| 1,823
|
py
|
Python
|
tests/test_tokenizers.py
|
BMarcin/MordinezNLP
|
884f6c2ccade8ac796d40d3081560021e96765ca
|
[
"MIT"
] | 1
|
2021-02-03T19:38:05.000Z
|
2021-02-03T19:38:05.000Z
|
tests/test_tokenizers.py
|
BMarcin/MordinezNLP
|
884f6c2ccade8ac796d40d3081560021e96765ca
|
[
"MIT"
] | 13
|
2020-11-30T21:01:56.000Z
|
2021-03-12T21:23:45.000Z
|
tests/test_tokenizers.py
|
BMarcin/MordinezNLP
|
884f6c2ccade8ac796d40d3081560021e96765ca
|
[
"MIT"
] | null | null | null |
import unittest
import spacy
from spacy.language import Language
try:
from src.MordinezNLP.tokenizers import spacy_tokenizer
except:
from MordinezNLP.tokenizers import spacy_tokenizer
class TestTokenizers(unittest.TestCase):
nlp: Language = spacy.load("en_core_web_sm")
nlp.tokenizer = spacy_tokenizer(nlp)
def test_spacy_tokenizer_case1(self):
tokenized_data = self.nlp("Hello today is <date>, tomorrow it will be <number> degrees of celcius. I don't like him.")
self.assertEqual(
[str(token) for token in tokenized_data],
[
"Hello",
"today",
"is",
"<date>",
",",
"tomorrow",
"it",
"will",
"be",
"<number>",
"degrees",
"of",
"celcius",
".",
"I",
"do",
"n't",
"like",
"him",
"."
]
)
def test_spacy_tokenizer_case2(self):
tokenized_data = self.nlp('Punkt wir haben extra um <number> : <number> Uhr noch ein Event')
self.assertEqual(
[str(token) for token in tokenized_data],
[
"Punkt",
"wir",
"haben",
"extra",
"um",
"<number>",
":",
"<number>",
"be",
"<number>",
"Uhr",
"noch",
"ein",
"Event"
]
)
if __name__ == '__main__':
unittest.main()
| 26.808824
| 126
| 0.397696
| 143
| 1,823
| 4.902098
| 0.440559
| 0.099857
| 0.077033
| 0.091298
| 0.616262
| 0.379458
| 0.379458
| 0.28816
| 0.28816
| 0.156919
| 0
| 0.002174
| 0.495337
| 1,823
| 67
| 127
| 27.208955
| 0.759783
| 0
| 0
| 0.166667
| 0
| 0.016667
| 0.170049
| 0
| 0
| 0
| 0
| 0
| 0.033333
| 1
| 0.033333
| false
| 0
| 0.083333
| 0
| 0.15
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88bdb402bf1da07ef8a27f4a47f88d7c557aae53
| 3,905
|
py
|
Python
|
scripts/autopost/image_maker.py
|
sahawaee/quotes-indonesia
|
ef6f0dc5afa460d8da6266f5df89d2a350cc9835
|
[
"MIT"
] | 6
|
2019-11-02T06:04:37.000Z
|
2022-03-27T14:41:45.000Z
|
scripts/autopost/image_maker.py
|
sahawaee/quotes-indonesia
|
ef6f0dc5afa460d8da6266f5df89d2a350cc9835
|
[
"MIT"
] | 1
|
2021-09-29T08:33:14.000Z
|
2021-11-06T02:10:38.000Z
|
scripts/autopost/image_maker.py
|
sahawaee/quotes-indonesia
|
ef6f0dc5afa460d8da6266f5df89d2a350cc9835
|
[
"MIT"
] | 8
|
2020-03-21T20:09:38.000Z
|
2022-03-11T19:14:24.000Z
|
import random
import requests
import tempfile
from io import BytesIO
from PIL import Image, ImageDraw, ImageFont
FONTS = [
'https://cdn.statically.io/gh/google/fonts/main/ofl/neucha/Neucha.ttf',
# 'https://cdn.statically.io/gh/google/fonts/main/ofl/catamaran/Catamaran%5Bwght%5D.ttf',
# font_base_url + 'lobstertwo.ttf',
# font_base_url + 'underdog.ttf',
# font_base_url + 'specialelite.ttf',
# font_base_url + 'abrilfatface.ttf',
# font_base_url + 'merienda.ttf',
# font_base_url + 'poiretone.ttf',
# font_base_url + 'shadowsintolight.ttf',
# font_base_url + 'caveatbrush.ttf',
# font_base_url + 'gochihand.ttf',
# font_base_url + 'itim.ttf',
# font_base_url + 'rancho.ttf'
]
# thanks to https://clrs.cc
COLORS = [
{'bg': (255, 255, 255), 'fg': (100, 100, 100)}
# { 'bg': (0, 31, 63), 'fg': (128, 191, 255) },
# { 'bg': (0, 116, 217), 'fg': (179, 219, 255) },
# { 'bg': (127, 219, 255), 'fg': (0, 73, 102) },
# { 'bg': (57, 204, 204), 'fg': (0, 0, 0) },
# { 'bg': (61, 153, 112), 'fg': (22, 55, 40) },
# { 'bg': (46, 204, 64), 'fg': (14, 62, 20) },
# { 'bg': (1, 255, 112), 'fg': (0, 102, 44) },
# { 'bg': (255, 220, 0), 'fg': (102, 88, 0) },
# { 'bg': (255, 133, 27), 'fg': (102, 48, 0) },
# { 'bg': (255, 65, 54), 'fg': (128, 6, 0) },
# { 'bg': (133, 20, 75), 'fg': (235, 122, 177) },
# { 'bg': (240, 18, 190), 'fg': (101, 6, 79) },
# { 'bg': (177, 13, 201), 'fg': (239, 169, 249) },
# { 'bg': (17, 17, 17), 'fg': (221, 221, 221) },
# { 'bg': (170, 170, 170), 'fg': (0, 0, 0) },
# { 'bg': (221, 221, 221), 'fg': (0, 0, 0) }
]
def image_maker(quote_by: str, quote_body: str) -> BytesIO:
# image configuration
img_width = 612
img_height = 612
# font configuration
font_selected = random.choice(FONTS)
fontfile = requests.get(font_selected, stream=True)
font = ImageFont.truetype(BytesIO(fontfile.content), 35)
# color configuration
color = random.choice(COLORS)
# draw image
image = Image.new('RGB', (img_width, img_height), color=color['bg'])
document = ImageDraw.Draw(image)
    # find the average width of a letter in quote_body (avoid shadowing the builtin sum)
    total_width = 0
    for letter in quote_body:
        total_width += document.textsize(letter, font=font)[0]
    average_length_of_letter = total_width / len(quote_body)
    # find the number of letters to be put on each line
number_of_letters_for_each_line = (
img_width / 1.818) / average_length_of_letter
# build new text to put on the image
incrementer = 0
fresh_quote = ''
for letter in quote_body:
if (letter == '-'):
# fresh_quote += '\n\n' + letter #add some line breaks
fresh_quote += '' + letter
elif (incrementer < number_of_letters_for_each_line):
fresh_quote += letter
else:
if(letter == ' '):
fresh_quote += '\n'
incrementer = 0
else:
fresh_quote += letter
incrementer += 1
fresh_quote += '\n\n--' + quote_by
# render the text in the center of the box
dim = document.textsize(fresh_quote, font=font)
x2 = dim[0]
y2 = dim[1]
qx = (img_width / 2 - x2 / 2)
qy = (img_height / 2 - y2 / 2)
document.text((qx, qy), fresh_quote, align="center",
font=font, fill=color['fg'])
# save image to bytes
image_io = BytesIO()
image.save(image_io, 'JPEG', quality=100)
image_io.seek(0)
return image_io
def image_maker_make_file(quote_by: str, quote_body: str) -> str:
image_io = image_maker(quote_by, quote_body)
fd, image_path = tempfile.mkstemp(suffix='.jpg')
image_file = open(image_path, 'wb')
image_file.write(image_io.getbuffer())
image_file.close()
return image_path
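Both entry points can be driven directly; a small usage sketch:

# Sketch: render a quote either to an in-memory JPEG or to a temporary file on disk.
buffer = image_maker("Ada Lovelace", "That brain of mine is something more than merely mortal.")
with open("quote.jpg", "wb") as f:
    f.write(buffer.getbuffer())

path = image_maker_make_file("Ada Lovelace", "That brain of mine is something more than merely mortal.")
print(path)  # e.g. /tmp/tmpXXXXXXXX.jpg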
| 33.956522
| 93
| 0.560051
| 534
| 3,905
| 3.938202
| 0.346442
| 0.036614
| 0.057537
| 0.073229
| 0.138374
| 0.08369
| 0.038041
| 0.038041
| 0.038041
| 0
| 0
| 0.095238
| 0.26863
| 3,905
| 114
| 94
| 34.254386
| 0.641106
| 0.410755
| 0
| 0.135593
| 0
| 0.016949
| 0.04646
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033898
| false
| 0
| 0.084746
| 0
| 0.152542
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88beff2251e1c3db657b53d33c4f8b3982f9a861
| 5,093
|
py
|
Python
|
metashade/hlsl/sm5/profile.py
|
ppenenko/metashade
|
7148e808e47bace59e61e1483da9ddf3f9daa1cc
|
[
"Apache-2.0"
] | 3
|
2020-04-02T13:29:06.000Z
|
2020-09-07T17:43:09.000Z
|
metashade/hlsl/sm5/profile.py
|
ppenenko/metashade
|
7148e808e47bace59e61e1483da9ddf3f9daa1cc
|
[
"Apache-2.0"
] | null | null | null |
metashade/hlsl/sm5/profile.py
|
ppenenko/metashade
|
7148e808e47bace59e61e1483da9ddf3f9daa1cc
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Pavlo Penenko
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import metashade.rtsl.profile as rtsl
import metashade.clike.struct as struct
from . import data_types
from . import samplers
from . import stage_interface  # assumed import: stage_interface is referenced below but not imported in this excerpt
import sys, inspect
class UniformBuffer:
def __init__(self, sh, register : int, name : str = None):
self._sh = sh
self._name = name
self._register = register
def __enter__(self):
self._sh._emit('cbuffer')
if self._name is not None:
self._sh._emit(' ')
self._sh._emit(self._name)
self._sh._emit(
' : register(b{register})\n{{\n'.format(register = self._register)
)
self._sh._push_indent()
return self
def __exit__(self, exc_type, exc_value, traceback):
self._sh._pop_indent()
self._sh._emit('};\n\n')
class Generator(rtsl.Generator):
_is_pixel_shader = False
class _UsedRegisterSet(set):
def __init__(self, category : str):
self._category = category
def check_candidate(self, register : int):
if register < 0:
raise RuntimeError('Invalid register value')
if register in self:
raise RuntimeError(self._category + ' register already in use')
def __init__(self, file_):
super(Generator, self).__init__(file_)
self._uniforms_by_semantic = dict()
self._used_uniform_buffer_registers = \
self.__class__._UsedRegisterSet('Uniform buffer')
self._used_texture_registers = \
self.__class__._UsedRegisterSet('Texture')
self._used_sampler_registers = \
self.__class__._UsedRegisterSet('Sampler')
def uniform_buffer(self, register : int, name : str = None):
self._used_uniform_buffer_registers.check_candidate(register)
return UniformBuffer(self, register = register, name = name)
# TODO: registers, packoffset
def uniform(
self,
name : str,
dtype,
semantic : str = None,
annotations = None
):
self._check_public_name(name)
if not self._check_global_scope():
raise RuntimeError(
"Uniforms can only be defined at the global scope"
)
if semantic is not None:
existing = self._uniforms_by_semantic.get(semantic)
if existing is not None:
raise RuntimeError(
"Can't define uniform '{name}' with semantic '{semantic}' "
"because uniform '{existing_name}' already uses that "
"semantic.".format(
name = name,
semantic = semantic,
existing_name = existing._name
)
)
value = dtype() #TODO: make it immutable
self._set_global(name, value)
self._emit_indent()
value._define(self, name, semantic, annotations = annotations)
self._emit(';\n')
def combined_sampler_2d(
self,
texture_name : str, texture_register : int,
sampler_name : str, sampler_register : int
):
self._check_public_name(texture_name)
self._check_public_name(sampler_name)
if not self._check_global_scope():
raise RuntimeError(
"Uniform textures and samplers "
"can only be defined at the global scope"
)
self._used_texture_registers.check_candidate(texture_register)
self._used_sampler_registers.check_candidate(sampler_register)
texture = samplers.Texture2d(self, texture_name, texture_register)
self._set_global(texture_name, texture)
self._used_texture_registers.add(texture_register)
sampler = samplers.Sampler(
self, sampler_name, sampler_register, texture
)
self._set_global(sampler_name, sampler)
self._used_sampler_registers.add(sampler_register)
def vs_input(self, name):
return stage_interface.VsInputDef(self, name)
def vs_output(self, name):
return stage_interface.VsOutputDef(self, name)
def ps_output(self, name):
return stage_interface.PsOutputDef(self, name)
# Reference all the data types from the generator class
for name, cls in inspect.getmembers(
sys.modules[data_types.__name__],
lambda member: (inspect.isclass(member)
and member.__module__ == data_types.__name__
and not member.__name__.startswith('_')
)):
setattr(Generator, name, cls)
| 34.181208
| 79
| 0.634204
| 582
| 5,093
| 5.24055
| 0.293814
| 0.028852
| 0.016393
| 0.032459
| 0.118033
| 0.090492
| 0.051148
| 0.051148
| 0.030164
| 0
| 0
| 0.003009
| 0.282152
| 5,093
| 148
| 80
| 34.412162
| 0.831236
| 0.128608
| 0
| 0.082569
| 0
| 0
| 0.080751
| 0.006107
| 0
| 0
| 0
| 0.006757
| 0
| 1
| 0.110092
| false
| 0
| 0.045872
| 0.027523
| 0.238532
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88c000e9e02c415df05d77f3582eae21f519869a
| 8,256
|
py
|
Python
|
backend/views.py
|
johnzhang1999/Pop
|
284cc1c5195efdc676759d8494965b2dfb44cf78
|
[
"MIT"
] | 1
|
2019-02-10T06:50:25.000Z
|
2019-02-10T06:50:25.000Z
|
backend/views.py
|
johnzhang1999/Pop
|
284cc1c5195efdc676759d8494965b2dfb44cf78
|
[
"MIT"
] | null | null | null |
backend/views.py
|
johnzhang1999/Pop
|
284cc1c5195efdc676759d8494965b2dfb44cf78
|
[
"MIT"
] | null | null | null |
from django.http import JsonResponse, Http404
from django.views.decorators.csrf import csrf_exempt
from exponent_server_sdk import PushClient, PushMessage, DeviceNotRegisteredError
from .models import Group, User, Event
import hashlib, uuid
def getParams(request, tags):
print(request.POST)
return [request.POST[i] for i in tags]
def getHash(name, pwd):
return hashlib.sha256((name+pwd).encode()).digest()
# Create your views here.
@csrf_exempt
def getUid(request):#done, tested
[name] = getParams(request, ['name'])
q = User.objects.filter(pk=name)
if len(q) > 0:
return JsonResponse({'uid': q[0].uid})
else:
raise Http404("you done fked up")
@csrf_exempt
def joinOpenGroup(request):#done, tested
[uid, gid] = getParams(request, ['uid', 'gid'])
g = Group.objects.get(pk=gid)
u = User.objects.get(pk=uid)
if g.groupType == 'public' or g.groupType == 'private':
g.members.add(u)
g.save()
return JsonResponse({'success': 'true'})
else:
raise Http404("Invalid group or invalid user!")
@csrf_exempt
def addEvent(request):#done, tested
[uid, gid, name, desc, loc] = getParams(request, ['uid', 'gid', 'name', 'desc', 'loc'])
newEvent = Event(name=name, eid=str(uuid.uuid4()), desc=desc, loc=loc, owner=User.objects.get(pk=uid))
newEvent.save()
q = Group.objects.get(pk=gid)
q.events.add(newEvent)
q.save()
if q.groupType == 'private' or q.groupType == 'public':
responses = PushClient().publish_multiple([PushMessage(to=u.expoPushToken,
title='{} happening at {}!'.format(name, loc),
body=newEvent.desc,
ttl=3,
priority='high',
sound='default') for u in q.members.all()])
for i in range(len(responses)):
try:
responses[i].validate_response()
except DeviceNotRegisteredError:
u = q.members.all()[i]
u.expoPushToken = ''
u.save()
return JsonResponse({'eid': newEvent.eid})
@csrf_exempt
def deleteEvent(request):#done, BUGGY
[uid, eid] = getParams(request, ['uid', 'eid'])
q = Event.objects.get(pk=eid)
g = q.group_events.all()[0]
if uid == q.owner.uid or uid == g.owner.uid:
g.events.remove(q)
        q.delete()
return JsonResponse({'success': 'true'})
else:
raise Http404("Restricted access!")
@csrf_exempt
def getGroupList(request):#done, tested
[uid] = getParams(request, ['uid'])
gList = User.objects.get(pk=uid).group_members.all()
return JsonResponse({'groupList': [g.gid for g in gList]})
@csrf_exempt
def getGroupInfo(request):#done, tested
[gid] = getParams(request, ['gid'])
g = Group.objects.get(pk=gid)
return JsonResponse({'gid': gid,'name': g.name, 'type': g.groupType,
'memberList': [u.uid for u in g.members.all()],
'owner': g.owner.uid, 'unconfirmed': 0})
@csrf_exempt
def getEventList(request):#done, should be ok
[gid] = getParams(request, ['gid'])
eList = Group.objects.get(gid=gid).events.all()
return JsonResponse({'eventList': [e.eid for e in eList]})
@csrf_exempt
def getEventInfo(request):#done, tested
[eid, uid] = getParams(request, ['eid', 'uid'])
q = Event.objects.get(pk=eid)
return JsonResponse({'eid': eid, 'name': q.name,'desc': q.desc, 'loc': q.loc,
'status': q.confirmed, 'initTime': q.initTime.strftime('%b-%d %I:%M %p'),
'owner': q.owner.uid, 'isOwner': uid == q.owner.uid or uid == q.group_events.all()[0].owner.uid})
@csrf_exempt
def register(request):#done, tested
[name, pwd] = getParams(request, ['name', 'pwd'])
if len(User.objects.filter(name=name)) > 0:
raise Http404("Try another name!")
newUser = User(name=name, uid=str(uuid.uuid4()), pwdHash=getHash(name, pwd))
newUser.save()
return JsonResponse({'uid': newUser.uid})
@csrf_exempt
def login(request):#done, tested
[name, pwd] = getParams(request, ['name', 'pwd'])
u = User.objects.get(name=name)
if u.pwdHash == getHash(name, pwd):
for otheruser in User.objects.all():
if otheruser.expoPushToken == u.expoPushToken:
                otheruser.expoPushToken = ''
                otheruser.save()  # persist the cleared token; without save() the change is lost
return JsonResponse({'uid': u.uid})
else:
raise Http404("Restricted access!")
@csrf_exempt
def createGroup(request):#done, tested
[uid, name, gtype] = getParams(request, ['uid', 'name', 'type'])
newGroup = Group(name=name, gid=str(uuid.uuid4()), owner=User.objects.get(uid=uid), groupType=gtype)
newGroup.save()
newGroup.members.add(User.objects.get(uid=uid))
newGroup.save()
return JsonResponse({'gid': newGroup.gid})
@csrf_exempt
def removeMember(request):#done, tested
[m_uid, uid, gid] = getParams(request, ['m_uid', 'uid', 'gid'])
if m_uid == Group.objects.get(pk=gid).owner.uid or m_uid == uid:
q = Group.objects.get(pk=gid)
q.members.remove(User.objects.get(pk=uid))
q.save()
return JsonResponse({'status': 'success'})
else:
raise Http404("Restricted access!")
@csrf_exempt
def addMember(request):#done, tested
[m_uid, uid, gid] = getParams(request, ['m_uid', 'uid', 'gid'])
if m_uid == Group.objects.get(pk=gid).owner.uid:
q = Group.objects.get(pk=gid)
q.members.add(User.objects.get(pk=uid))
q.save()
return JsonResponse({'status': 'success'})
else:
raise Http404("Restricted access!")
@csrf_exempt
def deleteGroup(request):#done, BUGGY
[gid, uid] = getParams(request, ['gid', 'uid'])
q = Group.objects.get(pk=gid)
if uid == q.owner.uid:
q.delete()
return JsonResponse({'status': 'success'})
else:
raise Http404("Restricted access!")
@csrf_exempt
def getUserInfo(request):#done, tested
[uid] = getParams(request, ['uid'])
name = User.objects.get(pk=uid).name
return JsonResponse({'name': name})
@csrf_exempt
def confirmEvent(request):#done, tested
[uid, eid] = getParams(request, ['uid', 'eid'])
e = Event.objects.get(pk=eid)
if len(e.confirmedMembers.filter(pk=uid)) == 0:
e.confirmed += 1
e.confirmedMembers.add(User.objects.get(pk=uid))
e.save()
if e.confirmed == 1:
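            # Notify group members only on the first confirmation, and only for public groups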
g = e.group_events.all()[0]
if g.groupType == 'public':
responses = PushClient().publish_multiple([PushMessage(to=u.expoPushToken,
title="You'll never believe what you're missing out on!",
body="This is a test notification",
ttl=30,
priority='high',
sound='default') for u in g.members.all()])
for i in range(len(responses)):
try:
responses[i].validate_response()
except DeviceNotRegisteredError:
u = g.members.all()[i]
u.expoPushToken = ''
u.save()
return JsonResponse({'status': 'success'})
else:
raise Http404("Multiple confirmation")
@csrf_exempt
def search(request):#done, tested
[query] = getParams(request, ['q'])
return JsonResponse({'list': [g.gid for g in Group.objects.all()
if query in g.name and g.groupType == 'public']})
@csrf_exempt
def updateToken(request):
[token, uid] = getParams(request, ['token', 'uid'])
u = User.objects.get(uid=uid)
print("before: "+u.expoPushToken)
u.expoPushToken = token
u.save()
print("after: "+u.expoPushToken)
return JsonResponse({'status': 'success'})
| 38.222222
| 128
| 0.56468
| 969
| 8,256
| 4.773994
| 0.177503
| 0.049719
| 0.050584
| 0.029399
| 0.428448
| 0.358193
| 0.320363
| 0.270212
| 0.226762
| 0.172503
| 0
| 0.008311
| 0.285853
| 8,256
| 216
| 129
| 38.222222
| 0.776289
| 0.02798
| 0
| 0.426316
| 0
| 0
| 0.085051
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0.026316
| 0.005263
| 0.236842
| 0.015789
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88c03f34e857962f5d9b4b18e80b0b7a54e0e36b
| 3,572
|
py
|
Python
|
SDFConv/code/utils/vis/unet_vis.py
|
zshyang/FieldConvolution
|
ca88df568a6f2143dcb85d22c005fce4562a7523
|
[
"MIT"
] | 1
|
2021-01-03T18:53:06.000Z
|
2021-01-03T18:53:06.000Z
|
SDFConv/code/utils/vis/unet_vis.py
|
zshyang/FieldConvolution
|
ca88df568a6f2143dcb85d22c005fce4562a7523
|
[
"MIT"
] | null | null | null |
SDFConv/code/utils/vis/unet_vis.py
|
zshyang/FieldConvolution
|
ca88df568a6f2143dcb85d22c005fce4562a7523
|
[
"MIT"
] | null | null | null |
"""Implement this function across different project.
----ZY.2020.Oct.
"""
import os
from easydict import EasyDict
from torchvision.utils import save_image
from logging import Logger
from subprocess import call
def create_save_folders(root_folder, folder_list: list):
"""Create folders to save visualization image.
:param root_folder: The root folder.
:param folder_list: The list of folders
"""
for folder in folder_list:
os.makedirs(os.path.join(root_folder, folder), exist_ok=True)
def unet_vis(
in_batch: dict, out_batch: tuple, training: bool, epoch: int, step: int, options: EasyDict, logger: Logger
):
"""The visualization function of UNet.
:param in_batch: The input batch.
:param out_batch: The output batch.
:param training: Whether it is training stage.
    :param epoch: The epoch number, starting at 1.
:param step: The step.
:param logger: The logger.
:param options: The options for visualization.
"""
# Folders
if training:
vis_dir = os.path.join(options.vis.dir, "train_vis")
else:
vis_dir = os.path.join(options.vis.dir, "val_vis")
out_dir = os.path.join(vis_dir, "epoch-{:04d}".format(epoch))
# Customize the list of folders.
dir_list = ["input_image", "info"]
    # Create the list of folders.
create_save_folders(out_dir, dir_list)
# The list of key in input and output batch.
key_list = ["input_image", ["loss"]]
batch = {}
batch.update(in_batch)
batch.update(out_batch[0])
batch.update(out_batch[1])
# Get the batch size.
if training:
batch_size = options.train.batch_size
else:
batch_size = options.test.batch_size
# Get number of steps each epoch.
if training: # Update the number of training samples in options.
num_step_each_epoch = options.dataset.len_train // (options.train.batch_size * options.num_gpus)
else: # Update the number of validation samples in options.
num_step_each_epoch = options.dataset.len_test // (options.test.batch_size * options.num_gpus)
# Save images and info.
for i in range(batch_size):
batch_id = step % num_step_each_epoch
fn = "data-{:04d}.png".format(batch_id * batch_size + i) # file name.
for key, folder in zip(key_list, dir_list):
if folder == "info":
with open(os.path.join(out_dir, folder, fn.replace('.png', '.txt')), 'w') as file:
for loss_item in key:
file.write("{}: {}\n".format(loss_item, batch[loss_item][i].item()))
else:
save_image(batch[key][i], os.path.join(out_dir, folder, fn))
# Get the KC step interval.
if training:
kc_steps = options.train.kc_steps
else:
kc_steps = options.test.kc_steps
# Generate HTML file.
    mod_step = step % num_step_each_epoch  # step starts at 1.
extra_step = (mod_step + kc_steps) / num_step_each_epoch
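    # Regenerate the HTML overview at an epoch boundary or within kc_steps of the end of the epoch.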
if mod_step == 0 or extra_step > 1.0:
# Visualize HTML.
logger.info("Generating html visualization ...")
sublist = ",".join(dir_list)
script_path = os.path.join(os.path.abspath(os.getcwd()), "utils", "gen_html_hierarchy_local.py")
if not os.path.exists(script_path):
raise ValueError("{} this python script does not exist!".format(script_path))
cmd = "cd {} && python {} . 10 htmls {} {} > /dev/null".format(
out_dir, script_path, sublist, sublist
)
call(cmd, shell=True)
logger.info("DONE")
| 37.208333
| 114
| 0.645017
| 501
| 3,572
| 4.423154
| 0.275449
| 0.024368
| 0.031588
| 0.036101
| 0.130866
| 0.092058
| 0.092058
| 0.070397
| 0.044224
| 0.044224
| 0
| 0.006268
| 0.240761
| 3,572
| 95
| 115
| 37.6
| 0.810841
| 0.240482
| 0
| 0.155172
| 0
| 0
| 0.094046
| 0.010239
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034483
| false
| 0
| 0.086207
| 0
| 0.12069
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88c151ffa4679f358142c0fae2020059a35ad3a9
| 1,465
|
py
|
Python
|
tests/test_models/test_metric_report.py
|
wikimedia/analytics-wikimetrics
|
1d2036657b06ccd16ecfc76edd3f9a6119ff75f4
|
[
"MIT"
] | 6
|
2015-01-28T05:59:08.000Z
|
2018-01-09T07:48:57.000Z
|
tests/test_models/test_metric_report.py
|
wikimedia/analytics-wikimetrics
|
1d2036657b06ccd16ecfc76edd3f9a6119ff75f4
|
[
"MIT"
] | 2
|
2020-05-09T16:36:43.000Z
|
2020-05-09T16:52:35.000Z
|
tests/test_models/test_metric_report.py
|
wikimedia/analytics-wikimetrics
|
1d2036657b06ccd16ecfc76edd3f9a6119ff75f4
|
[
"MIT"
] | 1
|
2016-01-13T07:19:44.000Z
|
2016-01-13T07:19:44.000Z
|
from nose.tools import assert_equals, assert_true
from wikimetrics.metrics import metric_classes
from wikimetrics.models import (
MetricReport
)
from ..fixtures import DatabaseTest
class MetricReportTest(DatabaseTest):
def setUp(self):
DatabaseTest.setUp(self)
self.common_cohort_1()
def test_basic_response(self):
metric = metric_classes['NamespaceEdits'](
name='NamespaceEdits',
namespaces=[0, 1, 2],
start_date='2013-01-01 00:00:00',
end_date='2013-01-02 00:00:00',
)
mr = MetricReport(
metric, self.cohort.id,
[
self.editors[0].user_id,
self.editors[1].user_id,
self.editors[2].user_id,
],
'wiki'
)
result = mr.run()
assert_equals(result[self.editor(0)]['edits'], 2)
def test_repr(self):
metric = metric_classes['NamespaceEdits'](
name='NamespaceEdits',
namespaces=[0, 1, 2],
start_date='2013-05-01 00:00:00',
end_date='2013-09-01 00:00:00',
)
mr = MetricReport(
metric, self.cohort.id,
[
self.editors[0].user_id,
self.editors[1].user_id,
self.editors[2].user_id,
],
'wiki'
)
assert_true(str(mr).find('MetricReport') >= 0)
| 28.173077
| 57
| 0.524915
| 159
| 1,465
| 4.698113
| 0.327044
| 0.042838
| 0.104418
| 0.091031
| 0.524766
| 0.524766
| 0.524766
| 0.473896
| 0.473896
| 0.473896
| 0
| 0.076433
| 0.356997
| 1,465
| 51
| 58
| 28.72549
| 0.716561
| 0
| 0
| 0.444444
| 0
| 0
| 0.107167
| 0
| 0
| 0
| 0
| 0
| 0.066667
| 1
| 0.066667
| false
| 0
| 0.088889
| 0
| 0.177778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88c3e3d167bb3169d56f9fd93e05df1be55709b1
| 1,903
|
py
|
Python
|
xugrid/data/synthetic.py
|
Deltares/xugrid
|
41881977e5e49d0f87a90dd995960283b812b921
|
[
"MIT"
] | 15
|
2021-10-04T15:18:33.000Z
|
2022-03-14T13:58:27.000Z
|
xugrid/data/synthetic.py
|
Deltares/xugrid
|
41881977e5e49d0f87a90dd995960283b812b921
|
[
"MIT"
] | 10
|
2021-11-10T15:12:02.000Z
|
2022-02-10T14:35:57.000Z
|
xugrid/data/synthetic.py
|
Deltares/xugrid
|
41881977e5e49d0f87a90dd995960283b812b921
|
[
"MIT"
] | null | null | null |
import meshzoo
import numpy as np
import xarray as xr
import xugrid
def transform(vertices, minx, maxx, miny):
"""
Transform vertices to fit within minx to maxx.
Maintains x:y aspect ratio.
"""
x, y = vertices.T
xmin = x.min()
xmax = x.max()
ymin = y.min()
ymax = y.max()
dx = xmax - xmin
dy = ymax - ymin
new_dx = maxx - minx
new_dy = dy / dx * new_dx
x = (x - xmin) * new_dx / dx + minx
y = (y - ymin) * new_dy / dy + miny
return np.column_stack([x, y])
def disk():
def function_z(x, y):
"""
from https://matplotlib.org/stable/gallery/images_contours_and_fields/tricontour_smooth_user.html
"""
r1 = np.sqrt((0.5 - x) ** 2 + (0.5 - y) ** 2)
theta1 = np.arctan2(0.5 - x, 0.5 - y)
r2 = np.sqrt((-x - 0.2) ** 2 + (-y - 0.2) ** 2)
theta2 = np.arctan2(-x - 0.2, -y - 0.2)
z = -(
2 * (np.exp((r1 / 10) ** 2) - 1) * 30.0 * np.cos(7.0 * theta1)
+ (np.exp((r2 / 10) ** 2) - 1) * 30.0 * np.cos(11.0 * theta2)
+ 0.7 * (x ** 2 + y ** 2)
)
zmin = z.min()
zmax = z.max()
return (zmax - z) / (zmax - zmin) * 10.0
vertices, triangles = meshzoo.disk(6, 8)
vertices = transform(vertices, 0.0, 10.0, 0.0)
grid = xugrid.Ugrid2d(
node_x=vertices[:, 0],
node_y=vertices[:, 1],
fill_value=-1,
face_node_connectivity=triangles,
)
ds = xr.Dataset()
ds["node_z"] = xr.DataArray(
data=function_z(*grid.node_coordinates.T),
dims=[grid.node_dimension],
)
ds["face_z"] = xr.DataArray(
data=function_z(*grid.face_coordinates.T),
dims=[grid.face_dimension],
)
ds["edge_z"] = xr.DataArray(
data=function_z(*grid.edge_coordinates.T),
dims=[grid.edge_dimension],
)
return xugrid.UgridDataset(ds, grid)
| 27.985294
| 105
| 0.527063
| 280
| 1,903
| 3.478571
| 0.317857
| 0.008214
| 0.036961
| 0.049281
| 0.113963
| 0.113963
| 0.113963
| 0
| 0
| 0
| 0
| 0.053071
| 0.306884
| 1,903
| 67
| 106
| 28.402985
| 0.685368
| 0.090384
| 0
| 0
| 0
| 0
| 0.010664
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.056604
| false
| 0
| 0.075472
| 0
| 0.188679
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88c4cf9f6f8d805d5af7d3c164350e7934f1fcde
| 2,492
|
py
|
Python
|
ckanext/tess/group.py
|
ElixirUK/ckanext-tess
|
01725ff81b74f31d906875cb0cf493e7d3533615
|
[
"BSD-3-Clause"
] | 1
|
2015-05-18T08:31:28.000Z
|
2015-05-18T08:31:28.000Z
|
ckanext/tess/group.py
|
ElixirUK/ckanext-tess
|
01725ff81b74f31d906875cb0cf493e7d3533615
|
[
"BSD-3-Clause"
] | null | null | null |
ckanext/tess/group.py
|
ElixirUK/ckanext-tess
|
01725ff81b74f31d906875cb0cf493e7d3533615
|
[
"BSD-3-Clause"
] | null | null | null |
import ckan.plugins as plugins
import ckan.model as model
import ckan.logic as logic
import ckan.plugins.toolkit as toolkit
import ckan.lib.plugins as plugs
from pylons import c
NotFound = logic.NotFound
get_action = logic.get_action
class GroupPlugin(plugins.SingletonPlugin, plugs.DefaultGroupForm):
plugins.implements(plugins.IGroupForm, inherit=False)
plugins.implements(plugins.interfaces.IGroupController, inherit=True)
def before_view(self, group):
if c.controller == 'group':
group['owner'] = group_owner(group)
if c.userobj and c.userobj.id:
group['display'] = True
else:
group['display'] = False
return group
def group_types(self):
return ['group']
def is_fallback(self):
return True
def form_to_db_schema(self):
schema = super(GroupPlugin, self).form_to_db_schema()
schema = self._modify_group_schema(schema)
return schema
def db_to_form_schema(self):
schema = super(GroupPlugin, self).form_to_db_schema()
_convert_from_extras = toolkit.get_converter('convert_from_extras')
_ignore_missing = toolkit.get_validator('ignore_missing')
_boolean = toolkit.get_validator('boolean_validator')
default_validators = [_convert_from_extras, _ignore_missing, _boolean]
schema.update({
'private': default_validators
})
return schema
def _modify_group_schema(self, schema):
#Import core converters and validators
_convert_to_extras = toolkit.get_converter('convert_to_extras')
_ignore_missing = toolkit.get_validator('ignore_missing')
_boolean = toolkit.get_validator('boolean_validator')
default_validators = [_ignore_missing, _boolean, _convert_to_extras]
schema.update({
'private': default_validators
})
return schema
def group_owner(group):
context = {'model': model, 'session': model.Session,
'user': c.user or c.author,
'for_view': True}
admin = logic.get_action('member_list')(context, {'id': group.get('name'), 'object_type': 'user', 'capacity': 'admin'})
if admin and isinstance(admin, list) and admin[0][0]:
user = logic.get_action('user_show')(context, {'id': admin[0][0]})
return {'name': user.get('display_name'), 'link': user.get('id')}
else:
return {'name': 'unknown', 'link': '--'}
| 33.675676
| 123
| 0.656902
| 292
| 2,492
| 5.35274
| 0.273973
| 0.038388
| 0.048624
| 0.026871
| 0.332694
| 0.269994
| 0.269994
| 0.269994
| 0.204734
| 0.204734
| 0
| 0.002088
| 0.23114
| 2,492
| 73
| 124
| 34.136986
| 0.813674
| 0.014848
| 0
| 0.303571
| 0
| 0
| 0.106036
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.107143
| 0.035714
| 0.392857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88c6dfcf5b1c5b830035587feb18704990928ca6
| 2,087
|
py
|
Python
|
package/spack-discovardenovo/package.py
|
ctuning/ck-spack
|
307934efce1be2d4f104251275c82fbc70127105
|
[
"BSD-3-Clause"
] | 1
|
2018-07-17T07:45:09.000Z
|
2018-07-17T07:45:09.000Z
|
package/spack-discovardenovo/package.py
|
ctuning/ck-spack
|
307934efce1be2d4f104251275c82fbc70127105
|
[
"BSD-3-Clause"
] | null | null | null |
package/spack-discovardenovo/package.py
|
ctuning/ck-spack
|
307934efce1be2d4f104251275c82fbc70127105
|
[
"BSD-3-Clause"
] | null | null | null |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Discovardenovo(AutotoolsPackage):
"""DISCOVAR de novo is a large (and small) de novo genome assembler.
It quickly generates highly accurate and complete assemblies using the
same single library data as used by DISCOVAR. It currently doesn't
support variant calling, for that, please use DISCOVAR instead."""
homepage = "https://software.broadinstitute.org/software/discovar/blog/"
url = "ftp://ftp.broadinstitute.org/pub/crd/DiscovarDeNovo/latest_source_code/discovardenovo-52488.tar.gz"
version('52488', '2b08c77b1b998d85be8048e5efb10358')
# lots of compiler errors with GCC7, works with 4.8.5
# and devs claim it works with 4.7 so I'm assuming 4.7-4.8'll work
conflicts('%gcc@5:')
conflicts('%gcc@:4.7.0')
depends_on('samtools')
depends_on('jemalloc')
| 45.369565
| 115
| 0.689506
| 290
| 2,087
| 4.948276
| 0.617241
| 0.022997
| 0.016725
| 0.029268
| 0.07108
| 0.07108
| 0.07108
| 0
| 0
| 0
| 0
| 0.045195
| 0.162434
| 2,087
| 45
| 116
| 46.377778
| 0.775744
| 0.679444
| 0
| 0
| 0
| 0.111111
| 0.492441
| 0.280778
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.444444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88c7e5dd6da0bf02bd4f18142f4b9f76bc68b52c
| 10,379
|
py
|
Python
|
s2_convert.py
|
uscensusbureau/SABLE
|
883d449e4e6b75636d2589f540e86a5401e09932
|
[
"CC0-1.0"
] | 27
|
2017-11-06T22:55:24.000Z
|
2021-06-11T12:56:03.000Z
|
s2_convert.py
|
uscensusbureau/SABLE
|
883d449e4e6b75636d2589f540e86a5401e09932
|
[
"CC0-1.0"
] | 1
|
2018-01-31T18:26:23.000Z
|
2018-01-31T18:26:23.000Z
|
s2_convert.py
|
uscensusbureau/SABLE
|
883d449e4e6b75636d2589f540e86a5401e09932
|
[
"CC0-1.0"
] | 8
|
2017-10-05T19:17:05.000Z
|
2020-10-21T23:08:34.000Z
|
#Name: s2_convert.py
#Purpose: Convert PDFs to TXT format
#Invocation: python3 s2_convert.py <projName> <lng> <clss>
import codecs
import os
import re
import sys
#Name: valid_arguments
#Purpose: Check whether the command-line arguments are valid
#Parameters: sys.argv (globally defined list of command-line arguments)
#Returns: True (arguments are valid) or False (arguments are invalid)
def valid_arguments():
lngValid = set(["danish", "dutch", "english", "finnish", "french", "german", "hungarian", "italian", "norwegian", "portuguese", "spanish", "swedish", "turkish"])
clssValid = set(["neg", "pos", "pred"])
if len(sys.argv) == 4 and re.search(r"^[a-zA-Z][a-zA-Z_-]*$", sys.argv[1]) and sys.argv[2] in lngValid and sys.argv[3] in clssValid:
return True
return False
#Name: match_page
#Purpose: Match line to an XML page tag
#Parameters: line (line of text from XML file)
#Returns: Regular expression match object
def match_page(line):
return re.search(r"<page id=\"(\d+)\"", line)
#Name: match_textbox
#Purpose: Match line to an XML textbox tag
#Parameters: line (line of text from XML file)
#Returns: Regular expression match object
def match_textbox(line):
return re.search(r"<textbox id=\"(\d+)\"", line)
#Name: match_textline
#Purpose: Match line to an XML textline tag
#Parameters: line (line of text from XML file)
#Returns: Regular expression match object
def match_textline(line):
return re.search(r"<textline", line)
#Name: match_text
#Purpose: Match line to an XML text tag
#Parameters: line (line of text from XML file)
#Returns: Regular expression match object
def match_text(line):
return re.search(r"<text.*font=\"(.*)\".*bbox=\"([0-9]+\.[0-9]+),([0-9]+\.[0-9]+),([0-9]+\.[0-9]+),([0-9]+\.[0-9]+)\".*size=\"([0-9]+\.[0-9]+)\">(.*)</text>", line)
#Name: clean_char
#Purpose: Clean character to deal with punctuation, numbers, and foreign accent marks
#Parameters: old (character)
#Returns: Cleaned character
def clean_char(old):
#Check the length of the argument
if len(old) == 0:
new = ""
elif len(old) >= 2:
new = " "
else:
#The function "ord" returns the integer representing the Unicode code point of a character
ucp = ord(old)
#Control codes
if (0 <= ucp <= 31):
new = " "
#Punctuation
elif (32 <= ucp <= 38) or (40 <= ucp <= 47) or (58 <= ucp <= 64) or (91 <= ucp <= 96) or (123 <= ucp <= 126) or ucp == 8221:
new = " "
#Apostrophe
elif ucp == 39 or ucp == 8217:
new = ""
#Numbers
elif (48 <= ucp <= 57):
new = " "
#Letters
elif (192 <= ucp <= 198) or (224 <= ucp <= 230):
new = "a"
elif ucp == 199 or ucp == 231:
new = "c"
elif (200 <= ucp <= 203) or (232 <= ucp <= 235):
new = "e"
elif (204 <= ucp <= 207) or (236 <= ucp <= 239):
new = "i"
elif ucp == 209 or ucp == 241:
new = "n"
elif (210 <= ucp <= 214) or ucp == 216 or (242 <= ucp <= 246) or ucp == 248:
new = "o"
elif ucp == 223:
new = "ss"
elif (217 <= ucp <= 220) or (249 <= ucp <= 252):
new = "u"
elif ucp == 221 or ucp == 253 or ucp == 255:
new = "y"
elif ucp >= 128:
new = " "
else:
new = old
return new
#Name: get_chars
#Purpose: Extract the character values, coordinates, hierarchy, and font information from XML file
#Parameters: xmlFile (location of XML file)
#Returns: List of tuples (one for each character) containing character data
def get_chars(xmlFile):
chars = []
page = 0
textbox = 0
textline = 0
#Open XML file and use regular expressions to parse contents
f = codecs.open(xmlFile, "rU", encoding="utf8")
for l in f:
line = l.strip()
pageMatch = match_page(line)
textboxMatch = match_textbox(line)
textlineMatch = match_textline(line)
textMatch = match_text(line)
if pageMatch:
page = int(pageMatch.group(1))
elif textboxMatch:
textline = 0
textbox = int(textboxMatch.group(1))
elif textlineMatch:
textline += 1
elif textMatch:
font = textMatch.group(1)
x1 = float(textMatch.group(2))
y1 = float(textMatch.group(3))
x2 = float(textMatch.group(4))
y2 = float(textMatch.group(5))
size = float(textMatch.group(6))
value = clean_char(textMatch.group(7))
chars.append((page, textbox, textline, x1, y1, x2, y2, size, font, value))
f.close()
return chars
#Name: clean_text
#Purpose: Clean string of text and check each word against a list of stop words
#Parameters: text (string of text)
#Returns: Cleaned text
def clean_text(text):
text = text.lower()
text = re.sub("\s+", " ", text)
#Remove stop words
textClean = []
text = text.split(" ")
global stopWords
for word in text:
word = word.strip()
if word not in stopWords:
textClean.append(word)
textClean = " ".join(textClean)
return textClean
#Name: write_text
#Purpose: Construct words character by character
#Parameters: chars (list of tuples)
# txtFile (location of TXT file)
#Returns:
def write_text(chars, txtFile):
text = []
#Sort characters according to page, textbox, textline, y1, and x1
chars = sorted(chars, key = lambda z: (z[0], z[1], z[2], -z[4], z[3]))
pageCur = chars[0][0]
textboxCur = chars[0][1]
textlineCur = chars[0][2]
for char in chars:
spaceFlag = 0
pageNew = char[0]
textboxNew = char[1]
textlineNew = char[2]
if pageNew != pageCur:
pageCur = pageNew
spaceFlag = 1
if textboxNew != textboxCur:
textboxCur = textboxNew
spaceFlag = 1
if textlineNew != textlineCur:
textlineCur = textlineNew
spaceFlag = 1
if spaceFlag == 1:
text.append(" ")
text.append(char[9])
text = "".join(text)
f = codecs.open(txtFile, "w")
f.write(clean_text(text))
f.close()
return
#Name: create_output
#Purpose: Convert a PDF document of a given class to TXT format
#Parameters: projName (project name)
# clss ("pos" or "neg")
# docName (document name)
#Returns:
def create_output(projName, clss, docName):
#Create file locations
pdfFile = "/" + projName + "/" + clss + "_pdf/" + docName + ".pdf"
xmlFile = "/" + projName + "/" + clss + "_xml/" + docName + ".xml"
txtFile = "/" + projName + "/" + clss + "_txt/" + docName + ".txt"
probFile = "/" + projName + "/" + clss + "_prob/" + docName + ".pdf"
#probFlag indicates whether there is a problem extracting text from the PDF
#The problem PDFs are moved to separate folders where they can be inspected
probFlag = 0
chars = []
#If the TXT file does not already exist, then try creating it
if not os.path.isfile(txtFile):
try:
#The pdf2txt.py program comes with the PDFMiner module
os.system("pdf2txt.py -o " + xmlFile + " -t xml " + pdfFile)
        except Exception:
            #PDFTextExtractionNotAllowed is not imported here and os.system would not raise it
            #anyway, so catch any failure generically to indicate that text could not be extracted
            probFlag = 1
if not os.path.isfile(xmlFile):
probFlag = 1
elif os.stat(xmlFile).st_size == 0:
probFlag = 1
if probFlag == 0:
chars = get_chars(xmlFile)
if len(chars) == 0:
probFlag = 1
#Check probFlag value and act accordingly
if probFlag == 0:
write_text(chars, txtFile)
if os.path.isfile(xmlFile):
#The intermediate XML file is deleted because it tends to be large
os.remove(xmlFile)
print(docName)
elif probFlag == 1:
if os.path.isfile(xmlFile):
#The intermediate XML file is deleted because it tends to be large
os.remove(xmlFile)
if os.path.isfile(txtFile):
#Any text that has been extracted from the problem PDF is deleted
os.remove(txtFile)
os.system("mv " + pdfFile + " " + probFile)
print("!!! PROBLEM: " + docName)
return
#Name: convert_files
#Purpose: Convert PDFs to TXT format
#Parameters: projName (project name)
# lng (language)
# clss ("neg", "pos", or "pred")
#Returns:
def convert_files(projName, lng, clss):
#Read in stop words
stopWordsList = []
f = codecs.open("stop_" + lng + ".txt", "rU")
for word in f:
if word.strip() != "":
stopWordsList.append(word.strip())
f.close()
global stopWords
stopWords = set(stopWordsList)
#Iterate through PDFs of a given class, extract text, and create output files
print("\n***** " + clss + " *****\n")
pdfs = sorted(os.listdir("/" + projName + "/" + clss + "_pdf/"))
for pdf in pdfs:
pdfMatch = re.search(r"^(\S+)\.([pP][dD][fF])$", pdf)
if pdfMatch:
docName = pdfMatch.group(1)
if pdfMatch.group(2) != "pdf":
oldFile = "/" + projName + "/" + clss + "_pdf/" + docName + "." + pdfMatch.group(2)
newFile = "/" + projName + "/" + clss + "_pdf/" + docName + ".pdf"
os.system("mv " + oldFile + " " + newFile)
create_output(projName, clss, docName)
print("")
return
def main():
if valid_arguments():
convert_files(sys.argv[1], sys.argv[2], sys.argv[3])
else:
print("\nInvalid arguments\n")
return
if __name__ == "__main__":
main()
| 34.946128
| 169
| 0.544561
| 1,244
| 10,379
| 4.500804
| 0.264469
| 0.003572
| 0.004286
| 0.005715
| 0.177353
| 0.131988
| 0.107162
| 0.092874
| 0.092874
| 0.092874
| 0
| 0.032161
| 0.328933
| 10,379
| 296
| 170
| 35.064189
| 0.771716
| 0.299547
| 0
| 0.190476
| 0
| 0
| 0.061449
| 0.006377
| 0
| 0
| 0
| 0
| 0
| 1
| 0.063492
| false
| 0
| 0.021164
| 0.021164
| 0.153439
| 0.026455
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88cc892509c73d91def940743039ba5fd8a12e2f
| 5,818
|
py
|
Python
|
copy-net3_main0_1_2_20-01-10_14-12-14_epoch600_lr0-3_decay0-0_decay20-0_decay39e-05_seed0/main0.py
|
ninfueng/nsn
|
a214eafbcf5cf6dedb57131bc6eb1d307797f2ab
|
[
"MIT"
] | null | null | null |
copy-net3_main0_1_2_20-01-10_14-12-14_epoch600_lr0-3_decay0-0_decay20-0_decay39e-05_seed0/main0.py
|
ninfueng/nsn
|
a214eafbcf5cf6dedb57131bc6eb1d307797f2ab
|
[
"MIT"
] | null | null | null |
copy-net3_main0_1_2_20-01-10_14-12-14_epoch600_lr0-3_decay0-0_decay20-0_decay39e-05_seed0/main0.py
|
ninfueng/nsn
|
a214eafbcf5cf6dedb57131bc6eb1d307797f2ab
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""The code implementation of SharedGradNet.
main0.py is for neural networks without a hidden layer.
Some part from: https://jhui.github.io/2018/02/09/PyTorch-Variables-functionals-and-Autograd/
2019/06/17: Update with hyper-parameter tuning script.
2019/06/25: Committed main0.py.
"""
__author__ = 'Ninnart Fuengfusin'
__version__ = '0.0.1'
__email__ = 'ninnart.fuengfusin@yahoo.com'
import os
import time
import logging
import argparse
import numpy as np  # np.savetxt is used below; numpy may otherwise only arrive via the star import from utils
import torch
import torch.nn as nn
import model
from weight_decay import *
from dataset import load_dataset
from utils import *
from recorder import Recorder
from updater import UpdateMomentum
from namer import namer
parser = argparse.ArgumentParser(description='PyTorch implementation of SharedGradNet.')
parser.add_argument('--epoch', '-e', type=int, default=600, help='Number of training epoch.')
parser.add_argument('--learning_rate', '-lr', type=float, default=3e-1, help='A float for the initial learning rate.')
parser.add_argument('--train_batch', type=int, default=128, help='An integer for the train batch size.')
parser.add_argument('--test_batch', type=int, default=128, help='An integer for the test batch size.')
parser.add_argument('--num_neuron', type=int, default=784,
                    help='Number of neurons in the fully connected layer used to produce codes.')
parser.add_argument('--weight_decay', type=float, default=0, help='A float for the weight decay.')
parser.add_argument('--load', type=str2bool, default=False,
help='A boolean for loading weights from load_location or not.')
parser.add_argument('--load_location', type=str, default='model1-baseline',
help='A string of location for loading weights.')
parser.add_argument('--seed', '-s', type=int, default=0,
help='An integer for initialization randomness.')
args = parser.parse_args()
if __name__ == '__main__':
save_loc = namer(
f'epoch{args.epoch}', f'lr{args.learning_rate}',
f'decay{args.weight_decay}', f'seed{args.seed}')
set_logger(os.path.join(os.getcwd(), save_loc), 'info.log')
logging.info(__doc__)
logging.info(args)
set_printoptions()
seed_everywhere_torch(args.seed)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
record = Recorder('test_acc', 'test_acc2', 'test_acc3', 'test_loss', 'test_loss2', 'test_loss3')
train_loader, test_loader, img_size = load_dataset(
num_train_batch=args.train_batch, num_test_batch=args.test_batch,
num_extra_batch=0, num_worker=8, dataset='mnist')
model1 = model.NetworkWithSub1()
updaterW1_1 = UpdateMomentum()
updaterB1_1 = UpdateMomentum()
model1.to(device)
BETA = 0.9
t1 = time.time()
for i in range(args.epoch):
# Accumulating variables.
total_train_loss = 0
train_correct = 0
train_total = 0
total_test_loss = 0
test_correct = 0
test_total = 0
model1.train()
args.learning_rate = args.learning_rate/3 if i % 200 == 0 and i != 0 else args.learning_rate
for train_data, train_label in train_loader:
model1.zero_grad()
train_data, train_label = train_data.to(device), train_label.to(device)
train_output = model1.forward(train_data)
train_loss = nn.CrossEntropyLoss()(
train_output, train_label) #+ l2_weight_decay(args.weight_decay2, model2.w1)
train_loss.backward()
total_train_loss += train_loss.item()
_, train_predicted = torch.max(train_output.data, 1)
train_correct += (train_predicted == train_label).sum().item()
train_total += train_label.data.size(0)
model1.w1.data = updaterW1_1.update(
model1.w1.data, BETA, args.learning_rate, model1.w1.grad.data)
model1.b1.data = updaterB1_1.update(
model1.b1.data, BETA, args.learning_rate, model1.b1.grad.data)
logging.info(f'Epoch: {i + 1}')
logging.info(f'Train Accuracy: {train_correct/train_total}, \nLoss: {total_train_loss/train_total}')
with torch.no_grad():
model1.eval()
for test_data, test_label in test_loader:
test_data, test_label = test_data.to(device), test_label.to(device)
test_output = model1.forward(test_data)
test_loss = nn.CrossEntropyLoss()(test_output, test_label)
total_test_loss += test_loss.item()
_, test_predicted = torch.max(test_output.data, 1)
test_correct += (test_predicted == test_label).sum().item()
test_total += test_label.data.size(0)
if record.more_than_highest('test_acc', test_correct/test_total):
save_model(model1, os.path.join(os.getcwd(), save_loc, 'checkpoint.pth'))
logging.info(f'Save model')
t2 = time.time() - t1
logging.info(f'Test Accuracy: {test_correct/test_total}, \nLoss: {total_test_loss/test_total}')
record.record('test_acc', test_correct/test_total)
logging.info(f'Learning rate {args.learning_rate}')
logging.info(f'Timer: {to_hhmmss(t2)}')
logging.info(f'=====================================================================================')
record.save_all(os.path.join(os.getcwd(), save_loc))
logging.info(f'best test_acc: {record.highest("test_acc")}')
logging.info(f'model1:w1 = {model1.w1.data}')
record.plot(
'test_acc', save=True,
save_loc=os.path.join(os.getcwd(), save_loc, 'test_acc.png'))
np.savetxt(
os.path.join(os.getcwd(), save_loc, f'{record.highest("test_acc")}.txt'),
record.highest("test_acc"), delimiter=',')
| 46.174603
| 117
| 0.653489
| 775
| 5,818
| 4.682581
| 0.27871
| 0.033343
| 0.04216
| 0.016533
| 0.117112
| 0.08625
| 0.054836
| 0.020391
| 0.020391
| 0
| 0
| 0.024908
| 0.206428
| 5,818
| 125
| 118
| 46.544
| 0.7611
| 0.067377
| 0
| 0
| 0
| 0
| 0.224151
| 0.061115
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.12381
| 0
| 0.12381
| 0.009524
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88cfb3f12d8dd1b1dd5417858d82ba8a891b227e
| 7,694
|
py
|
Python
|
etc/dbus-serialbattery/battery.py
|
Carstijn/dbus-serialbattery
|
23afec33c2fd87fd4d4c53516f0a25f290643c82
|
[
"MIT"
] | null | null | null |
etc/dbus-serialbattery/battery.py
|
Carstijn/dbus-serialbattery
|
23afec33c2fd87fd4d4c53516f0a25f290643c82
|
[
"MIT"
] | null | null | null |
etc/dbus-serialbattery/battery.py
|
Carstijn/dbus-serialbattery
|
23afec33c2fd87fd4d4c53516f0a25f290643c82
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import utils
class Protection(object):
# 2 = Alarm, 1 = Warning, 0 = Normal
def __init__(self):
self.voltage_high = None
self.voltage_low = None
self.voltage_cell_low = None
self.soc_low = None
self.current_over = None
self.current_under = None
self.cell_imbalance = None
self.internal_failure = None
self.temp_high_charge = None
self.temp_low_charge = None
self.temp_high_discharge = None
self.temp_low_discharge = None
class Cell:
voltage = None
balance = None
def __init__(self, balance):
self.balance = balance
class Battery(object):
def __init__(self, port, baud):
self.port = port
self.baud_rate = baud
self.role = 'battery'
self.type = 'Generic'
self.poll_interval = 1000
self.hardware_version = None
self.voltage = None
self.current = None
self.capacity_remain = None
self.capacity = None
self.cycles = None
self.total_ah_drawn = None
self.production = None
self.protection = Protection()
self.version = None
self.soc = None
self.charge_fet = None
self.discharge_fet = None
self.cell_count = None
self.temp_sensors = None
self.temp1 = None
self.temp2 = None
self.cells = []
self.control_charging = None
self.control_voltage = None
self.control_current = None
self.control_previous_total = None
self.control_previous_max = None
self.control_discharge_current = None
self.control_charge_current = None
self.control_allow_charge = None
# max battery charge/discharge current
self.max_battery_current = None
self.max_battery_discharge_current = None
def test_connection(self):
# Each driver must override this function to test if a connection can be made
# return false when fail, true if successful
        return False
def get_settings(self):
# Each driver must override this function to read/set the battery settings
# It is called once after a successful connection by DbusHelper.setup_vedbus()
# Values: battery_type, version, hardware_version, min_battery_voltage, max_battery_voltage,
# MAX_BATTERY_CURRENT, MAX_BATTERY_DISCHARGE_CURRENT, cell_count, capacity
# return false when fail, true if successful
        return False
def refresh_data(self):
# Each driver must override this function to read battery data and populate this class
# It is called each poll just before the data is published to vedbus
# return false when fail, true if successful
        return False
def to_temp(self, sensor, value):
# Keep the temp value between -20 and 100 to handle sensor issues or no data.
# The BMS should have already protected before those limits have been reached.
if sensor == 1:
self.temp1 = min(max(value, -20), 100)
if sensor == 2:
self.temp2 = min(max(value, -20), 100)
def manage_charge_current(self):
# Start with the current values
# Change depending on the SOC values
if self.soc > 99:
self.control_allow_charge = False
else:
self.control_allow_charge = True
# Change depending on the SOC values
if 98 < self.soc <= 100:
self.control_charge_current = 1
elif 95 < self.soc <= 97:
self.control_charge_current = 4
elif 91 < self.soc <= 95:
self.control_charge_current = self.max_battery_current/2
else:
self.control_charge_current = self.max_battery_current
# Change depending on the SOC values
if self.soc <= 20:
self.control_discharge_current = 5
elif 20 < self.soc <= 30:
self.control_discharge_current = self.max_battery_discharge_current/4
elif 30 < self.soc <= 35:
self.control_discharge_current = self.max_battery_discharge_current/2
else:
self.control_discharge_current = self.max_battery_discharge_current
def get_min_cell(self):
min_voltage = 9999
min_cell = None
if len(self.cells) == 0 and hasattr(self, 'cell_min_no'):
return self.cell_min_no
for c in range(min(len(self.cells), self.cell_count)):
if self.cells[c].voltage is not None and min_voltage > self.cells[c].voltage:
min_voltage = self.cells[c].voltage
min_cell = c
return min_cell
def get_max_cell(self):
max_voltage = 0
max_cell = None
if len(self.cells) == 0 and hasattr(self, 'cell_max_no'):
return self.cell_max_no
for c in range(min(len(self.cells), self.cell_count)):
if self.cells[c].voltage is not None and max_voltage < self.cells[c].voltage:
max_voltage = self.cells[c].voltage
max_cell = c
return max_cell
def get_min_cell_desc(self):
cell_no = self.get_min_cell()
if cell_no is None:
return None
return 'C' + str(cell_no + 1)
def get_max_cell_desc(self):
cell_no = self.get_max_cell()
if cell_no is None:
return None
return 'C' + str(cell_no + 1)
def get_min_cell_voltage(self):
min_voltage = 9999
if len(self.cells) == 0 and hasattr(self, 'cell_min_voltage'):
return self.cell_min_voltage
for c in range(min(len(self.cells), self.cell_count)):
if self.cells[c].voltage is not None and min_voltage > self.cells[c].voltage:
min_voltage = self.cells[c].voltage
return min_voltage
def get_max_cell_voltage(self):
max_voltage = 0
if len(self.cells) == 0 and hasattr(self, 'cell_max_voltage'):
return self.cell_max_voltage
for c in range(min(len(self.cells), self.cell_count)):
if self.cells[c].voltage is not None and max_voltage < self.cells[c].voltage:
max_voltage = self.cells[c].voltage
return max_voltage
def get_balancing(self):
for c in range(min(len(self.cells), self.cell_count)):
if self.cells[c].balance is not None and self.cells[c].balance:
return 1
return 0
def get_temp(self):
if self.temp1 is not None and self.temp2 is not None:
return round((float(self.temp1) + float(self.temp2)) / 2, 2)
if self.temp1 is not None and self.temp2 is None:
return round(float(self.temp1) , 2)
if self.temp1 is None and self.temp2 is not None:
return round(float(self.temp2) , 2)
else:
return None
def get_min_temp(self):
if self.temp1 is not None and self.temp2 is not None:
return min(self.temp1, self.temp2)
if self.temp1 is not None and self.temp2 is None:
return self.temp1
if self.temp1 is None and self.temp2 is not None:
return self.temp2
else:
return None
def get_max_temp(self):
if self.temp1 is not None and self.temp2 is not None:
return max(self.temp1, self.temp2)
if self.temp1 is not None and self.temp2 is None:
return self.temp1
if self.temp1 is None and self.temp2 is not None:
return self.temp2
else:
return None
| 35.786047
| 101
| 0.618144
| 1,049
| 7,694
| 4.35367
| 0.154433
| 0.061309
| 0.033501
| 0.044668
| 0.497482
| 0.463981
| 0.456098
| 0.437048
| 0.408583
| 0.335231
| 0
| 0.021005
| 0.306992
| 7,694
| 215
| 102
| 35.786047
| 0.835521
| 0.13712
| 0
| 0.289157
| 0
| 0
| 0.010574
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.108434
| false
| 0
| 0.012048
| 0.018072
| 0.325301
| 0.006024
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88d2d07a327a91f5956c30f18f5820061fc0b593
| 16,227
|
py
|
Python
|
lumicks/pylake/kymotracker/detail/binding_times.py
|
lumicks/pylake
|
b5875d156d6416793a371198f3f2590fca2be4cd
|
[
"Apache-2.0"
] | 8
|
2019-02-18T07:56:39.000Z
|
2022-03-19T01:14:48.000Z
|
lumicks/pylake/kymotracker/detail/binding_times.py
|
lumicks/pylake
|
b5875d156d6416793a371198f3f2590fca2be4cd
|
[
"Apache-2.0"
] | 42
|
2018-11-30T14:40:35.000Z
|
2022-03-29T11:43:45.000Z
|
lumicks/pylake/kymotracker/detail/binding_times.py
|
lumicks/pylake
|
b5875d156d6416793a371198f3f2590fca2be4cd
|
[
"Apache-2.0"
] | 4
|
2019-01-09T13:45:53.000Z
|
2021-07-06T14:06:52.000Z
|
import numpy as np
from scipy.special import logsumexp
from scipy.optimize import minimize
from functools import partial
from dataclasses import dataclass, field
import matplotlib.pyplot as plt
@dataclass
class BindingDwelltimesBootstrap:
"""Bootstrap distributions for a binding dwelltime model.
This class is stored in the `BindingDwelltime.bootstrap` attribute
and should not be constructed manually.
Attributes
----------
_samples : np.ndarray
array of optimized model parameters for each bootstrap sample pull; shape is
[number of parameters, number of samples]
"""
_samples: np.ndarray = field(default_factory=lambda: np.array([]), repr=False)
def _sample_distributions(self, optimized, iterations):
"""Construct bootstrap distributions for parameters.
For each iteration, a dataset is randomly selected (with replacement) with the same
size as the data used to optimize the model. Model parameters are then optimized
for this new sampled dataset.
Parameters
----------
optimized : BindingDwelltimes
optimized model results
iterations : int
number of iterations (random samples) to use for the bootstrap
"""
n_data = optimized.dwelltimes_sec.size
self._samples = np.empty((optimized._parameters.size, iterations))
for itr in range(iterations):
sample = np.random.choice(optimized.dwelltimes_sec, size=n_data, replace=True)
result = _kinetic_mle_optimize(
optimized.n_components,
sample,
*optimized.observation_limits,
initial_guess=optimized._parameters,
)
self._samples[:, itr] = result._parameters
@property
def n_samples(self):
"""Number of samples in the bootstrap."""
return self._samples.shape[1]
@property
def n_components(self):
"""Number of components in the model."""
return int(self._samples.shape[0] / 2)
@property
def amplitude_distributions(self):
"""Array of sample optimized amplitude parameters; shape is
[number of components, number of samples]"""
return self._samples[: self.n_components]
@property
def lifetime_distributions(self):
"""Array of sample optimized lifetime parameters; shape is
[number of components, number of samples]"""
return self._samples[self.n_components :]
def calculate_stats(self, key, component, alpha=0.05):
"""Calculate the mean and confidence intervals of the bootstrap distribution for a parameter.
*NOTE*: the `100*(1-alpha)` % confidence intervals calculated here correspond to the
`100*(alpha/2)` and `100*(1-(alpha/2))` quantiles of the distribution. For distributions
which are not well approximated by a normal distribution these values are not reliable
confidence intervals.
Parameters
----------
key : {'amplitude', 'lifetime'}
name of the parameter to be analyzed
component : int
index of the component to be analyzed
alpha : float
confidence intervals are calculated as 100*(1-alpha)%
"""
if key not in ("amplitude", "lifetime"):
raise KeyError("key must be either 'amplitude' or 'lifetime'")
data = getattr(self, f"{key}_distributions")[component]
mean = np.mean(data)
lower = np.quantile(data, alpha / 2)
upper = np.quantile(data, 1 - (alpha / 2))
return mean, (lower, upper)
def plot(self, alpha=0.05, n_bins=25, hist_kwargs={}, span_kwargs={}, line_kwargs={}):
"""Plot the bootstrap distributions for the parameters of a model.
Parameters
----------
alpha : float
confidence intervals are calculated as 100*(1-alpha)%
n_bins : int
number of bins in the histogram
hist_kwargs : dict
dictionary of plotting kwargs applied to histogram
span_kwargs : dict
dictionary of plotting kwargs applied to the patch indicating the area
spanned by the confidence intervals
line_kwargs : dict
dictionary of plotting kwargs applied to the line indicating the
distribution means
"""
hist_kwargs = {"facecolor": "#c5c5c5", "edgecolor": "#888888", **hist_kwargs}
span_kwargs = {"facecolor": "tab:red", "alpha": 0.3, **span_kwargs}
line_kwargs = {"color": "k", **line_kwargs}
def plot_axes(data, key, component, use_index):
plt.hist(data, bins=n_bins, **hist_kwargs)
mean, (lower, upper) = self.calculate_stats(key, component, alpha)
plt.axvspan(lower, upper, **span_kwargs)
plt.axvline(mean, **line_kwargs)
plt.xlabel(f"{key}" if key == "amplitude" else f"{key} (sec)")
plt.ylabel("counts")
label = "a" if key == "amplitude" else r"\tau"
unit = "" if key == "amplitude" else "sec"
prefix = fr"${label}_{component+1}$" if use_index else fr"${label}$"
plt.title(f"{prefix} = {mean:0.2g} ({lower:0.2g}, {upper:0.2g}) {unit}")
if self.n_components == 1:
data = self.lifetime_distributions.squeeze()
plot_axes(data, "lifetime", 0, False)
else:
            for component in range(self.n_components):  # one row of panels per model component
for column, key in enumerate(("amplitude", "lifetime")):
data = getattr(self, f"{key}_distributions")[component]
column += 1
plt.subplot(self.n_components, 2, 2 * component + column)
plot_axes(data, key, component, True)
plt.tight_layout()
@dataclass(frozen=True)
class BindingDwelltimes:
"""Results of exponential mixture model optimization for binding dwelltimes.
This class is returned from `_kinetic_mle_optimize()` and should not be
constructed manually.
Attributes
----------
n_components : int
number of components in the model.
dwelltimes_sec : np.ndarray
observations on which the model was trained.
    observation_limits : tuple
tuple of (`min`, `max`) values of the experimental observation time.
_parameters : np.ndarray
optimized parameters in the order [amplitudes, lifetimes]
log_likelihood : float
log likelihood of the trained model
bootstrap : BindingDwelltimesBootstrap
object containing information about the bootstrapping analysis.
"""
n_components: int
dwelltimes_sec: np.ndarray = field(repr=False)
observation_limits: list = field(repr=False)
_parameters: np.ndarray = field(repr=False)
log_likelihood: float
bootstrap: BindingDwelltimesBootstrap = field(
default_factory=BindingDwelltimesBootstrap, init=False, repr=False
)
@property
def amplitudes(self):
"""Fractional amplitude of each model component"""
return self._parameters[: self.n_components]
@property
def lifetimes(self):
"""Lifetime parameter (in seconds) of each model component."""
return self._parameters[self.n_components :]
@property
def aic(self):
"""Akaike Information Criterion."""
k = (2 * self.n_components) - 1 # number of parameters
return 2 * k - 2 * self.log_likelihood
@property
def bic(self):
"""Bayesian Information Criterion."""
k = (2 * self.n_components) - 1 # number of parameters
n = self.dwelltimes_sec.size # number of observations
return k * np.log(n) - 2 * self.log_likelihood
def calculate_bootstrap(self, iterations=500):
self.bootstrap._sample_distributions(self, iterations)
def plot(
self,
n_bins=25,
bin_spacing="linear",
hist_kwargs={},
component_kwargs={},
fit_kwargs={},
xscale=None,
yscale=None,
):
"""Plot the dwelltime distribution histogram and overlayed model density.
Parameters
----------
n_bins : int
number of bins in the histogram
bin_spacing : {"log", "linear"}
determines how bin edges are spaced apart
hist_kwargs : dict
dictionary of plotting kwargs applied to histogram
component_kwargs : dict
dictionary of plotting kwargs applied to the line plot for each component
fit_kwargs : dict
dictionary of plotting kwargs applied to line plot for the total fit
xscale : {"log", "linear", None}
scaling for the x-axis; when `None` default is "linear"
yscale : {"log", "linear", None}
scaling for the y-axis; when `None` default is same as `bin_spacing`
"""
if bin_spacing == "log":
scale = np.logspace
limits = (np.log10(self.dwelltimes_sec.min()), np.log10(self.dwelltimes_sec.max()))
xscale = "linear" if xscale is None else xscale
yscale = "log" if yscale is None else yscale
elif bin_spacing == "linear":
scale = np.linspace
limits = (self.dwelltimes_sec.min(), self.dwelltimes_sec.max())
xscale = "linear" if xscale is None else xscale
yscale = "linear" if yscale is None else yscale
else:
raise ValueError("spacing must be either 'log' or 'linear'")
bins = scale(*limits, n_bins)
centers = bins[:-1] + (bins[1:] - bins[:-1]) / 2
hist_kwargs = {"facecolor": "#cdcdcd", "edgecolor": "#aaaaaa", **hist_kwargs}
component_kwargs = {"marker": "o", "ms": 3, **component_kwargs}
fit_kwargs = {"color": "k", **fit_kwargs}
components = np.exp(
exponential_mixture_log_likelihood_components(
self.amplitudes, self.lifetimes, centers, *self.observation_limits
)
)
def label_maker(a, t, n):
if self.n_components == 1:
amplitude = ""
lifetime_label = r"$\tau$"
else:
amplitude = f"($a_{n}$ = {a:0.2g}) "
lifetime_label = fr"$\tau_{n}$"
return f"{amplitude}{lifetime_label} = {t:0.2g} sec"
# plot histogram
density, _, _ = plt.hist(self.dwelltimes_sec, bins=bins, density=True, **hist_kwargs)
# plot individual components
for n in range(self.n_components):
label = label_maker(self.amplitudes[n], self.lifetimes[n], n + 1)
plt.plot(centers, components[n], label=label, **component_kwargs)
# plot total fit
label = r"$\ln \mathcal{L} $" + f"= {self.log_likelihood:0.3f}"
plt.plot(centers, np.sum(components, axis=0), label=label, **fit_kwargs)
# rearrange legend entries so that total fit is first
legend_components = [[c[-1], *c[:-1]] for c in plt.gca().get_legend_handles_labels()]
plt.legend(*legend_components, loc="upper right")
# format axes
plt.xscale(xscale)
plt.yscale(yscale)
if yscale == "log":
ylim = (np.min(density[density != 0] * 0.5), np.max(density[density != 0] * 1.5))
plt.ylim(ylim)
plt.ylabel("density")
plt.xlabel("dwelltime (sec)")
plt.tight_layout()
def exponential_mixture_log_likelihood_components(
amplitudes, lifetimes, t, min_observation_time, max_observation_time
):
"""Calculate each component of the log likelihood of an exponential mixture distribution.
The full log likelihood for a single observation is given by:
log(L) = log( sum_i( component_i ) )
with the output of this function being log(component_i) defined as:
        log(component_i) = log(a_i) - log(N) - log(tau_i) - t/tau_i
where a_i and tau_i are the amplitude and lifetime of component i and N is a normalization
factor that takes into account the minimum and maximum observation times of the experiment:
N = sum_i { a_i * [ exp(-t_min / tau_i) - exp(-t_max / tau_i) ] }
Therefore, the full log likelihood is calculated from the output of this function by applying
logsumexp(output, axis=0) where the summation is taken over the components.
Parameters
----------
amplitudes : array_like
fractional amplitude parameters for each component
lifetimes : array_like
lifetime parameters for each component in seconds
t : array_like
dwelltime observations in seconds
min_observation_time : float
minimum observation time in seconds
max_observation_time : float
maximum observation time in seconds
"""
amplitudes = amplitudes[:, np.newaxis]
lifetimes = lifetimes[:, np.newaxis]
t = t[np.newaxis, :]
norm_factor = np.log(amplitudes) + np.log(
np.exp(-min_observation_time / lifetimes) - np.exp(-max_observation_time / lifetimes)
)
log_norm_factor = logsumexp(norm_factor, axis=0)
return -log_norm_factor + np.log(amplitudes) - np.log(lifetimes) - t / lifetimes
def exponential_mixture_log_likelihood(params, t, min_observation_time, max_observation_time):
"""Calculate the log likelihood of an exponential mixture distribution.
The full log likelihood for a single observation is given by:
log(L) = log( sum_i( exp( log(component_i) ) ) )
where log(component_i) is output from `exponential_mixture_log_likelihood_components()`
Parameters
----------
    params : array_like
        model parameters ordered as [amplitude1, amplitude2, ..., lifetime1, lifetime2, ...]
t : array_like
dwelltime observations in seconds
min_observation_time : float
minimum observation time in seconds
max_observation_time : float
maximum observation time in seconds
"""
params = np.reshape(params, (2, -1))
components = exponential_mixture_log_likelihood_components(
params[0], params[1], t, min_observation_time, max_observation_time
)
log_likelihood = logsumexp(components, axis=0)
return -np.sum(log_likelihood)
def _kinetic_mle_optimize(
n_components, t, min_observation_time, max_observation_time, initial_guess=None
):
"""Calculate the maximum likelihood estimate of the model parameters given measured dwelltimes.
Parameters
----------
n_components : int
number of components in the mixture model
t : array_like
dwelltime observations in seconds
min_observation_time : float
minimum observation time in seconds
max_observation_time : float
maximum observation time in seconds
initial_guess : array_like
initial guess for the model parameters ordered as
[amplitude1, amplitude2, ..., lifetime1, lifetime2, ...]
"""
if np.any(np.logical_or(t < min_observation_time, t > max_observation_time)):
raise ValueError(
"some data is outside of the bounded region. Please choose"
"appropriate values for `min_observation_time` and/or `max_observation_time`."
)
cost_fun = partial(
exponential_mixture_log_likelihood,
t=t,
min_observation_time=min_observation_time,
max_observation_time=max_observation_time,
)
if initial_guess is None:
initial_guess_amplitudes = np.ones(n_components) / n_components
initial_guess_lifetimes = np.mean(t) * np.arange(1, n_components + 1)
initial_guess = np.hstack([initial_guess_amplitudes, initial_guess_lifetimes])
bounds = (
*[(np.finfo(float).eps, 1) for _ in range(n_components)],
*[(min_observation_time * 0.1, max_observation_time * 1.1) for _ in range(n_components)],
)
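    # Equality constraint: the fractional amplitudes must sum to one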
constraints = {"type": "eq", "fun": lambda x, n: 1 - sum(x[:n]), "args": [n_components]}
result = minimize(
cost_fun, initial_guess, method="SLSQP", bounds=bounds, constraints=constraints
)
return BindingDwelltimes(
n_components, t, (min_observation_time, max_observation_time), result.x, -result.fun
)
| 38.913669
| 101
| 0.638442
| 1,947
| 16,227
| 5.178737
| 0.176168
| 0.052068
| 0.024993
| 0.013191
| 0.325102
| 0.278588
| 0.249727
| 0.231578
| 0.200932
| 0.179907
| 0
| 0.009721
| 0.264621
| 16,227
| 416
| 102
| 39.007212
| 0.835247
| 0.378443
| 0
| 0.121827
| 0
| 0.005076
| 0.082972
| 0.013124
| 0
| 0
| 0
| 0
| 0
| 1
| 0.091371
| false
| 0
| 0.030457
| 0
| 0.233503
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88d788b313f88688621166e29c634f6bf47ff41a
| 508
|
py
|
Python
|
GUI/crab.py
|
31337H4X0R/crab-tracker
|
c822a40010d172ba797b5de8c340931d0feea6e4
|
[
"MIT"
] | 1
|
2019-07-31T01:32:17.000Z
|
2019-07-31T01:32:17.000Z
|
GUI/crab.py
|
31337H4X0R/crab-tracker
|
c822a40010d172ba797b5de8c340931d0feea6e4
|
[
"MIT"
] | 47
|
2017-11-04T02:04:42.000Z
|
2018-06-16T01:00:48.000Z
|
GUI/crab.py
|
31337H4X0R/crab-tracker
|
c822a40010d172ba797b5de8c340931d0feea6e4
|
[
"MIT"
] | 2
|
2018-06-10T21:58:49.000Z
|
2019-06-18T17:21:03.000Z
|
class Crab:
def __init__(self, crab_id, sex, species, color, damage, carapace, mass, epibiont, molt):
self.id = crab_id
self.sex = sex
self.species = species
self.color = color
self.damage = damage
self.carapace = carapace
self.mass = mass
self.epibiont = epibiont
self.molt = molt
def get_tuple(self):
return self.id, self.sex, self.species, self.color, self.damage, self.carapace, self.mass, self.epibiont, self.molt
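# --- Usage sketch (illustrative; not part of the original file) ---
# Constructing a Crab record with made-up example values and round-tripping it
# as a tuple (the field order matches get_tuple(), e.g. for a database insert).
if __name__ == "__main__":
    crab = Crab(
        crab_id=1, sex="F", species="Dungeness", color="red", damage="none",
        carapace=152.0, mass=910.0, epibiont="no", molt="intermolt",
    )
    print(crab.get_tuple())
    # (1, 'F', 'Dungeness', 'red', 'none', 152.0, 910.0, 'no', 'intermolt')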
| 33.866667
| 123
| 0.622047
| 66
| 508
| 4.681818
| 0.257576
| 0.038835
| 0.058252
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.277559
| 508
| 14
| 124
| 36.285714
| 0.841962
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0
| 0.076923
| 0.307692
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88da52cb71a753a4cfdc782a27d2b76618927365
| 2,821
|
py
|
Python
|
build/fbcode_builder_config.py
|
YangKian/LogDevice
|
e5c2168c11e9de867a1bcf519f95016e1c879b5c
|
[
"BSD-3-Clause"
] | 1,831
|
2018-09-12T15:41:52.000Z
|
2022-01-05T02:38:03.000Z
|
build/fbcode_builder_config.py
|
YangKian/LogDevice
|
e5c2168c11e9de867a1bcf519f95016e1c879b5c
|
[
"BSD-3-Clause"
] | 183
|
2018-09-12T16:14:59.000Z
|
2021-12-07T15:49:43.000Z
|
build/fbcode_builder_config.py
|
YangKian/LogDevice
|
e5c2168c11e9de867a1bcf519f95016e1c879b5c
|
[
"BSD-3-Clause"
] | 228
|
2018-09-12T15:41:51.000Z
|
2022-01-05T08:12:09.000Z
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import specs.fizz as fizz
import specs.fmt as fmt
import specs.folly as folly
import specs.sodium as sodium
import specs.wangle as wangle
import specs.zstd as zstd
from shell_quoting import ShellQuoted
"fbcode_builder steps to build & test LogDevice"
"""
Running this script from the command line on a dev-server:
1. Ensure you have the HTTP proxy configured in your environment.
2. This env item is not compatible with the scutil create call, so it must
   not be permanently exported.
git config --global http.proxy http://fwdproxy:8080
cd .../fbcode/logdevice/public_tld/build
HTTP_PROXY=http://fwdproxy:8080 HTTPS_PROXY=http://fwdproxy:8080 \
fbcode/opensource/fbcode_builder/facebook_make_legocastle_job.py \
| scutil create
Which outputs a legocastle job to stdout; to be fed into scutil create ...
"""
class FakeClangModule:
"""
fbcode_builder doesn't allow us to inject build stuff before building
dependencies. This is a hack to set the compiler to clang by injecting
it as a fake module that runs before any other dependency.
"""
@staticmethod
def fbcode_builder_spec(builder):
return {
"depends_on": [],
"steps": [
builder.set_env("CC", "clang-9"),
builder.set_env("CXX", "clang++-9"),
],
}
def fbcode_builder_spec(builder):
# This API should change rarely, so build the latest tag instead of master.
builder.add_option(
"no1msd/mstch:git_hash", ShellQuoted("$(git describe --abbrev=0 --tags)")
)
builder.add_option("PYTHON_VENV", "ON")
builder.add_option(
"LogDevice/logdevice/_build:cmake_defines", {"BUILD_SUBMODULES": "OFF"}
)
builder.add_option(
"facebook/folly:cmake_defines",
{"BUILD_SHARED_LIBS": "ON", "BUILD_TESTS": "OFF", "FOLLY_USE_JEMALLOC": "OFF"},
)
return {
"depends_on": [FakeClangModule, zstd, fmt, folly, fizz, wangle, sodium],
"steps": [
            # This isn't a separate spec, since only fbthrift uses mstch.
builder.github_project_workdir("no1msd/mstch", "build"),
builder.cmake_install("no1msd/mstch"),
builder.fb_github_cmake_install("fbthrift/thrift"),
builder.fb_github_cmake_install(
"LogDevice/logdevice/_build", github_org="facebookincubator"
),
],
}
config = {
"github_project": "facebookincubator/LogDevice",
"fbcode_builder_spec": fbcode_builder_spec,
}
| 31.344444
| 87
| 0.684864
| 367
| 2,821
| 5.111717
| 0.493188
| 0.048507
| 0.036247
| 0.033582
| 0.084222
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009046
| 0.216235
| 2,821
| 89
| 88
| 31.696629
| 0.839439
| 0.197093
| 0
| 0.23913
| 0
| 0
| 0.269529
| 0.084675
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0.173913
| 0.021739
| 0.282609
| 0.021739
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88dac5f28a975211597a7acd699981246fdfddd1
| 1,883
|
py
|
Python
|
Python-Programs/Discord-bot-Motivation Bot/main.py
|
adityaverma121/Simple-Programs
|
8450560b97f89e0fa3da16a623ad35c0b26409c9
|
[
"MIT"
] | 71
|
2021-09-30T11:25:12.000Z
|
2021-10-03T11:33:22.000Z
|
Python-Programs/Discord-bot-Motivation Bot/main.py
|
adityaverma121/Simple-Programs
|
8450560b97f89e0fa3da16a623ad35c0b26409c9
|
[
"MIT"
] | 186
|
2021-09-30T12:25:16.000Z
|
2021-10-03T13:45:04.000Z
|
Python-Programs/Discord-bot-Motivation Bot/main.py
|
adityaverma121/Simple-Programs
|
8450560b97f89e0fa3da16a623ad35c0b26409c9
|
[
"MIT"
] | 385
|
2021-09-30T11:34:23.000Z
|
2021-10-03T13:41:00.000Z
|
import json
import os
import random
import string
import requests
from keep_alive import keep_alive
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import discord
client = discord.Client()
starter_motivator = [
"Cheer Up!",
"Always remember, I am here for you!",
"You are a great person. Remember this!",
"Think positive man! There is always a bright side!",
"What about you watching a funny video to swing the mood?",
]
def get_quote():
response = requests.get("https://zenquotes.io/api/random")
json_data = json.loads(response.text)
quote = f"`{json_data[0]['q']}`" + " -" + json_data[0]["a"]
return quote
@client.event
async def on_ready():
print("Logged in as {0.user}".format(client))
@client.event
async def on_message(message):
if message.author == client.user:
return
msg = message.content.lower()
if (
(msg.startswith("$hello"))
or (msg.startswith("$hi"))
or (msg.startswith("$hey"))
):
await message.channel.send(
"Hello there! Nice to see you !!\nHow are you feeling?"
)
if msg.startswith("$motivate"):
quote = get_quote()
await message.channel.send(quote)
if msg.startswith("$help"):
await message.channel.send(
"This is bot help.\nCommands:\n*` $hey, $hello, $hi `:- Bot responds.\n*` $motivate `:- Generates motivating quotes.\n*` $help `:- Bot help."
)
cleaned_text = msg.translate(str.maketrans("", "", string.punctuation))
score = SentimentIntensityAnalyzer().polarity_scores(cleaned_text)
neg = score["neg"]
pos = score["pos"]
if neg > pos:
await message.channel.send(
"I am sensing `Negative Sentiment` from you.\n"
+ f"`{random.choice(starter_motivator)}`"
)
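# --- Sketch of the sentiment check used above (illustrative; not part of the bot) ---
# VADER needs its lexicon downloaded once, e.g. `import nltk; nltk.download("vader_lexicon")`.
# polarity_scores() returns 'neg'/'neu'/'pos'/'compound' scores; on_message only
# compares 'neg' against 'pos' to decide whether to send a motivational reply.
# `_would_motivate` is a hypothetical helper name used only for this sketch.
def _would_motivate(text: str) -> bool:
    cleaned = text.lower().translate(str.maketrans("", "", string.punctuation))
    score = SentimentIntensityAnalyzer().polarity_scores(cleaned)
    return score["neg"] > score["pos"]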
keep_alive()
client.run(os.environ["TOKEN"])
| 25.445946
| 153
| 0.630377
| 238
| 1,883
| 4.92437
| 0.483193
| 0.055461
| 0.064846
| 0.078498
| 0.035836
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002068
| 0.229421
| 1,883
| 73
| 154
| 25.794521
| 0.805651
| 0
| 0
| 0.090909
| 0
| 0.018182
| 0.305895
| 0.030271
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018182
| false
| 0
| 0.145455
| 0
| 0.2
| 0.018182
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88df26756f4d2511b8925b4ee5ec1ed8cec09d0b
| 1,234
|
py
|
Python
|
utils.py
|
wangke0809/learn-statistical-learning-method
|
10772659ff52ef64e7ff36dd3b701615e58de335
|
[
"MIT"
] | null | null | null |
utils.py
|
wangke0809/learn-statistical-learning-method
|
10772659ff52ef64e7ff36dd3b701615e58de335
|
[
"MIT"
] | null | null | null |
utils.py
|
wangke0809/learn-statistical-learning-method
|
10772659ff52ef64e7ff36dd3b701615e58de335
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
def load_mnist(path='mnist'):
data_dir = os.path.join("./data", path)
    # Open the raw idx files in binary mode; np.float64 replaces the np.float
    # alias that newer NumPy versions no longer provide.
    fd = open(os.path.join(data_dir, 'train-images-idx3-ubyte'), 'rb')
    loaded = np.fromfile(file=fd, dtype=np.uint8)
    trX = loaded[16:].reshape((60000, 28, 28, 1)).astype(np.float64)
    fd = open(os.path.join(data_dir, 'train-labels-idx1-ubyte'), 'rb')
    loaded = np.fromfile(file=fd, dtype=np.uint8)
    trY = loaded[8:].reshape(60000).astype(np.float64)
    fd = open(os.path.join(data_dir, 't10k-images-idx3-ubyte'), 'rb')
    loaded = np.fromfile(file=fd, dtype=np.uint8)
    teX = loaded[16:].reshape((10000, 28, 28, 1)).astype(np.float64)
    fd = open(os.path.join(data_dir, 't10k-labels-idx1-ubyte'), 'rb')
    loaded = np.fromfile(file=fd, dtype=np.uint8)
    teY = loaded[8:].reshape(10000).astype(np.float64)
trY = np.asarray(trY)
teY = np.asarray(teY)
# X = np.concatenate((trX, teX), axis=0)
# y = np.concatenate((trY, teY), axis=0).astype(np.int)
# seed = 547
# np.random.seed(seed)
# np.random.shuffle(X)
# np.random.seed(seed)
# np.random.shuffle(y)
seed = 200
np.random.seed(seed)
np.random.shuffle(trX)
np.random.seed(seed)
np.random.shuffle(trY)
return (trX, trY, teX, teY)
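# --- Usage sketch (illustrative; not part of the original file) ---
# Assumes the four raw MNIST idx files are present under ./data/mnist/.
if __name__ == "__main__":
    trX, trY, teX, teY = load_mnist("mnist")
    print(trX.shape, trY.shape)  # expected: (60000, 28, 28, 1) (60000,)
    print(teX.shape, teY.shape)  # expected: (10000, 28, 28, 1) (10000,)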
| 29.380952
| 63
| 0.638574
| 198
| 1,234
| 3.949495
| 0.262626
| 0.081841
| 0.063939
| 0.089514
| 0.612532
| 0.612532
| 0.612532
| 0.453964
| 0.411765
| 0.411765
| 0
| 0.054795
| 0.171799
| 1,234
| 42
| 64
| 29.380952
| 0.710372
| 0.15154
| 0
| 0.25
| 0
| 0
| 0.097115
| 0.086538
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.083333
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88e15f87d60572f7354fdd0f31ccffeddc289a42
| 2,210
|
py
|
Python
|
src/analysis.py
|
aliFrancis/mars-crater-catalogue
|
5e6ac4e1f7967b1d37d95e436edaa31ef2f2ed55
|
[
"CC-BY-4.0"
] | null | null | null |
src/analysis.py
|
aliFrancis/mars-crater-catalogue
|
5e6ac4e1f7967b1d37d95e436edaa31ef2f2ed55
|
[
"CC-BY-4.0"
] | null | null | null |
src/analysis.py
|
aliFrancis/mars-crater-catalogue
|
5e6ac4e1f7967b1d37d95e436edaa31ef2f2ed55
|
[
"CC-BY-4.0"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sn
from utils import convert, iou
def average_pairwise_IOU(IOU_mat):
n = IOU_mat.shape[0]
mean_IOU = (np.sum(IOU_mat)-n)/(np.size(IOU_mat)-n)
return mean_IOU
def group_IOU_matrices(paths):
survey_names = [p.replace('.xml','')[-6:] for p in paths]
surveys = [convert.xml2df(p) for p in paths]
binary_IOUs = []
IOUs = []
for i,s_i in enumerate(surveys):
iou_i = []
for j,s_j in enumerate(surveys):
if j!=i:
iou_i.append(iou.all_ious_np(s_i,s_j))
iou_i = np.concatenate(iou_i,axis=1) #Compare 1 person's annotations to everyone else's
iou_max_i = np.max(iou_i,axis=1)
binary_IOU_i = iou_max_i>=0.5
binary_IOUs.append(np.mean(binary_IOU_i))
IOUs.append(np.mean(iou_max_i))
return binary_IOUs, IOUs
if __name__ == '__main__':
import os
import sys
survey_dir = sys.argv[1]
paths = [os.path.join(survey_dir,path) for path in os.listdir(survey_dir)]
surveys = [convert.xml2df(p) for p in paths]
print('\nANALYSIS OF {}'.format(os.path.basename(survey_dir)),'\n')
print(' NO. OF ANNOTATIONS')
print(' ------------------')
for survey,path in zip(surveys,paths):
print(' ',os.path.basename(path).replace('.xml','')+':',len(survey))
total_survey = convert.dfs2df(surveys)
print(' ____________')
print(' TOTAL :',len(total_survey))
print('\n')
group_binary_IOUs, group_IOUs = group_IOU_matrices(paths)
print(' MEAN IoU')
print(' --------')
for i,path in enumerate(paths):
print(' ',os.path.basename(path).replace('.xml','')+':',np.round(group_IOUs[i],4))
print(' ____________')
print(' MEAN :',np.round(np.mean(group_IOUs),4))
print('\n')
print('\n MEAN BINARY IoU (IoU treated as 1 if above 0.5)')
print(' -----------------------------------------------')
for i,path in enumerate(paths):
print(' ',os.path.basename(path).replace('.xml','')+':',np.round(group_binary_IOUs[i],4))
print(' ____________')
print(' MEAN :',np.round(np.mean(group_binary_IOUs),4))
print('\n')
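# --- Illustration of average_pairwise_IOU (not part of the original script) ---
# An IoU matrix between annotators has ones on the diagonal (self-comparisons);
# average_pairwise_IOU() averages only the off-diagonal entries.
# `_average_pairwise_iou_example` is a hypothetical name used only for this sketch.
def _average_pairwise_iou_example():
    toy = np.array([[1.0, 0.6, 0.4],
                    [0.6, 1.0, 0.5],
                    [0.4, 0.5, 1.0]])
    return average_pairwise_IOU(toy)  # -> 0.5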
| 34.53125
| 97
| 0.608597
| 326
| 2,210
| 3.831288
| 0.251534
| 0.022418
| 0.044836
| 0.026421
| 0.261009
| 0.261009
| 0.261009
| 0.261009
| 0.179343
| 0.179343
| 0
| 0.01021
| 0.202262
| 2,210
| 63
| 98
| 35.079365
| 0.698242
| 0.022172
| 0
| 0.181818
| 0
| 0
| 0.128241
| 0.021759
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036364
| false
| 0
| 0.127273
| 0
| 0.2
| 0.345455
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88e45fe24cb4ac33e12b90a494c738b76fd18630
| 3,033
|
py
|
Python
|
scripts/test_tensorflow_spectrogram.py
|
RichardLitt/Vesper
|
5360844f42a06942e7684121c650b08cf8616285
|
[
"MIT"
] | 29
|
2017-07-10T14:49:15.000Z
|
2022-02-02T23:14:38.000Z
|
scripts/test_tensorflow_spectrogram.py
|
Tubbz-alt/Vesper
|
76e5931ca0c7fbe070c53b1362ec246ec9007beb
|
[
"MIT"
] | 167
|
2015-03-17T14:45:22.000Z
|
2022-03-30T21:00:05.000Z
|
scripts/test_tensorflow_spectrogram.py
|
Tubbz-alt/Vesper
|
76e5931ca0c7fbe070c53b1362ec246ec9007beb
|
[
"MIT"
] | 4
|
2015-02-06T03:30:27.000Z
|
2020-12-27T08:38:52.000Z
|
"""
Compares spectrogram computations with TensorFlow and Vesper.
As of 2018-11-09, Vesper is a little more than three times faster than
TensorFlow at computing spectrograms with a DFT size of 128.
"""
import functools
import time
import numpy as np
import tensorflow as tf
import vesper.util.data_windows as data_windows
import vesper.util.time_frequency_analysis_utils as tfa_utils
SHOW_SPECTROGRAMS = False
SAMPLE_RATE = 24000 # Hertz
AMPLITUDE = 1
FREQUENCY = 3000 # Hertz
DURATION = 1000 # seconds
WINDOW_SIZE = .005 # seconds
HOP_SIZE = .5 # fraction of window size
if SHOW_SPECTROGRAMS:
SAMPLE_RATE = 1
FREQUENCY = .25
DURATION = 8
WINDOW_SIZE = 8
HOP_SIZE = 1
def main():
waveform = create_waveform()
window_size = int(round(WINDOW_SIZE * SAMPLE_RATE))
print('Window size is {} samples.'.format(window_size))
hop_size = int(round(window_size * HOP_SIZE))
print('Hop size is {} samples.'.format(hop_size))
gram = compute_tensorflow_spectrogram(waveform, window_size, hop_size)
if SHOW_SPECTROGRAMS:
print(gram)
gram = compute_vesper_spectrogram(waveform, window_size, hop_size)
if SHOW_SPECTROGRAMS:
print(gram)
def create_waveform():
length = int(round(DURATION * SAMPLE_RATE))
print('Waveform length is {} samples.'.format(length))
phases = 2 * np.pi * FREQUENCY / SAMPLE_RATE * np.arange(length)
return AMPLITUDE * np.cos(phases)
def compute_tensorflow_spectrogram(waveform, window_size, hop_size):
waveform_ = tf.placeholder(tf.float32)
window_fn = functools.partial(tf.signal.hann_window, periodic=True)
stft = tf.signal.stft(
waveform_, window_size, hop_size, window_fn=window_fn)
gram = tf.real(stft * tf.conj(stft))
with tf.Session() as sess:
print('Computing TensorFlow spectrogram...')
start_time = time.time()
g = sess.run(gram, feed_dict={waveform_: waveform})
end_time = time.time()
print('Done.')
report_performance(g, start_time, end_time)
return g
def report_performance(gram, start_time, end_time):
num_spectra = len(gram)
delta = end_time - start_time
print('Computed {} spectra in {:.1f} seconds.'.format(num_spectra, delta))
micros = int(round(1000000 * delta / num_spectra))
speedup = DURATION / delta
print((
"That's {} microseconds per spectrum, or {} times faster than "
"real time.").format(micros, speedup))
def compute_vesper_spectrogram(waveform, window_size, hop_size):
window = data_windows.create_window('Hann', window_size).samples
print('Computing Vesper spectrogram...')
start_time = time.time()
gram = tfa_utils.compute_spectrogram(waveform, window, hop_size)
end_time = time.time()
print('Done.')
report_performance(gram, start_time, end_time)
return gram
if __name__ == '__main__':
main()
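# --- Reference sketch (illustrative; neither the Vesper nor the TensorFlow path) ---
# A plain NumPy version of the quantity both libraries compute above: the power
# spectrum of Hann-windowed, hop-separated frames. Note np.hanning is the
# symmetric Hann window, which differs slightly from the periodic window used
# by the TensorFlow code above.
def compute_numpy_spectrogram(waveform, window_size, hop_size):
    window = np.hanning(window_size)
    num_spectra = (len(waveform) - window_size) // hop_size + 1
    frames = np.stack([
        waveform[i * hop_size:i * hop_size + window_size] * window
        for i in range(num_spectra)
    ])
    dft = np.fft.rfft(frames, axis=1)
    return (dft * dft.conj()).real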
| 26.146552
| 78
| 0.672272
| 386
| 3,033
| 5.059585
| 0.300518
| 0.071685
| 0.046595
| 0.060932
| 0.287762
| 0.222222
| 0.203277
| 0.174091
| 0.064516
| 0.064516
| 0
| 0.019692
| 0.229805
| 3,033
| 115
| 79
| 26.373913
| 0.816353
| 0.081438
| 0
| 0.157143
| 0
| 0
| 0.099495
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.085714
| 0
| 0.2
| 0.157143
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88e644ee347cd6bc7d3ce925d9db807476d778e2
| 2,770
|
py
|
Python
|
Station A/Pooling M300/v1_station_a_S30_pooling.py
|
Opentrons/covid19-system-30
|
4db5980a93e87f9f607b727678b7ea6d528109ba
|
[
"Apache-2.0"
] | null | null | null |
Station A/Pooling M300/v1_station_a_S30_pooling.py
|
Opentrons/covid19-system-30
|
4db5980a93e87f9f607b727678b7ea6d528109ba
|
[
"Apache-2.0"
] | null | null | null |
Station A/Pooling M300/v1_station_a_S30_pooling.py
|
Opentrons/covid19-system-30
|
4db5980a93e87f9f607b727678b7ea6d528109ba
|
[
"Apache-2.0"
] | 1
|
2020-07-29T14:52:28.000Z
|
2020-07-29T14:52:28.000Z
|
from opentrons import protocol_api
import json
import os
import math
# metadata
metadata = {
'protocolName': 'V1 S14 Station A MagMax',
'author': 'Nick <protocols@opentrons.com>',
'source': 'Custom Protocol Request',
'apiLevel': '2.4'
}
NUM_SAMPLES = 64
SAMPLE_VOLUME = 100
TIP_TRACK = False
def run(ctx: protocol_api.ProtocolContext):
# load labware
dest_plate = ctx.load_labware(
'nest_96_wellplate_2ml_deep', '2', '96-deepwell sample plate')
tipracks300 = [ctx.load_labware('opentrons_96_filtertiprack_200ul', '1',
'200µl filter tiprack')]
# load pipette
m300 = ctx.load_instrument(
'p300_multi_gen2', 'right', tip_racks=tipracks300)
tip_log = {'count': {}}
folder_path = '/data/A'
tip_file_path = folder_path + '/tip_log.json'
if TIP_TRACK and not ctx.is_simulating():
if os.path.isfile(tip_file_path):
with open(tip_file_path) as json_file:
data = json.load(json_file)
if 'tips1000' in data:
tip_log['count'][m300] = data['tips1000']
else:
tip_log['count'][m300] = 0
else:
tip_log['count'] = {m300: 0}
tip_log['tips'] = {
m300: [tip for rack in tipracks300 for tip in rack.rows()[0]]
}
tip_log['max'] = {
pip: len(tip_log['tips'][pip])
for pip in [m300]
}
def pick_up(pip):
nonlocal tip_log
if tip_log['count'][pip] == tip_log['max'][pip]:
ctx.pause('Replace ' + str(pip.max_volume) + 'µl tipracks before \
resuming.')
pip.reset_tipracks()
tip_log['count'][pip] = 0
pip.pick_up_tip(tip_log['tips'][pip][tip_log['count'][pip]])
tip_log['count'][pip] += 1
# pool samples
num_cols = math.ceil(NUM_SAMPLES/8)
for i in range(math.ceil(num_cols/2)):
if num_cols % 2 != 0 and i == math.ceil(num_cols/2) - 1:
pool_source_set = [dest_plate.rows()[0][num_cols]]
vol = SAMPLE_VOLUME*2
else:
pool_source_set = dest_plate.rows()[0][i*2:i*2+2]
vol = SAMPLE_VOLUME
for s in pool_source_set:
pick_up(m300)
m300.transfer(vol, s, dest_plate.rows()[0][i+8], air_gap=20,
new_tip='never')
m300.air_gap(20)
m300.drop_tip()
ctx.comment('Move deepwell plate (slot 2) to Station B for RNA \
extraction.')
# track final used tip
if not ctx.is_simulating():
if not os.path.isdir(folder_path):
os.mkdir(folder_path)
data = {'tips1000': tip_log['count'][m300]}
with open(tip_file_path, 'w') as outfile:
json.dump(data, outfile)
| 31.123596
| 78
| 0.574729
| 376
| 2,770
| 4.023936
| 0.345745
| 0.06345
| 0.065433
| 0.039656
| 0.179114
| 0.088566
| 0.035691
| 0
| 0
| 0
| 0
| 0.054592
| 0.292419
| 2,770
| 88
| 79
| 31.477273
| 0.717347
| 0.024549
| 0
| 0.042254
| 0
| 0
| 0.132047
| 0.030786
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028169
| false
| 0
| 0.056338
| 0
| 0.084507
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88e9ebaa162edc8b9d8c063256bea5900e94971c
| 5,101
|
py
|
Python
|
Reuters/reuters.py
|
dheeraj7596/SCDV
|
e83fc81e1b59bebfa2fa1e334097caa44f9e7f48
|
[
"MIT"
] | 60
|
2017-05-25T14:08:50.000Z
|
2022-02-04T19:29:44.000Z
|
Reuters/reuters.py
|
vgupta123/SCDV
|
329b13a413318262f1888d872d8e33b30217cbc7
|
[
"MIT"
] | 2
|
2020-03-27T14:01:12.000Z
|
2020-07-16T14:33:31.000Z
|
Reuters/reuters.py
|
vgupta123/SCDV
|
329b13a413318262f1888d872d8e33b30217cbc7
|
[
"MIT"
] | 19
|
2017-11-10T01:06:28.000Z
|
2021-09-25T19:31:25.000Z
|
# Reuters-21578 dataset downloader and parser
#
# Author: Eustache Diemert <eustache@diemert.fr>
# http://scikit-learn.org/stable/auto_examples/applications/plot_out_of_core_classification.html
#
# Modified by @herrfz, get pandas DataFrame from the orig SGML
# License: BSD 3 clause
from __future__ import print_function
import re
import os.path
import fnmatch
import sgmllib
import urllib
import tarfile
import itertools
from pandas import DataFrame
###############################################################################
# Reuters Dataset related routines
###############################################################################
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
class ReutersParser(sgmllib.SGMLParser):
"""Utility class to parse a SGML file and yield documents one at a time."""
def __init__(self, verbose=0):
sgmllib.SGMLParser.__init__(self, verbose)
self._reset()
def _reset(self):
self.in_title = 0
self.in_body = 0
self.in_topics = 0
self.in_topic_d = 0
self.title = ""
self.body = ""
self.topics = []
self.topic_d = ""
def parse(self, fd):
self.docs = []
try:
for chunk in fd:
self.feed(chunk)
for doc in self.docs:
yield doc
self.docs = []
except:
pass
self.close()
def handle_data(self, data):
if self.in_body:
self.body += data
elif self.in_title:
self.title += data
elif self.in_topic_d:
self.topic_d += data
def start_reuters(self, attributes):
pass
def end_reuters(self):
self.body = re.sub(r'\s+', r' ', self.body)
self.docs.append({'title': self.title,
'body': self.body,
'topics': self.topics})
self._reset()
def start_title(self, attributes):
self.in_title = 1
def end_title(self):
self.in_title = 0
def start_body(self, attributes):
self.in_body = 1
def end_body(self):
self.in_body = 0
def start_topics(self, attributes):
self.in_topics = 1
def end_topics(self):
self.in_topics = 0
def start_d(self, attributes):
self.in_topic_d = 1
def end_d(self):
self.in_topic_d = 0
self.topics.append(self.topic_d)
self.topic_d = ""
class ReutersStreamReader():
"""Iterate over documents of the Reuters dataset.
The Reuters archive will automatically be downloaded and uncompressed if
the `data_path` directory does not exist.
Documents are represented as dictionaries with 'body' (str),
'title' (str), 'topics' (list(str)) keys.
"""
DOWNLOAD_URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/'
'reuters21578-mld/reuters21578.tar.gz')
ARCHIVE_FILENAME = 'reuters21578.tar.gz'
def __init__(self, data_path):
self.data_path = data_path
if not os.path.exists(self.data_path):
self.download_dataset()
def download_dataset(self):
"""Download the dataset."""
print("downloading dataset (once and for all) into %s" %
self.data_path)
os.mkdir(self.data_path)
def progress(blocknum, bs, size):
total_sz_mb = '%.2f MB' % (size / 1e6)
current_sz_mb = '%.2f MB' % ((blocknum * bs) / 1e6)
if _not_in_sphinx():
print('\rdownloaded %s / %s' % (current_sz_mb, total_sz_mb),
end='')
urllib.urlretrieve(self.DOWNLOAD_URL,
filename=os.path.join(self.data_path,
self.ARCHIVE_FILENAME),
reporthook=progress)
if _not_in_sphinx():
print('\r', end='')
print("untaring data ...")
tfile = tarfile.open(os.path.join(self.data_path,
self.ARCHIVE_FILENAME),
'r:gz')
tfile.extractall(self.data_path)
print("done !")
def iterdocs(self):
"""Iterate doc by doc, yield a dict."""
for root, _dirnames, filenames in os.walk(self.data_path):
for filename in fnmatch.filter(filenames, '*.sgm'):
path = os.path.join(root, filename)
parser = ReutersParser()
for doc in parser.parse(open(path)):
yield doc
def get_minibatch(doc_iter, size):
"""Extract a minibatch of examples, return a tuple X, y.
Note: size is before excluding invalid docs with no topics assigned.
"""
data = [('{title}\n\n{body}'.format(**doc), doc['topics'])
for doc in itertools.islice(doc_iter, size)
if doc['topics']]
if not len(data):
return DataFrame([])
else:
return DataFrame(data, columns=['text', 'tags'])
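# --- Usage sketch (illustrative; not part of the original module) ---
# Note this module targets Python 2 (sgmllib, urllib.urlretrieve) and needs
# network access on first use to download the Reuters-21578 archive.
# './reuters' is an arbitrary example path.
if __name__ == '__main__':
    reader = ReutersStreamReader('./reuters')
    minibatch = get_minibatch(reader.iterdocs(), 100)
    print(minibatch.shape)  # DataFrame with 'text' and 'tags' columns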
| 30.183432
| 96
| 0.556558
| 606
| 5,101
| 4.516502
| 0.325083
| 0.032883
| 0.039459
| 0.017537
| 0.078919
| 0.042382
| 0.02996
| 0.02996
| 0.02996
| 0
| 0
| 0.011338
| 0.308371
| 5,101
| 168
| 97
| 30.363095
| 0.764456
| 0.172123
| 0
| 0.19469
| 0
| 0
| 0.072482
| 0.008998
| 0
| 0
| 0
| 0
| 0
| 1
| 0.176991
| false
| 0.017699
| 0.079646
| 0.00885
| 0.318584
| 0.053097
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88ebd6984c24756abffb46e10ea57a9b5e6af63f
| 485
|
py
|
Python
|
ratemyprof_api/professor.py
|
nananananate/ratemyprof-api
|
c037e68e763154cc60812393538c7aa380fbb90e
|
[
"MIT"
] | 7
|
2021-09-29T22:48:56.000Z
|
2022-02-23T16:54:10.000Z
|
ratemyprof_api/professor.py
|
nananananate/ratemyprof-api
|
c037e68e763154cc60812393538c7aa380fbb90e
|
[
"MIT"
] | null | null | null |
ratemyprof_api/professor.py
|
nananananate/ratemyprof-api
|
c037e68e763154cc60812393538c7aa380fbb90e
|
[
"MIT"
] | 1
|
2021-11-19T02:48:08.000Z
|
2021-11-19T02:48:08.000Z
|
class Professor:
def __init__(self, ratemyprof_id: int, first_name: str, last_name: str, num_of_ratings: int, overall_rating):
self.ratemyprof_id = ratemyprof_id
self.name = f"{first_name} {last_name}"
self.first_name = first_name
self.last_name = last_name
self.num_of_ratings = num_of_ratings
if self.num_of_ratings < 1:
self.overall_rating = 0
else:
self.overall_rating = float(overall_rating)
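# --- Usage sketch (illustrative; not part of the original file) ---
# With zero ratings the constructor forces overall_rating to 0 instead of
# parsing the raw value, so an unparsable placeholder like "N/A" is safe here.
if __name__ == "__main__":
    prof = Professor(
        ratemyprof_id=12345, first_name="Ada", last_name="Lovelace",
        num_of_ratings=0, overall_rating="N/A",
    )
    print(prof.name, prof.overall_rating)  # Ada Lovelace 0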
| 30.3125
| 113
| 0.65567
| 66
| 485
| 4.409091
| 0.348485
| 0.123711
| 0.164948
| 0.109966
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005602
| 0.263918
| 485
| 15
| 114
| 32.333333
| 0.809524
| 0
| 0
| 0
| 0
| 0
| 0.049587
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88ec6c26cd7a2f727e00f467fdd178e22cb46386
| 810
|
py
|
Python
|
hello/hello_sqlite.py
|
East196/hello-py
|
a77c7a0c8e5e2b5e8cefaf0fda335ab0c3b1da21
|
[
"Apache-2.0"
] | 1
|
2017-10-23T14:58:47.000Z
|
2017-10-23T14:58:47.000Z
|
hello/hello_sqlite.py
|
East196/hello-py
|
a77c7a0c8e5e2b5e8cefaf0fda335ab0c3b1da21
|
[
"Apache-2.0"
] | null | null | null |
hello/hello_sqlite.py
|
East196/hello-py
|
a77c7a0c8e5e2b5e8cefaf0fda335ab0c3b1da21
|
[
"Apache-2.0"
] | 1
|
2018-04-06T07:49:18.000Z
|
2018-04-06T07:49:18.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Import the SQLite driver:
import sqlite3
# Connect to the SQLite database.
# The database file is hello.db.
# If it does not exist, it will be created automatically in the current directory:
conn = sqlite3.connect('hello.db')
# Create a Cursor:
cursor = conn.cursor()
cursor.execute('drop table if exists user')
# Execute a SQL statement to create the user table:
cursor.execute('create table user (id varchar(20) primary key, name varchar(20))')
# Execute another SQL statement to insert a record:
cursor.execute('insert into user (id, name) values (\'1\', \'Michael\')')
cursor.execute('insert into user (id, name) values (\'2\', \'Jackson\')')
# Use rowcount to get the number of rows inserted:
print(cursor.rowcount)
# Query:
print(cursor.execute('select * from user').fetchall())
print(cursor.execute('select * from user').fetchmany(size=1))
print(cursor.execute('select * from user').fetchone())
# Close the Cursor:
cursor.close()
# Commit the transaction:
conn.commit()
# Close the Connection:
conn.close()
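# --- Addendum (illustrative; not part of the original script) ---
# The same kind of insert written with sqlite3's '?' placeholders, which avoids
# the hand-escaped quotes used above.
conn = sqlite3.connect('hello.db')
cursor = conn.cursor()
cursor.executemany('insert into user (id, name) values (?, ?)',
                   [('3', 'Lisa'), ('4', 'Bart')])
conn.commit()
print(cursor.execute('select * from user').fetchall())
cursor.close()
conn.close()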
| 26.129032
| 82
| 0.707407
| 103
| 810
| 5.563107
| 0.553398
| 0.158813
| 0.094241
| 0.125654
| 0.303665
| 0.303665
| 0.136126
| 0.136126
| 0
| 0
| 0
| 0.013793
| 0.104938
| 810
| 31
| 83
| 26.129032
| 0.776552
| 0.253086
| 0
| 0
| 0
| 0
| 0.376689
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.071429
| 0
| 0.071429
| 0.285714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88edaae7baa65ef0737db43dff89261e7016c55e
| 1,324
|
py
|
Python
|
ig_data/InstaSearch.py
|
swapnanildutta/instagram-search
|
919a3383f0f7789671108f899d9ba9092a69009f
|
[
"MIT"
] | 1
|
2022-01-04T16:51:50.000Z
|
2022-01-04T16:51:50.000Z
|
ig_data/InstaSearch.py
|
swapnanildutta/instagram-search
|
919a3383f0f7789671108f899d9ba9092a69009f
|
[
"MIT"
] | 3
|
2020-10-26T13:31:05.000Z
|
2022-01-05T23:11:42.000Z
|
ig_data/InstaSearch.py
|
swapnanildutta/instagram-search
|
919a3383f0f7789671108f899d9ba9092a69009f
|
[
"MIT"
] | 2
|
2020-04-07T09:24:07.000Z
|
2020-04-14T06:38:49.000Z
|
# imports
import requests, json
# beautifulsoup4
from bs4 import BeautifulSoup
def searchDisplay(username):
# base url for the data
url = 'https://www.instagram.com/{}/'.format(username)
try:
req = requests.get(url).content
soup=BeautifulSoup(req,"html.parser")
row=soup.find_all('script')
details=str(row[3]).strip("<script type=></")[22:].strip()
account=json.loads(details)
try:
if len(account['description'])<1:
account['description']=""
except:
account['description']=""
print("Name : ",account['name'],'\t',"Username : ",account['alternateName'],
'\t',"Followers : ",account['mainEntityofPage']['interactionStatistic']['userInteractionCount'],'\n',
"Bio : ",account['description'])
except:
print('Not found or no internet connection')
def getDetails(username):
url = 'https://www.instagram.com/{}/'.format(username)
try:
req = requests.get(url).content
soup=BeautifulSoup(req,"html.parser")
row=soup.find_all('script')
details=row[3].text
account=json.loads(details)
return account
except:
print('Not found or no internet connection')
return {}
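# --- Usage sketch (illustrative; not part of the original module) ---
# Requires network access; Instagram may change its page markup, in which case
# the scraping above falls through to the 'Not found' branch.
if __name__ == "__main__":
    searchDisplay("instagram")          # prints name, username, followers and bio
    account = getDetails("instagram")   # raw JSON-LD dict, or {} on failure
    print(account.get("name"))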
| 33.948718
| 116
| 0.58006
| 135
| 1,324
| 5.674074
| 0.474074
| 0.093995
| 0.028721
| 0.052219
| 0.422977
| 0.422977
| 0.422977
| 0.422977
| 0.315927
| 0.315927
| 0
| 0.007202
| 0.265861
| 1,324
| 38
| 117
| 34.842105
| 0.780864
| 0.033233
| 0
| 0.625
| 0
| 0
| 0.272433
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.0625
| 0
| 0.1875
| 0.09375
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88ef1b2b45df53d3ae9e2451d75c76436af81011
| 2,369
|
py
|
Python
|
tests/jax_ops_test.py
|
ita9naiwa/fast-soft-sort
|
72cbd93ecc229736f9e05bfdfd0f48c09432904f
|
[
"Apache-2.0"
] | 389
|
2020-06-08T22:30:18.000Z
|
2022-03-25T23:04:28.000Z
|
tests/jax_ops_test.py
|
ita9naiwa/fast-soft-sort
|
72cbd93ecc229736f9e05bfdfd0f48c09432904f
|
[
"Apache-2.0"
] | 14
|
2020-06-21T13:21:51.000Z
|
2021-10-18T18:02:07.000Z
|
tests/jax_ops_test.py
|
ita9naiwa/fast-soft-sort
|
72cbd93ecc229736f9e05bfdfd0f48c09432904f
|
[
"Apache-2.0"
] | 32
|
2020-06-20T17:25:10.000Z
|
2022-03-26T13:34:23.000Z
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for jax_ops.py."""
import functools
import itertools
import unittest
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import jax.numpy as jnp
import jax
from jax.config import config
config.update("jax_enable_x64", True)
from fast_soft_sort import jax_ops
GAMMAS = (0.1, 1, 10.0)
DIRECTIONS = ("ASCENDING", "DESCENDING")
REGULARIZERS = ("l2", )
class JaxOpsTest(parameterized.TestCase):
def _test(self, func, regularization_strength, direction, regularization):
def loss_func(values):
soft_values = func(values,
regularization_strength=regularization_strength,
direction=direction,
regularization=regularization)
return jnp.sum(soft_values ** 2)
rng = np.random.RandomState(0)
values = jnp.array(rng.randn(5, 10))
mat = jnp.array(rng.randn(5, 10))
unitmat = mat / np.sqrt(np.vdot(mat, mat))
eps = 1e-5
numerical = (loss_func(values + 0.5 * eps * unitmat) -
loss_func(values - 0.5 * eps * unitmat)) / eps
autodiff = jnp.vdot(jax.grad(loss_func)(values), unitmat)
np.testing.assert_almost_equal(numerical, autodiff)
@parameterized.parameters(itertools.product(GAMMAS, DIRECTIONS, REGULARIZERS))
def test_soft_rank(self, regularization_strength, direction, regularization):
self._test(jax_ops.soft_rank,
regularization_strength, direction, regularization)
@parameterized.parameters(itertools.product(GAMMAS, DIRECTIONS, REGULARIZERS))
def test_soft_sort(self, regularization_strength, direction, regularization):
self._test(jax_ops.soft_sort,
regularization_strength, direction, regularization)
if __name__ == "__main__":
absltest.main()
| 32.452055
| 80
| 0.71718
| 304
| 2,369
| 5.457237
| 0.4375
| 0.092827
| 0.112116
| 0.135624
| 0.229054
| 0.229054
| 0.206148
| 0.174804
| 0.174804
| 0.174804
| 0
| 0.016154
| 0.189954
| 2,369
| 72
| 81
| 32.902778
| 0.848359
| 0.24103
| 0
| 0.097561
| 0
| 0
| 0.024157
| 0
| 0
| 0
| 0
| 0
| 0.02439
| 1
| 0.097561
| false
| 0
| 0.243902
| 0
| 0.390244
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88f18a67e803424dde5d28eb3302913d647a3a2f
| 27,163
|
py
|
Python
|
src/pages/pe_cuencas.py
|
ValentinSilvestri/cammesa
|
33ff17ad4a0447fd4668b6adad1c4bbfd88aba8e
|
[
"MIT"
] | null | null | null |
src/pages/pe_cuencas.py
|
ValentinSilvestri/cammesa
|
33ff17ad4a0447fd4668b6adad1c4bbfd88aba8e
|
[
"MIT"
] | null | null | null |
src/pages/pe_cuencas.py
|
ValentinSilvestri/cammesa
|
33ff17ad4a0447fd4668b6adad1c4bbfd88aba8e
|
[
"MIT"
] | null | null | null |
import os
import re
import pymongo
import pandas as pd
import numpy as np
import streamlit as st
from bokeh.plotting import figure
from bokeh.palettes import Set1_9, Set3_12, Inferno256
@st.cache(suppress_st_warning=True, allow_output_mutation=True)
def get_caudales():
"""Function to obtain the rivers basin flows from MongoDB Atlas.
Returns:
DataFrame: Pandas DataFrame with the query result.
"""
st.spinner("Obteniendo los datos de caudales...")
client = pymongo.MongoClient(os.environ['MONGO'])
try:
collection_name = client['publicaciones-especiales']['cuencas-datos-hidraulicos']
project={
'_id': 0,
'fecha': 1,
'situacionCuencaComahue': {
'Caudal Collon Cura': 1,
'Caudal Neuquen': 1,
'Caudal Limay': 1,
'Caudal Río Negro': 1,
'Caudal Limay despues desembocadura de Collon Cura': 1
},
'situacionYacyretaSaltoGrande': {
'Caudal Río Uruguay': 1,
'Caudal Río Paraná': 1
},
'situacionCuencaPatagonica': {
'Caudal Río Chubut': 1,
'Caudal Río Futaleufu': 1
},
'situacionCuencaRioGrande': {
'Caudal Río Grande': 1
},
'situacionCuencaRioSanJuan': {
'Caudal Inicial Río San Juan': 1,
'Caudal Final Río San Juan': 1
}
}
df = pd.DataFrame(collection_name.find(projection=project))
return df
except Exception as e:
st.error(f'Opps, algo fallo\n{e}')
finally:
client.close()
@st.cache(suppress_st_warning=True, allow_output_mutation=True)
def get_cotas():
"""Function to obtai the rivers basin levels from MongoDB Atlas.
Returns:
DataFrame: Pandas DataFrame with the query result.
"""
st.spinner("Obteniendo los datos de cotas...")
client = pymongo.MongoClient(os.environ['MONGO'])
try:
collection_name = client['publicaciones-especiales']['cuencas-datos-hidraulicos']
project={
'_id': 0,
'fecha': 1,
'situacionCuencaComahue': {
'Cota Hoy Alicura': 1,
'Cota Min Alicura': 1,
'Cota Max Alicura': 1,
'Cota Hoy Mari Menuco': 1,
'Cota Min Mari Menuco': 1,
'Cota Max Mari Menuco': 1,
'Cota Hoy Piedra del Aguila': 1,
'Cota Min Piedra del Aguila': 1,
'Cota Max Piedra del Aguila': 1,
'Cota Hoy Planicie Banderita Barreales': 1,
'Cota Min Planicie Banderita Barreales': 1,
'Cota Max Planicie Banderita Barreales': 1,
'Cota Hoy Arroyito': 1,
'Cota Min Arroyito': 1,
'Cota Max Arroyito': 1,
'Cota Hoy El Chocon': 1,
'Cota Min El Chocon': 1,
'Cota Max El Chocon': 1,
'Cota Hoy P': {
'P': {
'Leufu': 1
}
}
},
'situacionYacyretaSaltoGrande': {
'Cota Hoy Yacyreta': 1,
'Cota Min Yacyreta': 1,
'Cota Max Yacyreta': 1,
'Cota Hoy Salto Grande': 1,
'Cota Min Salto Grande': 1,
'Cota Max Salto Grande': 1
},
'situacionCuencaPatagonica': {
'Cota Hoy Futaleufu': 1,
'Cota Min Futaleufu': 1,
'Cota Max Futaleufu': 1,
'Cota Hoy Ameghino': 1,
'Cota Min Ameghino': 1,
'Cota Max Ameghino': 1
},
'situacionCuencaRioGrande': {
'Cota Hoy Río Grande': 1,
'Cota Min Río Grande': 1,
'Cota Max Río Grande': 1
},
'situacionCuencaRioSanJuan': {
'Cota Hoy Quebrada de Ullum': 1,
'Cota Min Quebrada de Ullum': 1,
'Cota Max Quebrada de Ullum': 1,
'Cota Hoy Los Caracole': 1,
'Cota Min Los Caracoles': 1,
'Cota Max Los Caracoles': 1,
'Cota Hoy Punta Negra': 1,
'Cota Min Punta Negra': 1,
'Cota Max Punta Negra': 1
}
}
df = pd.DataFrame(collection_name.find(projection=project))
return df
except Exception as e:
st.error(f'Opps, algo fallo\n{e}')
finally:
client.close()
@st.cache(suppress_st_warning=True, allow_output_mutation=True)
def get_turbinado():
"""Function to obtain the rivers basin turbinate from MongoDB Atlas.
Returns:
DataFrame: Pandas DataFrame with the query result.
"""
st.spinner("Obteniendo los datos de turbinado...")
client = pymongo.MongoClient(os.environ['MONGO'])
try:
collection_name = client['publicaciones-especiales']['cuencas-datos-hidraulicos']
project={
'_id': 0,
'fecha': 1,
'situacionCuencaComahue': {
'Turbinado Alicura': 1,
'Turbinado Piedra del Aguila': 1,
'Turbinado Arroyito': 1,
'Turbinado El Chocon': 1,
'Turbinado Mari Menuco': 1,
'Turbinado P': {
'P': {
'Leufu': 1
}
}
},
'situacionYacyretaSaltoGrande': {
'Turbinado Salto Grande': 1,
'Turbinado Yacyreta': 1
},
'situacionCuencaPatagonica': {
'Turbinado Futaleufu': 1,
'Turbinado Ameghino': 1
},
'situacionCuencaRioGrande': {
'Turbinado Río Grande': 1
},
'situacionCuencaRioSanJuan': {
'Turbinado Punta Negra': 1,
'Turbinado Ullum': 1,
'Turbinado Los Caracoles': 1,
'Turbinado Quebrada de Ullum': 1
}
}
df = pd.DataFrame(collection_name.find(projection=project))
return df
except Exception as e:
st.error(f'Opps, algo fallo\n{e}')
finally:
client.close()
@st.cache(suppress_st_warning=True, allow_output_mutation=True)
def get_vertido():
"""Function to obtain the rivers basin discharge from MongoDB Atlas.
Returns:
DataFrame: Pandas DataFrame with the query result.
"""
st.spinner("Obteniendo los datos de turbinado...")
client = pymongo.MongoClient(os.environ['MONGO'])
try:
collection_name = client['publicaciones-especiales']['cuencas-datos-hidraulicos']
project={
'_id': 0,
'fecha': 1,
'situacionCuencaComahue': {
'Vertido El Chañar': 1,
'Vertido Arroyito': 1,
'Vertido Piedra del Aguila': 1,
'Vertido P': {
'P': {
'Leufu': 1
}
}
},
'situacionYacyretaSaltoGrande': {
'Vertido Salto Grande': 1,
'Vertido Yacyreta': 1
},
'situacionCuencaPatagonica': {
'Vertido Futaleufu': 1,
'Vertido Ameghino': 1
},
'situacionCuencaRioGrande': {
'Bombeo Río Grande': 1
},
'situacionCuencaRioSanJuan': {
'Vertido Punta Negra': 1,
'Vertido Los Caracoles': 1,
'Vertido Quebrada de Ullum': 1
}
}
df = pd.DataFrame(collection_name.find(projection=project))
return df
except Exception as e:
st.error(f'Opps, algo fallo\n{e}')
finally:
client.close()
def caudales():
"""Get the rivers basin flows and process this data.
Returns:
Figure: Bokeh plotting figure.
DataFrame: Pandas DataFrame with the query result.
"""
df = get_caudales()
df = pd.concat([
df['fecha'],
pd.json_normalize(df['situacionCuencaComahue']),
pd.json_normalize(df['situacionYacyretaSaltoGrande']),
pd.json_normalize(df['situacionCuencaPatagonica']),
pd.json_normalize(df['situacionCuencaRioGrande']),
pd.json_normalize(df['situacionCuencaRioSanJuan'])
], axis=1, join="inner")
df.rename(columns={
"fecha": "Fecha",
"Caudal Collon Cura": "Cuenca Comahue - Caudal Collon Cura",
"Caudal Neuquen": "Cuenca Comahue - Caudal Neuquen",
"Caudal Limay": "Cuenca Comahue - Caudal Limay",
"Caudal Río Negro": "Cuenca Comahue - Caudal Río Negro",
"Caudal Limay despues desembocadura de Collon Cura": "Cuenca Comahue - Caudal Limay despues desembocadura de Collon Cura",
"Caudal Río Uruguay": "Yacyreta Salto Grande - Caudal Río Uruguay",
"Caudal Río Paraná": "Yacyreta Salto Grande - Caudal Río Paraná",
"Caudal Río Chubut": "Cuenca Patagónica - Caudal Río Chubut",
"Caudal Río Futaleufu": "Cuenca Patagónica - Caudal Río Futaleufu",
"Caudal Río Grande": "Cuenca Río Grande - Caudal Río Grande",
"Caudal Inicial Río San Juan": "Cuenca Río San Juan - Caudal Inicial Río San Juan",
"Caudal Final Río San Juan": "Cuenca Río San Juan - Caudal Final Río San Juan"
}, inplace=True)
df['Fecha'] = pd.to_datetime(df['Fecha'], format='%Y/%m/%d').dt.date
df = df.drop_duplicates().sort_values('Fecha', ascending=False).reset_index(drop=True)
df = df.replace(0, np.nan)
p = figure(x_axis_type="datetime", title="Caudales cuencas", sizing_mode="stretch_both")
p.grid.grid_line_alpha=0.3
p.xaxis.axis_label = 'Fecha'
p.yaxis.axis_label = 'Caudal [m\u00b3/s]'
p.legend.location = "top_left"
return p, df
def cotas():
"""Get the rivers basin levels and process this data.
Returns:
Figure: Bokeh plotting figure.
DataFrame: Pandas DataFrame with the query result.
"""
df = get_cotas()
df = pd.concat([
df['fecha'],
pd.json_normalize(df['situacionCuencaComahue']),
pd.json_normalize(df['situacionYacyretaSaltoGrande']),
pd.json_normalize(df['situacionCuencaPatagonica']),
pd.json_normalize(df['situacionCuencaRioGrande']),
pd.json_normalize(df['situacionCuencaRioSanJuan'])
], axis=1, join="inner")
df.rename(columns={
'fecha': 'Fecha',
'Cota Hoy Alicura': 'Cuenca Comahue - Alicura',
'Cota Min Alicura': 'Cuenca Comahue - Min Alicura',
'Cota Max Alicura': 'Cuenca Comahue - Max Alicura',
        'Cota Hoy Piedra del Aguila': 'Cuenca Comahue - Piedra del Aguila',
'Cota Min Piedra del Aguila': 'Cuenca Comahue - Min Piedra del Aguila',
'Cota Max Piedra del Aguila': 'Cuenca Comahue - Max Piedra del Aguila',
'Cota Hoy Arroyito': 'Cuenca Comahue - Arroyito',
'Cota Min Arroyito': 'Cuenca Comahue - Min Arroyito',
'Cota Max Arroyito': 'Cuenca Comahue - Max Arroyito',
'Cota Hoy Mari Menuco': 'Cuenca Comahue - Mari Menuco',
'Cota Min Mari Menuco': 'Cuenca Comahue - Min Mari Menuco',
'Cota Max Mari Menuco': 'Cuenca Comahue - Max Mari Menuco',
'Cota Hoy Planicie Banderita Barreales': 'Cuenca Comahue - Planicie Banderita Barreales',
'Cota Min Planicie Banderita Barreales': 'Cuenca Comahue - Min Planicie Banderita Barreales',
'Cota Max Planicie Banderita Barreales': 'Cuenca Comahue - Max Planicie Banderita Barreales',
'Cota Hoy El Chocon': 'Cuenca Comahue - El Chocon',
'Cota Min El Chocon': 'Cuenca Comahue - Min El Chocon',
'Cota Max El Chocon': 'Cuenca Comahue - Max El Chocon',
'Cota Hoy P.P.Leufu': 'Cuenca Comahue - Leufu',
'Cota Hoy Yacyreta': 'Cuenca Yacyreta - Yacyreta',
'Cota Min Yacyreta': 'Cuenca Yacyreta - Min Yacyreta',
'Cota Max Yacyreta': 'Cuenca Yacyreta - Max Yacyreta',
'Cota Hoy Salto Grande': 'Cuenca Yacyreta - Salto Grande',
'Cota Min Salto Grande': 'Cuenca Yacyreta - Min Salto Grande',
'Cota Max Salto Grande': 'Cuenca Yacyreta - Max Salto Grande',
'Cota Hoy Futaleufu': 'Cuenca Patagónica - Futaleufu',
'Cota Min Futaleufu': 'Cuenca Patagónica - Min Futaleufu',
'Cota Max Futaleufu': 'Cuenca Patagónica - Max Futaleufu',
'Cota Hoy Ameghino': 'Cuenca Patagónica - Ameghino',
'Cota Min Ameghino': 'Cuenca Patagónica - Min Ameghino',
'Cota Max Ameghino': 'Cuenca Patagónica - Max Ameghino',
'Cota Hoy Río Grande': 'Cuenca Río Grande - Río Grande',
'Cota Min Río Grande': 'Cuenca Río Grande - Min Río Grande',
'Cota Max Río Grande': 'Cuenca Río Grande - Max Río Grande',
'Cota Hoy Quebrada de Ullum': 'Cuenca Río San Juan - Quebrada de Ullum',
'Cota Min Quebrada de Ullum': 'Cuenca Río San Juan - Min Quebrada de Ullum',
'Cota Max Quebrada de Ullum': 'Cuenca Río San Juan - Max Quebrada de Ullum',
'Cota Hoy Punta Negra': 'Cuenca Río San Juan - Punta Negra',
'Cota Min Punta Negra': 'Cuenca Río San Juan - Min Punta Negra',
'Cota Max Punta Negra': 'Cuenca Río San Juan - Max Punta Negra'
}, inplace=True)
df['Fecha'] = pd.to_datetime(df['Fecha'], format='%Y/%m/%d').dt.date
df = df.drop_duplicates().sort_values('Fecha', ascending=False).reset_index(drop=True)
df = df.replace(0, np.nan)
p = figure(x_axis_type="datetime", title="Cotas cuencas", sizing_mode="stretch_both")
p.grid.grid_line_alpha=0.3
p.xaxis.axis_label = 'Fecha'
p.yaxis.axis_label = 'Cota [cm]'
p.legend.location = "top_left"
return p, df
def turbinado():
"""Get the rivers basin discharge and process this data.
Returns:
Figure: Bokeh plotting figure.
DataFrame: Pandas DataFrame with the query result.
"""
df = get_turbinado()
df = pd.concat([
df['fecha'],
pd.json_normalize(df['situacionCuencaComahue']),
pd.json_normalize(df['situacionYacyretaSaltoGrande']),
pd.json_normalize(df['situacionCuencaPatagonica']),
pd.json_normalize(df['situacionCuencaRioGrande']),
pd.json_normalize(df['situacionCuencaRioSanJuan'])
], axis=1, join="inner")
df.rename(columns={
'fecha': 'Fecha',
'Turbinado Alicura': 'Cuenca Comahue - Alicura',
'Turbinado Piedra del Aguila': 'Cuenca Comahue - Piedra del Aguila',
'Turbinado Arroyito': 'Cuenca Comahue - Arroyito',
'Turbinado El Chocon': 'Cuenca Comahue - El Chocon',
'Turbinado Mari Menuco': 'Cuenca Comahue - Mari Menuco',
'Turbinado P.P.Leufu': 'Cuenca Comahue - Leufu',
'Turbinado Salto Grande': 'Cuenca Yacyreta - Salto Grande',
'Turbinado Yacyreta': 'Cuenca Yacyreta - Yacyreta',
'Turbinado Futaleufu': 'Cuenca Patagónica - Futaleufu',
'Turbinado Ameghino': 'Cuenca Patagónica - Ameghino',
'Turbinado Río Grande': 'Cuenca Río Grande - Río Grande',
'Turbinado Punta Negra': 'Cuenca Río San Juan - Punta Negra',
'Turbinado Ullum': 'Cuenca Río San Juan - Ullum',
'Turbinado Los Caracoles': 'Cuenca Río San Juan - Los Caracoles',
'Turbinado Quebrada de Ullum': 'Cuenca Río San Juan - Quebrada de Ullum'
}, inplace=True)
df['Fecha'] = pd.to_datetime(df['Fecha'], format='%Y/%m/%d').dt.date
df = df.drop_duplicates().sort_values('Fecha', ascending=False).reset_index(drop=True)
# df = df.replace(0, np.nan)
p = figure(x_axis_type="datetime", title="Turbinado", sizing_mode="stretch_both")
p.grid.grid_line_alpha=0.3
p.xaxis.axis_label = 'Fecha'
p.yaxis.axis_label = 'Turbinado'
p.legend.location = "top_left"
return p, df
def vertido():
"""Get the rivers basin discharge and process this data.
Returns:
Figure: Bokeh plotting figure.
DataFrame: Pandas DataFrame with the query result.
"""
df = get_vertido()
df = pd.concat([
df['fecha'],
pd.json_normalize(df['situacionCuencaComahue']),
pd.json_normalize(df['situacionYacyretaSaltoGrande']),
pd.json_normalize(df['situacionCuencaPatagonica']),
pd.json_normalize(df['situacionCuencaRioGrande']),
pd.json_normalize(df['situacionCuencaRioSanJuan'])
], axis=1, join="inner")
df.rename(columns={
'fecha': 'Fecha',
'Vertido El Chañar': 'Cuenca Comahue - El Chañar',
'Vertido Arroyito': 'Cuenca Comahue - Arroyito',
'Vertido Piedra del Aguila': 'Cuenca Comahue - Piedra del Aguila',
'Vertido P.P.Leufu': 'Cuenca Comahue - Leufu',
'Vertido Salto Grande': 'Cuenca Yacyreta - Salto Grande',
'Vertido Yacyreta': 'Cuenca Yacyreta - Yacyreta',
'Vertido Futaleufu': 'Cuenca Patagónica - Futaleufu',
'Vertido Ameghino': 'Cuenca Patagónica - Ameghino',
'Bombeo Río Grande': 'Cuenca Río Grande - Bombeo Río Grande',
'Vertido Punta Negra': 'Cuenca Río San Juan - Punta Negra',
'Vertido Los Caracoles': 'Cuenca Río San Juan - Los Caracoles',
'Vertido Quebrada de Ullum': 'Cuenca Río San Juan - Quebrada de Ullum'
}, inplace=True)
df['Fecha'] = pd.to_datetime(df['Fecha'], format='%Y/%m/%d').dt.date
df = df.drop_duplicates().sort_values('Fecha', ascending=False).reset_index(drop=True)
# df = df.replace(0, np.nan)
p = figure(x_axis_type="datetime", title="Vertido", sizing_mode="stretch_both")
p.grid.grid_line_alpha=0.3
p.xaxis.axis_label = 'Fecha'
p.yaxis.axis_label = 'Vertido'
p.legend.location = "top_left"
return p, df
def write():
"""Function to write the Streamlit content of the page pe_cuencas
"""
p_caudales, df_caudales = caudales()
p_cotas, df_cotas = cotas()
p_turbinado, df_turbinado = turbinado()
p_vertido, df_vertido = vertido()
st.header("Publicaciones especiales - Cuencas/Datos Hidráulicos 🌊", anchor=None)
with st.container():
st.subheader("Análisis de caudales", anchor=None)
options = st.multiselect(
"Seleccionar datos a graficar.",
options=[
"Cuenca Comahue - Caudal Collon Cura",
"Cuenca Comahue - Caudal Neuquen",
"Cuenca Comahue - Caudal Limay",
"Cuenca Comahue - Caudal Río Negro",
"Cuenca Comahue - Caudal Limay despues desembocadura de Collon Cura",
"Yacyreta Salto Grande - Caudal Río Uruguay",
"Yacyreta Salto Grande - Caudal Río Paraná",
"Cuenca Patagónica - Caudal Río Chubut",
"Cuenca Patagónica - Caudal Río Futaleufu",
"Cuenca Río Grande - Caudal Río Grande",
"Cuenca Río San Juan - Caudal Inicial Río San Juan",
"Cuenca Río San Juan - Caudal Final Río San Juan"
],
default=[
"Yacyreta Salto Grande - Caudal Río Paraná",
"Yacyreta Salto Grande - Caudal Río Uruguay"
]
)
if len(options)>9:
col = Set3_12
else:
col = Set1_9
for index, value in enumerate(options):
p_caudales.line(
df_caudales['Fecha'],
df_caudales[value],
color=col[index],
legend_label=re.split(r" - ", value)[1].strip()
)
st.bokeh_chart(p_caudales)
with st.expander("Ver datos"):
st.write("Datos de los caudales de las cuencas en [m\u00b3/s].")
st.dataframe(df_caudales)
st.download_button(
label="Descargar dataset como .CSV",
data=df_caudales.to_csv(index=False).encode('utf-8'),
file_name='Caudales.csv',
mime='text/csv',
)
with st.container():
st.subheader("Análisis de cotas", anchor=None)
options_cotas = st.multiselect(
"Seleccionar datos a graficar.",
options=[
'Cuenca Comahue - Alicura',
'Cuenca Comahue - Min Alicura',
'Cuenca Comahue - Max Alicura',
                'Cuenca Comahue - Piedra del Aguila',
'Cuenca Comahue - Min Piedra del Aguila',
'Cuenca Comahue - Max Piedra del Aguila',
'Cuenca Comahue - Arroyito',
'Cuenca Comahue - Min Arroyito',
'Cuenca Comahue - Max Arroyito',
'Cuenca Comahue - Mari Menuco',
'Cuenca Comahue - Min Mari Menuco',
'Cuenca Comahue - Max Mari Menuco',
'Cuenca Comahue - Planicie Banderita Barreales',
'Cuenca Comahue - Min Planicie Banderita Barreales',
'Cuenca Comahue - Max Planicie Banderita Barreales',
'Cuenca Comahue - El Chocon',
'Cuenca Comahue - Min El Chocon',
'Cuenca Comahue - Max El Chocon',
'Cuenca Comahue - Leufu',
'Cuenca Yacyreta - Yacyreta',
'Cuenca Yacyreta - Min Yacyreta',
'Cuenca Yacyreta - Max Yacyreta',
'Cuenca Yacyreta - Salto Grande',
'Cuenca Yacyreta - Min Salto Grande',
'Cuenca Yacyreta - Max Salto Grande',
'Cuenca Patagónica - Futaleufu',
'Cuenca Patagónica - Min Futaleufu',
'Cuenca Patagónica - Max Futaleufu',
'Cuenca Patagónica - Ameghino',
'Cuenca Patagónica - Min Ameghino',
'Cuenca Patagónica - Max Ameghino',
'Cuenca Río Grande - Río Grande',
'Cuenca Río Grande - Min Río Grande',
'Cuenca Río Grande - Max Río Grande',
'Cuenca Río San Juan - Quebrada de Ullum',
'Cuenca Río San Juan - Min Quebrada de Ullum',
'Cuenca Río San Juan - Max Quebrada de Ullum',
'Cuenca Río San Juan - Punta Negra',
'Cuenca Río San Juan - Min Punta Negra',
'Cuenca Río San Juan - Max Punta Negra'
],
default=[
'Cuenca Yacyreta - Salto Grande',
'Cuenca Yacyreta - Min Salto Grande',
'Cuenca Yacyreta - Max Salto Grande'
]
)
if len(options_cotas)<=9:
col = Set1_9
elif len(options_cotas) <=12:
col = Set3_12
else:
col = Inferno256
for index, value in enumerate(options_cotas):
p_cotas.line(
df_cotas['Fecha'],
df_cotas[value],
color=col[index],
legend_label=re.split(r" - ", value)[1].strip()
)
st.bokeh_chart(p_cotas)
with st.expander("Ver datos"):
st.write("Datos de los Cotas de las cuencas en [cm].")
st.dataframe(df_cotas)
st.download_button(
label="Descargar dataset como .CSV",
data=df_cotas.to_csv(index=False).encode('utf-8'),
file_name='Cotas.csv',
mime='text/csv',
)
with st.container():
st.subheader("Análisis del turbinado", anchor=None)
options_turbinado = st.multiselect(
"Seleccionar datos a graficar.",
options=[
'Cuenca Comahue - Alicura',
'Cuenca Comahue - Piedra del Aguila',
'Cuenca Comahue - Arroyito',
'Cuenca Comahue - El Chocon',
'Cuenca Comahue - Mari Menuco',
'Cuenca Comahue - Leufu',
'Cuenca Yacyreta - Salto Grande',
'Cuenca Yacyreta - Yacyreta',
'Cuenca Patagónica - Futaleufu',
'Cuenca Patagónica - Ameghino',
'Cuenca Río Grande - Río Grande',
'Cuenca Río San Juan - Punta Negra',
'Cuenca Río San Juan - Ullum',
'Cuenca Río San Juan - Los Caracoles',
'Cuenca Río San Juan - Quebrada de Ullum'
], default=[
'Cuenca Yacyreta - Yacyreta',
'Cuenca Yacyreta - Salto Grande'
]
)
if len(options_turbinado)<=9:
col = Set1_9
elif len(options_turbinado) <=12:
col = Set3_12
else:
col = Inferno256
for index, value in enumerate(options_turbinado):
p_turbinado.line(
df_turbinado['Fecha'],
df_turbinado[value],
color=col[index],
legend_label=re.split(r" - ", value)[1].strip()
)
st.bokeh_chart(p_turbinado)
with st.expander("Ver datos"):
st.write("Datos del turbinado.")
st.dataframe(df_turbinado)
st.download_button(
label="Descargar dataset como .CSV",
data=df_turbinado.to_csv(index=False).encode('utf-8'),
file_name='Turbinado.csv',
mime='text/csv',
)
with st.container():
st.subheader("Análisis del vertido", anchor=None)
options_vertido = st.multiselect(
"Seleccionar datos a graficar.",
options=[
'Cuenca Comahue - El Chañar',
'Cuenca Comahue - Arroyito',
'Cuenca Comahue - Piedra del Aguila',
'Cuenca Comahue - Leufu',
'Cuenca Yacyreta - Salto Grande',
'Cuenca Yacyreta - Yacyreta',
'Cuenca Patagónica - Futaleufu',
'Cuenca Patagónica - Ameghino',
'Cuenca Río Grande - Bombeo Río Grande',
'Cuenca Río San Juan - Punta Negra',
'Cuenca Río San Juan - Los Caracoles',
'Cuenca Río San Juan - Quebrada de Ullum'
], default=[
'Cuenca Yacyreta - Yacyreta',
'Cuenca Yacyreta - Salto Grande'
]
)
if len(options_vertido)>9:
col = Set3_12
else:
col = Set1_9
for index, value in enumerate(options_vertido):
p_vertido.line(
df_vertido['Fecha'],
df_vertido[value],
color=col[index],
legend_label=re.split(r" - ", value)[1].strip()
)
st.bokeh_chart(p_vertido)
with st.expander("Ver datos"):
st.write("Datos del vertido.")
st.dataframe(df_vertido)
st.download_button(
label="Descargar dataset como .CSV",
data=df_vertido.to_csv(index=False).encode('utf-8'),
file_name='Vertido.csv',
mime='text/csv',
)
| 39.538574
| 130
| 0.555793
| 2,864
| 27,163
| 5.208101
| 0.085196
| 0.059265
| 0.025476
| 0.03218
| 0.758045
| 0.65661
| 0.60244
| 0.553366
| 0.506034
| 0.434902
| 0
| 0.009018
| 0.338622
| 27,163
| 686
| 131
| 39.59621
| 0.821208
| 0.045945
| 0
| 0.424191
| 0
| 0
| 0.438753
| 0.04614
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015332
| false
| 0
| 0.013629
| 0
| 0.042589
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88f1cc3699cf5781999a9874993e5299f3224a9d
| 5,930
|
py
|
Python
|
utils/gen-vowel-constraints.py
|
ctrlcctrlv/fontFeatures
|
76d68586da2c1c42bb3cd79f92d583e63f52cf02
|
[
"BSD-3-Clause"
] | 51
|
2020-01-15T09:28:51.000Z
|
2022-03-30T06:48:36.000Z
|
utils/gen-vowel-constraints.py
|
ctrlcctrlv/fontFeatures
|
76d68586da2c1c42bb3cd79f92d583e63f52cf02
|
[
"BSD-3-Clause"
] | 51
|
2020-05-11T18:51:25.000Z
|
2021-12-20T12:55:08.000Z
|
utils/gen-vowel-constraints.py
|
ctrlcctrlv/fontFeatures
|
76d68586da2c1c42bb3cd79f92d583e63f52cf02
|
[
"BSD-3-Clause"
] | 8
|
2020-08-28T20:03:14.000Z
|
2021-12-08T01:22:28.000Z
|
#!/usr/bin/env python3
"""Generator of the function to prohibit certain vowel sequences.
It creates ``preprocess_text_vowel_constraints``, which inserts dotted
circles into sequences prohibited by the USE script development spec.
This function should be used as the ``preprocess_text`` of an
``hb_ot_complex_shaper_t``.
usage: ./gen-vowel-constraints.py ms-use/IndicShapingInvalidCluster.txt
"""
import collections
import youseedee
def write (s):
sys.stdout.flush ()
sys.stdout.buffer.write (s.encode ('utf-8'))
import sys
if len (sys.argv) != 2:
sys.exit (__doc__)
script_order = {}
scripts = {}
for start, end,script in youseedee.parse_file_ranges("Scripts.txt"):
for u in range (start, end + 1):
scripts[u] = script
if script not in script_order:
script_order[script] = start
class ConstraintSet (object):
"""A set of prohibited code point sequences.
Args:
constraint (List[int]): A prohibited code point sequence.
"""
def __init__ (self, constraint):
# Either a list or a dictionary. As a list of code points, it
# represents a prohibited code point sequence. As a dictionary,
# it represents a set of prohibited sequences, where each item
# represents the set of prohibited sequences starting with the
# key (a code point) concatenated with any of the values
# (ConstraintSets).
self._c = constraint
def add (self, constraint):
"""Add a constraint to this set."""
if not constraint:
return
first = constraint[0]
rest = constraint[1:]
if isinstance (self._c, list):
if constraint == self._c[:len (constraint)]:
self._c = constraint
elif self._c != constraint[:len (self._c)]:
self._c = {self._c[0]: ConstraintSet (self._c[1:])}
if isinstance (self._c, dict):
if first in self._c:
self._c[first].add (rest)
else:
self._c[first] = ConstraintSet (rest)
@staticmethod
def _indent (depth):
return (' ' * depth)
@staticmethod
def _cp_accessor(index):
if index:
return "buffer.items[i+{}].codepoint".format(index)
return "buffer.items[i].codepoint"
def __str__ (self, index=0, depth=2):
s = []
indent = self._indent (depth)
if isinstance (self._c, list):
if len (self._c) == 0:
assert index == 2, 'Cannot use `matched` for this constraint; the general case has not been implemented'
s.append ('{}matched = True\n'.format (indent))
elif len (self._c) == 1:
assert index == 1, 'Cannot use `matched` for this constraint; the general case has not been implemented'
s.append ('{}matched = 0x{:04X} == {}\n'.format (indent, next (iter (self._c)), self._cp_accessor(index)))
else:
s.append ('{}if (0x{:04X} == {} and\n'.format (indent, self._c[0], self._cp_accessor(index)))
if index:
s.append ('{}i + {} < len(buffer.items)-1 and\n'.format (self._indent (depth + 2), index + 1))
for i, cp in enumerate (self._c[1:], start=1):
s.append ('{}0x{:04X} == {}{}\n'.format (
                self._indent (depth + 2), cp, self._cp_accessor(index + i), '):' if i == len (self._c) - 1 else ' and')  # leading space so 'and' does not fuse with the accessor in the generated code
)
s.append ('{}matched = True\n'.format (self._indent (depth + 1)))
else:
cases = collections.defaultdict (set)
for first, rest in sorted (self._c.items ()):
cases[rest.__str__ (index + 1, depth + 2)].add (first)
for body, labels in sorted (cases.items (), key=lambda b_ls: sorted (b_ls[1])[0]):
if len(labels) == 1:
s.append (self._indent (depth + 1) + "if {} == 0x{:04X}:\n".format(self._cp_accessor(index), list(labels)[0]))
else:
points = ", ".join(['0x{:04X}'.format(cp) for cp in sorted(labels)])
s.append (self._indent (depth + 1) + "if {} in [{}]:\n".format(self._cp_accessor(index), points))
s.append (body)
return ''.join (s)
constraints = {}
with open (sys.argv[1], encoding='utf-8') as f:
constraints_header = []
while True:
line = f.readline ().strip ()
if line == '#':
break
constraints_header.append(line)
for line in f:
j = line.find ('#')
if j >= 0:
line = line[:j]
constraint = [int (cp, 16) for cp in line.split (';')[0].split ()]
if not constraint: continue
assert 2 <= len (constraint), 'Prohibited sequence is too short: {}'.format (constraint)
script = scripts[constraint[0]]
if script in constraints:
constraints[script].add (constraint)
else:
constraints[script] = ConstraintSet (constraint)
assert constraints, 'No constraints found'
print ('# The following functions are generated by running:')
print ('# %s ms-use/IndicShapingInvalidCluster.txt' % sys.argv[0])
print("""
from fontFeatures.shaperLib.Buffer import BufferItem
DOTTED_CIRCLE = 0x25CC
def _insert_dotted_circle(buf, index):
dotted_circle = BufferItem.new_unicode(DOTTED_CIRCLE)
buf.items.insert(index, dotted_circle)
""")
print ('def preprocess_text_vowel_constraints(buffer):')
for script, constraints in sorted (constraints.items (), key=lambda s_c: script_order[s_c[0]]):
print(f' if buffer.script == "{script}":')
print (' i = 0')
print (' while i < len(buffer.items)-1:')
print (' matched = False')
write (str (constraints))
print (' i = i + 1')
print (' if matched: _insert_dotted_circle(buffer, i)')
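# --- Illustration (not part of the generator) ----------------------------------
# A minimal sketch of how the ConstraintSet class above folds shared prefixes
# into nested branches. The code points are made-up examples, and it assumes the
# class has been pasted into an interactive session (the script itself is driven
# by Scripts.txt and the ms-use data file), so it is left commented out here.
# cs = ConstraintSet([0x0905, 0x0946])
# cs.add([0x0905, 0x093E])   # same 0x0905 prefix -> the list collapses into a dict branch
# print(cs)                   # prints the generated 'if ... buffer.items[...].codepoint' checks as text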
| 37.770701
| 130
| 0.579764
| 738
| 5,930
| 4.533875
| 0.257453
| 0.031381
| 0.026898
| 0.028392
| 0.187986
| 0.13688
| 0.063957
| 0.049014
| 0.049014
| 0.049014
| 0
| 0.014654
| 0.286509
| 5,930
| 156
| 131
| 38.012821
| 0.776176
| 0.143508
| 0
| 0.117117
| 0
| 0
| 0.208135
| 0.054762
| 0
| 0
| 0.00119
| 0
| 0.036036
| 1
| 0.054054
| false
| 0
| 0.036036
| 0.009009
| 0.144144
| 0.09009
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88f5b3a545f8379f5c6bd871ff166dd1442dd335
| 1,263
|
py
|
Python
|
solutions/validate-binary-search-tree.py
|
edab/-LC_StudyPlan_Python
|
e065f0ced68d23800d7b5001102c2e930ee35e23
|
[
"MIT"
] | null | null | null |
solutions/validate-binary-search-tree.py
|
edab/-LC_StudyPlan_Python
|
e065f0ced68d23800d7b5001102c2e930ee35e23
|
[
"MIT"
] | 1
|
2022-02-22T15:42:54.000Z
|
2022-02-25T00:10:04.000Z
|
solutions/validate-binary-search-tree.py
|
edab/-LC_StudyPlan_Python
|
e065f0ced68d23800d7b5001102c2e930ee35e23
|
[
"MIT"
] | null | null | null |
# Leetcode 98. Validate Binary Search Tree
#
# Link: https://leetcode.com/problems/validate-binary-search-tree/
# Difficulty: Medium
# Complexity:
# O(N) time | where N represents the number of elements in the input tree
# O(N) space | where N represents the number of elements in the input tree
from typing import Optional
# Definition for a binary tree node (uncommented here so the file runs standalone).
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
class Solution:
def isValidBST(self, root: Optional[TreeNode]) -> bool:
def is_valid(node, left_limit, right_limit):
if not node:
return True
if not (left_limit < node.val < right_limit):
return False
return (is_valid(node.left, left_limit, node.val) and is_valid(node.right, node.val, right_limit))
def dfs_bfs_check_iterative(node):
if not node:
return True
stack = []
previous = None
while node or stack:
while node:
stack.append(node)
node = node.left
node = stack.pop()
if previous and node.val <= previous.val:
return False
previous = node
node = node.right
return True
return dfs_bfs_check_iterative(root)
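# --- Quick check (sketch) -------------------------------------------------------
# LeetCode normally supplies the driver; the two small trees below are assumed
# test inputs: [2,1,3] is a valid BST, [5,1,4,null,null,3,6] is not.
if __name__ == "__main__":
    valid = TreeNode(2, TreeNode(1), TreeNode(3))
    invalid = TreeNode(5, TreeNode(1), TreeNode(4, TreeNode(3), TreeNode(6)))
    print(Solution().isValidBST(valid))    # expected: True
    print(Solution().isValidBST(invalid))  # expected: False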
| 30.071429
| 104
| 0.63658
| 173
| 1,263
| 4.537572
| 0.364162
| 0.035669
| 0.042038
| 0.061147
| 0.170701
| 0.122293
| 0.122293
| 0.122293
| 0.122293
| 0.122293
| 0
| 0.003264
| 0.272367
| 1,263
| 41
| 105
| 30.804878
| 0.850925
| 0.366587
| 0
| 0.291667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0
| 0
| 0.458333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88f8317bf62d16d93d0f7dd37a85760c1a1014e1
| 763
|
py
|
Python
|
setup.py
|
stanford-ccb/ccb
|
ba75d490663958703f19e7a13f72001b050da229
|
[
"MIT"
] | 3
|
2020-02-13T00:49:06.000Z
|
2020-06-24T23:53:25.000Z
|
setup.py
|
stanford-ccb/ccb
|
ba75d490663958703f19e7a13f72001b050da229
|
[
"MIT"
] | null | null | null |
setup.py
|
stanford-ccb/ccb
|
ba75d490663958703f19e7a13f72001b050da229
|
[
"MIT"
] | 4
|
2020-01-29T17:21:59.000Z
|
2021-01-27T01:53:05.000Z
|
from setuptools import setup
version = open("ccb/__version__.py").read().strip('"\n')
setup_args = {
"name": "ccb",
"version": version,
"url": "https://github.com/earth-chris/ccb",
"license": "MIT",
"author": "Christopher Anderson",
"author_email": "cbanders@stanford.edu",
"description": "Species distribution modeling support tools",
"keywords": ["maxent", "biogeography", "SDM", "species distribution modeling", "ecologyy", "conservation"],
"packages": ["ccb"],
"include_package_data": True,
"platforms": "any",
"scripts": ["bin/gbif-to-vector.py", "bin/vector-to-maxent.py"],
"data_files": [("maxent", ["ccb/maxent/maxent.jar", "ccb/maxent/README.txt", "ccb/maxent/LICENSE.txt"])],
}
setup(**setup_args)
| 34.681818
| 111
| 0.643512
| 87
| 763
| 5.528736
| 0.643678
| 0.056133
| 0.112266
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 763
| 21
| 112
| 36.333333
| 0.735474
| 0
| 0
| 0
| 0
| 0
| 0.585845
| 0.169069
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.055556
| 0
| 0.055556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88f8e7ad2c848fa7633da12c05df70cdb4d3835a
| 1,576
|
py
|
Python
|
Unit4/Lesson8.py
|
szhua/PythonLearn
|
12eaf7cc74a0310bb23e21773f3c83deb91d0362
|
[
"Apache-2.0"
] | null | null | null |
Unit4/Lesson8.py
|
szhua/PythonLearn
|
12eaf7cc74a0310bb23e21773f3c83deb91d0362
|
[
"Apache-2.0"
] | null | null | null |
Unit4/Lesson8.py
|
szhua/PythonLearn
|
12eaf7cc74a0310bb23e21773f3c83deb91d0362
|
[
"Apache-2.0"
] | null | null | null |
# import time
#
# def reader():
# """A generator that fakes a read from a file, socket, etc."""
# for i in range(101):
# yield '<< %s' % i
#
# def consumer():
# r = ''
# while True:
# #But Python's yield does not just return a value; it can also receive arguments sent by the caller.
# #Here n is the parameter that receives the sent value.
# n = yield from reader()
# print("===",n)
# if not n:
# return
# print('[CONSUMER] Consuming %s...' % n)
# r = '200 OK'
#
# def produce(c):
# c.send(None)
# n = 0
# while n < 100:
# n = n + 1
# print('[PRODUCER] Producing %s...' % n)
# r = c.send(n)
# print('[PRODUCER] Consumer return: %s' % r)
# c.close()
#
# c = consumer()
# produce(c)
# def getIN():
# for x in range(1000):
# n = yield x
# print(n,"--rer",x)
#
# ge =getIN()
#
# #Start
# ge.send(None)
# ge.send("11")
# ge.send("222")
def accumulate(): # Sub-generator: accumulates non-None values sent in; when None is sent, returns the running total
tally = 0
while 1:
next = yield
if next is None:
return tally
tally += next
def gather_tallies(tallies): # Outer generator: delegates the accumulation work to the sub-generator via yield from
while 1:
tally = yield from accumulate()
tallies.append(tally)
tallies = []
acc = gather_tallies(tallies)
next(acc) # Prime the accumulator generator so it is ready to receive sent values
for i in range(4):
acc.send(i)
acc.send(None) # End the first accumulation round
for i in range(5):
acc.send(i)
acc.send(None) # End the second accumulation round
print(tallies)
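# Sanity check of the two rounds above: 0+1+2+3 == 6 and 0+1+2+3+4 == 10.
assert tallies == [6, 10]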
def get():
n =1
while True:
n+=1
if n>10:
break
yield
for x in get():
print(x)
| 17.909091
| 68
| 0.498096
| 198
| 1,576
| 3.954545
| 0.378788
| 0.03576
| 0.022989
| 0.042146
| 0.048531
| 0.048531
| 0
| 0
| 0
| 0
| 0
| 0.028155
| 0.346447
| 1,576
| 87
| 69
| 18.114943
| 0.732039
| 0.564721
| 0
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0
| 0
| 0.133333
| 0.066667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88fb6794a48a5fc109dca145fcd71d6498bacc28
| 1,288
|
py
|
Python
|
tools/transferComponentSelection.py
|
fsanges/glTools
|
8ff0899de43784a18bd4543285655e68e28fb5e5
|
[
"MIT"
] | 165
|
2015-01-26T05:22:04.000Z
|
2022-03-22T02:50:41.000Z
|
tools/transferComponentSelection.py
|
qeeji/glTools
|
8ff0899de43784a18bd4543285655e68e28fb5e5
|
[
"MIT"
] | 5
|
2015-12-02T02:39:44.000Z
|
2020-12-09T02:45:54.000Z
|
tools/transferComponentSelection.py
|
qeeji/glTools
|
8ff0899de43784a18bd4543285655e68e28fb5e5
|
[
"MIT"
] | 83
|
2015-02-10T17:18:24.000Z
|
2022-02-10T07:16:47.000Z
|
import maya.cmds as mc
import maya.OpenMaya as OpenMaya
import glTools.utils.base
def transferComponentSelection(sourceSelection,targetMesh,threshold=0.0001):
    '''
    Transfer a component (vertex) selection from a source mesh onto the closest
    unclaimed vertices of the given target mesh.
    @param sourceSelection: Source component selection to transfer.
    @param targetMesh: Mesh to transfer the selection onto.
    @param threshold: Distance below which the closest-vertex search stops early.
    '''
# Check selection target mesh
if not mc.objExists(targetMesh):
raise Exception('Target mesh "'+targetMesh+'" does not exist!')
# Flatten selection
sourceSelection = mc.ls(sourceSelection,fl=True)
# Get mesh points
tPtArray = glTools.utils.base.getMPointArray(targetMesh)
tPtLen = tPtArray.length()
# Initialize component selection transfer list
tPtBool = [False for i in range(tPtLen)]
# Initialize selection list
tSel = []
# Transfer selection
for sel in sourceSelection:
# Get selection point
pt = mc.pointPosition(sel)
pt = OpenMaya.MPoint(pt[0],pt[1],pt[2],1.0)
# Find closest component
cDist = 99999
cIndex = -1
for i in range(tPtLen):
# Check component selection transfer list
if tPtBool[i]: continue
# Check distance to current point
dist = (pt-tPtArray[i]).length()
if dist < cDist:
cDist = dist
cIndex = i
# Test threshold
if dist < threshold: break
# Append selection
tSel.append(targetMesh+'.vtx['+str(cIndex)+']')
# Update component selection transfer list
        tPtBool[cIndex] = True # Mark the matched (closest) vertex as claimed, not the last loop index
# Return result
return tSel
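# --- Usage sketch (Maya only) ---------------------------------------------------
# Only meaningful inside a Maya session; 'pSphere2' and the pre-existing vertex
# selection are assumptions for illustration, so the lines are left commented.
# import maya.cmds as mc
# src = mc.ls(selection=True, fl=True)                               # currently selected vertices
# vtx = transferComponentSelection(src, 'pSphere2', threshold=0.001)
# mc.select(vtx)                                                     # select the transferred vertices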
| 22.596491
| 76
| 0.699534
| 162
| 1,288
| 5.561728
| 0.450617
| 0.059933
| 0.08657
| 0.099889
| 0.119867
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015504
| 0.198758
| 1,288
| 56
| 77
| 23
| 0.857558
| 0.274068
| 0
| 0
| 0
| 0
| 0.039474
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0
| 0.115385
| 0
| 0.192308
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88fe054080de49b8785340e2f3ce23ac82e4a3fa
| 324
|
py
|
Python
|
the_office/test.py
|
zubyjaved/reddit-bots
|
9f15f5ee9eede5223c975c29527c9e58d68bb517
|
[
"MIT"
] | 2
|
2019-09-07T09:40:23.000Z
|
2021-06-19T08:40:00.000Z
|
the_office/test.py
|
zubyjaved/reddit-bots
|
9f15f5ee9eede5223c975c29527c9e58d68bb517
|
[
"MIT"
] | 2
|
2019-09-05T04:42:23.000Z
|
2019-09-05T04:44:37.000Z
|
the_office/test.py
|
zubyjaved/reddit-bots
|
9f15f5ee9eede5223c975c29527c9e58d68bb517
|
[
"MIT"
] | null | null | null |
import json
import praw
reddit = praw.Reddit("dwight-schrute-bot")
for submission in reddit.subreddit('all').rising(limit=15):
submission.comments.replace_more(limit=None)
print(submission.subreddit.display_name)
if not submission.over_18:
for comment in submission.comments.list():
print()
| 29.454545
| 59
| 0.722222
| 42
| 324
| 5.5
| 0.666667
| 0.08658
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014815
| 0.166667
| 324
| 11
| 60
| 29.454545
| 0.840741
| 0
| 0
| 0
| 0
| 0
| 0.064615
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.222222
| 0
| 0.222222
| 0.222222
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88fe820e78b74b43c84647fdd224db13efd8f585
| 1,311
|
py
|
Python
|
Scripts/Client/ManualControlTest.py
|
Fzeak/sauvc-2019
|
573dcb351d0f87f9b7605667c570a5003bedb224
|
[
"MIT"
] | null | null | null |
Scripts/Client/ManualControlTest.py
|
Fzeak/sauvc-2019
|
573dcb351d0f87f9b7605667c570a5003bedb224
|
[
"MIT"
] | null | null | null |
Scripts/Client/ManualControlTest.py
|
Fzeak/sauvc-2019
|
573dcb351d0f87f9b7605667c570a5003bedb224
|
[
"MIT"
] | null | null | null |
from pymavlink import mavutil
import time
# Create the connection
master = mavutil.mavlink_connection('udpin:0.0.0.0:14550')
# Wait a heartbeat before sending commands
master.wait_heartbeat()
# Send a positive x value, negative y, negative z,
# positive rotation and no button.
# http://mavlink.org/messages/common#MANUAL_CONTROL
# Warning: Because of some legacy workaround, z will work between [0-1000]
# where 0 is full reverse, 500 is no output and 1000 is full throttle.
# x,y and r will be between [-1000 and 1000].
master.mav.manual_control_send(
master.target_system,
500,
-500,
250,
500,
0)
# To activate buttons 0 (first button), 3 (fourth button) and 7 (eighth button)
# It's possible to check and configure these buttons in the Joystick menu of QGC
buttons = (1 << 0) + (1 << 3) + (1 << 7)  # parentheses are required: '+' binds tighter than '<<'
master.mav.manual_control_send(
master.target_system,
0,
0,
0,
0,
buttons)
# Request all parameters
master.mav.param_request_list_send(
master.target_system, master.target_component
)
while True:
time.sleep(0.01)
try:
message = master.recv_match(type='PARAM_VALUE', blocking=True).to_dict()
print('name: %s\tvalue: %d' % (message['param_id'].decode("utf-8"), message['param_value']))
except Exception as e:
print(e)
exit(0)
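# Sanity check of the button bitmask above, assuming the usual MANUAL_CONTROL
# convention that bit n of the bitmask enables button n.
# Buttons 0, 3 and 7 -> bits 0, 3 and 7:
assert (1 << 0) + (1 << 3) + (1 << 7) == 0b10001001 == 137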
| 28.5
| 100
| 0.695652
| 200
| 1,311
| 4.465
| 0.54
| 0.013438
| 0.013438
| 0.073908
| 0.098544
| 0.098544
| 0.098544
| 0.098544
| 0
| 0
| 0
| 0.05698
| 0.196796
| 1,311
| 45
| 101
| 29.133333
| 0.791073
| 0.423341
| 0
| 0.333333
| 0
| 0
| 0.09825
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.066667
| 0
| 0.066667
| 0.066667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
00002c1133ee1a3e69c2c023cddb9b34c36440ca
| 1,634
|
py
|
Python
|
setup.py
|
DNA-and-Natural-Algorithms-Group/peppercompiler
|
effbcdedfb17534300fb3504a552e46c1ead41e4
|
[
"MIT"
] | 3
|
2019-06-10T18:44:03.000Z
|
2021-11-17T10:57:09.000Z
|
setup.py
|
DNA-and-Natural-Algorithms-Group/peppercompiler
|
effbcdedfb17534300fb3504a552e46c1ead41e4
|
[
"MIT"
] | 2
|
2017-12-15T01:09:49.000Z
|
2021-03-25T20:42:23.000Z
|
setup.py
|
DNA-and-Natural-Algorithms-Group/peppercompiler
|
effbcdedfb17534300fb3504a552e46c1ead41e4
|
[
"MIT"
] | 4
|
2017-08-21T03:32:51.000Z
|
2019-10-18T04:09:38.000Z
|
#!/usr/bin/env python
from setuptools import setup
from distutils.command.build import build
from setuptools.command.develop import develop
class build_with_spurious(build):
def run(self):
import os
if "CC" in os.environ:
cc = os.environ['CC']
else:
cc = "cc"
os.system(
"{} -Wall -O3 peppercompiler/SpuriousDesign/spuriousSSM.c -o peppercompiler/_spuriousSSM -lm".
format(cc))
build.run(self)
class develop_with_spurious(develop):
def run(self):
import os
os.system(
"cc -Wall -O3 peppercompiler/SpuriousDesign/spuriousSSM.c -o peppercompiler/_spuriousSSM -lm"
)
develop.run(self)
setup(
name="peppercompiler",
version="0.1.3",
packages=['peppercompiler', 'peppercompiler.design'],
install_requires=["pyparsing", "six"],
include_package_data=True,
package_data={
'peppercompiler': ['_spuriousSSM', 'SpuriousDesign/spuriousSSM.c']
},
test_suite='peppercompiler.tests',
cmdclass={'build': build_with_spurious,
'develop': develop_with_spurious},
entry_points={
'console_scripts': [
'pepper-compiler = peppercompiler.compiler:main',
'pepper-design-spurious = peppercompiler.design.spurious_design:main',
'pepper-finish = peppercompiler.finish:main',
'spuriousSSM = peppercompiler._spuriousSSM_wrapper:main'
]
},
author="Constantine Evans et al (this version)",
author_email="cge@dna.caltech.edu",
description="PepperCompiler in a pythonic form")
| 29.709091
| 106
| 0.641983
| 170
| 1,634
| 6.041176
| 0.452941
| 0.046738
| 0.075949
| 0.031159
| 0.179163
| 0.144109
| 0.144109
| 0.144109
| 0.144109
| 0.144109
| 0
| 0.004036
| 0.241738
| 1,634
| 54
| 107
| 30.259259
| 0.824859
| 0.01224
| 0
| 0.136364
| 0
| 0
| 0.405456
| 0.215127
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.113636
| 0
| 0.204545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
00004b28f6ae2b9a9b673b26fbf0fba70c90416d
| 1,126
|
py
|
Python
|
client/client.py
|
flavioribeiro/playmobil
|
d104b80fd666158e7ae3d1e28ce8d3ba68e93a68
|
[
"Apache-2.0"
] | 1
|
2016-10-27T21:30:30.000Z
|
2016-10-27T21:30:30.000Z
|
client/client.py
|
flavioribeiro/playmobil
|
d104b80fd666158e7ae3d1e28ce8d3ba68e93a68
|
[
"Apache-2.0"
] | null | null | null |
client/client.py
|
flavioribeiro/playmobil
|
d104b80fd666158e7ae3d1e28ce8d3ba68e93a68
|
[
"Apache-2.0"
] | null | null | null |
import sys
sys.path.append("/Library/Frameworks/GStreamer.framework/Versions/0.10/lib/python2.7/site-packages/")
import gobject
gobject.threads_init()
import pygst
pygst.require("0.10")
import gst
class Client(object):
def __init__(self):
self.pipeline = gst.Pipeline('client')
self.videotestsrc = self.create_element('videotestsrc', 'video')
self.theoraenc = self.create_element('theoraenc', 'encoder')
self.oggmux = self.create_element('oggmux', 'muxer')
self.tcpserversink = self.create_element('tcpserversink', 'serversink')
self.tcpserversink.set_property('host', '0.0.0.0')
self.tcpserversink.set_property('port', 8080)
self.pipeline.add(self.videotestsrc, self.theoraenc, self.oggmux, self.tcpserversink)
gst.element_link_many(self.videotestsrc, self.theoraenc, self.oggmux, self.tcpserversink)
def create_element(self, element, name):
return gst.element_factory_make(element, name)
def start(self):
self.pipeline.set_state(gst.STATE_PLAYING)
client = Client()
client.start()
loop = gobject.MainLoop()
loop.run()
| 34.121212
| 101
| 0.71492
| 139
| 1,126
| 5.661871
| 0.402878
| 0.082592
| 0.086404
| 0.071156
| 0.142313
| 0.142313
| 0.142313
| 0.142313
| 0
| 0
| 0
| 0.016736
| 0.150977
| 1,126
| 32
| 102
| 35.1875
| 0.806485
| 0
| 0
| 0
| 0
| 0.038462
| 0.154529
| 0.072824
| 0
| 0
| 0
| 0
| 0
| 1
| 0.115385
| false
| 0
| 0.153846
| 0.038462
| 0.346154
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
00005f4a70c5144076952dbbb1c77de24a5e43d7
| 3,852
|
py
|
Python
|
InternetSemLimites/api/tests/test_edit_view.py
|
InternetSemLimites/PublicAPI
|
3dd0f17fe66688ef2895de540950f45d69bcd9d8
|
[
"MIT"
] | 18
|
2016-04-14T17:03:29.000Z
|
2020-01-01T00:54:03.000Z
|
InternetSemLimites/api/tests/test_edit_view.py
|
InternetSemLimites/PublicAPI
|
3dd0f17fe66688ef2895de540950f45d69bcd9d8
|
[
"MIT"
] | 48
|
2016-04-15T12:33:33.000Z
|
2018-01-25T16:01:45.000Z
|
InternetSemLimites/api/tests/test_edit_view.py
|
InternetSemLimites/PublicAPI
|
3dd0f17fe66688ef2895de540950f45d69bcd9d8
|
[
"MIT"
] | 4
|
2016-04-15T07:57:04.000Z
|
2017-09-10T18:10:40.000Z
|
from django.contrib.auth.models import User
from django.core import mail
from django.shortcuts import resolve_url
from django.test import TestCase
from InternetSemLimites.core.forms import ProviderForm
from InternetSemLimites.core.models import Provider, State
class TestPostValid(TestCase):
def setUp(self):
User.objects.create_superuser(username='two', password='', email='42@xp.to')
sc, *_ = State.objects.get_or_create(abbr='SC', name='Santa Catarina')
go, *_ = State.objects.get_or_create(abbr='GO', name='Goiás')
self.provider = Provider.objects.create(
name='Xpto',
url='http://xp.to',
source='http://twitter.com/xpto',
category=Provider.SHAME,
other='Lorem ipsum'
)
self.provider.coverage.add(sc)
self.provider.coverage.add(go)
self.data = {
'name': 'XptoEdited',
'url': 'http://xpedited.to',
'source': 'http://twitter.com/xptoedited',
'coverage': [sc.pk],
'category': Provider.FAME,
'other': 'Lorem ipsum dolor'
}
self.resp = self.client.post(resolve_url('api:provider', self.provider.pk), self.data)
self.edited_provider = Provider.objects.last()
def test_not_allowed_methods(self):
url = resolve_url('api:provider', self.provider.pk)
for r in (self.client.delete(url), self.client.patch(url, self.data)):
with self.subTest():
self.assertEqual(405, r.status_code)
def test_post(self):
self.assertRedirects(self.resp, resolve_url('api:provider', self.edited_provider.pk))
def test_send_email(self):
self.assertEqual(1, len(mail.outbox))
def test_edit(self):
edited_provider_coverage_ids = [state.id for state in self.edited_provider.coverage.all()]
self.assertEqual(self.edited_provider.name, self.data['name'])
self.assertEqual(self.edited_provider.url, self.data['url'])
self.assertEqual(self.edited_provider.source, self.data['source'])
self.assertEqual(self.edited_provider.category, self.data['category'])
self.assertEqual(self.edited_provider.other, self.data['other'])
self.assertEqual(edited_provider_coverage_ids, self.data['coverage'])
class TestPostInvalid(TestCase):
def setUp(self):
User.objects.create_superuser(username='two', password='', email='42@xp.to')
sc, *_ = State.objects.get_or_create(abbr='SC', name='Santa Catarina')
go, *_ = State.objects.get_or_create(abbr='GO', name='Goiás')
self.provider = Provider.objects.create(
name='Xpto',
url='http://xp.to',
source='http://twitter.com/xpto',
category=Provider.SHAME,
other='Lorem ipsum'
)
self.provider.coverage.add(sc)
self.provider.coverage.add(go)
self.resp = self.client.post(resolve_url('api:provider', self.provider.pk), dict())
def test_post(self):
self.assertEqual(422, self.resp.status_code)
def test_has_errors_on_empty_form(self):
json_resp = self.resp.json()
self.assertTrue(json_resp['errors'])
def test_has_errors_on_non_empty_form(self):
invalid_data = {'name': 'Xpto', 'coverage': ['xp', 'to'], 'url': ''}
resp = self.client.post(resolve_url('api:provider', self.provider.pk), invalid_data)
json_resp = resp.json()
errors = json_resp['errors']
with self.subTest():
self.assertEqual('Este campo é obrigatório.', errors['category'][0])
self.assertEqual('Este campo é obrigatório.', errors['source'][0])
self.assertEqual('Este campo é obrigatório.', errors['url'][0])
self.assertIn('não é um valor válido', errors['coverage'][0])
| 39.306122
| 98
| 0.637072
| 474
| 3,852
| 5.048523
| 0.236287
| 0.075219
| 0.067697
| 0.043878
| 0.550355
| 0.412035
| 0.412035
| 0.379858
| 0.34392
| 0.34392
| 0
| 0.004988
| 0.219367
| 3,852
| 97
| 99
| 39.71134
| 0.790821
| 0
| 0
| 0.363636
| 0
| 0
| 0.136552
| 0
| 0
| 0
| 0
| 0
| 0.194805
| 1
| 0.116883
| false
| 0.025974
| 0.077922
| 0
| 0.220779
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
000069dbeffca39b13535cbec664af30b8b425d2
| 351
|
py
|
Python
|
algorithm-study/therory/selectionSort.py
|
Seongkyun-Yu/TIL
|
2be6a2a68246bc98996b1421e2cc20e025c876ed
|
[
"MIT"
] | 1
|
2020-02-17T15:15:55.000Z
|
2020-02-17T15:15:55.000Z
|
algorithm-study/therory/selectionSort.py
|
Seongkyun-Yu/TIL
|
2be6a2a68246bc98996b1421e2cc20e025c876ed
|
[
"MIT"
] | 6
|
2020-07-31T17:03:56.000Z
|
2022-02-27T04:17:57.000Z
|
algorithm-study/therory/selectionSort.py
|
Seongkyun-Yu/TIL
|
2be6a2a68246bc98996b1421e2cc20e025c876ed
|
[
"MIT"
] | null | null | null |
import random
data_list = random.sample(range(100), 50)
def selectionSort(arr):
for index1 in range(len(arr) - 1):
lowestIndex = index1
for index2 in range(index1, len(arr)):
if(arr[lowestIndex] > arr[index2]):
lowestIndex = index2
        arr[index1], arr[lowestIndex] = arr[lowestIndex], arr[index1]  # swap, so no values are overwritten
return arr
print(selectionSort(data_list))
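# Quick check against Python's built-in sort (the sample list is arbitrary).
sample = [29, 10, 14, 37, 13]
assert selectionSort(list(sample)) == sorted(sample)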
| 18.473684
| 42
| 0.65812
| 45
| 351
| 5.088889
| 0.466667
| 0.069869
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.047794
| 0.225071
| 351
| 18
| 43
| 19.5
| 0.794118
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.090909
| 0
| 0.272727
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
00021f532b1a8ddd0a8c15783c8737edde030453
| 12,045
|
py
|
Python
|
platform/core/polyaxon/monitor_statuses/monitor.py
|
hackerwins/polyaxon
|
ff56a098283ca872abfbaae6ba8abba479ffa394
|
[
"Apache-2.0"
] | null | null | null |
platform/core/polyaxon/monitor_statuses/monitor.py
|
hackerwins/polyaxon
|
ff56a098283ca872abfbaae6ba8abba479ffa394
|
[
"Apache-2.0"
] | null | null | null |
platform/core/polyaxon/monitor_statuses/monitor.py
|
hackerwins/polyaxon
|
ff56a098283ca872abfbaae6ba8abba479ffa394
|
[
"Apache-2.0"
] | null | null | null |
import logging
from typing import Any, Mapping
import redis
import conf
import ocular
import workers
from constants.experiment_jobs import get_experiment_job_uuid
from db.redis.containers import RedisJobContainers
from db.redis.statuses import RedisStatuses
from lifecycles.jobs import JobLifeCycle
from options.registry.container_names import (
CONTAINER_NAME_BUILD_JOBS,
CONTAINER_NAME_EXPERIMENT_JOBS,
CONTAINER_NAME_JOBS,
CONTAINER_NAME_PLUGIN_JOBS,
CONTAINER_NAME_PYTORCH_JOBS,
CONTAINER_NAME_TF_JOBS
)
from options.registry.spawner import (
APP_LABELS_DOCKERIZER,
APP_LABELS_EXPERIMENT,
APP_LABELS_JOB,
APP_LABELS_NOTEBOOK,
APP_LABELS_TENSORBOARD,
ROLE_LABELS_DASHBOARD,
ROLE_LABELS_WORKER,
TYPE_LABELS_RUNNER
)
from options.registry.ttl import TTL_WATCH_STATUSES
from polyaxon.settings import K8SEventsCeleryTasks
logger = logging.getLogger('polyaxon.monitors.statuses')
def update_job_containers(event: Mapping,
status: str,
job_container_name: str) -> None:
job_containers = RedisJobContainers()
if JobLifeCycle.is_done(status):
# Remove the job monitoring
job_uuid = event['metadata']['labels']['job_uuid']
logger.info('Stop monitoring job_uuid: %s', job_uuid)
job_containers.remove_job(job_uuid)
if event['status']['container_statuses'] is None:
return
def get_container_id(container_id):
if not container_id:
return None
if container_id.startswith('docker://'):
return container_id[len('docker://'):]
return container_id
for container_status in event['status']['container_statuses']:
if container_status['name'] != job_container_name:
continue
container_id = get_container_id(container_status['container_id'])
if container_id:
job_uuid = event['metadata']['labels']['job_uuid']
if container_status['state']['running'] is not None:
logger.info('Monitoring (container_id, job_uuid): (%s, %s)',
container_id, job_uuid)
job_containers.monitor(container_id=container_id, job_uuid=job_uuid)
else:
job_containers.remove_container(container_id=container_id)
def get_restart_count(event: Mapping, job_container_name: str) -> int:
if event['status']['container_statuses'] is None:
return 0
for container_status in event['status']['container_statuses']:
if container_status['name'] != job_container_name:
continue
return container_status['restart_count'] or 0
return 0
def get_label_selector() -> str:
return 'role in ({},{}),type={}'.format(
conf.get(ROLE_LABELS_WORKER),
conf.get(ROLE_LABELS_DASHBOARD),
conf.get(TYPE_LABELS_RUNNER))
def should_handle_job_status(pod_state: Any, status: str) -> bool:
job_uuid = pod_state['details']['labels']['job_uuid']
current_status = RedisStatuses.get_status(job=job_uuid)
if not current_status: # If the status does not exist or is evicted
return True
try:
return JobLifeCycle.can_transition(status_from=RedisStatuses.get_status(job=job_uuid),
status_to=status)
except redis.connection.ConnectionError:
return True
def handle_job_condition(event_object,
pod_state,
status,
labels,
container_name,
task_name,
update_containers):
if update_containers:
try:
update_job_containers(event_object, status, container_name)
except redis.connection.ConnectionError:
pass
# Handle experiment job statuses
if should_handle_job_status(pod_state=pod_state, status=status):
logger.debug("Sending state to handler %s, %s", status, labels)
restart_count = get_restart_count(event_object, container_name)
pod_state['restart_count'] = restart_count or 0
workers.send(task_name, kwargs={'payload': pod_state}, countdown=None)
def run(k8s_manager: 'K8SManager') -> None:
# pylint:disable=too-many-branches
# Local cache
label_selector = get_label_selector()
container_name_experiment_job = conf.get(CONTAINER_NAME_EXPERIMENT_JOBS)
container_name_tf_job = conf.get(CONTAINER_NAME_TF_JOBS)
container_name_pytorch_job = conf.get(CONTAINER_NAME_PYTORCH_JOBS)
container_name_plugin_job = conf.get(CONTAINER_NAME_PLUGIN_JOBS)
container_name_job = conf.get(CONTAINER_NAME_JOBS)
container_name_build_job = conf.get(CONTAINER_NAME_BUILD_JOBS)
watch_ttl = conf.get(TTL_WATCH_STATUSES)
app_labels_experiment = conf.get(APP_LABELS_EXPERIMENT)
app_labels_job = conf.get(APP_LABELS_JOB)
app_labels_build_job = conf.get(APP_LABELS_DOCKERIZER)
app_labels_tensorboard = conf.get(APP_LABELS_TENSORBOARD)
app_labels_notebook = conf.get(APP_LABELS_NOTEBOOK)
for (event_object, pod_state) in ocular.monitor(k8s_manager.k8s_api,
namespace=k8s_manager.namespace,
container_names=(
container_name_experiment_job,
container_name_tf_job,
container_name_pytorch_job,
container_name_plugin_job,
container_name_job,
container_name_build_job),
label_selector=label_selector,
return_event=True,
watch_ttl=watch_ttl):
logger.debug('-------------------------------------------\n%s\n', pod_state)
if not pod_state:
continue
status = pod_state['status']
labels = None
if pod_state['details'] and pod_state['details']['labels']:
labels = pod_state['details']['labels']
logger.info("Updating job container %s, %s", status, labels)
experiment_condition = status and labels['app'] == app_labels_experiment
experiment_job_condition = (
container_name_experiment_job in pod_state['details']['container_statuses']
or 'job_uuid' in labels
)
tf_job_condition = (
container_name_tf_job in pod_state['details']['container_statuses']
or 'tf-replica-index' in labels
)
mpi_job_condition = 'mpi_job_name' in labels
pytorch_job_condition = (
container_name_pytorch_job in pod_state['details']['container_statuses']
            or 'pytorch-replica-index' in labels
)
job_condition = (
container_name_job in pod_state['details']['container_statuses'] or
(status and labels['app'] == app_labels_job)
)
plugin_job_condition = (
container_name_plugin_job in pod_state['details']['container_statuses'] or
(status and
labels['app'] in (app_labels_tensorboard, app_labels_notebook))
)
dockerizer_job_condition = (
container_name_build_job in pod_state['details']['container_statuses']
or (status and labels['app'] == app_labels_build_job)
)
if experiment_condition:
if tf_job_condition:
# We augment the payload with standard Polyaxon requirement
pod_state['details']['labels']['job_uuid'] = get_experiment_job_uuid(
experiment_uuid=labels['experiment_uuid'],
task_type=labels['task_type'],
task_index=labels['tf-replica-index']
)
handle_job_condition(
event_object=event_object,
pod_state=pod_state,
status=status,
labels=labels,
container_name=container_name_tf_job,
task_name=K8SEventsCeleryTasks.K8S_EVENTS_HANDLE_EXPERIMENT_JOB_STATUSES,
update_containers=False
)
elif pytorch_job_condition:
# We augment the payload with standard Polyaxon requirement
pod_state['details']['labels']['job_uuid'] = get_experiment_job_uuid(
experiment_uuid=labels['experiment_uuid'],
task_type=labels['task_type'],
task_index=labels['pytorch-replica-index']
)
handle_job_condition(
event_object=event_object,
pod_state=pod_state,
status=status,
labels=labels,
container_name=container_name_pytorch_job,
task_name=K8SEventsCeleryTasks.K8S_EVENTS_HANDLE_EXPERIMENT_JOB_STATUSES,
update_containers=False
)
elif mpi_job_condition:
job_name = pod_state['details']['pod_name']
parts = job_name.split('-')
if len(parts) != 4:
continue
# We augment the payload with standard Polyaxon requirement
pod_state['details']['labels']['job_uuid'] = get_experiment_job_uuid(
experiment_uuid=labels['experiment_uuid'],
task_type=labels['task_type'],
task_index=parts[-1]
)
handle_job_condition(
event_object=event_object,
pod_state=pod_state,
status=status,
labels=labels,
container_name=container_name_experiment_job,
task_name=K8SEventsCeleryTasks.K8S_EVENTS_HANDLE_EXPERIMENT_JOB_STATUSES,
update_containers=False
)
elif experiment_job_condition:
handle_job_condition(
event_object=event_object,
pod_state=pod_state,
status=status,
labels=labels,
container_name=container_name_experiment_job,
task_name=K8SEventsCeleryTasks.K8S_EVENTS_HANDLE_EXPERIMENT_JOB_STATUSES,
update_containers=False
)
elif job_condition:
handle_job_condition(
event_object=event_object,
pod_state=pod_state,
status=status,
labels=labels,
container_name=container_name_job,
task_name=K8SEventsCeleryTasks.K8S_EVENTS_HANDLE_JOB_STATUSES,
update_containers=False
)
elif plugin_job_condition:
handle_job_condition(
event_object=event_object,
pod_state=pod_state,
status=status,
labels=labels,
container_name=container_name_plugin_job,
task_name=K8SEventsCeleryTasks.K8S_EVENTS_HANDLE_PLUGIN_JOB_STATUSES,
update_containers=False
)
elif dockerizer_job_condition:
handle_job_condition(
event_object=event_object,
pod_state=pod_state,
status=status,
labels=labels,
container_name=container_name_build_job,
task_name=K8SEventsCeleryTasks.K8S_EVENTS_HANDLE_BUILD_JOB_STATUSES,
update_containers=False
)
else:
logger.info("Lost state %s, %s", status, pod_state)
| 39.491803
| 94
| 0.594687
| 1,234
| 12,045
| 5.423825
| 0.120746
| 0.099059
| 0.031376
| 0.025549
| 0.514269
| 0.466906
| 0.37457
| 0.344091
| 0.314059
| 0.314059
| 0
| 0.003225
| 0.330677
| 12,045
| 304
| 95
| 39.621711
| 0.826966
| 0.026401
| 0
| 0.313492
| 0
| 0
| 0.080901
| 0.009985
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027778
| false
| 0.003968
| 0.055556
| 0.003968
| 0.126984
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0003b8ab877e0ee926932cb4211e0799fd7d5511
| 14,636
|
py
|
Python
|
flow65/wing_tool.py
|
corygoates/Flow65
|
148eddaeeed8711eae37a16820215c89f93f01d5
|
[
"MIT"
] | null | null | null |
flow65/wing_tool.py
|
corygoates/Flow65
|
148eddaeeed8711eae37a16820215c89f93f01d5
|
[
"MIT"
] | null | null | null |
flow65/wing_tool.py
|
corygoates/Flow65
|
148eddaeeed8711eae37a16820215c89f93f01d5
|
[
"MIT"
] | null | null | null |
import sys
import json
import numpy as np
import matplotlib.pyplot as plt
class Wing:
"""A class for modeling a finite wing using the sine-series solution to Prandtl's lifting-line equation.
Parameters
----------
planform : str
May be "elliptic" or "tapered".
b : float
Wingspan.
AR : float
Aspect ratio.
RT : float
Taper ratio. Only required for "tapered" planform.
CL_a_section : float, optional
Section lift slope. Defaults to 2 pi.
washout : str
May be "none", "linear", or "optimum".
washout_mag : float
Magnitude of the washout at the tip in degrees.
washout_CLd : float
Design lift coefficient for washout. Only required if "washout"
is "optimum".
aileron_lims : list, optional
Aileron limits as a fraction of the span. Defaults to entire span.
aileron_cf: list, optional
Aileron chord fractions at the root and tip of the ailerons. Defaults to 0.0.
aileron_hinge_eff : float, optional
Aileron hinge efficiency. Defaults to 1.0.
"""
def __init__(self, **kwargs):
# Get planform parameters
self._planform_type = kwargs["planform"]
self._AR = kwargs["AR"]
self._b = kwargs["b"]
if self._planform_type == "tapered":
self._RT = kwargs["RT"]
self._CL_a_s = kwargs.get("CL_a_section", 2.0*np.pi)
# Get washout parameters
self._washout_type = kwargs.get("washout", "none")
if self._washout_type != "none":
self._W = np.radians(kwargs.get("washout_mag", 0.0))
else:
self._W = 0.0
if self._washout_type == "optimum":
self._CL_d = kwargs["washout_CLd"]
# Get aileron parameters
self._aln_lims = kwargs.get("aileron_lims", [0.0, 0.5])
self._aln_cf = kwargs.get("aileron_cf", [0.0, 0.0])
self._aln_e_hinge = kwargs.get("aileron_hinge_eff", 1.0)
def set_grid(self, N):
"""Sets the spanwise grid for the wing. Uses cosine clustering
Parameters
----------
N : int
Number of nodes per semispan to specify. Note that one node will
be placed at the root, making the total number of nodes 2N-1.
"""
np.set_printoptions(linewidth=np.inf, precision=12)
# Create theta and z distributions
self._N = 2*N-1
self._theta = np.linspace(0, np.pi, self._N)
self._z = -0.5*self._b*np.cos(self._theta)
# Calculate control point trig values
self._N_range = np.arange(1, self._N+1)
self._S_theta = np.sin(self._theta)
# Calculate chord distribution
if self._planform_type == "elliptic":
self._c_b = 4.0*self._S_theta/(np.pi*self._AR)
else:
self._c_b = 2.0*(1.0-(1.0-self._RT)*np.abs(np.cos(self._theta)))/(self._AR*(1.0+self._RT))
self._c_b = np.where(self._c_b==0.0, 1e-6, self._c_b)
# Calculate washout distribution
if self._washout_type == "none":
self._w = np.zeros(self._N)
elif self._washout_type == "linear":
self._w = np.abs(np.cos(self._theta))
elif self._washout_type == "optimum":
self._w = 1.0-self._S_theta*self._c_b[self._N//2]/self._c_b
self._W = 4.0*self._CL_d/(np.pi*self._AR*self._CL_a_s*self._c_b[self._N//2])
# Determine aileron chord fractions
self._cf = np.zeros(self._N)
z_in_aileron = ((self._z>self._aln_lims[0]) & (self._z<self._aln_lims[1])) | ((self._z>-self._aln_lims[1]) & (self._z<-self._aln_lims[0]))
if self._planform_type == "elliptic":
self._c_aln_tip = (4.0/(np.pi*self._AR)*np.sqrt(1.0-(2.0*self._aln_lims[1])**2))
self._c_aln_root = (4.0/(np.pi*self._AR)*np.sqrt(1.0-(2.0*self._aln_lims[0])**2))
else:
self._c_aln_tip = (2.0/(self._AR*(1.0+self._RT))*(1.0-(1.0-self._RT)*2.0*self._aln_lims[1]))
self._c_aln_root = (2.0/(self._AR*(1.0+self._RT))*(1.0-(1.0-self._RT)*2.0*self._aln_lims[0]))
self._x_h_tip = -(1.0-self._aln_cf[1]-0.25)*self._c_aln_tip
self._x_h_root = -(1.0-self._aln_cf[0]-0.25)*self._c_aln_root
aln_b = (self._x_h_tip-self._x_h_root)/(self._aln_lims[1]-self._aln_lims[0])
x_h = z_in_aileron[self._N//2:]*(self._x_h_root+(self._z[self._N//2:]-self._aln_lims[0])*aln_b)
self._cf[self._N//2:] = 1.0-(-x_h/self._c_b[self._N//2:]+0.25)
self._cf[self._N//2::-1] = 1.0-(-x_h/self._c_b[self._N//2:]+0.25)
self._cf *= z_in_aileron
# Determine flap efficiency
theta_f = np.arccos(2.0*self._cf-1.0)
self._e_f = (1.0-(theta_f-np.sin(theta_f))/np.pi)*self._aln_e_hinge
self._e_f[self._N//2:] *= -1.0
# Get C matrix
self._C = np.zeros((self._N, self._N))
self._C[0,:] = self._N_range**2
self._C[1:-1,:] = (4.0/(self._CL_a_s*self._c_b[1:-1,np.newaxis])+self._N_range[np.newaxis,:]/self._S_theta[1:-1,np.newaxis])*np.sin(self._N_range[np.newaxis,:]*self._theta[1:-1,np.newaxis])
self._C[-1,:] = (-1.0)**(self._N_range+1)*self._N_range**2
# Get C inverse (why on earth, I have no idea...)
self._C_inv = np.linalg.inv(self._C)
# Determine the Fourier coefficients
self._a_n = np.linalg.solve(self._C, np.ones(self._N))
self._b_n = np.linalg.solve(self._C, self._w)
self._c_n = np.linalg.solve(self._C, self._e_f)
self._d_n = np.linalg.solve(self._C, np.cos(self._theta))
# Determine coefficient slopes
self.CL_a = np.pi*self._AR*self._a_n[0]
# Determine the kappa factors due to planform
self.K_D = np.sum(np.arange(2, self._N+1)*self._a_n[1:]**2/self._a_n[0]**2)
A = (1+np.pi*self._AR/self._CL_a_s)*self._a_n[0]
self.K_L = (1.0-A)/A
# Determine span efficiency factor
self.e_s = 1.0/(1.0+self.K_D)
# Determine kappa factors due to washout
if self._washout_type != "none":
self.e_omega = self._b_n[0]/self._a_n[0]
self.K_DL = 2.0*self._b_n[0]/self._a_n[0]*np.sum(self._N_range[1:]*self._a_n[1:]/self._a_n[0]*(self._b_n[1:]/self._b_n[0]-self._a_n[1:]/self._a_n[0]))
self.K_Domega = (self._b_n[0]/self._a_n[0])**2*np.sum(self._N_range[1:]*(self._b_n[1:]/self._b_n[0]-self._a_n[1:]/self._a_n[0])**2)
self.K_Do = self.K_D-0.25*self.K_DL**2/self.K_Domega
else:
self.e_omega = 0.0
self.K_DL = 0.0
self.K_Domega = 0.0
self.K_Do = 0.0
# Determine aileron and roll derivatives
self.Cl_da = -0.25*np.pi*self._AR*self._c_n[1]
self.Cl_p = -0.25*np.pi*self._AR*self._d_n[1]
def set_condition(self, **kwargs):
"""Sets atmospheric condition for the wing.
Parameters
----------
alpha : float
Angle of attack in degrees.
V_inf : float
Freestream velocity.
da : float, optional
Aileron deflection in degrees. Defaults to 0.0.
p_bar : float or string, optional
Nondimensional rolling rate. May be "steady" to imply the steady roll rate should be solved for. Defaults to 0.0.
"""
# Store condition
self._alpha = np.radians(kwargs["alpha"])
self._V_inf = kwargs["V_inf"]
self._da = np.radians(kwargs.get("da", 0.0))
self._p_bar = kwargs.get("p_bar", 0.0)
def solve(self):
"""Solves for the aerodynamic coefficients at the current condition."""
# Determine rolling moment/rate
if self._p_bar == "steady":
self.Cl = 0.0
self.p_bar = -self.Cl_da*self._da/self.Cl_p
else:
self.p_bar = self._p_bar
self.Cl = self.Cl_da*self._da+self.Cl_p*self.p_bar
# Determine Fourier coefficients dependent on condition
self._A_n = self._a_n*(self._alpha)-self._b_n*self._W+self._c_n*self._da+self._d_n*self.p_bar
# Determine lift coefficient
self.CL = np.pi*self._AR*self._A_n[0]
# Calculate gamma distribution
An_sin_n0 = self._A_n[np.newaxis,:]*np.sin(self._N_range[np.newaxis,:]*self._theta[:,np.newaxis])
self.gamma = 2.0*self._b*self._V_inf*np.sum(An_sin_n0, axis=1).flatten()
# Determine drag coefficient with and without rolling and aileron effects
self.CD_i = np.pi*self._AR*np.sum(self._N_range*self._A_n**2) # With
self.CD_i_simp = (self.CL**2*(1.0+self.K_D)-self.K_DL*self.CL*self.CL_a*self._W+self.K_Domega*(self.CL_a*self._W)**2)/(np.pi*self._AR)
# Determine yawing moment
C = 0.0
for i in range(3, self._N):
C += (2.0*i+1)*self._A_n[i-1]*self._A_n[i]
self.Cn = 0.125*self.CL*(6.0*self._A_n[1]-self.p_bar)+0.125*np.pi*self._AR*(10.0*self._A_n[1]-self.p_bar)*self._A_n[2]+0.25*np.pi*self._AR*C
def plot_planform(self):
"""Shows a plot of the planform."""
# Get leading and trailing edge points
x_le = np.zeros(self._N+2)
x_te = np.zeros(self._N+2)
x_le[1:-1] = 0.25*self._c_b
x_te[1:-1] = -0.75*self._c_b
z = np.zeros(self._N+2)
z[0] = self._z[0]
z[1:-1] = self._z
z[-1] = self._z[-1]
# Plot outline and LQC
plt.figure()
plt.plot(z, x_le, 'k-')
plt.plot(z, x_te, 'k-')
plt.plot(z, np.zeros(self._N+2), 'b-', label='c/4')
# Plot spanwise stations
for i in range(self._N):
plt.plot([z[i+1], z[i+1]], [x_le[i+1], x_te[i+1]], 'b--')
# Plot ailerons
plt.plot([self._aln_lims[0], self._aln_lims[0], self._aln_lims[1], self._aln_lims[1]],
[-0.75*self._c_aln_root, self._x_h_root, self._x_h_tip, -0.75*self._c_aln_tip],
'k-')
plt.plot([-self._aln_lims[0], -self._aln_lims[0], -self._aln_lims[1], -self._aln_lims[1]],
[-0.75*self._c_aln_root, self._x_h_root, self._x_h_tip, -0.75*self._c_aln_tip],
'k-')
# Plot labels
plt.xlabel('z/b')
plt.ylabel('x/b')
plt.title('Planform')
plt.gca().set_aspect('equal', adjustable='box')
plt.legend(loc='upper right')
plt.show()
def plot_washout(self):
"""Plots the washout distribution on the wing."""
plt.figure()
plt.plot(self._z, self._w, 'k-')
plt.xlabel("z/b")
plt.ylabel("Washout [deg]")
plt.title("Washout Distribution")
plt.show()
def plot_aileron(self):
"""Plots the aileron deflection distribution on the wing."""
plt.figure()
plt.plot(self._z, self._e_f, 'k-')
plt.xlabel("z/b")
plt.ylabel("Aileron Effectiveness")
plt.title("Aileron Effectiveness")
plt.show()
if __name__=="__main__":
# Read in input
input_file = sys.argv[-1]
with open(input_file, 'r') as input_handle:
input_dict = json.load(input_handle)
# Initialize wing
wing_dict = input_dict["wing"]
washout_dict = input_dict["wing"]["washout"]
aileron_dict = input_dict["wing"]["aileron"]
wing = Wing(planform=wing_dict["planform"]["type"],
AR=wing_dict["planform"]["aspect_ratio"],
RT=wing_dict["planform"].get("taper_ratio"),
CL_a_section=wing_dict["airfoil_lift_slope"],
washout=washout_dict["distribution"],
washout_mag=washout_dict["magnitude[deg]"],
washout_CLd=washout_dict["CL_design"],
aileron_lims=[aileron_dict["begin[z/b]"], aileron_dict["end[z/b]"]],
aileron_cf=[aileron_dict["begin[cf/c]"], aileron_dict["end[cf/c]"]],
aileron_hinge_eff=aileron_dict["hinge_efficiency"])
# Set up grid
wing.set_grid(wing_dict["nodes_per_semispan"])
# Set condition
cond_dict = input_dict["condition"]
wing.set_condition(alpha=cond_dict["alpha_root[deg]"],
da=cond_dict["aileron_deflection[deg]"],
p_bar=cond_dict["pbar"])
# Solve
wing.solve()
print()
print("Wing")
print(" Type: {0}".format(wing._planform_type))
print(" Aspect Ratio: {0}".format(wing._AR))
try:
print(" Taper Ratio: {0}".format(wing._RT))
except AttributeError:
pass
print(" Nodes: {0}".format(wing._N))
print()
print("Condition")
print(" Alpha: {0} deg".format(np.degrees(wing._alpha)))
print(" p_bar: {0}".format(wing.p_bar))
print()
print("Aerodynamic Coefficients")
print(" CL: {0}".format(wing.CL))
print(" CD_i (without roll and aileron effects): {0}".format(wing.CD_i_simp))
print(" CD_i (with roll and airleron effects): {0}".format(wing.CD_i))
print(" Cl: {0}".format(wing.Cl))
print(" Cn: {0}".format(wing.Cn))
print()
print("Planform Effects")
print(" CL,a: {0}".format(wing.CL_a))
print(" K_L: {0}".format(wing.K_L))
print(" K_D: {0}".format(wing.K_D))
print(" Span efficiency: {0}".format(wing.e_s))
print()
print("Washout Effects")
print(" Washout effectiveness: {0}".format(wing.e_omega))
print(" K_DL: {0}".format(wing.K_DL))
print(" Washout contribution to induced drag: {0}".format(wing.K_Domega))
print(" K_Do: {0}".format(wing.K_Do))
print()
print("Aileron Effects")
print(" Cl,da: {0}".format(wing.Cl_da))
print()
print("Roll Effects")
print(" Cl,p: {0}".format(wing.Cl_p))
# Check for plot requests
if input_dict["view"]["planform"]:
wing.plot_planform()
if input_dict["view"]["washout_distribution"]:
wing.plot_washout()
if input_dict["view"]["aileron_distribution"]:
wing.plot_aileron()
# Write solution
with open("Solution.txt", 'w') as f:
C_str = np.array2string(wing._C)
C_inv_str = np.array2string(wing._C_inv)
a_n_str = np.array2string(wing._a_n)
b_n_str = np.array2string(wing._b_n)
c_n_str = np.array2string(wing._c_n)
d_n_str = np.array2string(wing._d_n)
print("C array", file=f)
print(C_str, file=f)
print("C_inv array", file=f)
print(C_inv_str, file=f)
print("a_n", file=f)
print(a_n_str, file=f)
print("b_n", file=f)
print(b_n_str, file=f)
print("c_n", file=f)
print(c_n_str, file=f)
print("d_n", file=f)
print(d_n_str, file=f)
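# --- Example input (sketch) -----------------------------------------------------
# A minimal input file matching the keys read in __main__ above. Every numeric
# value here is an arbitrary assumption, not a recommended case.
import json

example_input = {
    "wing": {
        "planform": {"type": "tapered", "aspect_ratio": 8.0, "taper_ratio": 0.5},
        "airfoil_lift_slope": 6.2832,
        "washout": {"distribution": "linear", "magnitude[deg]": 2.0, "CL_design": 0.4},
        "aileron": {"begin[z/b]": 0.25, "end[z/b]": 0.45,
                    "begin[cf/c]": 0.2, "end[cf/c]": 0.2,
                    "hinge_efficiency": 0.85},
        "nodes_per_semispan": 40
    },
    "condition": {"alpha_root[deg]": 5.0, "aileron_deflection[deg]": 0.0, "pbar": 0.0},
    "view": {"planform": False, "washout_distribution": False,
             "aileron_distribution": False}
}

with open("example_input.json", "w") as f_example:
    json.dump(example_input, f_example, indent=4)
# Then run:  python wing_tool.py example_input.json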
| 36.227723
| 197
| 0.583903
| 2,342
| 14,636
| 3.382579
| 0.125534
| 0.031558
| 0.018177
| 0.01641
| 0.29336
| 0.206766
| 0.163974
| 0.120172
| 0.094168
| 0.086342
| 0
| 0.030131
| 0.253963
| 14,636
| 404
| 198
| 36.227723
| 0.695393
| 0.185023
| 0
| 0.119658
| 0
| 0
| 0.102032
| 0.00198
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029915
| false
| 0.004274
| 0.017094
| 0
| 0.051282
| 0.205128
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0005a7c39d8aea447086a691df1fb17b38ca23eb
| 927
|
py
|
Python
|
UAC.py
|
weareblahs/wsa-auto-install
|
0633d2b4e36ba50ddbe5b16505b8a09ff764df26
|
[
"MIT"
] | 76
|
2021-10-29T23:41:26.000Z
|
2021-12-09T06:31:04.000Z
|
UAC.py
|
weareblahs/wsa-auto-install
|
0633d2b4e36ba50ddbe5b16505b8a09ff764df26
|
[
"MIT"
] | 7
|
2021-11-10T19:05:26.000Z
|
2021-12-07T15:53:43.000Z
|
UAC.py
|
weareblahs/wsa-auto-install
|
0633d2b4e36ba50ddbe5b16505b8a09ff764df26
|
[
"MIT"
] | 16
|
2021-11-06T06:17:58.000Z
|
2021-12-08T22:08:24.000Z
|
def elevate():
    import ctypes, win32com.shell.shell, win32event, win32process, os, sys  # win32com.shell.shell must be imported explicitly for ShellExecuteEx
outpath = r'%s\%s.out' % (os.environ["TEMP"], os.path.basename(__file__))
if ctypes.windll.shell32.IsUserAnAdmin():
if os.path.isfile(outpath):
sys.stderr = sys.stdout = open(outpath, 'w', 0)
return
with open(outpath, 'w+', 0) as outfile:
hProc = win32com.shell.shell.ShellExecuteEx(lpFile=sys.executable, \
lpVerb='runas', lpParameters=' '.join(sys.argv), fMask=64, nShow=0)['hProcess']
while True:
hr = win32event.WaitForSingleObject(hProc, 40)
while True:
line = outfile.readline()
if not line: break
sys.stdout.write(line)
if hr != 0x102: break
os.remove(outpath)
sys.stderr = ''
sys.exit(win32process.GetExitCodeProcess(hProc))
if __name__ == '__main__':
elevate()
main()
| 38.625
| 91
| 0.593312
| 105
| 927
| 5.12381
| 0.590476
| 0.022305
| 0.05948
| 0.070632
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.036928
| 0.269687
| 927
| 24
| 92
| 38.625
| 0.757755
| 0
| 0
| 0.086957
| 0
| 0
| 0.040948
| 0
| 0
| 0
| 0.005388
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0.043478
| 0
| 0.130435
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0006b94d0b62d69c3ab03500298cb1fb2775bd17
| 629
|
py
|
Python
|
etikihead/urls.py
|
hodeld/etiki-prototype1
|
bcae893423519f6ddfa4f67b980066e04062d9f3
|
[
"MIT"
] | 1
|
2019-08-31T18:04:39.000Z
|
2019-08-31T18:04:39.000Z
|
etikihead/urls.py
|
hodeld/etiki-prototype1
|
bcae893423519f6ddfa4f67b980066e04062d9f3
|
[
"MIT"
] | 19
|
2019-12-12T01:38:49.000Z
|
2022-03-12T00:26:14.000Z
|
etikihead/urls.py
|
hodeld/etiki-prototype1
|
bcae893423519f6ddfa4f67b980066e04062d9f3
|
[
"MIT"
] | null | null | null |
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from . import views
app_name = 'etikihead'
urlpatterns = [
path('', views.entry_mask, name='entrymask'),
path('contact/', views.contact, name='contact'),
path('privacy/', views.privacy, name='privacy'),
path('terms/', views.legal, name='legal'),
path('impressum/', views.impressum, name='impressum'),
path('about/', views.about, name='about'),
path('faq/', views.faq, name='faq'),
path('todo/', views.todo, name='todo'),
path('startinfo/', views.startinfo, name='startinfo'), #
]
| 26.208333
| 61
| 0.658188
| 78
| 629
| 5.282051
| 0.320513
| 0.072816
| 0.067961
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.151033
| 629
| 23
| 62
| 27.347826
| 0.771536
| 0
| 0
| 0
| 0
| 0
| 0.197452
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0007473466f54c5bf5a586f0d058eba177c13018
| 1,070
|
py
|
Python
|
test_proj/test_tcA002.py
|
leeltib/vizsgamunka_ref
|
59dc64d499c32a548a6e83c251cf16e2787e8672
|
[
"MIT"
] | null | null | null |
test_proj/test_tcA002.py
|
leeltib/vizsgamunka_ref
|
59dc64d499c32a548a6e83c251cf16e2787e8672
|
[
"MIT"
] | null | null | null |
test_proj/test_tcA002.py
|
leeltib/vizsgamunka_ref
|
59dc64d499c32a548a6e83c251cf16e2787e8672
|
[
"MIT"
] | null | null | null |
# TC002 test case - Log in with new user data - exit
import data.data_tcA002 as da02
import func.func_01 as fu01
from selenium import webdriver
from selenium.webdriver.common.by import By
import time
from selenium.webdriver.chrome.options import Options
from webdriver_manager.chrome import ChromeDriverManager
options = Options()
options.headless = True
driver = webdriver.Chrome(executable_path=ChromeDriverManager().install(), options=options)
driver.get("http://localhost:1667")
# Wait for loading
fu01.wait(driver, By.ID, "app", 2)
# *** TC-A002 **************************************
def test_A002():
fu01.cookie_ok(driver)
fu01.sign_in(driver, da02.mail, da02.passw)
usern_text = fu01.login_check(driver)
fu01.out_close_driver(driver)
return usern_text
username_text = test_A002()
# ***************************************************
# Normal run
if __name__ == "__main__":
print(username_text)
try:
assert da02.name == username_text
except:
print("Hiba, az ellenőrző feltételnél nincs egyezés.")
| 23.26087
| 91
| 0.675701
| 134
| 1,070
| 5.216418
| 0.544776
| 0.051502
| 0.060086
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.046256
| 0.151402
| 1,070
| 45
| 92
| 23.777778
| 0.723568
| 0.171028
| 0
| 0
| 0
| 0
| 0.0876
| 0
| 0
| 0
| 0
| 0
| 0.04
| 1
| 0.04
| false
| 0.04
| 0.28
| 0
| 0.36
| 0.08
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
000803fab8613a18bfb601cb4e0f3433d97e4dce
| 966
|
py
|
Python
|
lisc/tests/test_data_utils.py
|
jasongfleischer/lisc
|
ed30be957d7ce13ccbac51092990869840e6f176
|
[
"Apache-2.0"
] | 1
|
2020-05-11T18:36:16.000Z
|
2020-05-11T18:36:16.000Z
|
lisc/tests/test_data_utils.py
|
jasongfleischer/lisc
|
ed30be957d7ce13ccbac51092990869840e6f176
|
[
"Apache-2.0"
] | null | null | null |
lisc/tests/test_data_utils.py
|
jasongfleischer/lisc
|
ed30be957d7ce13ccbac51092990869840e6f176
|
[
"Apache-2.0"
] | null | null | null |
"""Tests for the data utilities from lisc."""
from lisc.data.utils import *
###################################################################################################
###################################################################################################
def test_count_elements():
tdat = ['a', 'b', 'a', None]
out = count_elements(tdat)
assert out['a'] == 2
assert out['b'] == 1
assert None not in out
def test_combine_lists():
tdat = [['a', 'b'], None, ['c', 'd']]
out = combine_lists(tdat)
assert out == ['a', 'b', 'c', 'd']
def test_convert_string():
string_words = 'The Last wOrd, in the bRain!'
words_out = convert_string(string_words)
expected = ['last', 'word', 'brain']
assert words_out == expected
def test_lower_list():
words = ['The', 'Cool', 'Project']
words_out = lower_list(words)
expected = ['the', 'cool', 'project']
assert words_out == expected
| 23.560976
| 99
| 0.471014
| 105
| 966
| 4.161905
| 0.361905
| 0.064073
| 0.077803
| 0.064073
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002541
| 0.1853
| 966
| 40
| 100
| 24.15
| 0.552732
| 0.040373
| 0
| 0.095238
| 0
| 0
| 0.113416
| 0
| 0
| 0
| 0
| 0
| 0.285714
| 1
| 0.190476
| false
| 0
| 0.047619
| 0
| 0.238095
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0008876f23a1dced29f967f65132c7d09b0756dc
| 2,316
|
py
|
Python
|
repos/system_upgrade/el8toel9/actors/firewalldcheckallowzonedrifting/actor.py
|
tmds/leapp-repository
|
7c9ea115a68530eb25f5c23d3fcadd60c501bf78
|
[
"Apache-2.0"
] | null | null | null |
repos/system_upgrade/el8toel9/actors/firewalldcheckallowzonedrifting/actor.py
|
tmds/leapp-repository
|
7c9ea115a68530eb25f5c23d3fcadd60c501bf78
|
[
"Apache-2.0"
] | 1
|
2022-03-07T15:34:11.000Z
|
2022-03-07T15:35:15.000Z
|
repos/system_upgrade/el8toel9/actors/firewalldcheckallowzonedrifting/actor.py
|
tmds/leapp-repository
|
7c9ea115a68530eb25f5c23d3fcadd60c501bf78
|
[
"Apache-2.0"
] | null | null | null |
from leapp import reporting
from leapp.actors import Actor
from leapp.models import FirewalldGlobalConfig, FirewallsFacts
from leapp.reporting import create_report, Report
from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
class FirewalldCheckAllowZoneDrifting(Actor):
"""
    This actor will check if AllowZoneDrifting=yes in firewalld.conf. This
    option has been removed in RHEL-9 and behavior is as if
    AllowZoneDrifting=no.
"""
name = 'firewalld_check_allow_zone_drifting'
consumes = (FirewallsFacts, FirewalldGlobalConfig)
produces = (Report,)
tags = (ChecksPhaseTag, IPUWorkflowTag)
def process(self):
# If firewalld is not enabled then don't bother the user about its
# configuration. This Report keys off a _default_ value and as such
# will trigger for all users that have not done one of the following:
# - disabled firewalld
# - manually set AllowZoneDrifting=no (as firewalld logs suggests)
#
for facts in self.consume(FirewallsFacts):
if not facts.firewalld.enabled:
return
for facts in self.consume(FirewalldGlobalConfig):
if not facts.allowzonedrifting:
return
create_report([
reporting.Title('Firewalld Configuration AllowZoneDrifting Is Unsupported'),
reporting.Summary('Firewalld has enabled configuration option '
'"{conf_key}" which has been removed in RHEL-9. '
'New behavior is as if "{conf_key}" was set to "no".'.format(
conf_key='AllowZoneDrifting')),
reporting.Severity(reporting.Severity.HIGH),
reporting.Tags([reporting.Tags.SANITY, reporting.Tags.FIREWALL]),
reporting.Flags([reporting.Flags.INHIBITOR]),
reporting.ExternalLink(
url='https://access.redhat.com/articles/4855631',
title='Changes in firewalld related to Zone Drifting'),
reporting.Remediation(
hint='Set AllowZoneDrifting=no in /etc/firewalld/firewalld.conf',
commands=[['sed -i "s/^AllowZoneDrifting=.*/AllowZoneDrifting=no/" '
'/etc/firewalld/firewalld.conf']]),
])
| 44.538462
| 91
| 0.642055
| 240
| 2,316
| 6.15
| 0.470833
| 0.030488
| 0.01897
| 0.02168
| 0.056911
| 0.028455
| 0
| 0
| 0
| 0
| 0
| 0.00538
| 0.277634
| 2,316
| 51
| 92
| 45.411765
| 0.876868
| 0.189983
| 0
| 0.058824
| 0
| 0
| 0.258798
| 0.075799
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029412
| false
| 0
| 0.147059
| 0
| 0.382353
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0009620a33b624fda2004552df089e8ea26f0972
| 665
|
py
|
Python
|
eeve/eeve actions/list_dir.py
|
vMarcelino/eeve
|
7dcfa17d34480f5c120ce963680babffff8ab412
|
[
"Apache-2.0"
] | 1
|
2019-10-11T18:42:48.000Z
|
2019-10-11T18:42:48.000Z
|
eeve/eeve actions/list_dir.py
|
vMarcelino/eeve
|
7dcfa17d34480f5c120ce963680babffff8ab412
|
[
"Apache-2.0"
] | null | null | null |
eeve/eeve actions/list_dir.py
|
vMarcelino/eeve
|
7dcfa17d34480f5c120ce963680babffff8ab412
|
[
"Apache-2.0"
] | 1
|
2019-10-11T18:42:49.000Z
|
2019-10-11T18:42:49.000Z
|
import os
def run(path: str, return_full_path: bool = False):
"""Gets all files and folders from a path and stores them into $file_list
Arguments:
path {str} -- The path to get files and folders from
Keyword Arguments:
return_full_path {bool} -- True to return the full path of the file instead of just the file name (default: {False})
Returns:
file_list {List[str]} -- list of files and folders
"""
result = os.listdir(path)
if return_full_path:
for i, f in enumerate(result):
result[i] = os.path.join(path, f)
return {'file_list': result}
actions = {"list dir": run}
| 27.708333
| 124
| 0.627068
| 98
| 665
| 4.163265
| 0.459184
| 0.078431
| 0.102941
| 0.088235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.276692
| 665
| 23
| 125
| 28.913043
| 0.848233
| 0.518797
| 0
| 0
| 0
| 0
| 0.062271
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.125
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0009cacc81bd5d1ceb8972e6ec2ff4235cfdb2ad
| 11,938
|
py
|
Python
|
tests/test_workspaces.py
|
jeokrohn/wxc_sdk
|
e28b7e0f870d17b7f9a79ad9a4b8af221e58f8e9
|
[
"MIT"
] | null | null | null |
tests/test_workspaces.py
|
jeokrohn/wxc_sdk
|
e28b7e0f870d17b7f9a79ad9a4b8af221e58f8e9
|
[
"MIT"
] | null | null | null |
tests/test_workspaces.py
|
jeokrohn/wxc_sdk
|
e28b7e0f870d17b7f9a79ad9a4b8af221e58f8e9
|
[
"MIT"
] | 1
|
2022-03-29T18:56:59.000Z
|
2022-03-29T18:56:59.000Z
|
"""
Test for workspaces API
"""
# TODO: tests for authorization codes
import random
from collections.abc import Generator
from concurrent.futures import ThreadPoolExecutor
from contextlib import contextmanager
from wxc_sdk.rest import RestError
from wxc_sdk.all_types import *
from .base import TestCaseWithLog
TEST_WORKSPACES_PREFIX = 'workspace test '
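# The test classes below cover the workspaces API end to end: listing, details,
# outgoing-permission auto transfer numbers, create/update, and delete.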
class TestList(TestCaseWithLog):
def test_001_list(self):
workspaces = list(self.api.workspaces.list())
print(f'got {len(workspaces)} workspaces')
print('\n'.join(w.json() for w in workspaces))
class TestDetails(TestCaseWithLog):
def test_001_all(self):
"""
details for all workspaces
"""
ws = self.api.workspaces
ws_list = ws.list()
with ThreadPoolExecutor() as pool:
details = list(pool.map(lambda w: ws.details(workspace_id=w.workspace_id),
ws_list))
print(f'got details for {len(details)} workspaces')
class TestOutgoingPermissionsAutoTransferNumbers(TestCaseWithLog):
def test_001_get_all(self):
"""
get outgoing permissions auto transfer numbers for all workspaces
"""
wsa = self.api.workspaces
tna = self.api.workspace_settings.permissions_out.transfer_numbers
targets = [ws for ws in wsa.list()
if ws.calling == CallingType.webex]
if not targets:
self.skipTest('Need some WxC enabled workspaces to run this test')
with ThreadPoolExecutor() as pool:
_ = list(pool.map(lambda ws: tna.read(person_id=ws.workspace_id),
targets))
print(f'outgoing permissions auto transfer numbers for {len(targets)} workspaces')
@contextmanager
def target_ws_context(self, use_custom_enabled: bool = True) -> Workspace:
"""
pick a random workspace and make sure that the outgoing permission settings are restored
:return:
"""
po = self.api.workspace_settings.permissions_out
targets = [ws for ws in self.api.workspaces.list()
if ws.calling == CallingType.webex]
if not targets:
self.skipTest('Need some WxC enabled workspaces to run this test')
random.shuffle(targets)
# if enable == False then we need a workspace where custom_enabled is not set. Else setting it to False
# will clear all existing customer settings and we want to avoid that side effect of the test
po_settings = None
target_ws = next((ws for ws in targets
if use_custom_enabled or
not (po_settings := po.read(person_id=ws.workspace_id)).use_custom_enabled),
None)
if target_ws is None:
self.skipTest('No WxC enabled workspace with use_custom_enabled==False')
if po_settings is None:
po_settings = po.read(person_id=target_ws.workspace_id)
try:
if use_custom_enabled:
# enable custom settings: else auto transfer numbers can't be set
po.configure(person_id=target_ws.workspace_id,
settings=OutgoingPermissions(use_custom_enabled=use_custom_enabled))
yield target_ws
finally:
# restore old settings
if use_custom_enabled:
po.configure(person_id=target_ws.workspace_id, settings=po_settings)
po_restored = po.read(person_id=target_ws.workspace_id)
self.assertEqual(po_settings, po_restored)
def test_002_update_wo_custom_enabled(self):
"""
updating auto transfer numbers requires use_custom_enabled to be set
:return:
"""
tna = self.api.workspace_settings.permissions_out.transfer_numbers
with self.target_ws_context(use_custom_enabled=False) as target_ws:
target_ws: Workspace
numbers = tna.read(person_id=target_ws.workspace_id)
try:
# change auto transfer number 1
update = numbers.copy(deep=True)
transfer = f'+4961007739{random.randint(0, 999):03}'
update.auto_transfer_number1 = transfer
tna.configure(person_id=target_ws.workspace_id, settings=update)
# verify update
updated = tna.read(person_id=target_ws.workspace_id)
# update should not work with use_custom_enabled == False
self.assertEqual(numbers, updated)
finally:
# restore old settings
tna.configure(person_id=target_ws.workspace_id, settings=numbers.configure_unset_numbers)
restored = tna.read(person_id=target_ws.workspace_id)
self.assertEqual(numbers, restored)
# try
# with
def test_003_update_one_number(self):
"""
try to update auto transfer numbers for a workspace
"""
tna = self.api.workspace_settings.permissions_out.transfer_numbers
with self.target_ws_context() as target_ws:
target_ws: Workspace
numbers = tna.read(person_id=target_ws.workspace_id)
try:
# change auto transfer number 1
update = numbers.copy(deep=True)
transfer = f'+496100773{random.randint(0, 9999):03}'
update.auto_transfer_number1 = transfer
tna.configure(person_id=target_ws.workspace_id, settings=update)
# verify update
updated = tna.read(person_id=target_ws.workspace_id)
# number should be equal; ignore hyphens in number returned by API
self.assertEqual(transfer, updated.auto_transfer_number1.replace('-', ''))
# other than that the updated numbers should be identical to the numbers before
updated.auto_transfer_number1 = numbers.auto_transfer_number1
self.assertEqual(numbers, updated)
finally:
# restore old settings
tna.configure(person_id=target_ws.workspace_id, settings=numbers.configure_unset_numbers)
restored = tna.read(person_id=target_ws.workspace_id)
self.assertEqual(numbers, restored)
# try
# with
def test_002_update_one_number_no_effect_on_other_numbers(self):
"""
try to update auto transfer numbers for a workspace. Verify that updating a single number doesn't affect the
other numbers
"""
tna = self.api.workspace_settings.permissions_out.transfer_numbers
with self.target_ws_context() as target_ws:
target_ws: Workspace
numbers = tna.read(person_id=target_ws.workspace_id)
try:
all_numbers_set = AutoTransferNumbers(auto_transfer_number1='+4961007738001',
auto_transfer_number2='+4961007738002',
auto_transfer_number3='+4961007738003')
tna.configure(person_id=target_ws.workspace_id, settings=all_numbers_set)
all_numbers_set = tna.read(person_id=target_ws.workspace_id)
# change auto transfer number 1
transfer = f'+496100773{random.randint(0, 9999):03}'
update = AutoTransferNumbers(auto_transfer_number1=transfer)
tna.configure(person_id=target_ws.workspace_id, settings=update)
# verify update
updated = tna.read(person_id=target_ws.workspace_id)
# number should be equal; ignore hyphens in number returned by API
self.assertEqual(transfer, updated.auto_transfer_number1.replace('-', ''))
# other than that the updated numbers should be identical to the numbers before
updated.auto_transfer_number1 = all_numbers_set.auto_transfer_number1
self.assertEqual(all_numbers_set, updated)
finally:
# restore old settings
tna.configure(person_id=target_ws.workspace_id, settings=numbers.configure_unset_numbers)
restored = tna.read(person_id=target_ws.workspace_id)
self.assertEqual(numbers, restored)
# try
# with
class TestCreateUpdate(TestCaseWithLog):
def new_names(self) -> Generator[str, None, None]:
ws_list = list(self.api.workspaces.list())
ws_names = set(w.display_name for w in ws_list)
new_gen = (name for i in range(1000)
if (name := f'{TEST_WORKSPACES_PREFIX}{i:03}') not in ws_names)
return new_gen
@contextmanager
def target(self, no_edge: bool = False):
ws = self.api.workspaces
ws_list = list(ws.list())
if no_edge:
ws_list = [ws for ws in ws_list
if ws.calling != CallingType.edge_for_devices]
targat_ws = random.choice(ws_list)
targat_ws = ws.details(workspace_id=targat_ws.workspace_id)
try:
yield targat_ws
finally:
ws.update(workspace_id=targat_ws.workspace_id, settings=targat_ws)
restored = ws.details(workspace_id=targat_ws.workspace_id)
self.assertEqual(targat_ws, restored)
def test_001_trivial(self):
"""
create workspace with minimal settings
"""
ws = self.api.workspaces
name = next(self.new_names())
settings = Workspace.create(display_name=name)
workspace = ws.create(settings=settings)
print(f'new workspace: {workspace.json()}')
self.assertEqual(name, workspace.display_name)
def test_002_edge_for_devices(self):
"""
create workspace with edge_for_devices
"""
ws = self.api.workspaces
name = next(self.new_names())
settings = Workspace(display_name=name, calling=CallingType.edge_for_devices)
workspace = ws.create(settings=settings)
print(f'new workspace: {workspace.json()}')
self.assertEqual(name, workspace.display_name)
def test_003_change_name_full(self):
"""
change name of a workspace, full settings
"""
ws = self.api.workspaces
with self.target(no_edge=True) as target_ws:
target_ws: Workspace
settings: Workspace = target_ws.copy(deep=True)
new_name = next(self.new_names())
settings.display_name = new_name
after = ws.update(workspace_id=target_ws.workspace_id,
settings=settings)
self.assertEqual(new_name, after.display_name)
def test_004_change_name_name_only(self):
"""
change name of a workspace, only name update
"""
ws = self.api.workspaces
with self.target(no_edge=True) as target_ws:
target_ws: Workspace
new_name = next(self.new_names())
settings = Workspace(display_name=new_name)
after = ws.update(workspace_id=target_ws.workspace_id,
settings=settings)
self.assertEqual(new_name, after.display_name)
class TestDelete(TestCaseWithLog):
def test_001_delete_one(self):
"""
delete a random workspace
"""
ws = self.api.workspaces
ws_list = list(ws.list(display_name=TEST_WORKSPACES_PREFIX))
if not ws_list:
self.skipTest('No test workspace to delete')
target = random.choice(ws_list)
ws.delete_workspace(workspace_id=target.workspace_id)
with self.assertRaises(RestError) as exc:
ws.details(workspace_id=target.workspace_id)
rest_error: RestError = exc.exception
self.assertEqual(404, rest_error.response.status_code)
| 43.410909
| 116
| 0.627576
| 1,399
| 11,938
| 5.13867
| 0.149392
| 0.045625
| 0.050633
| 0.060787
| 0.594102
| 0.542774
| 0.501182
| 0.496453
| 0.475588
| 0.415774
| 0
| 0.017116
| 0.295276
| 11,938
| 274
| 117
| 43.569343
| 0.837395
| 0.128916
| 0
| 0.469945
| 0
| 0
| 0.059267
| 0.013875
| 0
| 0
| 0
| 0.00365
| 0.087432
| 1
| 0.076503
| false
| 0
| 0.038251
| 0
| 0.147541
| 0.032787
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
000aa2371b1616577368d0ba5de43105bfebe942
| 1,935
|
py
|
Python
|
openpose/data/parse_tfrecord.py
|
calmisential/Pose_Estimation
|
f3546fcfdc81ef60708fbda5fc1eb499679fff2f
|
[
"MIT"
] | null | null | null |
openpose/data/parse_tfrecord.py
|
calmisential/Pose_Estimation
|
f3546fcfdc81ef60708fbda5fc1eb499679fff2f
|
[
"MIT"
] | null | null | null |
openpose/data/parse_tfrecord.py
|
calmisential/Pose_Estimation
|
f3546fcfdc81ef60708fbda5fc1eb499679fff2f
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import glob
from configuration import OpenPoseCfg as cfg
from openpose.data.augmentation import Transformer
def get_tfrecord_filenames(path):
print("从"+path+"中提取TFRecords文件:")
tfrecord_files = glob.glob(path + "*")
tfrecord_files.sort()
if not tfrecord_files:
raise ValueError("未找到TFRecords文件!")
for filename in tfrecord_files:
print(filename)
return tfrecord_files
def place_label_func(label):
paf_tr = label["pafs"]
kpt_tr = label["kpts"]
image = label["image"]
return image, (paf_tr, paf_tr, paf_tr, paf_tr, kpt_tr, kpt_tr)
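# TFRecordDataset builds the tf.data input pipeline: parse TFRecords, decode images,
# convert labels to tensors, batch, optionally augment, apply masks, and place the
# labels into the six output slots produced by place_label_func above.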
class TFRecordDataset:
def __init__(self, tfrecord_filenames, label_placement_func):
self.AUTOTUNE = tf.data.AUTOTUNE
self.label_place = label_placement_func
self.tfrecords = tfrecord_filenames
self.transformer = Transformer()
self.img_aug = cfg.image_aug_on
self.batch_size = cfg.batch_size
def generate(self):
dataset = tf.data.TFRecordDataset(filenames=self.tfrecords)
dataset = dataset.map(self.transformer.read_tfrecord, num_parallel_calls=self.AUTOTUNE)
dataset = dataset.map(self.transformer.read_image, num_parallel_calls=self.AUTOTUNE)
dataset = dataset.map(self.transformer.convert_label_to_tensors, num_parallel_calls=self.AUTOTUNE)
dataset = dataset.batch(self.batch_size)
if self.img_aug:
dataset = dataset.map(self.transformer.image_aug, num_parallel_calls=self.AUTOTUNE)
dataset = dataset.map(self.transformer.apply_mask, num_parallel_calls=self.AUTOTUNE)
dataset = dataset.map(self.label_place, num_parallel_calls=self.AUTOTUNE)
# dataset = dataset.repeat()
return dataset
def get_dataset():
tfrecord_files = get_tfrecord_filenames(cfg.train_tfrecords)
dataset = TFRecordDataset(tfrecord_files, place_label_func).generate()
return dataset
| 34.553571
| 106
| 0.724548
| 242
| 1,935
| 5.533058
| 0.27686
| 0.083645
| 0.076176
| 0.0941
| 0.302465
| 0.278566
| 0.233757
| 0.171023
| 0.171023
| 0.134429
| 0
| 0
| 0.186047
| 1,935
| 56
| 107
| 34.553571
| 0.850159
| 0.013437
| 0
| 0.04878
| 0
| 0
| 0.023585
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.121951
| false
| 0
| 0.097561
| 0
| 0.341463
| 0.04878
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
000df0f38e45fa3a6986ef9af7fbf9e539d0a092
| 689
|
py
|
Python
|
gbdxtools/images/quickbird.py
|
matthewhanson/gbdxtools
|
f07fed2ea2b8d62845f6cf83c3947d0c2a4c6daf
|
[
"MIT"
] | 81
|
2016-04-05T23:32:46.000Z
|
2022-01-02T21:21:09.000Z
|
gbdxtools/images/quickbird.py
|
matthewhanson/gbdxtools
|
f07fed2ea2b8d62845f6cf83c3947d0c2a4c6daf
|
[
"MIT"
] | 624
|
2016-04-06T22:22:01.000Z
|
2022-01-03T17:48:50.000Z
|
gbdxtools/images/quickbird.py
|
matthewhanson/gbdxtools
|
f07fed2ea2b8d62845f6cf83c3947d0c2a4c6daf
|
[
"MIT"
] | 66
|
2016-04-13T22:45:37.000Z
|
2022-01-03T18:03:26.000Z
|
from gbdxtools.images.worldview import WorldViewImage
from gbdxtools.images.geoeye01 import GeoEyeDriver
from gbdxtools.images.util import vector_services_query
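# Maps product/band type names to the colorInterpretation values used when querying
# vector services for the parts of an IDAHO image.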
band_types = {
'MS': 'BGRN',
'Panchromatic': 'PAN',
'Pan': 'PAN',
'pan': 'PAN'
}
class QB02Driver(GeoEyeDriver):
pass
class QB02(WorldViewImage):
__Driver__ = QB02Driver
@property
def _rgb_bands(self):
return [2,1,0]
@staticmethod
def _find_parts(cat_id, band_type):
query = "item_type:IDAHOImage AND attributes.catalogID:{} " \
"AND attributes.colorInterpretation:{}".format(cat_id, band_types[band_type])
return vector_services_query(query)
| 25.518519
| 93
| 0.690856
| 78
| 689
| 5.858974
| 0.576923
| 0.052516
| 0.124726
| 0.052516
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019964
| 0.20029
| 689
| 26
| 94
| 26.5
| 0.809437
| 0
| 0
| 0
| 0
| 0
| 0.172714
| 0.081277
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0.047619
| 0.142857
| 0.047619
| 0.47619
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
000f226aca878e3b01ee23b36d3e3744fe747d69
| 1,137
|
py
|
Python
|
fastISM/models/bpnet.py
|
kundajelab/fastISM
|
1573feccba1ad5d9f1cee508f5bb03c4aa09bb2b
|
[
"MIT"
] | 12
|
2020-09-20T17:03:48.000Z
|
2022-03-16T06:51:52.000Z
|
fastISM/models/bpnet.py
|
kundajelab/fastISM
|
1573feccba1ad5d9f1cee508f5bb03c4aa09bb2b
|
[
"MIT"
] | 5
|
2020-10-24T20:43:45.000Z
|
2022-02-25T19:40:47.000Z
|
fastISM/models/bpnet.py
|
kundajelab/fastISM
|
1573feccba1ad5d9f1cee508f5bb03c4aa09bb2b
|
[
"MIT"
] | 2
|
2020-10-14T05:18:55.000Z
|
2022-02-21T07:34:14.000Z
|
import tensorflow as tf
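# BPNet-style architecture: a wide initial Conv1D, a stack of residually connected
# dilated Conv1Ds, then per-task profile-shape (Conv2DTranspose) and total-counts
# (Dense) output heads.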
def bpnet_model(seqlen=1000, numchars=4, num_dilated_convs=9, num_tasks=1,
name='bpnet_model'):
# original as per https://www.biorxiv.org/content/10.1101/737981v1.full.pdf
inp = tf.keras.layers.Input(shape=(seqlen, 4))
x = tf.keras.layers.Conv1D(
64, kernel_size=25, padding='same', activation='relu')(inp)
for i in range(num_dilated_convs):
conv_x = tf.keras.layers.Conv1D(
64, kernel_size=3, padding='same', activation='relu', dilation_rate=2**i)(x)
x = tf.keras.layers.Add()([conv_x, x])
bottleneck = x
# heads
outputs = []
for _ in range(num_tasks):
# profile shape head
px = tf.keras.layers.Reshape((-1, 1, 64))(bottleneck)
px = tf.keras.layers.Conv2DTranspose(
1, kernel_size=(25, 1), padding='same')(px)
outputs.append(tf.keras.layers.Flatten()(px))
# total counts head
cx = tf.keras.layers.GlobalAvgPool1D()(bottleneck)
outputs.append(tf.keras.layers.Dense(1)(cx))
model = tf.keras.Model(inputs=inp, outputs=outputs)
return model
| 34.454545
| 88
| 0.62533
| 158
| 1,137
| 4.405063
| 0.455696
| 0.100575
| 0.168103
| 0.060345
| 0.166667
| 0.091954
| 0.091954
| 0.091954
| 0
| 0
| 0
| 0.047891
| 0.228672
| 1,137
| 32
| 89
| 35.53125
| 0.745724
| 0.102023
| 0
| 0
| 0
| 0
| 0.030512
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.047619
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
000fb685fc9f26f073890df0d180e999e12bb012
| 801
|
py
|
Python
|
leetcode/108-Convert-Sorted-Array-To-Binary-Search-Tree/answer.py
|
vaishali-bariwal/Practice-Coding-Questions
|
747bfcb1cb2be5340daa745f2b9938f0ee87c9ac
|
[
"Unlicense"
] | 25
|
2018-05-22T15:18:50.000Z
|
2022-01-08T02:41:46.000Z
|
leetcode/108-Convert-Sorted-Array-To-Binary-Search-Tree/answer.py
|
vaishali-bariwal/Practice-Coding-Questions
|
747bfcb1cb2be5340daa745f2b9938f0ee87c9ac
|
[
"Unlicense"
] | 1
|
2019-05-24T16:55:27.000Z
|
2019-05-24T16:55:27.000Z
|
leetcode/108-Convert-Sorted-Array-To-Binary-Search-Tree/answer.py
|
vaishali-bariwal/Practice-Coding-Questions
|
747bfcb1cb2be5340daa745f2b9938f0ee87c9ac
|
[
"Unlicense"
] | 18
|
2018-09-20T15:39:26.000Z
|
2022-03-02T21:38:22.000Z
|
#!/usr/bin/env python3
#-------------------------------------------------------------------------------
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def sortedArrayToBST(self, nums):
"""
:type nums: List[int]
:rtype: TreeNode
"""
def help(left, right):
if left > right: return None
mid = (left + right) // 2
root = TreeNode(nums[mid])
root.left = help(left, mid-1)
root.right = help(mid+1, right)
return root
return help(0, len(nums)-1)
#-------------------------------------------------------------------------------
# Testing
| 27.62069
| 80
| 0.400749
| 75
| 801
| 4.226667
| 0.493333
| 0.085174
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010772
| 0.304619
| 801
| 28
| 81
| 28.607143
| 0.558348
| 0.469413
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0011bad4390288b5a901919fe11d0ebf83273af9
| 596
|
py
|
Python
|
tests/test_filter.py
|
rdemaria/xpart
|
35fe06eeb508991dfe1dd23685331f8347d0b603
|
[
"MIT"
] | 1
|
2021-09-07T14:34:10.000Z
|
2021-09-07T14:34:10.000Z
|
tests/test_filter.py
|
rdemaria/xpart
|
35fe06eeb508991dfe1dd23685331f8347d0b603
|
[
"MIT"
] | null | null | null |
tests/test_filter.py
|
rdemaria/xpart
|
35fe06eeb508991dfe1dd23685331f8347d0b603
|
[
"MIT"
] | 5
|
2021-11-04T08:23:43.000Z
|
2022-03-16T10:34:23.000Z
|
import numpy as np
import xobjects as xo
import xpart as xp
def test_basics():
for context in xo.context.get_test_contexts():
print(f"Test {context.__class__}")
p1 = xp.Particles(x=[1,2,3], px=[10, 20, 30],
mass0=xp.ELECTRON_MASS_EV,
_context=context)
mask = p1.x > 1
p2 = p1.filter(mask)
assert p2._buffer.context == context
assert p2._capacity == 2
dct = p2.to_dict()
assert dct['mass0'] == xp.ELECTRON_MASS_EV
assert np.all(dct['px'] == np.array([20., 30.]))
| 25.913043
| 56
| 0.552013
| 83
| 596
| 3.783133
| 0.53012
| 0.012739
| 0.095541
| 0.121019
| 0.133758
| 0
| 0
| 0
| 0
| 0
| 0
| 0.059113
| 0.318792
| 596
| 22
| 57
| 27.090909
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0.052013
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.0625
| false
| 0
| 0.1875
| 0
| 0.25
| 0.0625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
001513d4c6890aca681f0ade18ed556b34353f85
| 4,550
|
py
|
Python
|
main.py
|
NimaVahdat/Image-Categorization
|
4addce895b14c0c663e3ee317ffcd802b774452b
|
[
"MIT"
] | null | null | null |
main.py
|
NimaVahdat/Image-Categorization
|
4addce895b14c0c663e3ee317ffcd802b774452b
|
[
"MIT"
] | null | null | null |
main.py
|
NimaVahdat/Image-Categorization
|
4addce895b14c0c663e3ee317ffcd802b774452b
|
[
"MIT"
] | null | null | null |
from utils.loader import Loader
from utils.model import DeepSNN
import torch
import os
def feature_extraction(prop):
name = prop["name"]
epochs_l1 = prop["epochs_l1"]
epochs_l2 = prop["epochs_l2"]
trainset, testset = Loader(name)
model = DeepSNN(prop)
# Training The First Layer
print("-------Training the first layer-------")
if os.path.isfile(name+"_Layer1.net"):
model.load_state_dict(torch.load(name+"_Layer1.net"))
print("Loaded from disck!")
else:
for epoch in range(epochs_l1):
print("Epoch:", epoch)
for data,_ in trainset:
model.train_model(data, 1)
print("\nDone!")
torch.save(model.state_dict(), name+"_Layer1.net")
# Training The Second Layer
print("-------Training the second layer-------")
if os.path.isfile(name+"_Layer2.net"):
model.load_state_dict(torch.load(name+"_Layer2.net"))
print("Loaded from disck!")
else:
for epoch in range(epochs_l2):
print("Epoch:", epoch)
for data,_ in trainset:
model.train_model(data, 2)
print("\nDone!")
torch.save(model.state_dict(), name+"_Layer2.net")
# Classification on trainset and testset
# Get train data
for data,target in trainset:
train_X, train_y = model.test(data, target, 2)
# Get test data
for data,target in testset:
test_X, test_y = model.test(data, target, 2)
return train_X, train_y, test_X, test_y, (model.conv1.weight, model.conv2.weight)
def Classification(train_X, train_y, test_X, test_y, C=2.4):
# SVM
from sklearn.svm import LinearSVC
clf = LinearSVC(C=C)
clf.fit(train_X, train_y)
predicted_train = clf.predict(train_X)
predicted_test = clf.predict(test_X)
return predicted_train, predicted_test
def performance(x, y, predict):
correct = 0
silence = 0
for i in range(len(predict)):
if x[i].sum() == 0:
silence += 1
else:
if predict[i] == y[i]:
correct += 1
return (correct/len(x), (len(x)-(correct+silence))/len(x), silence/len(x))
def confussion_matrix(test_y, predicted_test, labels):
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
cm = confusion_matrix(test_y, predicted_test)
cmd_obj = ConfusionMatrixDisplay(cm, display_labels=labels)
# print(cm)
cmd_obj.plot()
plt.show()
# %%
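# Caltech run: two-layer SNN feature extraction, linear SVM classification,
# performance metrics, and a confusion matrix plot (the MNIST block below follows the same steps).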
Caltech = { "name" : "Caltech",
"epochs_l1" : 20,
"epochs_l2" : 100,
"weight_mean" : 0.8,
"weight_std" : 0.05,
"lr" : (0.005, -0.0025),
"in_channel1" : 4,
"in_channel2" : 40,
"out_channel" : 150,
"k1" : 10,
"k2" : 25,
"r1" : 0,
"r2" : 2,}
train_X, train_y, test_X, test_y, weights = feature_extraction(Caltech)
predicted_train, predicted_test = Classification(train_X, train_y, test_X, test_y)
n = performance(train_X, train_y, predicted_train)
m = performance(test_X, test_y, predicted_test)
print(n)
print(m)
labels = ['Airplane', 'Car_side', 'Faces_easy', 'Motorbikes']
confussion_matrix(test_y, predicted_test, labels)
# %%
MNIST = {"name" : "MNIST",
"epochs_l1":2,
"epochs_l2":20,
"weight_mean" : 0.8,
"weight_std" : 0.05,
"lr" : (0.004, -0.003),
"in_channel1" : 2,
"in_channel2" : 32,
"out_channel" : 150,
"k1" : 5,
"k2" : 8,
"r1" : 2,
"r2" : 1,}
train_X, train_y, test_X, test_y, weights = feature_extraction(MNIST)
predicted_train, predicted_test = Classification(train_X, train_y, test_X, test_y)
n = performance(train_X, train_y, predicted_train)
m = performance(test_X, test_y, predicted_test)
print(n)
print(m)
labels = ['0','1','2','3','4','5','6','7','8','9']
confussion_matrix(test_y, predicted_test, labels)
# %%
# import cv2
# import numpy as np
# w1, w2 = weights
# w1 = torch.reshape(w1, (160, 5, 5))
# # w2 = torch.reshape(w2, (6000, 2, 2))
# def features_pic(w, i):
# # w = torch.squeeze(w)
# w -= w.min()
# w = (w/w.max()) * 255
# pic = cv2.resize(np.array(w), (100, 100))
# cv2.imwrite("features/feature" + str(i) + ".jpg", pic)
# for i in range(len(w1)):
# features_pic(w1[i], i)
| 28.980892
| 86
| 0.575824
| 605
| 4,550
| 4.147107
| 0.247934
| 0.025907
| 0.043842
| 0.047828
| 0.479075
| 0.437625
| 0.392188
| 0.376246
| 0.274213
| 0.259864
| 0
| 0.042404
| 0.27956
| 4,550
| 157
| 87
| 28.980892
| 0.723002
| 0.112527
| 0
| 0.281553
| 0
| 0
| 0.118486
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038835
| false
| 0
| 0.067961
| 0
| 0.135922
| 0.116505
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
001659507468ba211846a086bb3af6d259d15e23
| 409
|
py
|
Python
|
1704-determine-if-string-halves-are-alike/1704-determine-if-string-halves-are-alike.py
|
hyeseonko/LeetCode
|
48dfc93f1638e13041d8ce1420517a886abbdc77
|
[
"MIT"
] | 2
|
2021-12-05T14:29:06.000Z
|
2022-01-01T05:46:13.000Z
|
1704-determine-if-string-halves-are-alike/1704-determine-if-string-halves-are-alike.py
|
hyeseonko/LeetCode
|
48dfc93f1638e13041d8ce1420517a886abbdc77
|
[
"MIT"
] | null | null | null |
1704-determine-if-string-halves-are-alike/1704-determine-if-string-halves-are-alike.py
|
hyeseonko/LeetCode
|
48dfc93f1638e13041d8ce1420517a886abbdc77
|
[
"MIT"
] | null | null | null |
class Solution:
def halvesAreAlike(self, s: str) -> bool:
vowel = {'a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U'}
first = s[:int(len(s)/2)]
second = s[int(len(s)/2):]
firstsum = sum([1 for f in first if f in vowel])
secondsum = sum([1 for s in second if s in vowel])
if firstsum==secondsum:
return True
else:
return False
| 37.181818
| 66
| 0.484108
| 61
| 409
| 3.245902
| 0.508197
| 0.020202
| 0.030303
| 0.040404
| 0.141414
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014815
| 0.339853
| 409
| 11
| 67
| 37.181818
| 0.718519
| 0
| 0
| 0
| 0
| 0
| 0.02439
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0
| 0
| 0.363636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
00198cc9e3c841bb01a56c333dd3c279b3334a56
| 9,789
|
py
|
Python
|
honeycomb/worker_bee.py
|
agrc/honeycomb
|
a4227221759541b007c2d2a8dcfca5a40192eeff
|
[
"MIT"
] | 1
|
2018-06-07T13:17:40.000Z
|
2018-06-07T13:17:40.000Z
|
honeycomb/worker_bee.py
|
agrc/honeycomb
|
a4227221759541b007c2d2a8dcfca5a40192eeff
|
[
"MIT"
] | 24
|
2017-08-28T19:53:15.000Z
|
2022-03-28T21:36:37.000Z
|
honeycomb/worker_bee.py
|
agrc/honeycomb
|
a4227221759541b007c2d2a8dcfca5a40192eeff
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# * coding: utf8 *
'''
worker_bee.py
A module that contains logic for building traditional image-based caches.
'''
import os
import socket
import time
from os.path import join, dirname, realpath
from shutil import rmtree
import pygsheets
from datetime import date
import arcpy
from . import config, settings, update_data
from .messaging import send_email
spot_cache_name = 'spot cache'
error_001470_message = 'ERROR 001470: Failed to retrieve the job status from server. The Job is running on the server, please use the above URL to check the job status.\nFailed to execute (ManageMapServerCacheTiles).\n' # noqa
def parse_levels(levels_txt):
#: parse the levels parameter text into an array of scales
min, max = map(int, levels_txt.split('-'))
return settings.SCALES[min:max + 1]
def intersect_scales(scales, restrict_scales):
#: return the intersection of between scales and restrict_scales
intersection = set(scales) & set(restrict_scales)
return list(intersection)
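# WorkerBee drives a full tile-cache build: optional data update, a test-extent cache,
# per-extent and per-grid ManageMapServerCacheTiles runs with progress emails, retries
# for failed extents, and Google Sheets bookkeeping once caching is complete.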
class WorkerBee(object):
def __init__(self, s_name, missing_only=False, skip_update=False, skip_test=False, spot_path=False, levels=False):
print('caching {}'.format(s_name))
self.errors = []
self.start_time = time.time()
self.service_name = s_name
if not levels:
self.restrict_scales = settings.SCALES
else:
self.restrict_scales = parse_levels(levels)
try:
print('deleting previous *_GCS folder, if any')
rmtree(os.path.join(settings.CACHE_DIR, s_name + '_GCS'))
except Exception:
pass
if config.is_dev():
self.complete_num_bundles = 19
else:
self.complete_num_bundles = settings.COMPLETE_NUM_BUNDLES_LU[self.service_name]
ip = socket.gethostbyname(socket.gethostname())
self.preview_url = settings.PREVIEW_URL.format(ip, self.service_name)
self.service = os.path.join(config.get_ags_connection(), '{}.MapServer'.format(self.service_name))
self.email_subject = 'Cache Update ({})'.format(self.service_name)
if skip_update:
print('skipping data update...')
else:
update_data.main()
send_email(self.email_subject, 'Data update complete. Proceeding with caching...')
if skip_test:
print('skipping test cache...')
else:
self.cache_test_extent()
if missing_only:
print('caching empty tiles only...')
self.missing_only = missing_only
self.start_bundles = self.get_bundles_count()
if self.missing_only:
self.update_mode = 'RECREATE_EMPTY_TILES'
print('Caching empty tiles only')
else:
self.update_mode = 'RECREATE_ALL_TILES'
print('Caching all tiles')
if not spot_path:
self.cache(not levels)
else:
#: levels 0-17 include the entire state
print('spot caching levels 0-17...')
self.cache_extent(settings.SCALES[:18], spot_path, spot_cache_name)
#: levels 18-19 intersect with cache extent
print('intersecting spot cache polygon with level 18-19 cache extent...')
intersect = arcpy.analysis.Intersect([spot_path, join(settings.EXTENTSFGDB, settings.EXTENT_18_19)],
'in_memory/spot_cache_intersect',
join_attributes='ONLY_FID')
print('spot caching levels 18-19...')
self.cache_extent(settings.SCALES[18:], intersect, spot_cache_name)
def cache_extent(self, scales, aoi, name):
cache_scales = intersect_scales(scales, self.restrict_scales)
if len(cache_scales) == 0:
return
print('caching {} at {}'.format(name, cache_scales))
if config.is_dev() and name != spot_cache_name:
aoi = settings.TEST_EXTENT
try:
arcpy.server.ManageMapServerCacheTiles(self.service, cache_scales, self.update_mode, settings.NUM_INSTANCES, aoi)
except arcpy.ExecuteError as e:
if e.message == error_001470_message:
msg = 'ERROR 001470 thrown. Moving on and hoping the job completes successfully.'
print(msg)
send_email('Cache Warning (ERROR 001470)', 'e.message\n\narcpy.GetMessages:\n{}'.format(arcpy.GetMessages().encode('utf-8')))
else:
self.errors.append([cache_scales, aoi, name])
print(arcpy.GetMessages().encode('utf-8'))
send_email('Cache Update ({}) - arcpy.ExecuteError'.format(self.service_name), arcpy.GetMessages().encode('utf-8'))
def get_progress(self):
total_bundles = self.get_bundles_count()
bundles_per_hour = (total_bundles - self.start_bundles) / ((time.time() - self.start_time) / 60 / 60)
if bundles_per_hour != 0 and total_bundles > self.start_bundles:
hours_remaining = (self.complete_num_bundles - total_bundles) / bundles_per_hour
else:
self.start_time = time.time()
hours_remaining = '??'
percent = int(round(float(total_bundles) / self.complete_num_bundles * 100.00))
msg = '{} of {} ({}%) bundle files created.\nEstimated hours remaining: {}'.format(
total_bundles, self.complete_num_bundles, percent, hours_remaining)
print(msg)
return msg
def get_bundles_count(self):
totalfiles = 0
basefolder = os.path.join(settings.CACHE_DIR, self.service_name.replace('/', '_'), 'Layers', '_alllayers')
for d in os.listdir(basefolder):
if d != 'missing.jpg':
totalfiles += len(os.listdir(os.path.join(basefolder, d)))
return totalfiles
def cache_test_extent(self):
print('caching test extent')
cache_scales = intersect_scales(settings.SCALES, self.restrict_scales)
try:
arcpy.server.ManageMapServerCacheTiles(self.service, cache_scales, 'RECREATE_ALL_TILES', settings.NUM_INSTANCES, settings.TEST_EXTENT)
send_email('Cache Test Extent Complete ({})'.format(self.service_name), self.preview_url)
# if raw_input('Recache test extent (T) or continue with full cache (F): ') == 'T':
# self.cache_test_extent()
except arcpy.ExecuteError:
print(arcpy.GetMessages().encode('utf-8'))
send_email('Cache Test Extent Error ({}) - arcpy.ExecuteError'.format(self.service_name), arcpy.GetMessages().encode('utf-8'))
raise arcpy.ExecuteError
def cache(self, run_all_levels):
arcpy.env.workspace = settings.EXTENTSFGDB
for fc_name, scales in settings.CACHE_EXTENTS:
self.cache_extent(scales, fc_name, fc_name)
send_email(self.email_subject,
'Levels 0-9 completed.\n{}\n{}'.format(self.get_progress(), self.preview_url))
if config.is_dev():
settings.GRIDS = settings.GRIDS[:-4]
for grid in settings.GRIDS:
total_grids = int(arcpy.management.GetCount(grid[0])[0])
grid_count = 0
progress = ''
with arcpy.da.SearchCursor(grid[0], ['SHAPE@', 'OID@']) as cur:
for row in cur:
grid_count += 1
grid_percent = int(round((float(grid_count) / total_grids) * 100))
self.cache_extent([grid[1]], row[0], '{}: OBJECTID: {}'.format(grid[0], row[1]))
grit_percent_msg = 'Grids for this level completed: {}%'.format(grid_percent)
print(grit_percent_msg)
progress = self.get_progress()
send_email(self.email_subject, 'Level {} completed.\n{}\n{}\nNumber of errors: {}'.format(grid[0], progress, self.preview_url, len(self.errors)))
while (len(self.errors) > 0):
msg = 'Recaching errors. Errors left: {}'.format(len(self.errors))
print(msg)
send_email(self.email_subject, msg)
self.cache_extent(*self.errors.pop())
bundles = self.get_bundles_count()
if bundles < self.complete_num_bundles and run_all_levels:
msg = 'Only {} out of {} bundles completed. Recaching...'.format(bundles, self.complete_num_bundles)
print(msg)
send_email(self.email_subject, msg)
self.cache(True)
send_email(self.email_subject + ' Finished', 'Caching complete!\n\n{}'.format(self.preview_url))
print('updating google spreadsheets')
client = pygsheets.authorize(service_file=join(dirname(realpath(__file__)), 'service_account.json'))
sgid_sheet = client.open_by_key('11ASS7LnxgpnD0jN4utzklREgMf1pcvYjcXcIcESHweQ')
sgid_worksheet = sgid_sheet[0]
base_maps_sheet = client.open_by_key('1XnncmhWrIjntlaMfQnMrlcCTyl9e2i-ztbvqryQYXDc')
base_maps_worksheet = base_maps_sheet[0]
#: update sgid changelog
today = date.today().strftime(r'%m/%d/%Y')
matrix = sgid_worksheet.get_all_values(include_tailing_empty_rows=False, include_tailing_empty=False)
row = [today, 'Complete', self.service_name, 'Recache', 'Statewide cache rebuild and upload to GCP', 'stdavis', 'no', 'no', 'no', 'no', 'no', 'no', 'yes']
sgid_worksheet.insert_rows(len(matrix), values=row, inherit=True)
#: update base maps spreadsheet embedded in gis.utah.gov page
this_month = date.today().strftime(r'%b %Y')
results = base_maps_worksheet.find(self.service_name, matchEntireCell=True)
cell = results[0]
base_maps_worksheet.update_value((cell.row + 1, cell.col), this_month)
| 42.934211
| 227
| 0.635611
| 1,186
| 9,789
| 5.043845
| 0.250422
| 0.025744
| 0.027583
| 0.025744
| 0.194584
| 0.111668
| 0.071882
| 0.071882
| 0.051488
| 0.036443
| 0
| 0.014606
| 0.251609
| 9,789
| 227
| 228
| 43.123348
| 0.801938
| 0.053632
| 0
| 0.138554
| 0
| 0.006024
| 0.167856
| 0.022604
| 0
| 0
| 0
| 0
| 0
| 1
| 0.048193
| false
| 0.006024
| 0.060241
| 0
| 0.144578
| 0.120482
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
00214dc954dea8b8ef76726b14c1872ccfd1e59a
| 799
|
py
|
Python
|
robotarm/armservice/app.py
|
AmidBidee/Robot-Arm
|
cfacfc779b2f025846e9748167bcfb15ce207923
|
[
"MIT"
] | 1
|
2022-03-27T20:09:10.000Z
|
2022-03-27T20:09:10.000Z
|
robotarm/armservice/app.py
|
AmidBidee/Robot-Arm
|
cfacfc779b2f025846e9748167bcfb15ce207923
|
[
"MIT"
] | 4
|
2022-03-25T03:45:10.000Z
|
2022-03-29T14:31:16.000Z
|
robotarm/armservice/app.py
|
AmidBidee/RobotArm
|
cfacfc779b2f025846e9748167bcfb15ce207923
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
"""
RobotArm API service config file
"""
import pathlib
from robotarm.armservice.views import api_views
from flask import (
Flask,
make_response,
jsonify
)
from robotarm.armservice import getenv
# initialize flask app
app = Flask(__name__)
# register/mount blueprint
app.register_blueprint(api_views)
# allow missing trailing
app.url_map.strict_slashes = False
@app.errorhandler(404)
def not_found(error):
"""
Handle non existing objects
Args:
error: [description]
Returns:
JSON: json object
"""
e = {
"error": "Not Found"
}
return make_response(jsonify(e), 404)
if __name__ == '__main__':
host = getenv("ARM_API_HOST", "0.0.0.0")
port = getenv("ARM_API_PORT", "5555")
app.run(host=host, port=port)
| 17.755556
| 47
| 0.677096
| 104
| 799
| 4.971154
| 0.548077
| 0.011605
| 0.085106
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023659
| 0.206508
| 799
| 44
| 48
| 18.159091
| 0.791798
| 0.254068
| 0
| 0
| 0
| 0
| 0.102888
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.190476
| 0
| 0.285714
| 0.047619
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0023e5b95c57b280f82e4d979d1eecb37cba4ae9
| 4,685
|
py
|
Python
|
xbrr/edinet/reader/element_schema.py
|
5laps2go/xbrr
|
4c0824b53bfe971111d60e6c1ff4e36f4f4845a3
|
[
"MIT"
] | null | null | null |
xbrr/edinet/reader/element_schema.py
|
5laps2go/xbrr
|
4c0824b53bfe971111d60e6c1ff4e36f4f4845a3
|
[
"MIT"
] | null | null | null |
xbrr/edinet/reader/element_schema.py
|
5laps2go/xbrr
|
4c0824b53bfe971111d60e6c1ff4e36f4f4845a3
|
[
"MIT"
] | null | null | null |
from xbrr.base.reader.base_element_schema import BaseElementSchema
from bs4.element import NavigableString, Tag
import bs4
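# ElementSchema wraps XBRL taxonomy elements: read_schema parses <element> definitions
# from an .xsd file, and read_label_taxonomy resolves loc/label/labelArc entries from
# the label linkbase into (verbose) labels on those elements.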
class ElementSchema(BaseElementSchema):
def __init__(self,
name="", reference="", label="", alias="",
abstract="", data_type="",
period_type="", balance=""):
super().__init__()
self.name = name
self.reference = reference
self.label = label
self.alias = alias
self.abstract = abstract
self.period_type = period_type
self.balance = balance
self.verbose_label = ""
# data types:
# domain, textBlock, percent, perShare, boolean, date, decimal,
# monetary, nonNegativeInteger, shares, string
self.data_type = data_type
if data_type is not None and ':' in data_type:
self.data_type = data_type.split(':')[-1].replace('ItemType','')
def set_alias(self, alias):
self.alias = alias
return self
@classmethod
def create_from_reference(cls, reader, reference):
if not reader.xbrl_doc.has_schema: # for test purpose only
name = reference.split("#")[-1]
instance = cls(name=name, reference=reference)
return instance
instance = reader.get_schema_by_link(reference)
instance.reference = reference
return instance
@classmethod
def read_schema(cls, reader, xsduri):
xsd_dic = {}
xml = reader.read_uri(xsduri)
for element in xml.find_all("element"):
# <xsd:element id="jpcrp030000-asr_E00436-000_Subsidy" xbrli:balance="credit" xbrli:periodType="duration" abstract="false" name="Subsidy" nillable="true" substitutionGroup="xbrli:item" type="xbrli:monetaryItemType" />
instance = cls(name=element["id"], alias=element["name"],
data_type=element["type"],
period_type=element["xbrli:periodType"],
abstract=element["abstract"] if element.get("abstract") else "",
balance=element.get("xbrli:balance") if element.get("xbrli:balance") else "")
xsd_dic[element["id"]] = instance
return xsd_dic
@classmethod
def read_label_taxonomy(cls, reader, xsduri, xsd_dic):
label_xml = reader.read_label_of_xsd(xsduri)
loc_dic = {}
resource_dic = {}
def read_label(elem: bs4.element.Tag):
if elem.name == "loc":
attrs = elem.attrs
assert 'xlink:href' in attrs and 'xlink:label' in attrs
# href = jpcrp040300-q1r-001_E04251-000_2016-06-30_01_2016-08-12.xsd#jpcrp040300-q1r_E04251-000_ProvisionForLossOnCancellationOfContractEL
# label = ProvisionForLossOnCancellationOfContractEL
v = elem['xlink:href'].split('#')
assert len(v) == 2
loc_dic[elem['xlink:label']] = v[1]
elif elem.name == "label":
attrs = elem.attrs
if 'xlink:label' in attrs and 'xlink:role' in attrs:
label_role = "http://www.xbrl.org/2003/role/label"
verboseLabel_role = "http://www.xbrl.org/2003/role/verboseLabel"
if elem['xlink:role'] in [label_role, verboseLabel_role]:
resource_dic[elem['xlink:label']] = {'role': elem['xlink:role'], 'text': elem.text}
elif elem.name == "labelArc":
attrs = elem.attrs
if 'xlink:from' in attrs and 'xlink:to' in attrs and elem['xlink:to'] in resource_dic:
if elem['xlink:from'] in loc_dic and loc_dic[elem['xlink:from']] in xsd_dic:
ele = xsd_dic[loc_dic[elem['xlink:from']]]
res = resource_dic[elem['xlink:to']]
ele.set_label(**res) # Label(res['role'], res['text'])
for elem in label_xml.find_all('labelLink'): # "link:labelLink"
for child in elem.children:
if isinstance(child, Tag):
read_label(child)
def set_label(self, role, text):
if role.endswith('label'):
self.label = text
elif role.endswith('verboseLabel'):
self.verbose_label = text
def to_dict(self):
return {
"name": self.name,
"reference": self.reference,
"label": self.label,
"abstract": self.abstract,
"data_type": self.data_type,
"period_type": self.period_type,
"balance": self.balance
}
| 41.096491
| 229
| 0.56841
| 517
| 4,685
| 4.998066
| 0.253385
| 0.03096
| 0.02322
| 0.017415
| 0.093653
| 0.020124
| 0.020124
| 0
| 0
| 0
| 0
| 0.024876
| 0.313554
| 4,685
| 113
| 230
| 41.460177
| 0.778607
| 0.127001
| 0
| 0.11236
| 0
| 0
| 0.104683
| 0
| 0
| 0
| 0
| 0
| 0.022472
| 1
| 0.089888
| false
| 0
| 0.033708
| 0.011236
| 0.191011
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0024cde788e4fc1c63bad501dddfdfc712994f43
| 817
|
py
|
Python
|
app/models.py
|
james-muriithi/news-hub
|
6f0fee2ab6be5bba86c4309050592e000859f8db
|
[
"Unlicense"
] | null | null | null |
app/models.py
|
james-muriithi/news-hub
|
6f0fee2ab6be5bba86c4309050592e000859f8db
|
[
"Unlicense"
] | null | null | null |
app/models.py
|
james-muriithi/news-hub
|
6f0fee2ab6be5bba86c4309050592e000859f8db
|
[
"Unlicense"
] | null | null | null |
from datetime import datetime
class Sources:
"""
Sources class to define sources object
"""
def __init__(self, id, name, description, url, category, country):
self.id = id
self.name = name
self.description = description
self.url = url
self.category = category
self.country = country
class Articles:
"""
Articles class to define articles object
"""
def __init__(self, author, title, description, url, url_to_Image, published_at, content):
self.author = author
self.title = title
self.description = description
self.url = url
self.url_to_Image = url_to_Image
self.content = content
self.published_at = datetime.strptime(published_at, "%Y-%m-%dT%H:%M:%SZ").strftime("%B %d, %Y")
| 27.233333
| 103
| 0.623011
| 99
| 817
| 4.969697
| 0.333333
| 0.042683
| 0.060976
| 0.069106
| 0.162602
| 0.162602
| 0.162602
| 0
| 0
| 0
| 0
| 0
| 0.274174
| 817
| 30
| 103
| 27.233333
| 0.82968
| 0.096695
| 0
| 0.222222
| 0
| 0
| 0.038136
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.055556
| 0
| 0.277778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
002d82da21503f07067ace5a4397c6b6011e7cc0
| 4,709
|
py
|
Python
|
pybps/preprocess/trnsys.py
|
dtavan/PyBPS
|
92bd063daed78a7fcff1af954d7d90d0cde8dcfc
|
[
"BSD-3-Clause"
] | 9
|
2015-03-12T15:23:42.000Z
|
2021-12-21T13:01:42.000Z
|
pybps/preprocess/trnsys.py
|
dtavan/PyBPS
|
92bd063daed78a7fcff1af954d7d90d0cde8dcfc
|
[
"BSD-3-Clause"
] | 3
|
2015-09-20T17:31:09.000Z
|
2018-02-26T13:11:53.000Z
|
pybps/preprocess/trnsys.py
|
dtavan/PyBPS
|
92bd063daed78a7fcff1af954d7d90d0cde8dcfc
|
[
"BSD-3-Clause"
] | 3
|
2019-02-14T08:13:03.000Z
|
2020-12-10T07:04:41.000Z
|
"""
A set of functions required to pre-process TRNSYS simulation input files
"""
# Common imports
import os
import sys
import shutil
import re
# Custom imports
from pybps import util
# Handle Python 2/3 compatibility
from six.moves import configparser
import six
if six.PY2:
ConfigParser = configparser.SafeConfigParser
else:
ConfigParser = configparser.ConfigParser
def parse_deck_const(deck_abspath):
"""Parse constants in control cards and equations from TRNSYS deck file
Finds all constants in a TRNSYS deck file and stores constant name and
value in a dict.
Args:
deck_abspath: absolute path to TRNSYS deck file
Returns:
A dict containing all found constants and their values
"""
const_dict = {}
f = open(deck_abspath, 'r')
split_blocks_pat = re.compile(r'[*][-]+')
equa_pat = re.compile(r'[*]\sEQUATIONS\s"(.+?)"')
const_pat = re.compile(r'\b(\w+)\b\s=\s(\d+\.*\d*)\s')
with f:
data = f.read()
blocks = split_blocks_pat.split(data)
for block in blocks:
if block[0] == 'V':
match_par = const_pat.findall(block)
if match_par:
group = "Control Cards"
const_dict[group] = {}
for (m,v) in match_par:
const_dict[group][m] = v
else:
match_eq = equa_pat.findall(block)
if match_eq:
group = match_eq[0]
match_par = const_pat.findall(block)
if match_par:
const_dict[group] = {}
for (m,v) in match_par:
const_dict[group][m] = v
return const_dict
def prepare_deck_template(deck_abspath, param_list):
"""Prepare a template TRNSYS deck file for parametric analysis
Transforms a TRNSYS deck into a template file valid for parametric analysis
by replacing constant values with parameter search strings (the name of
the constant surrounded by '%' signs). The parameters are given in a list.
Args:
deck_abspath: absolute path to TRNSYS deck file
param_list: list of parameters to be included in template
Returns:
A valid template file for parametric analysis with PyBPS
"""
templ_deck_abspath = os.path.splitext(deck_abspath)[0] + "_Template.dck"
shutil.copyfile(deck_abspath, templ_deck_abspath)
f = open(templ_deck_abspath, 'r+')
with f:
data = f.read()
for par in param_list:
data = re.sub(r'(' + par + r')\s=\s(\d+\.*\d*)', r'\g<1> = %\g<1>%', data)
f.seek(0)
f.write(data)
f.truncate()
def gen_type56(model_abspath, select='all'):
"""Generate Type56 matrices and idf files
Calls TRNBUILD.exe with flags to generate matrices and IDF files.
Args:
model_abspath: absolute path to Type56 model file
select: selects which files should be generated by TRNBUILD.
'masks' generates the insolation matrix, 'vfm' generates the view factor
matrix, 'matrices' generates both
'idf' generates the IDF file (similar to the TRNBUILD 'export' function)
'all' generates everything
Returns:
Generated files.
"""
# Get information from config file
conf = ConfigParser()  # use the Py2/Py3-compatible alias defined at module level
conf_file = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'..\config.ini')
conf.read(conf_file)
trnbuild_path = os.path.abspath(conf.get('TRNSYS', 'TRNBuild_Path'))
trnsidf_path = os.path.abspath(conf.get('TRNSYS', 'trnsIDF_Path'))
# Get b17 file path from deck file
pattern = re.compile(r'ASSIGN "(.*b17)"')
with open(model_abspath, 'rU') as m_f:
temp = m_f.read()
match = pattern.search(temp)
# TRNBUILD is only called if Type56 is found in deck file.
if match:
b17_relpath = match.group(1)
b17_abspath = os.path.join(os.path.dirname(model_abspath), b17_relpath)
# Generate shading/insolation matrix
if select == 'all' or select == 'matrices' or select == 'masks':
cmd = [trnbuild_path, b17_abspath, '/N', '/masks']
util.run_cmd(cmd)
# Generate view factor matrix
if select == 'all' or select == 'matrices' or select == 'vfm':
cmd = [trnbuild_path, b17_abspath, '/N', '/vfm']
util.run_cmd(cmd)
# Generate trnsys3D idf file, to view geometry in Sketchup
if select == 'all' or select == 'idf':
cmd = [trnsidf_path, b17_abspath]
util.run_cmd(cmd)
| 31.393333
| 86
| 0.601826
| 607
| 4,709
| 4.546952
| 0.288303
| 0.039855
| 0.025362
| 0.022826
| 0.213406
| 0.161594
| 0.142754
| 0.121014
| 0.121014
| 0.032609
| 0
| 0.010593
| 0.298365
| 4,709
| 149
| 87
| 31.604027
| 0.824758
| 0.346358
| 0
| 0.263889
| 0
| 0
| 0.082168
| 0.017047
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.097222
| 0
| 0.152778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
002fac18e96ba58a8fc58e5945a2e85038de81e9
| 1,650
|
py
|
Python
|
gof/console.py
|
jul/game_of_life
|
0ffb798679f50dea27b55f8a630c437b6ee4d8f9
|
[
"Python-2.0"
] | 1
|
2015-01-13T13:42:32.000Z
|
2015-01-13T13:42:32.000Z
|
gof/console.py
|
jul/game_of_life
|
0ffb798679f50dea27b55f8a630c437b6ee4d8f9
|
[
"Python-2.0"
] | null | null | null |
gof/console.py
|
jul/game_of_life
|
0ffb798679f50dea27b55f8a630c437b6ee4d8f9
|
[
"Python-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Game of Life console.
small playground for learning python, or just having fun.
To use the console with an empty grid:
python -i -mgof.console
or
bpython -i -mgof.console
To use the console with a pseudo animation:
python -i -mgof.demo
Available variables:
* grid your playground a matrix of cellular automata
* patterns : pixel, still, oscillator, glider
they are singular patterns to play with in game of life.
* all_pattern : a list of all patterns (except pixel)
* matrix : the class name of grid is imported for educational purposes
* DEAD, ALIVE
Available functions:
* intro() : a short summary of all available functions
* bleach(...) a function to init the grid
* at(...) a function to draw a pattern on the grid
* rand_pattern() : a function to add random pattern in your grid
* evolve(...) make the game evolve for some time. If your terminal and/or
interactive python supports it, it will make a continuous animation
* use help(function_name) to know more yes it is a builtin ^_^
"""
from .matrix import matrix
from time import sleep
from .gof import glider, oscillator, still, pixel, all_pattern
from .gof import evolve, bleach, dirty, DEAD, ALIVE, at
#### Constants and globals
__all__ = [
"matrix","at",
"grid", "intro",
"glider", "oscillator", "still","pixel","all_pattern",
"evolve", "bleach", "dirty", "DEAD", "ALIVE" ]
x=10
y=30
def intro():
print(__doc__)
print("""
you are left with an empty grid of %dx%d to play with, have fun""" % (x,y))
grid=matrix(x,y,x*y*[DEAD])
if '__main__' == __name__:
print(__doc__)
| 28.448276
| 79
| 0.693939
| 254
| 1,650
| 4.405512
| 0.444882
| 0.013405
| 0.029491
| 0.02681
| 0.144772
| 0.064343
| 0
| 0
| 0
| 0
| 0
| 0.003765
| 0.195152
| 1,650
| 57
| 80
| 28.947368
| 0.838855
| 0.661818
| 0
| 0.111111
| 0
| 0
| 0.283122
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.222222
| 0
| 0.277778
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
00316d362ddde2a4019229bccffbafd03e33e693
| 1,129
|
py
|
Python
|
covid19uk/data/tier_data_test.py
|
sdwfrost/covid19uk
|
ffd59342d9daee2d819d2f7211afbe9713880612
|
[
"MIT"
] | 10
|
2020-03-21T22:36:24.000Z
|
2021-05-23T22:47:13.000Z
|
covid19uk/data/tier_data_test.py
|
sdwfrost/covid19uk
|
ffd59342d9daee2d819d2f7211afbe9713880612
|
[
"MIT"
] | 14
|
2020-03-27T19:24:51.000Z
|
2021-07-21T12:41:23.000Z
|
covid19uk/data/tier_data_test.py
|
sdwfrost/covid19uk
|
ffd59342d9daee2d819d2f7211afbe9713880612
|
[
"MIT"
] | 13
|
2020-03-21T17:17:20.000Z
|
2021-05-06T22:50:18.000Z
|
"""Tests Tier Data"""
import numpy as np
from covid.data import TierData
def test_url_tier_data():
config = {
"AreaCodeData": {
"input": "json",
"address": "https://services1.arcgis.com/ESMARspQHYMw9BZ9/arcgis/rest/services/LAD_APR_2019_UK_NC/FeatureServer/0/query?where=1%3D1&outFields=LAD19CD,LAD19NM&returnGeometry=false&returnDistinctValues=true&orderByFields=LAD19CD&outSR=4326&f=json",
"format": "ons",
"output": "processed_data/processed_lad19cd.csv",
"regions": ["E"],
},
"TierData": {
"input": "api",
"address": None,
"format": "api",
},
"GenerateOutput": {
"storeInputs": True,
"scrapedDataDir": "scraped_data",
"storeProcessedInputs": True,
},
"Global": {
"prependID": False,
"prependDate": False,
"inference_period": ["2020-10-12", "2021-01-04"],
},
}
xarr = TierData.process(config)
print("xarr", xarr)
np.testing.assert_array_equal(xarr.shape, [315, 84, 6])
| 30.513514
| 258
| 0.560673
| 109
| 1,129
| 5.688073
| 0.761468
| 0.025806
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.055901
| 0.28698
| 1,129
| 36
| 259
| 31.361111
| 0.714286
| 0.013286
| 0
| 0
| 0
| 0.033333
| 0.440433
| 0.032491
| 0
| 0
| 0
| 0
| 0.033333
| 1
| 0.033333
| false
| 0
| 0.066667
| 0
| 0.1
| 0.033333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
00333d9d6b82d73d347ef774e208f7a11fd780ab
| 1,550
|
py
|
Python
|
Baixar um arquivo Excel e mandar o resultado para emails.py
|
GustavooBueno/Projetos-Python-Automacao
|
43ec53040cd543746d88e8523fcffbdb69112ab7
|
[
"MIT"
] | null | null | null |
Baixar um arquivo Excel e mandar o resultado para emails.py
|
GustavooBueno/Projetos-Python-Automacao
|
43ec53040cd543746d88e8523fcffbdb69112ab7
|
[
"MIT"
] | null | null | null |
Baixar um arquivo Excel e mandar o resultado para emails.py
|
GustavooBueno/Projetos-Python-Automacao
|
43ec53040cd543746d88e8523fcffbdb69112ab7
|
[
"MIT"
] | null | null | null |
import pyautogui
import time
import pyperclip
import pandas as pd
#pyautogui.displayMousePosition()
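# Overview (the step comments below are in Portuguese): open a browser tab, navigate to
# a Google Drive folder, download the sales spreadsheet, compute revenue and quantity
# totals with pandas, then compose and send a summary email through Gmail via GUI automation.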
pyautogui.PAUSE = 1
#Passo 1
#Abrir uma nova aba
time.sleep(2)
pyautogui.hotkey('ctrl', 't')
#Entrar no link do sistema
link = "https://drive.google.com/drive/folders/149xknr9JvrlEnhNWO49zPcw0PW5icxga"
pyperclip.copy(link)
pyautogui.hotkey('ctrl', 'v')
pyautogui.press('enter')
#Step 2
time.sleep(5)
pyautogui.click(389, 270, clicks = 2)
time.sleep(2)
#Step 3
pyautogui.click(401, 337) #click the file
pyautogui.click(1713, 157) #click the 3-dot menu
pyautogui.click(1525, 561) #click download
time.sleep(10)
#Step 4
tabela = pd.read_excel(r'C:\Users\Pichau\Downloads\Vendas - Dez.xlsx')
faturamento = tabela['Valor Final'].sum()
quantidade = tabela['Quantidade'].sum()
#Step 5
time.sleep(2)
pyautogui.hotkey('ctrl', 't')
#Go to the system link
link = "https://mail.google.com/mail/u/0/#inbox"
pyperclip.copy(link)
pyautogui.hotkey('ctrl', 'v')
pyautogui.press('enter')
time.sleep(7)
pyautogui.click(33, 170)
pyautogui.write('gustavo.ibis.gb+diretoria@gmail.com')
pyautogui.press('tab')
pyautogui.press('tab')
assunto = 'Relatório de Vendas'
pyperclip.copy(assunto)
pyautogui.hotkey('ctrl', 'v')
pyautogui.press('tab')
texto_email = f"""
Prezados, bom dia
O faturamento de ontem foi de: R${faturamento:,.2f}
A quantidade de produtos foi de: R${quantidade:,.2f}
Abs
"""
pyperclip.copy(texto_email)
pyautogui.hotkey('ctrl', 'v')
pyautogui.hotkey('ctrl', 'enter')
| 25
| 82
| 0.713548
| 221
| 1,550
| 4.99095
| 0.457014
| 0.095195
| 0.12058
| 0.072529
| 0.267452
| 0.24116
| 0.210335
| 0.210335
| 0.210335
| 0.210335
| 0
| 0.042411
| 0.132903
| 1,550
| 62
| 83
| 25
| 0.778274
| 0.126452
| 0
| 0.355556
| 0
| 0
| 0.325273
| 0.052262
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.088889
| 0
| 0.088889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
00341cca0d8a5cb36317216f72b4964ad336e187
| 34,289
|
py
|
Python
|
brain_class.py
|
featureCreacle/Baka-battle
|
0588216ff08bec1f6d4e0679daa8ac70e7f3d83a
|
[
"MIT"
] | 1
|
2017-12-23T11:16:35.000Z
|
2017-12-23T11:16:35.000Z
|
brain_class.py
|
featureCreacle/Baka-battle
|
0588216ff08bec1f6d4e0679daa8ac70e7f3d83a
|
[
"MIT"
] | null | null | null |
brain_class.py
|
featureCreacle/Baka-battle
|
0588216ff08bec1f6d4e0679daa8ac70e7f3d83a
|
[
"MIT"
] | null | null | null |
import tkinter
import json
from math import *
from random import *
from tkinter import *
class brain_abstract():
''' # one layer is a list of tuples: number of neurons in the group,
# plus (True) or minus (False) sign on the group's neuron outputs,
# activation function number for the group's neurons,
# whether the neurons have a discrete output (True - yes)
# tuple with the numbers of the input groups this neuron group is connected to
# length of the group's input vector
# normalization coefficient for the group's output
laysConfigs = [ [(15, True, 1, False, (0,), 25, 10), (5, True, 1, False, (1,), 6, 10)],
[( 7, True, 0, True, (0,1), 20, 10), (4, True, 1, False, (0,1), 20, 10)],
[( 5, True, 1, False, (0,1), 11, 5), (15, False, 0, False, (0,1), 11, 5)],
[( 6, True, 1, False, (0,1), 20, 10)],
[( 2, True, 1, True, (0,), 6, 0)] ]'''
def __init__(self, laysConfigs = [], NNtemp = 70, cooldownTemp = 50):
self.lays = []
self.lay_counts = 0
self.NN_learning_temp = NNtemp
self.NN_cooldown_temp = cooldownTemp
if not laysConfigs:
# one layer is a list of tuples: number of neurons in the group,
# plus (True) or minus (False) sign on the group's neuron outputs,
# activation function number for the group's neurons,
# whether the neurons have a discrete output (True - yes)
# tuple with the numbers of the input groups this neuron group is connected to
# length of the group's input vector
# normalization coefficient for the group's output
laysConfigs = [ [(10, True, 1, False, (0,), 25, 10), (7, False, 1, False, (0,), 25, 10), (3, True, 1, False, (1,), 3, 10)],
[(10, False, 0, True, (0,1),17, 0), (7, True, 1, True, (0,1,2), 20, 0)],
#[( 5, True, 1, False, (0,1), 14, 5), (15, False, 0, False, (0,1), 14, 5)],
[( 5, True, 1, False, (0,1), 17, 10)],
[( 2, True, 1, True, (0,), 5, 0)] ]
for lay_conf in laysConfigs:
self.lays.append(lay_abstact(lay_conf))
self.frozen_mind = {'scheme':laysConfigs, 'weights' :self.get_all_synapse_weight()}
def train(self, input = [], output = []):
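# forward pass, keeping every layer's output; then walk the layers backwards with a per-layer cooling temperature,
# letting each layer adjust its weights via fcingCooldown and hand back a corrected input that becomes the previous layer's desired output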
lays_output = []
lays_output.append(input)
lay_input = input
for lay in self.lays:
lays_output.append( lay.get_excited(lay_input) )
lay_input = lays_output[-1]
desire_lay_output = output
i = len(self.lays)
j = 0
while i > 0:
lay_out = lays_output[i]
lay_in = lays_output[i-1]
lay_temp = self.NN_learning_temp - (j*self.NN_cooldown_temp)
changingNeuronsCount = round(self.lays[i-1].neuron_count * lay_temp / 100)
Mu = lay_temp / ( len(self.lays) * 100 )
changingNeuronsWeightCount = round(self.lays[i-2].neuron_count * (self.NN_learning_temp - (j*self.NN_cooldown_temp) ) / 100)
cool_inp = self.lays[i-1].fcingCooldown(lay_in, lay_out, desire_lay_output,
changingNeuronsCount, changingNeuronsWeightCount, Mu)
desire_lay_output = cool_inp
i-=1
j+=1
def get_err_out(self, input = [[],], output = [[],]):
return (1,1)
def guess(self, input = []):
lay_output = []
lay_input = input
for lay in self.lays:
lay_output = lay.get_excited(lay_input)
lay_input = lay_output
return lay_output
def learn(self, input = [], output = [], maxSteps = 3):
desire_lay_output = output
net_output = self.guess(input)
i = 0
while not self.isEqualOuts(net_output, desire_lay_output) and i < maxSteps:
self.train(input, output)
net_output = self.guess(input)
i+=1
def think(self):
pass
def isEqualOuts(self, out1, out2):
try:
len1 = len(out1)
i = 0
while i < len1:
len2 = len(out1[i])
j = 0
while j < len2:
if out1[i][j] != out2[i][j]:
return False
j+=1
i+=1
except (IndexError, TypeError):  # mismatched shapes mean the outputs differ
return False
return True
def get_lay_ref(self, layNum = 0):
return self.lays[layNum]
def get_all_synapse_weight(self):
synapse_weight = []
for lay in self.lays:
for lay_group in lay.neuronGroups_list:
for neuron in lay_group:
synapse_weight.extend(neuron.get_weights())
return synapse_weight
def load_consciousness(self, consciousness = [0]*100):
neuron_weights = []
shift = 0
for lay in self.lays:
for lay_group in lay.neuronGroups_list:
for neuron in lay_group:
neuron_weights = consciousness[shift:shift+neuron.weightsCount]
neuron.set_weights(neuron_weights)
shift += neuron.weightsCount
def get_draw_scheme(self):
draw_scheme = []
for lay in self.lays:
draw_scheme.append(lay.get_draw_scheme())
return draw_scheme
def draw_brain_scheme(self, root_win = None, width = 800, height = 600):
worm_brain = self
brain_scheme_width = width
brain_scheme_height = height
if root_win == None:
self.scheme_window = Tk()
self.scheme_window.title("Brain scheme")
else:
self.scheme_window = Toplevel(root_win)
self.canvas_brain = Canvas(self.scheme_window, width=brain_scheme_width + 60, height=brain_scheme_height + 40,
bg='white')
self.canvas_brain.pack()
# draw_scheme pattern [[ (Ncount, sign, links), ],]
brain_draw_scheme = worm_brain.get_draw_scheme()
color_lay = self.get_color(200, 200, 200)[1]
color_pos_group = self.get_color(150, 250, 150)[1]
color_neg_group = self.get_color(250, 150, 150)[1]
color_digital_neuron = self.get_color(120, 110, 120)[1]
color_analog_neuron = self.get_color(130, 170, 130)[1]
color_input = self.get_color(250, 250, 60)[1]
color_output = self.get_color(60, 250, 250)[1]
color_inside = self.get_color(250, 200, 120)[1]
lay_width = round(brain_scheme_width / len(brain_draw_scheme))
lay_height = brain_scheme_height
layNum = 0
dec_lay_height = brain_scheme_height / (2.5 * len(brain_draw_scheme) )
group_out_cord = []
prev_lay_group_out_cord = []
for lay_scheme in brain_draw_scheme:
group_count = len(lay_scheme)
group_height = round(lay_height / group_count)
groups_heights = []
for group in lay_scheme:
groups_heights.append(group[0])
groups_heights = self.get_proportion(groups_heights)
max_group_height = max(groups_heights)
min_group_height = min(groups_heights)
while (max_group_height - min_group_height) > 2 * min_group_height:
minNum = groups_heights.index(min_group_height)
maxNum = groups_heights.index(max_group_height)
delta = groups_heights[maxNum] * 0.1
groups_heights[maxNum] -= delta
groups_heights[minNum] += delta
max_group_height = max(groups_heights)
min_group_height = min(groups_heights)
x_preset = 30 + layNum * lay_width
y_preset = 20 + layNum * dec_lay_height
lay_thicc = lay_width/3
self.canvas_brain.create_rectangle(x_preset+lay_thicc, y_preset, x_preset+lay_thicc*2,
lay_height-y_preset, fill=color_lay, outline=color_lay)
gr_bound = 10
lay_h = lay_height - 2 * y_preset
gr_preset = y_preset + gr_bound
gNum = 0
for group_h in groups_heights:
group_h *= lay_h
color_group = color_pos_group if lay_scheme[gNum][1] > 0 else color_neg_group
n_count = lay_scheme[gNum][0]
#lay
x_group_preset = x_preset + lay_thicc + gr_bound
self.canvas_brain.create_rectangle(x_group_preset,
gr_preset,
x_group_preset + lay_thicc - 2 * gr_bound,
group_h+gr_preset - 2 * gr_bound,
fill=color_group, outline=color_group)#"#000000")
#in
x_input_vec_preset = x_preset + lay_thicc/2
x_inp_vec_width = lay_thicc/10
self.canvas_brain.create_rectangle(x_input_vec_preset,
gr_preset,
x_input_vec_preset + x_inp_vec_width,
group_h + gr_preset - 2 * gr_bound,
fill=color_input, outline="#000000")
#in vector
weight_count = lay_scheme[gNum][4]
weight_height = (group_h - 2 * gr_bound)/weight_count
for i in range(0,weight_count):
self.canvas_brain.create_line(x_input_vec_preset,
gr_preset + i*weight_height,
x_input_vec_preset + x_inp_vec_width,
gr_preset + i*weight_height,
fill='#000000')
#in link
for link in lay_scheme[gNum][3]:
if brain_draw_scheme.index(lay_scheme) == 0:
break
cord = prev_lay_group_out_cord[link]
self.canvas_brain.create_line(cord[0],
cord[1],
x_input_vec_preset,
gr_preset + (group_h - 2 * gr_bound) / 2,
fill='#000000', arrow=LAST)
#out
x_output_vec_preset = x_input_vec_preset + 2* lay_thicc
x_out_vec_width = lay_thicc/10
self.canvas_brain.create_rectangle(x_output_vec_preset,
gr_preset,
x_output_vec_preset + x_out_vec_width,
group_h + gr_preset - 2 * gr_bound,
fill=color_output, outline="#000000")
group_out_cord.append((x_output_vec_preset + x_out_vec_width,
gr_preset + (group_h - 2 * gr_bound) / 2))
#out vector
out_height = (group_h - 2 * gr_bound)/n_count
for i in range(0,n_count):
self.canvas_brain.create_line(x_output_vec_preset,
gr_preset + i*out_height,
x_output_vec_preset + x_inp_vec_width,
gr_preset + i*out_height,
fill='#000000')
nr_preset = gr_preset
nr_bound_w = 4
nr_bound_h = 4
neuron_draw_size = (group_h - 2 * gr_bound) / n_count
neuron_area_width = lay_thicc - 2 * gr_bound
min_neuron_draw_size = (lay_thicc - 2 * gr_bound)/2
gr_h = (group_h - 2 * gr_bound)
column_count = 1
n_in_col = n_count
neuron_area = 0
down_shift = 0
if neuron_draw_size > neuron_area_width:
nr_bound_w = 4
neuron_draw_size = neuron_area_width - 2 * nr_bound_w
nr_bound_h = (gr_h - (neuron_draw_size * n_count)) / (2 * n_count)
elif neuron_draw_size < neuron_area_width:
if neuron_draw_size < min_neuron_draw_size:
neuron_area = sqrt((neuron_area_width * gr_h) / (n_count*1.5) )
column_count = ceil((n_count * neuron_area) / gr_h)
n_in_col = round(n_count / column_count)
column_count += 0 if n_count % column_count == 0 else 1
if neuron_area_width/column_count < neuron_area:
neuron_draw_size = neuron_area_width/column_count
column_count = ceil((n_count * neuron_draw_size) / gr_h)
n_in_col = round(n_count / column_count)
column_count += 0 if n_count % column_count == 0 else 1
else:
neuron_draw_size = neuron_area
nr_bound_w = (neuron_area_width - (neuron_draw_size * column_count)) / (column_count + 1)
nr_bound_h = (gr_h - (neuron_draw_size * n_in_col)) / (2 * n_in_col)
if nr_bound_w < 1:
nr_bound_w = 1
nr_bound_h = 1
neuron_draw_size = (neuron_area_width - (nr_bound_w * (column_count + 1))) / column_count
if neuron_draw_size < 2:
nr_bound_w = 1
nr_bound_h = 1
neuron_draw_size = 2
neuron_area = neuron_draw_size+nr_bound_w+nr_bound_h
n_count = round((neuron_area_width/neuron_area) * (gr_h/neuron_area))
column_count = ceil((n_count * neuron_area) / gr_h)
n_in_col = round(n_count / column_count)
n_count = column_count * n_in_col
down_shift = (gr_h - ((neuron_draw_size + 2*nr_bound_h) * (n_in_col)))/2
else:
nr_bound_h = neuron_draw_size*0.1
neuron_draw_size = neuron_draw_size - 2 * nr_bound_h
nr_bound_w = (neuron_area_width - neuron_draw_size ) / 2
colNum = 0
nrNum = 0
draw_neuron = 0
while draw_neuron < n_count:
nrNum+=1
if lay_scheme[gNum][2]:
color_neuron = color_digital_neuron
else:
color_neuron = color_analog_neuron
# neuron
self.canvas_brain.create_arc(x_group_preset + nr_bound_w + colNum * (neuron_draw_size + nr_bound_w),
nr_preset + down_shift + nr_bound_h,
x_group_preset + nr_bound_w + colNum * (
neuron_draw_size + nr_bound_w) + neuron_draw_size,
nr_preset + down_shift + neuron_draw_size + nr_bound_h,
start=1, extent=359, fill=color_neuron, outline=color_neuron)
if lay_scheme[gNum][2]:
#center
self.canvas_brain.create_rectangle(x_group_preset + nr_bound_w + colNum * (
neuron_draw_size + nr_bound_w) + neuron_draw_size / 3,
nr_preset + down_shift + nr_bound_h + neuron_draw_size / 3,
x_group_preset + nr_bound_w + colNum * (
neuron_draw_size + nr_bound_w) + 2 * neuron_draw_size / 3,
nr_preset + down_shift + 2 * neuron_draw_size / 3 + nr_bound_h,
fill=color_inside, outline=color_inside)
else:
#center
self.canvas_brain.create_arc(x_group_preset + nr_bound_w + colNum * (
neuron_draw_size + nr_bound_w) + neuron_draw_size / 3,
nr_preset + down_shift + nr_bound_h + neuron_draw_size / 3,
x_group_preset + nr_bound_w + colNum * (
neuron_draw_size + nr_bound_w) + 2 * neuron_draw_size / 3,
nr_preset + down_shift + 2 * neuron_draw_size / 3 + nr_bound_h,
start=1, extent=359, fill=color_inside, outline=color_inside)
self.canvas_brain.create_arc(x_group_preset + nr_bound_w + colNum * (neuron_draw_size + nr_bound_w) + neuron_draw_size / 3,
nr_preset + down_shift + nr_bound_h + neuron_draw_size / 3,
x_group_preset + nr_bound_w + colNum * (
neuron_draw_size + nr_bound_w) + 2 * neuron_draw_size / 3,
nr_preset + down_shift + 2 * neuron_draw_size / 3 + nr_bound_h,
start=1, extent=359, fill=color_inside, outline=color_inside)
if colNum == 0:
#input
self.canvas_brain.create_line(x_input_vec_preset + x_inp_vec_width,
gr_preset + (group_h - 2 * gr_bound) / 2,
x_group_preset,
nr_preset + down_shift + nr_bound_h + neuron_draw_size/2,
fill='#000000', arrow=LAST)
#output
self.canvas_brain.create_line(x_group_preset + lay_thicc - 2 * gr_bound,
nr_preset + down_shift + nr_bound_h + neuron_draw_size/2,
x_output_vec_preset,
nr_preset + down_shift + nr_bound_h + neuron_draw_size/2,
fill='#000000', arrow=LAST)
#self.canvas_brain.create_line(x_output_vec_preset,
# nr_preset,
# x_output_vec_preset + x_out_vec_width,
# nr_preset,
# fill='#000000')
nr_preset = nr_preset + neuron_draw_size + 2 * nr_bound_h
if nrNum == n_in_col:
colNum+=1
nrNum = 0
if colNum % 2 == 0:
nr_preset = gr_preset
else:
nr_preset = gr_preset
draw_neuron+=1
gr_preset = group_h+gr_preset
gNum+=1
layNum+=1
prev_lay_group_out_cord = group_out_cord
group_out_cord = []
self.scheme_window.mainloop()
def get_proportion(self, vector = []):
sum = 0
for el in vector:
sum += el
out_vector = []
for el in vector:
out_vector.append(el / sum)
return out_vector
def get_color(self, r=0, g=0, b=0):
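# returns ((r, g, b) as floats, '#rrggbb' hex string usable by tkinter)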
clr = ((r * 1.0, g * 1.0, b * 1.0),
'#' + r.to_bytes(1, 'little').hex().__str__()
+ g.to_bytes(1, 'little').hex().__str__()
+ b.to_bytes(1, 'little').hex().__str__())
return clr
def frozed_mind(self):
build_scheme = []
for lay in self.lays:
build_scheme.append(lay.get_build_scheme())
consciousness = self.get_all_synapse_weight()
self.frozen_mind = {'scheme': build_scheme, 'weights': consciousness}
return self.frozen_mind
def unfrozed_mind(self, ice_piece):
self.__init__(laysConfigs = ice_piece['scheme'])
self.load_consciousness(consciousness = ice_piece['weights'])
def save_to_file(self, filename = 'frozen_mind.txt'):
self.frozed_mind()
f = open(filename, 'w')
f.write(json.dumps(self.frozen_mind))
f.close()
def load_from_file(self, filename = 'frozen_mind.txt'):
f = open(filename, 'r')
json_mind = f.read()
self.unfrozed_mind(ice_piece = json.loads(json_mind))
f.close()
def __gt__(self, other):
return self.NN_learning_temp > other.NN_learning_temp
def __lt__(self, other):
return self.NN_learning_temp < other.NN_learning_temp
def __ge__(self, other):
return self.NN_learning_temp >= other.NN_learning_temp
def __le__(self, other):
return self.NN_learning_temp <= other.NN_learning_temp
class lay_abstact():
'''layConfig = [ (0, True, 0, (0,), 25, 0), ]
list of tuples: number of neurons in the group,
# plus (True) or minus (False) sign on the group's neuron outputs,
# activation function number for the group's neurons,
# whether the neurons have a discrete output (True - yes)
# tuple with the numbers of the input groups this neuron group is connected to
# length of the group's input vector
# normalization coefficient for the group's output'''
def __init__(self, layConfig = [ (0, True, 0, True, (0,), 25, 0), ]):
self.neuronGroups_count = len(layConfig)
self.neuronGroups_list = []
self.neuronGroups_inputs_link = []
self.groupsNormalization_coeff = []
self.neuron_count = 0
if layConfig[0][0] == 0:
pass
else:
for neuron_group in layConfig:
neuton_with_pos_out = neuron_group[1]
lay_temp = []
i = 0
while i < neuron_group[0]:
neuron = neuron_abstact(generateWeightsCount=neuron_group[5],
positiveOutput=neuton_with_pos_out,
funcNum=neuron_group[2],
digitalOut=neuron_group[3] )
lay_temp.append(neuron)
self.neuron_count+=1
i+=1
self.neuronGroups_list.append(lay_temp)
self.neuronGroups_inputs_link.append(neuron_group[4])
self.groupsNormalization_coeff.append(neuron_group[6])
def get_excited(self, inputsGoups = [[],]):
output_groups = []
i=0
while i<self.neuronGroups_count:
group_input = []
neurou_group_output = []
for inputgroupNum in self.neuronGroups_inputs_link[i]:
group_input.extend(inputsGoups[inputgroupNum])
for neuron in self.neuronGroups_list[i]:
neurou_group_output.append(neuron.spike( group_input ))
if self.groupsNormalization_coeff[i] != 0:
neurou_group_output = self.normalize_vector(neurou_group_output, self.groupsNormalization_coeff[i])
output_groups.append(neurou_group_output)
i+=1
return output_groups
def normalize_vector(self, inputVector = [], norm_coeff = 1):
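# min-max scale the vector into [0, norm_coeff]; if all values are equal, output zeros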
min_val = min(inputVector)
max_val = max(inputVector)
delitel = max_val - min_val
normalOutputVector = []
if delitel == 0:
for x_val in inputVector:
norm_x_val = 0
normalOutputVector.append(norm_x_val)
else:
for x_val in inputVector:
norm_x_val = ( (x_val - min_val) * norm_coeff )/delitel
normalOutputVector.append(norm_x_val)
return normalOutputVector
def fcingCooldown(self, inputGroups = [[],], output = [[],], desire_out = [[],],
changingNeuronsCount = 1, changingNeuronsWeightCount = 1, Lwa = 1):
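# select up to changingNeuronsCount mismatched outputs, nudge the chosen weights of the responsible
# neurons by +/-Lwa depending on the error sign, then adjust the matching inputs proportionally and return them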
errOuts = self.get_err_out(output, desire_out)
errOuts.sort()
changing_errOuts = []
changing_neuron_weights_info = []
inputs = []
len_groupinput = []
i = 0
while i < self.neuronGroups_count:
group_input = []
for inputgroupNum in self.neuronGroups_inputs_link[i]:
group_input.extend(inputGroups[inputgroupNum])
inputs.append(group_input)
i+=1
i = 0
while i < changingNeuronsCount and i < len(errOuts):
changing_errOuts.append(errOuts[i])
i+=1
for err_out in changing_errOuts:
err_neuron = self.neuron_at(err_out[2])
err_neuron_vector = err_neuron.learning_spike(inputs[err_out[2][0]])
err_neuron_vector.sort()
i = 0
Nwa = Lwa
while i < changingNeuronsWeightCount:
if err_out[1] == 1: # the neuron's output needs to be increased
if err_neuron_vector[i][1] < err_out[1]: # the neuron's weight is on the negative edge
Nwa = - Lwa
self.neuron_at(err_out[2]).adjust_weight(err_neuron_vector[i][2], Nwa)
else: # the neuron's weight is on the positive edge
Nwa = Lwa
self.neuron_at(err_out[2]).adjust_weight(err_neuron_vector[i][2], Nwa)
else: # the output needs to be decreased
if err_neuron_vector[i][1] < err_out[1]: # the neuron's weight is on the negative edge
Nwa = Lwa
self.neuron_at(err_out[2]).adjust_weight(err_neuron_vector[i][2], Nwa)
else: # the neuron's weight is on the positive edge
Nwa = - Lwa
self.neuron_at(err_out[2]).adjust_weight(err_neuron_vector[i][2], Nwa)
changing_neuron_weights_info.append( (err_out, err_neuron_vector[i], Nwa) )
i+=1
input_group_len = []
for input in inputGroups:
input_group_len.append(len(input))
for ch_neur in changing_neuron_weights_info:
#change the input proportionally to the neuron weight adjustments
Gnum = 0
Nnum = ch_neur[1][2]
for link in self.neuronGroups_inputs_link[Gnum]:
if Nnum >= input_group_len[link]:
Nnum -= input_group_len[link]
else:
Gnum = link
break
inputGroups[Gnum][Nnum] += inputGroups[Gnum][Nnum] * ch_neur[2]
return inputGroups
def get_err_out(self, list, example_list):
'''Input is a list of lists of ints'''
err_elements = []
try:
len1 = len(list)
i = 0
while i < len1:
len2 = len(list[i])
j = 0
while j < len2:
if list[i][j] != example_list[i][j]:
diff = example_list[i][j] - list[i][j]
delta = abs(diff)
sign = 1 if diff > 0 else -1
er_element = (delta, sign, (i,j))
err_elements.append(er_element)
j+=1
i+=1
finally:
return err_elements
return err_elements
def neuron_at(self, coord):
'''coord = (groupNum, neuronNum)'''
return self.neuronGroups_list[coord[0]][coord[1]]
def get_neuron_group_ref(self, NgNum = 0):
return self.neuronGroups_list[NgNum]
def get_draw_scheme(self):
draw_scheme = []
for group in self.neuronGroups_list:
neuron_count = len(group)
sigh = group[0].output_sign
digital_out = group[0].digital_out
links = self.neuronGroups_inputs_link[self.neuronGroups_list.index(group)]
weights_count = group[0].weightsCount
draw_scheme.append((neuron_count, sigh, digital_out, links, weights_count))
return draw_scheme
def get_build_scheme(self):
build_scheme = []
for group in self.neuronGroups_list:
neuron_count = len(group)
sigh = True if group[0].output_sign == 1 else False
digital_out = group[0].digital_out
links = self.neuronGroups_inputs_link[self.neuronGroups_list.index(group)]
weights_count = group[0].weightsCount
func_num = group[0].funcNum
normal_coef = self.groupsNormalization_coeff[self.neuronGroups_list.index(group)]
build_scheme.append((neuron_count, sigh, func_num, digital_out, links, weights_count, normal_coef))
return build_scheme
class neuron_abstact():
'''funcNum: 0 - summator (threshold summator if the output is digital)
1 - rational sigmoid
threshold is the firing threshold for the digital output'''
def __init__(self, weights = [], generateWeightsCount = 0, positiveOutput = True, funcNum = 0,
digitalOut = True):
if generateWeightsCount > 0:
self.weights = []
self.set_random_weights(weights, generateWeightsCount)
else:
self.weights = weights
self.weightsCount = len(self.weights)
self.output_sign = 1 if positiveOutput else -1
self.digital_out = digitalOut
self.funcNum = funcNum
self.threshold = round(generateWeightsCount/2)
self.recurrent_mem = []
def set_random_weights(self, weights = [], weightsCount = 25,):
if len(weights) == 0:
self.weights = [(9.9 + x - x) / randint(1, 100) + 0.1 for x in range(weightsCount)]
else:
self.weights = weights.copy()
self.weightsCount = len(self.weights)
def set_funcNum(self,funcNum):
self.funcNum = funcNum
def set_weights(self, weights = []):
self.weights = weights
self.weightsCount = len(self.weights)
if self.funcNum == 1:
for weight in self.weights:
if weight == 0:
weights = self.get_random_weight(from_=0.1,to=10)
def get_weights(self):
return self.weights
def get_random_weight(self, from_ = 0, to = 10):
return (to - from_) / randint(1, 100) + from_
def spike(self, input = []):
if self.funcNum == 0: #linear sum
output = 0
i = 0
while i < self.weightsCount:
output += ( input[i] * self.weights[i] )
i+=1
if self.digital_out:
return 1 if output > self.threshold else 0
else:
return output * self.output_sign
elif self.funcNum == 1: #rational sig
output = 0
i = 0
while i < self.weightsCount:
abs_inp = abs(input[i])
output += abs_inp / ( abs_inp + abs(self.weights[i]) )
i+=1
if self.digital_out:
return 1 if output > self.threshold else 0
else:
return output * self.output_sign
elif self.funcNum == 2: #RelU
output = 0
i = 0
while i < self.weightsCount:
output += ( input[i] + self.weights[i] ) * self.weights[i] # comment out if the worms go crazy
i+=1
output = max([0, output])
if self.digital_out:
return 1 if output > self.threshold else 0
else:
return output * self.output_sign
else:
pass
def learning_spike(self, input = []):
'''output = [(x,y,z),] x - input*weight, y - output sigh, z - weight number'''
output = []
i = 0
if self.funcNum == 1:
while i < self.weightsCount:
abs_inp = abs(input[i])
output.append( ( abs_inp/(abs_inp + self.weights[i]), self.output_sign, i ) )
i+=1
else:
while i < self.weightsCount:
output.append( (input[i] * self.weights[i], self.output_sign, i) )
i+=1
return output
def adjust_weight(self, weightNum = 0, Nwa = 0 ):
if self.funcNum == 1:
self.weights[weightNum] -= Nwa * self.weights[weightNum]
if self.weights[weightNum] == 0:
self.weights[weightNum] = self.get_random_weight(from_=0.1,to=10)
else:
self.weights[weightNum] += Nwa * self.weights[weightNum]
| 46.906977
| 143
| 0.495727
| 3,746
| 34,289
| 4.235718
| 0.094768
| 0.030251
| 0.042352
| 0.018529
| 0.496376
| 0.434172
| 0.381547
| 0.349783
| 0.320413
| 0.27283
| 0
| 0.030444
| 0.421389
| 34,289
| 730
| 144
| 46.971233
| 0.769304
| 0.08023
| 0
| 0.369565
| 0
| 0
| 0.004989
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.06689
| false
| 0.005017
| 0.008361
| 0.01505
| 0.135452
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0034f805a844662bdcbee5246fde715c74fdfb46
| 761
|
py
|
Python
|
xiamiu/urls.py
|
Vida42/xiamiu
|
9249a20746d1da050546e3fcdfafbc5ff49ab4d0
|
[
"Apache-2.0"
] | null | null | null |
xiamiu/urls.py
|
Vida42/xiamiu
|
9249a20746d1da050546e3fcdfafbc5ff49ab4d0
|
[
"Apache-2.0"
] | null | null | null |
xiamiu/urls.py
|
Vida42/xiamiu
|
9249a20746d1da050546e3fcdfafbc5ff49ab4d0
|
[
"Apache-2.0"
] | null | null | null |
from django.urls import path
from django.conf.urls import url
from . import views
urlpatterns = [
path('', views.index, name='home'),
url(r'^artist/(?P<inputID>.*?)/$', views.showArtistPage, name='showArtistPage'),
url(r'^album/(?P<inputID>.*?)/$', views.showAlbumPage, name='showAlbumPage'),
url(r'^song/(?P<inputID>.*?)/$', views.showSongPage, name='showSongPage'),
url(r'^genre/(?P<inputID>.*?)/$', views.showGenrePage, name='showGenrePage'),
url(r'^search/$', views.search, name='search'),
url(r'^search/artist/$', views.searchArtistByName, name='searchArtistByName'),
url(r'^search/album/$', views.searchAlbumByName, name='searchAlbumByName'),
url(r'^search/song/$', views.searchSongByName, name='searchSongByName')
]
| 44.764706
| 84
| 0.672799
| 87
| 761
| 5.885057
| 0.310345
| 0.0625
| 0.101563
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106439
| 761
| 16
| 85
| 47.5625
| 0.752941
| 0
| 0
| 0
| 0
| 0
| 0.350854
| 0.131406
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.214286
| 0
| 0.214286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
00350a7f72c371d77bedf708f8a55456f2d37c38
| 19,030
|
py
|
Python
|
lib/python/treadmill_aws/formatter.py
|
Morgan-Stanley/treadmill-aws
|
4c3d25c477422d83f0cd8dc6851fd02ffa48dcbb
|
[
"Apache-2.0"
] | 6
|
2018-05-24T17:17:51.000Z
|
2020-06-06T02:21:59.000Z
|
lib/python/treadmill_aws/formatter.py
|
Morgan-Stanley/treadmill-aws
|
4c3d25c477422d83f0cd8dc6851fd02ffa48dcbb
|
[
"Apache-2.0"
] | 93
|
2018-04-16T16:14:40.000Z
|
2019-09-17T22:10:28.000Z
|
lib/python/treadmill_aws/formatter.py
|
Morgan-Stanley/treadmill-aws
|
4c3d25c477422d83f0cd8dc6851fd02ffa48dcbb
|
[
"Apache-2.0"
] | 17
|
2017-09-29T10:30:47.000Z
|
2019-01-28T21:52:03.000Z
|
"""Table CLI formatter.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import yaml
from treadmill.formatter import tablefmt
def _sort(unsorted):
"""Sort list."""
unsorted.sort()
return '\n'.join(unsorted)
def _state(state):
"""Get status from instance."""
return state['Name']
def _name_from_tags(tags):
"""Get name from tags."""
for tag in tags:
if tag['Key'] == 'Name':
return tag['Value']
return None
def _fmt_tags():
"""Output formatter tags."""
def _fmt(items):
"""Format tags, discard cloudformation tags."""
filtered = [
item for item in items
if not item['Key'].startswith('aws:cloudformation:')
]
schema = [
('key', 'Key', None),
('value', 'Value', None),
]
return tablefmt.list_to_table(
filtered, schema, header=False, align=None
)
return _fmt
def _fmt_secgroups():
"""Output formatter security groups."""
def _fmt(items):
"""Format tags, discard cloudformation tags."""
schema = [
('name', 'GroupName', None),
('id', 'GroupId', None),
]
return tablefmt.list_to_table(
items, schema, header=False, align=None
)
return _fmt
def _fmt_list():
"""Output formatter list."""
def _fmt(items):
"""Format list."""
schema = [
('item', None, None),
]
return tablefmt.list_to_table(
items, schema, header=False, align=None
)
return _fmt
def _fmt_trusted_entities(policy):
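"""Format trusted entities (principals, SAML providers, services) from an assume-role policy document."""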
def _statement_principals(statement):
entities = []
if (statement['Action'] == 'sts:AssumeRole' and
statement['Effect'] == 'Allow' and
'AWS' in statement['Principal']):
principals = statement['Principal']['AWS']
if isinstance(principals, str):
principals = [principals]
principals.sort()
for principal in principals:
parts = principal.split(':')
parts[5] = parts[5].replace('/', ':')
entities.append({'Entity': parts[5], 'Arn': principal})
return entities
def _statement_saml_providers(statement):
entities = []
if (statement['Action'] == 'sts:AssumeRoleWithSAML' and
statement['Effect'] == 'Allow'):
saml_providers = statement['Principal']['Federated']
if isinstance(saml_providers, str):
saml_providers = [saml_providers]
saml_providers.sort()
for saml_provider in saml_providers:
parts = saml_provider.split(':')
parts[5] = parts[5].replace('/', ':')
entities.append({'Entity': parts[5], 'Arn': saml_provider})
return entities
def _statement_services(statement):
entities = []
if (statement['Action'] == 'sts:AssumeRole' and
statement['Effect'] == 'Allow' and
'Service' in statement['Principal']):
services = statement['Principal']['Service']
if isinstance(services, str):
services = [services]
services.sort()
for service in services:
entities.append({'Entity': 'service:%s' % service,
'Arn': service})
return entities
# pylint: disable=R0912
def _trusted_entities(pol):
entities = []
for statement in pol['Statement']:
principals = _statement_principals(statement)
if principals:
for principal in principals:
entities.append(principal)
saml_providers = _statement_saml_providers(statement)
if saml_providers:
for saml_provider in saml_providers:
entities.append(saml_provider)
services = _statement_services(statement)
if services:
for service in services:
entities.append(service)
return entities
items = _trusted_entities(policy)
schema = [
('Entity', 'Entity', None),
('Arn', 'Arn', None)
]
return tablefmt.list_to_table(items, schema, header=False, align=None)
def _fmt_attached_policies(policies):
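"""Format attached policies, labeling AWS-managed policies as global and account policies as local."""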
def _fpolicies(policies):
fpolicies = []
for policy in policies:
if policy['PolicyArn'].startswith('arn:aws:iam::aws:policy/'):
pn = policy['PolicyArn'].replace('arn:aws:iam::aws:policy/',
'')
fpolicies.append({
'Type': 'global',
'PolicyName': pn,
'PolicyArn': policy['PolicyArn']
})
else:
fpolicies.append({
'Type': 'local',
'PolicyName': policy['PolicyName'],
'PolicyArn': policy['PolicyArn']
})
return fpolicies
items = _fpolicies(policies)
schema = [
('Type', 'Type', None),
('PolicyName', 'PolicyName', None),
('PolicyArn', 'PolicyArn', None),
]
return tablefmt.list_to_table(items,
schema,
header=False,
align=None,
sortby='PolicyName')
def _fmt_policy_version(policy_version):
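"""Render the policy version document as YAML."""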
return yaml.dump(policy_version, default_flow_style=False, indent=4)
class SubnetPrettyFormatter:
"""Pretty table formatter for AWS subnets."""
@staticmethod
def format(item):
"""Return pretty-formatted item."""
schema = [
('id', 'SubnetId', None),
('state', 'State', None),
('zone', 'AvailabilityZone', None),
('cidr_block', 'CidrBlock', None),
('vpc', 'VpcId', None),
('tags', 'Tags', _fmt_tags()),
]
format_item = tablefmt.make_dict_to_table(schema)
format_list = tablefmt.make_list_to_table(schema)
if isinstance(item, list):
return format_list(item)
else:
return format_item(item)
class VpcPrettyFormatter:
"""Pretty table formatter for AWS vpcs."""
@staticmethod
def format(item):
"""Return pretty-formatted item."""
schema = [
('id', 'VpcId', None),
('default', 'IsDefault', None),
('state', 'State', None),
('cidr_block', 'CidrBlock', None),
('tags', 'Tags', _fmt_tags()),
]
format_item = tablefmt.make_dict_to_table(schema)
format_list = tablefmt.make_list_to_table(schema)
if isinstance(item, list):
return format_list(item)
else:
return format_item(item)
class InstancePrettyFormatter:
"""Pretty table formatter for AWS instances."""
@staticmethod
def format(item):
"""Return pretty-formatted item."""
item_schema = [
('hostname', 'Tags', _name_from_tags),
('id', 'InstanceId', None),
('arch', 'Architecture', None),
('image', 'ImageId', None),
('type', 'InstanceType', None),
('key', 'KeyName', None),
('launch', 'LaunchTime', None),
('state', 'State', _state),
('vpc', 'VpcId', None),
('subnet', 'SubnetId', None),
('secgroups', 'SecurityGroups', _fmt_secgroups()),
('tags', 'Tags', _fmt_tags()),
]
list_schema = [
('hostname', 'Tags', _name_from_tags),
('id', 'InstanceId', None),
('image', 'ImageId', None),
('type', 'InstanceType', None),
('key', 'KeyName', None),
('vpc', 'VpcId', None),
('subnet', 'SubnetId', None),
('tags', 'Tags', _fmt_tags()),
]
format_item = tablefmt.make_dict_to_table(item_schema)
format_list = tablefmt.make_list_to_table(list_schema)
if isinstance(item, list):
return format_list(item)
else:
return format_item(item)
class SpotPrettyFormatter:
"""Pretty table formatter for Spot Instance Requests."""
@staticmethod
def format(item):
"""Return pretty-formatted item."""
item_schema = [
('id', 'id', None),
('status', 'state', None),
('code', 'status_code', None),
('changed', 'status_timestamp', None),
('zone', 'az', None),
('subnet', 'subnet', None),
('type', 'instance_type', None),
('instance_id', 'instance_id', None),
('ami_id', 'ami_id', None),
('hostname', 'hostname', None),
('launch', 'instance_launch', None),
('state', 'instance_status', None),
('duration', 'duration', None),
]
list_schema = item_schema
format_item = tablefmt.make_dict_to_table(item_schema)
format_list = tablefmt.make_list_to_table(list_schema)
if isinstance(item, list):
return format_list(item)
else:
return format_item(item)
class IamRolePrettyFormatter:
"""Pretty table formatter for AWS IAM roles."""
@staticmethod
def format(item):
"""Return pretty-formatted item."""
list_schema = [
('RoleName', 'RoleName', None),
('Arn', 'Arn', None),
('MaxSessionDuration', 'MaxSessionDuration', None),
('CreateDate', 'CreateDate', None),
]
item_schema = [
('RoleName', 'RoleName', None),
('Path', 'Path', None),
('Arn', 'Arn', None),
('MaxSessionDuration', 'MaxSessionDuration', None),
('CreateDate', 'CreateDate', None),
('RoleId', 'RoleId', None),
('TrustedEntities',
'AssumeRolePolicyDocument',
_fmt_trusted_entities),
('InlinePolicies', 'RolePolicies', None),
('AttachedPolicies', 'AttachedPolicies', _fmt_attached_policies),
]
format_item = tablefmt.make_dict_to_table(item_schema)
format_list = tablefmt.make_list_to_table(list_schema)
if isinstance(item, list):
return format_list(item)
else:
return format_item(item)
class IamPolicyPrettyFormatter:
"""Pretty table formatter for AWS IAM policies."""
@staticmethod
def format(item):
"""Return pretty-formatted item."""
list_schema = [
('AttachmentCount', 'DefaultVersionId', None),
('DefaultVersionId', 'DefaultVersionId', None),
('Arn', 'Arn', None),
('MaxSessionDuration', 'MaxSessionDuration', None),
('CreateDate', 'CreateDate', None),
]
item_schema = [
('Arn', 'Arn', None),
('PolicyName', 'PolicyName', None),
('Path', 'Path', None),
('DefaultVersionId', 'DefaultVersionId', None),
('IsAttachable', 'IsAttachable', None),
('AttachmentCount', 'AttachmentCount', None),
('Description', 'Description', None),
('CreateDate', 'CreateDate', None),
('UpdateDate', 'UpdateDate', None),
('PolicyVersion', 'PolicyVersion', _fmt_policy_version)
]
format_item = tablefmt.make_dict_to_table(item_schema)
format_list = tablefmt.make_list_to_table(list_schema)
if isinstance(item, list):
return format_list(item)
else:
return format_item(item)
class SnapshotPrettyFormatter:
"""Pretty table formatter for AWS snaphots."""
@staticmethod
def format(item):
"""Return pretty-formatted item."""
list_schema = [
('Name', 'Tags', _name_from_tags),
('SnapshotId', 'SnapshotId', None),
('VolumeId', 'VolumeId', None),
('State', 'State', None),
('Progress', 'Progress', None),
('VolumeSize', 'VolumeSize', None),
('StartTime', 'StartTime', None),
('Description', 'Description', None),
]
item_schema = [
('Name', 'Tags', _name_from_tags),
('Description', 'Description', None),
('SnapshotId', 'SnapshotId', None),
('VolumeId', 'VolumeId', None),
('State', 'State', None),
('Progress', 'Progress', None),
('VolumeSize', 'VolumeSize', None),
('StartTime', 'StartTime', None),
('Encrypted', 'Encrypted', None),
('KmsKeyId', 'KmsKeyId', None),
('tags', 'Tags', _fmt_tags()),
]
format_item = tablefmt.make_dict_to_table(item_schema)
format_list = tablefmt.make_list_to_table(list_schema)
if isinstance(item, list):
return format_list(item)
else:
return format_item(item)
class ImagePrettyFormatter:
"""Pretty table formatter for AWS images."""
@staticmethod
def format(item):
"""Return pretty-formatted item."""
list_schema = [
('id', 'ImageId', None),
('name', 'Name', None),
('owner', 'OwnerId', None),
('created', 'CreationDate', None),
('public', 'Public', lambda v: 'yes' if v else 'no'),
('state', 'State', None),
]
item_schema = list_schema + [
('tags', 'Tags', _fmt_tags()),
]
format_item = tablefmt.make_dict_to_table(item_schema)
format_list = tablefmt.make_list_to_table(list_schema)
if isinstance(item, list):
return format_list(item)
else:
return format_item(item)
class SecgroupPrettyFormatter:
"""Pretty table formatter for AWS security groups."""
@staticmethod
def format(item):
"""Return pretty-formatted item."""
list_schema = [
('id', 'GroupId', None),
('owner', 'OwnerId', None),
('vpc', 'VpcId', None),
('tags', 'Tags', _fmt_tags()),
]
# TODO: add ip ingress/egress permissions to the output.
item_schema = [
('id', 'GroupId', None),
('owner', 'OwnerId', None),
('vpc', 'VpcId', None),
('tags', 'Tags', _fmt_tags()),
]
format_item = tablefmt.make_dict_to_table(item_schema)
format_list = tablefmt.make_list_to_table(list_schema)
if isinstance(item, list):
return format_list(item)
else:
return format_item(item)
class IpaUserPrettyFormatter:
"""Pretty table formatter for AWS user."""
@staticmethod
def format(item):
"""Return pretty-formatted item."""
list_schema = [
('username', 'uid', lambda _: _[0]),
]
item_schema = [
('username', 'uid', lambda _: _[0]),
('class', 'userclass', lambda _: _[0]),
('groups', 'memberof_group', _sort),
('indirect-groups', 'memberofindirect_group', '\n'.join),
('hbac-rule', 'memberofindirect_hbacrule', '\n'.join),
('sudo-rule', 'memberofindirect_sudorule', '\n'.join),
]
format_item = tablefmt.make_dict_to_table(item_schema)
format_list = tablefmt.make_list_to_table(list_schema)
if isinstance(item, list):
return format_list(item)
else:
return format_item(item)
class IamUserPrettyFormatter:
"""Pretty table formatter for AWS users."""
@staticmethod
def format(item):
"""Return pretty-formatted item."""
list_schema = [
('UserName', 'UserName', None),
('Arn', 'Arn', None),
]
item_schema = [
('UserName', 'UserName', None),
('Path', 'Path', None),
('Arn', 'Arn', None),
('CreateDate', 'CreateDate', None),
('UserId', 'UserId', None),
('InlinePolicies', 'UserPolicies', None),
('AttachedPolicies', 'AttachedPolicies', _fmt_attached_policies),
]
format_item = tablefmt.make_dict_to_table(item_schema)
format_list = tablefmt.make_list_to_table(list_schema)
if isinstance(item, list):
return format_list(item)
else:
return format_item(item)
class CellDataFormatter:
"""Pretty table formatter for cell data."""
@staticmethod
def format(item):
"""Return pretty-formatted item."""
schema = [
('aws_account', 'aws_account', None),
('aws_admin', 'aws_admin', None),
('aws_region', 'aws_region', None),
('docker-registries', 'docker_registries', ','.join),
('disk-size', 'disk_size', None),
('hostgroups', 'hostgroups', ','.join),
('image', 'image', None),
('image-accounts', 'image_accounts', ','.join),
('instance-profile', 'instance_profile', None),
('realm', 'realm', None),
('secgroup', 'secgroup', None),
('size', 'size', None),
('sns-topic', 'sns_topic', None),
('subnets', 'subnets', ','.join),
('s3_registry_bucket', 's3_registry_bucket', None),
('tls_certs', 'tls_certs', None),
]
format_item = tablefmt.make_dict_to_table(schema)
format_list = tablefmt.make_list_to_table(schema)
if isinstance(item, list):
return format_list(item)
else:
return format_item(item)
class PartDataFormatter:
"""Pretty table formatter for partition data."""
@staticmethod
def format(item):
"""Return pretty-formatted item."""
schema = [
('autoscale', 'autoscale', None),
('image', 'image', None),
('image-accounts', 'image_accounts', ','.join),
('instance-types', 'instance_types', ','.join),
('spot-instance-types', 'spot_instance_types', ','.join),
('spot-duration', 'spot_duration', None),
('disk-size', 'disk_size', None),
('hostgroups', 'hostgroups', ','.join),
('secgroup', 'secgroup', None),
('instance-profile', 'instance_profile', None),
('subnets', 'subnets', ','.join),
('s3_registry_bucket', 's3_registry_bucket', None),
]
format_item = tablefmt.make_dict_to_table(schema)
format_list = tablefmt.make_list_to_table(schema)
if isinstance(item, list):
return format_list(item)
else:
return format_item(item)
| 31.094771
| 77
| 0.534314
| 1,724
| 19,030
| 5.692575
| 0.140951
| 0.039739
| 0.020175
| 0.030467
| 0.563685
| 0.525168
| 0.489097
| 0.485225
| 0.466884
| 0.443754
| 0
| 0.001402
| 0.325328
| 19,030
| 611
| 78
| 31.145663
| 0.762988
| 0.066369
| 0
| 0.58114
| 0
| 0
| 0.17641
| 0.00944
| 0
| 0
| 0
| 0.001637
| 0
| 1
| 0.065789
| false
| 0
| 0.013158
| 0.002193
| 0.203947
| 0.002193
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0035d39504b6fcd873cb06e3d139aa8135704401
| 10,156
|
py
|
Python
|
src/newt/db/updater.py
|
bmjjr/db
|
39d3833f4458fcd20d09f383711745842b5db4f2
|
[
"MIT"
] | 153
|
2017-01-24T16:55:00.000Z
|
2022-03-21T08:24:13.000Z
|
src/newt/db/updater.py
|
bmjjr/db
|
39d3833f4458fcd20d09f383711745842b5db4f2
|
[
"MIT"
] | 14
|
2017-01-25T17:04:49.000Z
|
2021-12-05T19:26:35.000Z
|
src/newt/db/updater.py
|
bmjjr/db
|
39d3833f4458fcd20d09f383711745842b5db4f2
|
[
"MIT"
] | 16
|
2017-01-25T07:25:17.000Z
|
2022-03-21T08:24:16.000Z
|
from __future__ import print_function
"""Updates database json representation
"""
import argparse
import itertools
import logging
import relstorage.adapters.postgresql
import relstorage.options
import sys
from . import pg_connection
from . import follow
from .jsonpickle import Jsonifier
from ._adapter import DELETE_TRIGGER
from ._util import closing, table_exists, trigger_exists
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('connection_string', help='Postgresql connection string')
parser.add_argument('-t', '--poll-timeout', type=int, default=300,
help='Change-poll timeout, in seconds')
parser.add_argument('-m', '--transaction-size-limit', type=int, default=100000,
help='Transaction size limit (approximate)')
parser.add_argument(
'-l', '--logging-configuration', default='info',
help='Logging configuration file path, or a logging level name')
parser.add_argument(
'-d', '--driver', default='auto',
help='Provide an explicit Postgres driver name (e.g. psycopg2)')
parser.add_argument(
'-T', '--remove-delete-trigger', action="store_true",
help="""\
Remove the Newt DB delete trigger, if it exists.
The Newt DB delete trigger is incompatible with the updater. It can cause
deadlock errors if the database is packed while the updater is running.
""")
gc_sql = """
delete from newt n where not exists (
select from object_state s where n.zoid = s.zoid)
"""
parser.add_argument(
'-g', '--gc-only', action="store_true",
help="""\
Collect garbage and exit.
This removes Newt DB records that don't have corresponding database records.
This is done by executing:
%s
Note that garbage collection is normally performed on startup unless
the -G option is used.
""" % gc_sql)
parser.add_argument(
'-G', '--no-gc', action="store_true",
help="Don't perform garbage collection on startup.")
parser.add_argument(
'--compute-missing', action='store_true',
help="""\
Compute missing newt records.
Rather than processing new records, process records written up through
the current time and stop. Only missing records are updated. This
option requires PostgreSQL 9.5.
This is used to compute newt records after adding Newt DB to an existing
PostgreSQL RelStorage application.
""")
parser.add_argument(
'--nagios',
help="""\
Check the status of the updater.
The status is checked by checking the updater lag, which is the
difference between the last transaction committed to the database, and
the last transaction processed by the updater. The option takes 2
numbers, separated by a comma. The first number is the lag, in
seconds, for the updater to be considered to be OK. The second number
is the maximum lag for which the updater isn't considered to be in
error. For example, 1,99 indicates OK if 1 or less, WARNING if more
than 1 and less than or equal to 99, and ERROR if more than 99 seconds.
""")
parser.add_argument(
'-x', '--transform',
help = """\
The dotted name of a function (or callable object) to
transform generated JSON data. This provides a way to control
how your JSON data are generated and also provides a mechanism
for ignoring some objects. See the Newt DB transform option.
""")
def _update_newt(conn, cursor, jsonifier, Binary, batch):
ex = cursor.execute
mogrify = cursor.mogrify
tid = None
while True:
data = list(itertools.islice(batch, 0, 100))
if not data:
break
tid = data[-1][0]
# Delete any existing records for the values. 2 reasons:
# a) Make sure that new invalid data removes old valid data, and
# b) Don't depend on upsert.
ex("delete from newt where zoid = any(%s)", ([d[1] for d in data], ))
# Convert, filtering out null conversions (uninteresting classes)
to_save = []
for tid, zoid, state in data:
class_name, ghost_pickle, state = jsonifier((tid, zoid), state)
if state is not None:
to_save.append((zoid, class_name, Binary(ghost_pickle), state))
if to_save:
ex("insert into newt (zoid, class_name, ghost_pickle, state)"
" values " +
', '.join(mogrify('(%s, %s, %s, %s)', d).decode('ascii')
for d in to_save)
)
if tid is not None:
follow.set_progress_tid(conn, __name__, tid)
conn.commit()
def _compute_missing(conn, cursor, jsonifier, Binary, batch):
ex = cursor.execute
mogrify = cursor.mogrify
tid = None
while True:
data = list(itertools.islice(batch, 0, 100))
if not data:
break
tid = data[-1][0]
# Convert, filtering out null conversions (uninteresting classes)
to_save = []
for tid, zoid, state in data:
class_name, ghost_pickle, state = jsonifier((tid, zoid), state)
if state is not None:
to_save.append((zoid, class_name, Binary(ghost_pickle), state))
if to_save:
ex("insert into newt (zoid, class_name, ghost_pickle, state)"
" values %s on conflict do nothing" %
', '.join(mogrify('(%s, %s, %s, %s)', d).decode('ascii')
for d in to_save)
)
conn.commit()
logging_levels = 'DEBUG INFO WARNING ERROR CRITICAL'.split()
def main(args=None):
options = parser.parse_args(args)
if options.logging_configuration.upper() in logging_levels:
logging.basicConfig(level=options.logging_configuration.upper())
else:
with open(options.logging_configuration) as f:
from ZConfig import configureLoggers
configureLoggers(f.read())
transform = options.transform
if transform is not None:
from .component import global_by_name
transform = global_by_name(transform)
jsonifier = Jsonifier(transform=transform)
driver = relstorage.adapters.postgresql.select_driver(
relstorage.options.Options(driver=options.driver))
Binary = driver.Binary
dsn = options.connection_string
with closing(pg_connection(dsn)) as conn:
with closing(conn.cursor()) as cursor:
if options.nagios:
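# nagios check mode: compare the newest committed tid in object_state with the updater's progress tid and report the lag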
if not table_exists(cursor, 'newt_follow_progress'):
print("Updater has not run")
return 2
cursor.execute("select max(tid) from object_state")
[[stid]] = cursor
utid = follow.get_progress_tid(conn, __name__)
if stid is None:
if utid == -1:
print("No transactions")
return 0
else:
print("Updater saw data but there was None")
return 2
elif utid < 0:
print("Updater hasn't done anything")
return 2
else:
from ZODB.utils import p64
from ZODB.TimeStamp import TimeStamp
lag = (TimeStamp(p64(stid)).timeTime() -
TimeStamp(p64(utid)).timeTime())
if lag < 0:
print("Updater is ahead")
return 2
warn, error = map(int, options.nagios.split(','))
flag = lambda : ("%99.3f" % lag).strip()
if lag > error:
print("Updater is too far behind | %s" % flag())
return 2
elif lag > warn:
print("Updater is behind | %s" % flag())
return 1
else:
print("OK | %s" % flag())
return 0
compute_missing = options.compute_missing
if (compute_missing and
not table_exists(cursor, follow.PROGRESS_TABLE)
):
if not table_exists(cursor, 'newt'):
raise AssertionError("newt table doesn't exist")
cursor.execute("select max(tid) from object_state")
[[tid]] = cursor
else:
tid = follow.get_progress_tid(conn, __name__)
if tid < 0 and not table_exists(cursor, 'newt'):
from ._adapter import _newt_ddl
cursor.execute(_newt_ddl)
elif trigger_exists(cursor, DELETE_TRIGGER):
if options.remove_delete_trigger:
cursor.execute("drop trigger %s on object_state" %
DELETE_TRIGGER)
else:
logger.error(
"The Newt DB delete trigger exists.\n"
"It is incompatible with the updater.\n"
"Use -T to remove it.")
return 1
if not options.no_gc:
cursor.execute(gc_sql)
conn.commit()
if options.gc_only:
if options.no_gc:
logger.warn(
"Exiting after garbage collection,\n"
"but garbage collection was suppressed.")
return 0
if options.compute_missing:
start_tid = -1
end_tid = tid
logger.info("Compute_missing through %s", tid)
process = _compute_missing
else:
logger.info("Starting updater at %s", tid)
start_tid = tid
end_tid = None
process = _update_newt
for batch in follow.updates(
dsn,
start_tid=start_tid,
end_tid=end_tid,
batch_limit=options.transaction_size_limit,
poll_timeout=options.poll_timeout,
):
process(conn, cursor, jsonifier, Binary, batch)
if __name__ == '__main__':
sys.exit(main())
| 35.760563
| 79
| 0.581824
| 1,198
| 10,156
| 4.809683
| 0.265442
| 0.017182
| 0.032454
| 0.01319
| 0.21555
| 0.180493
| 0.171468
| 0.161055
| 0.147171
| 0.147171
| 0
| 0.008814
| 0.329756
| 10,156
| 283
| 80
| 35.886926
| 0.837667
| 0.026782
| 0
| 0.321888
| 0
| 0
| 0.298322
| 0.007117
| 0
| 0
| 0
| 0
| 0.004292
| 1
| 0.012876
| false
| 0
| 0.072961
| 0
| 0.128755
| 0.038627
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
00372000fecac8f61456e1d5394b1e5bd0e18f9f
| 3,538
|
py
|
Python
|
medtype-trainer/neleval/scripts/merge_evaluations.py
|
vsocrates/medtype
|
16c6f39d38a73c4c44258bbdf78074a81e07b1c7
|
[
"Apache-2.0"
] | 113
|
2015-01-07T14:12:25.000Z
|
2022-01-21T12:23:57.000Z
|
medtype-trainer/neleval/scripts/merge_evaluations.py
|
vsocrates/medtype
|
16c6f39d38a73c4c44258bbdf78074a81e07b1c7
|
[
"Apache-2.0"
] | 27
|
2015-02-02T02:45:38.000Z
|
2018-09-08T10:33:25.000Z
|
medtype-trainer/neleval/scripts/merge_evaluations.py
|
vsocrates/medtype
|
16c6f39d38a73c4c44258bbdf78074a81e07b1c7
|
[
"Apache-2.0"
] | 24
|
2015-02-16T18:26:48.000Z
|
2021-05-25T13:23:53.000Z
|
#!/usr/bin/env python
"""Merge multiple evaluation files into one with prefixed measure names
If directories are given, and --out-dir, will group by filename.
Example usage:
./scripts/merge_evaluations.py --label-re='[^/]+/?$' -x eval_merged -l =TEDL2015_neleval-no1331 --out-dir /tmp/foobar tac15data/TEDL2015_neleval-no1331 $(find tac15data/TEDL2015_neleval-no1331/00filtered/ -type d )
"""
from __future__ import print_function
import argparse
import os
import glob
import collections
import sys
import re
ap = argparse.ArgumentParser(description=__doc__)
ap.add_argument('-o', '--out-dir', default=None)
ap.add_argument('-x', '--out-extension', default=None)
ap.add_argument('-l', '--label', dest='labels', action='append',
type=lambda s: s.split('=', 1))
ap.add_argument('-r', '--label-re', default=None, type=re.compile)
ap.add_argument('--fmt', default='{label}/{{}}')
ap.add_argument('paths', nargs='+')
args = ap.parse_args()
def _swap_ext(name, new_ext):
if new_ext is None:
return name
name, ext = os.path.splitext(name)
return name + '.' + new_ext
nonexist = [path for path in args.paths if not os.path.exists(path)]
if nonexist:
ap.error('Paths do not exist: %r' % nonexist)
is_dir = [os.path.isdir(path) for path in args.paths]
if all(is_dir):
if args.out_dir is None:
ap.error('Must specify --out-dir in path mode')
input_paths = collections.defaultdict(list)
for dir_path in args.paths:
for path in glob.glob(os.path.join(dir_path, '*.evaluation')):
input_paths[os.path.basename(path)].append(path)
outputs = {name: os.path.join(args.out_dir,
_swap_ext(name, args.out_extension))
for name in input_paths}
elif not any(is_dir):
if args.out_dir is not None or args.out_extension is not None:
ap.error('--out-dir and --out-extension not used in files mode; output is STDOUT')
input_paths = {'all': args.paths}
outputs = {'all': sys.stdout}
else:
ap.error('Got mixture of directories (e.g. %r) and files (e.g. %r)' % (args.paths[is_dir.index(True)], args.paths[is_dir.index(False)]))
seen_labels = set()
labels = {src: dst for dst, src in args.labels or []}
def get_label(path):
name = os.path.dirname(path)
if args.label_re:
match = args.label_re.search(name)
if match is not None:
name = match.group()
seen_labels.add(name)
return labels.get(name, name)
for name in input_paths:
fout = outputs[name]
if not hasattr(fout, 'read'):
opened = True
fout = open(fout, 'w')
else:
opened = False
print('Processing', name, 'to', fout.name, file=sys.stderr)
for i, path in enumerate(input_paths[name]):
label = get_label(path)
if label:
fmt = args.fmt.format(label=label)
else:
fmt = '{}'
fmt = '{{}}\t{}'.format(fmt)
with open(path) as fin:
fin = iter(fin)
try:
header = next(fin)
except StopIteration:
print('Found empty file at', path, file=sys.stderr)
continue  # skip the empty file; otherwise header may be undefined below
if i == 0:
fout.write(header)
for l in fin:
l, measure = l.rstrip('\n\r').rsplit('\t', 1)
print(fmt.format(l, measure), file=fout)
if opened:
fout.close()
unseen_labels = set(labels) - seen_labels
if unseen_labels:
print('WARNING: did not see labels %r' % sorted(unseen_labels), file=sys.stderr)
| 34.349515
| 218
| 0.623799
| 511
| 3,538
| 4.209393
| 0.328767
| 0.022315
| 0.036262
| 0.020921
| 0.097629
| 0.039981
| 0.039981
| 0
| 0
| 0
| 0
| 0.012182
| 0.234313
| 3,538
| 102
| 219
| 34.686275
| 0.781838
| 0.110232
| 0
| 0.036145
| 0
| 0.012048
| 0.117479
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.024096
| false
| 0
| 0.084337
| 0
| 0.144578
| 0.060241
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
00373487473f078500f863d310d10f7cf34f3397
| 11,276
|
py
|
Python
|
IQA_BIECON_release/models/BIECON_base.py
|
lionzhnn/IQA_BIECON_release
|
9b9452681460cbd3b670aff62f18c6661a724997
|
[
"MIT"
] | 96
|
2017-07-25T07:54:59.000Z
|
2022-01-09T03:33:07.000Z
|
IQA_BIECON_release/models/BIECON_base.py
|
lionzhnn/IQA_BIECON_release
|
9b9452681460cbd3b670aff62f18c6661a724997
|
[
"MIT"
] | 4
|
2018-04-25T09:46:05.000Z
|
2019-11-08T12:44:39.000Z
|
IQA_BIECON_release/models/BIECON_base.py
|
jongyookim/IQA_BIECON_release
|
9b9452681460cbd3b670aff62f18c6661a724997
|
[
"MIT"
] | 35
|
2017-07-25T02:51:22.000Z
|
2022-02-05T03:05:40.000Z
|
from __future__ import absolute_import, division, print_function
import os
import numpy as np
import theano.tensor as T
from .model_basis import ModelBasis
from .model_record import Record
from ..layers import layers
class Model(ModelBasis):
def __init__(self, model_config, rng=None):
super(Model, self).__init__(model_config, rng)
self.set_configs(model_config)
self.layers['feat'] = []
self.layers['feat_fc'] = []
self.layers['reg_loc'] = []
self.layers['reg_mos'] = []
print('\nBIECON base model')
print(' - Model file: %s' % (os.path.split(__file__)[1]))
self.init_model()
def set_configs(self, model_config):
self.set_opt_configs(model_config)
self.wl_loc = float(model_config.get('wl_loc', 1e2))
self.wl_mos = float(model_config.get('wl_mos', 1e2))
self.wr_l2 = float(model_config.get('wr_l2', 1e-4))
self.dropout = model_config.get('use_dropout', False)
self.update_wrt_loc = model_config.get(
'update_wrt_loc', ['feat', 'feat_fc', 'reg_loc'])
self.update_wrt_iqa = model_config.get(
'update_wrt_iqa', ['feat', 'feat_fc', 'reg_mos'])
def init_model(self):
print(' - Feature conv layers')
cur_key = 'feat'
self.layers[cur_key] = []
# Conv. layers
self.layers[cur_key].append(layers.ConvLayer(
input_shape=self.get_input_shape(),
num_filts=64,
filt_size=(5, 5),
layer_name=cur_key + '/conv1',
activation=layers.relu,
))
self.layers[cur_key].append(layers.Pool2DLayer(
input_shape=self.get_out_shape(cur_key),
pool_size=(2, 2), mode='max'))
self.layers[cur_key].append(layers.ConvLayer(
input_shape=self.get_out_shape(cur_key),
num_filts=64,
filt_size=(5, 5),
layer_name=cur_key + '/conv2',
activation=layers.relu,
))
self.layers[cur_key].append(layers.Pool2DLayer(
input_shape=self.get_out_shape(cur_key),
pool_size=(2, 2), mode='max'))
# Reshaping layer
self.layers[cur_key].append(
layers.TensorToVectorLayer(self.get_out_shape(cur_key)))
# Fully connected layers
cur_key = 'feat_fc'
self.layers[cur_key] = []
self.layers[cur_key].append(layers.FCLayer(
n_in=self.get_out_shape('feat'),
n_out=1024,
layer_name=cur_key + '/fc1',
activation=layers.relu,
))
if self.dropout:
self.layers[cur_key].append(layers.DropoutLayer(p=0.5))
self.layers[cur_key].append(layers.FCLayer(
n_in=self.get_out_shape(cur_key),
n_out=512,
layer_name=cur_key + '/fc2',
activation=layers.relu,
))
if self.dropout:
self.layers[cur_key].append(layers.DropoutLayer(p=0.5))
self.layers[cur_key].append(layers.FCLayer(
n_in=self.get_out_shape(cur_key),
n_out=256,
layer_name=cur_key + '/fc3',
activation=layers.relu,
))
if self.dropout:
self.layers[cur_key].append(layers.DropoutLayer(p=0.5))
self.layers[cur_key].append(layers.FCLayer(
n_in=self.get_out_shape(cur_key),
n_out=128,
layer_name=cur_key + '/fc4',
activation=layers.relu,
))
#######################################################################
print(' - Regression metric layers')
cur_key = 'reg_loc'
self.layers[cur_key] = []
if self.dropout:
self.layers[cur_key].append(layers.DropoutLayer(p=0.5))
self.layers[cur_key].append(layers.FCLayer(
n_in=self.get_out_shape('feat_fc'),
n_out=128,
layer_name=cur_key + '/fc1',
activation=layers.relu,
))
self.layers[cur_key].append(layers.FCLayer(
n_in=self.get_out_shape('feat_fc'),
n_out=1,
layer_name=cur_key + '/fc2',
b_init=np.ones((1,), dtype='float32') * 0.5,
))
#######################################################################
print(' - Regression mos layers')
cur_key = 'reg_mos'
self.layers[cur_key] = []
if self.dropout:
self.layers[cur_key].append(layers.DropoutLayer(p=0.5))
self.layers[cur_key].append(layers.FCLayer(
n_in=self.get_out_shape('feat_fc'),
n_out=128,
layer_name=cur_key + '/fc1',
activation=layers.relu,
))
self.layers[cur_key].append(layers.FCLayer(
n_in=self.get_out_shape(cur_key),
n_out=1,
layer_name=cur_key + '/fc2',
b_init=np.ones((1,), dtype='float32') * 0.5,
))
#######################################################################
super(Model, self).make_param_list()
super(Model, self).show_num_params()
def aggregation_fn(self, feat_vec):
feat_avg = T.mean(feat_vec, axis=0, keepdims=True)
return feat_avg
# feat_std = T.std(feat_vec, axis=0, keepdims=True)
# return T.concatenate([feat_avg, feat_std], axis=1)
def feat_fn(self, x):
out = self.get_key_layers_output(x, 'feat')
return self.get_key_layers_output(out, 'feat_fc')
def regress_loc_fn(self, feat_vec):
return self.get_key_layers_output(feat_vec, 'reg_loc')
def regress_mos_fn(self, feat_vec):
return self.get_key_layers_output(feat_vec, 'reg_mos')
def cost_reg_loc(self, x_c, met_s, n_img=None, bat2img_idx_set=None):
"""Get cost: regression onto local metroc scores
"""
records = Record()
# concatenate the image patches
if bat2img_idx_set:
            # If fixed-size dummy data was given and the current data was written
            # over it, keep only the first n_patches entries (the real data)
n_patches = bat2img_idx_set[n_img - 1][1]
x_c_set = x_c[:n_patches]
met_s_set = met_s[:n_patches]
else:
# if input is current data
x_c_set = x_c
met_s_set = met_s
######################################################################
x_c_im = self.image_vec_to_tensor(x_c_set)
met_s_im = self.image_vec_to_tensor(met_s_set)
feat_vec = self.feat_fn(x_c_im)
met_s_p = self.regress_loc_fn(feat_vec).flatten()
met_s_mean = T.mean(met_s_set, axis=[1, 2, 3])
loc_cost = self.get_cost_mse_mae(met_s_mean, met_s_p)
# regularization
l2_reg = self.get_l2_regularization(
['feat', 'feat_fc', 'reg_loc'], mode='sum')
cost = self.add_all_losses_with_weight(
[loc_cost, l2_reg],
[self.wl_loc, self.wr_l2])
# Parameters to record
records.add_data('loc_mse', self.wl_loc * loc_cost)
records.add_data('l2_reg', self.wr_l2 * l2_reg)
# records.add_im_data('met_s_p', met_s_p_set)
# records.add_im_data('met_s', met_s_set)
records.add_imgs('x_c', x_c_im, caxis=[-0.25, 0.25])
if bat2img_idx_set:
def score_to_img(score, repeat=1):
tmp = score.dimshuffle(0, 'x', 'x', 'x')
tmp = T.extra_ops.repeat(tmp, repeat, axis=2)
return T.extra_ops.repeat(tmp, repeat, axis=3)
met_s_img = score_to_img(met_s_mean, 10)
records.add_imgs('met_s', met_s_img, caxis='auto')
met_s_p_img = score_to_img(met_s_p, 10)
records.add_imgs('met_s_p', met_s_p_img, caxis='auto')
return cost, records
def cost_updates_reg_loc(self, x_c, met_s,
n_img=None, bat2img_idx_set=None):
cost, records = self.cost_reg_loc(
x_c, met_s, n_img=n_img, bat2img_idx_set=bat2img_idx_set)
updates = self.get_updates_keys(cost, self.update_wrt_loc)
return cost, updates, records
def cost_nr_iqa(self, x_c, mos, n_img=None, bat2img_idx_set=None):
records = Record()
# concatenate the image patches
if bat2img_idx_set:
            # If fixed-size dummy data was given and the current data was written
            # over it, keep only the first n_patches entries (the real data)
n_patches = bat2img_idx_set[n_img - 1][1]
x_c_set = x_c[:n_patches]
else:
# if input is current data
x_c_set = x_c
######################################################################
x_c_im = self.image_vec_to_tensor(x_c_set)
# x_c_im = normalize_lowpass_subt(x_c_im, 3)
feat_vec = self.feat_fn(x_c_im)
# get feature vector and concatenate the mos_p set
if bat2img_idx_set:
# if patch based
aggr_feat_list = []
for idx in range(n_img):
idx_from = bat2img_idx_set[idx][0]
idx_to = bat2img_idx_set[idx][1]
cur_feat_vec = feat_vec[idx_from: idx_to]
cur_aggr_feat = self.aggregation_fn(cur_feat_vec)
aggr_feat_list.append(cur_aggr_feat)
aggr_feat = T.concatenate(aggr_feat_list, axis=0).flatten(2)
# aggr_feat = T.stack(aggr_feat_list).flatten()
else:
# aggr_feat = self.regress_mos_fn(feat_vec).flatten()
raise NotImplementedError
######################################################################
# regress onto MOS
mos_p = self.regress_mos_fn(aggr_feat).flatten()
# MOS loss
subj_loss = self.get_cost_mse_mae(mos, mos_p)
# L2 regularization
l2_reg = self.get_l2_regularization(
['feat', 'feat_fc', 'reg_mos'], mode='sum')
cost = self.add_all_losses_with_weight(
[subj_loss, l2_reg],
[self.wl_mos, self.wr_l2])
# Parameters to record
records.add_data('subj', self.wl_mos * subj_loss)
records.add_data('l2_reg', self.wr_l2 * l2_reg)
records.add_im_data('mos_p', mos_p)
records.add_im_data('mos_gt', mos)
records.add_imgs('x_c', x_c_im, caxis=[-0.25, 0.25])
return cost, records
def cost_updates_nr_iqa(self, x_c, mos, n_img=None, bat2img_idx_set=None):
cost, records = self.cost_nr_iqa(
x_c, mos, n_img=n_img, bat2img_idx_set=bat2img_idx_set)
updates = self.get_updates_keys(cost, self.update_wrt_iqa)
return cost, updates, records
def set_training_mode(self, training):
# Decide behaviors of the model during training
# Dropout
self.set_dropout_on(training)
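# Illustrative sketch only (not from the original repository): a minimal model_config
# for this class. Only the keys read in set_configs() above are listed; ModelBasis and
# set_opt_configs() likely expect additional keys (input shape, optimizer settings)
# that are not visible here, so this dict alone may not be sufficient.
example_model_config = {
    'wl_loc': 1e2,            # weight of the local-metric regression loss
    'wl_mos': 1e2,            # weight of the MOS regression loss
    'wr_l2': 1e-4,            # L2 regularization weight
    'use_dropout': True,
    'update_wrt_loc': ['feat', 'feat_fc', 'reg_loc'],
    'update_wrt_iqa': ['feat', 'feat_fc', 'reg_mos'],
}
# model = Model(example_model_config)   # requires the full IQA_BIECON_release package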
| 35.796825
| 80
| 0.55321
| 1,492
| 11,276
| 3.845845
| 0.134048
| 0.046009
| 0.054374
| 0.061345
| 0.641513
| 0.585396
| 0.537121
| 0.513768
| 0.500871
| 0.474033
| 0
| 0.01785
| 0.304452
| 11,276
| 314
| 81
| 35.910828
| 0.713757
| 0.094714
| 0
| 0.492891
| 0
| 0
| 0.049809
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061611
| false
| 0
| 0.033175
| 0.009479
| 0.14218
| 0.028436
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
00388d39e13165d5d68a97cebd954314afba1ff8
| 908
|
py
|
Python
|
pi4_software/Examples/WSClient.py
|
stuckatmarine/srauv-sim
|
30f4bae5d22a4529233ffa2705d7631d048a8130
|
[
"MIT"
] | 1
|
2020-11-01T13:39:42.000Z
|
2020-11-01T13:39:42.000Z
|
pi4_software/Examples/WSClient.py
|
stuckatmarine/srauv-sim
|
30f4bae5d22a4529233ffa2705d7631d048a8130
|
[
"MIT"
] | null | null | null |
pi4_software/Examples/WSClient.py
|
stuckatmarine/srauv-sim
|
30f4bae5d22a4529233ffa2705d7631d048a8130
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# WS client example to test server
import asyncio
import websockets
import json
import time
async def hello():
uri = "ws://localhost:8001"
async with websockets.connect(uri) as websocket:
inp = input("Input msg number? ")
obj = {
"source" : "sim",
"msgNum" : inp,
"msgType" : "telemetry",
"timestamp" : time.strftime("%Y-%m-%d %H:%M.%S"),
"cardDist" : [6.0,7.0,8.0,9.0],
"depth" : 10.0,
"alt" : 11.0,
"assetDistances" :
{
"cage" : 12.0,
"tree1" : 13.0,
"tree2" : 14.0
}
}
msg = json.dumps(obj)
await websocket.send(msg)
print(f"> {msg}")
resp = await websocket.recv()
print(f"< {resp}")
asyncio.get_event_loop().run_until_complete(hello())
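# Illustrative counterpart only (not part of this repository): a minimal local echo
# server the client above could talk to, sketched with the same `websockets` package.
# The (websocket, path) handler signature matches older websockets releases; newer
# releases drop the `path` argument, so adjust to your installed version.
async def echo_server(websocket, path):
    msg = await websocket.recv()          # receive the telemetry JSON sent by hello()
    print(f"server got: {msg}")
    await websocket.send(msg)             # echo it back so hello() prints a response
# To run the server instead of the client:
#   asyncio.get_event_loop().run_until_complete(websockets.serve(echo_server, "localhost", 8001))
#   asyncio.get_event_loop().run_forever()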
| 24.540541
| 61
| 0.480176
| 104
| 908
| 4.153846
| 0.692308
| 0.064815
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.050435
| 0.36674
| 908
| 37
| 62
| 24.540541
| 0.70087
| 0.05837
| 0
| 0
| 0
| 0
| 0.179157
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.137931
| 0
| 0.137931
| 0.068966
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
00389d2165f30aa4a1d559c1fcdd0bbb4c1ad957
| 1,834
|
py
|
Python
|
scripts/trends.py
|
iamDyeus/KnickAI
|
c17d808c949cb3467031498e7252bd2095c04699
|
[
"MIT"
] | 31
|
2021-11-08T18:42:17.000Z
|
2022-03-25T07:45:46.000Z
|
scripts/trends.py
|
iamDyeus/KnickAI
|
c17d808c949cb3467031498e7252bd2095c04699
|
[
"MIT"
] | 6
|
2021-12-20T14:15:44.000Z
|
2022-03-28T16:19:12.000Z
|
scripts/trends.py
|
iamDyeus/KnickAI
|
c17d808c949cb3467031498e7252bd2095c04699
|
[
"MIT"
] | 3
|
2021-11-13T09:38:12.000Z
|
2022-03-25T07:44:17.000Z
|
#NOT YET WORKING PROPERLY
#NOT INTEGRATED WITH THE ASSISTANT YET
from pytrends.request import TrendReq
# Only need to run this once, the rest of requests will use the same session.
pytrend = TrendReq()
def trending_searches():
# Get Google Hot Trends data
trending_searches = pytrend.trending_searches()
print(trending_searches.head())
def todays_trends():
# Get Google Today's Trend data
today_searches = pytrend.today_searches()
print(today_searches.head())
def top_charts():
# Get Google Top Charts
top_charts = pytrend.top_charts(2018, hl='en-US', tz=300, geo='GLOBAL')
print(top_charts.head())
def keyword_suggestions():
# Get Google Keyword Suggestions
kw=input("please enter the keyword you want to search:")
suggestions_dict = pytrend.suggestions(keyword=kw)
if suggestions_dict==[]:
print("No suggestions found for the keyword: " + kw)
else:
print(suggestions_dict)
def console_trends():
print("Available Google Trends Research Options :\n1. Trending Searches\n2. Today's Trends\n3. Top Charts\n4. Keyword Suggestions\n\n type Exit to leave the trends Research console")
while True:
choice = input("\n\nPlease enter your choice: ")
if choice == "1" or choice == "Trending Searches": trending_searches()
elif choice == "2" or choice == "Today's Trends": todays_trends()
elif choice == "3" or choice == "Top Charts":
top_charts()
elif choice == "4" or choice == "Keyword Suggestions": keyword_suggestions()
elif choice == "Exit" or choice == "exit" or choice == "quit": break
else:
print("\n\nInvalid choice, please try again, Dumbass!")
continue
if __name__ == '__main__':
console_trends()
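# Illustrative, non-interactive sketch (not part of the original script): the same
# pytrends calls used above, driven directly without the input() prompts.
def example_batch_report():
    print(pytrend.trending_searches().head())        # current hot searches
    print(pytrend.suggestions(keyword="python"))     # keyword suggestions for "python"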
| 35.960784
| 187
| 0.657034
| 232
| 1,834
| 5.064655
| 0.422414
| 0.061277
| 0.025532
| 0.030638
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010737
| 0.238277
| 1,834
| 51
| 188
| 35.960784
| 0.830351
| 0.134678
| 0
| 0.060606
| 0
| 0.030303
| 0.278431
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.151515
| false
| 0
| 0.030303
| 0
| 0.181818
| 0.212121
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
00394b6eff0f3c670ab13b288147b361f3297dc6
| 4,902
|
py
|
Python
|
Speedo/helpers/convert.py
|
aviskumar/speedo
|
758e8ac1fdeeb0b72c3a57742032ca5c79f0b2fa
|
[
"BSD-3-Clause"
] | null | null | null |
Speedo/helpers/convert.py
|
aviskumar/speedo
|
758e8ac1fdeeb0b72c3a57742032ca5c79f0b2fa
|
[
"BSD-3-Clause"
] | null | null | null |
Speedo/helpers/convert.py
|
aviskumar/speedo
|
758e8ac1fdeeb0b72c3a57742032ca5c79f0b2fa
|
[
"BSD-3-Clause"
] | 3
|
2021-10-12T08:17:01.000Z
|
2021-12-21T01:17:54.000Z
|
import os
import asyncio
import re
import requests
import time
import lottie
import PIL.ImageOps
from os.path import basename
from PIL import Image
from typing import Optional
from .. import LOGS
from ..config import Config
from ..utils.extras import edit_or_reply as eor
from .progress import *
from .runner import runcmd
dwlpath = Config.TMP_DOWNLOAD_DIRECTORY
# conversions are done here...
# make an image
async def convert_to_image(event, bot):
speedo = await event.get_reply_message()
if not (
speedo.gif
or speedo.audio
or speedo.voice
or speedo.video
or speedo.video_note
or speedo.photo
or speedo.sticker
or speedo.media
):
await eor(event, "`Format Not Supported.`")
return
else:
try:
c_time = time.time()
downloaded_file_name = await bot.download_media(
speedo.media,
dwlpath,
progress_callback=lambda d, t: asyncio.get_event_loop().create_task(
progress(d, t, event, c_time, "`Downloading...`")
),
)
except Exception as e: # pylint:disable=C0103,W0703
await eor(event, str(e))
else:
await eor(event,
"Downloaded to `{}` successfully.".format(downloaded_file_name)
)
if not os.path.exists(downloaded_file_name):
await eor(event, "Download Unsucessfull :(")
return
if speedo and speedo.photo:
speedo_final = downloaded_file_name
elif speedo.sticker and speedo.sticker.mime_type == "application/x-tgsticker":
rpath = downloaded_file_name
image_name20 = os.path.join(dwlpath, "omk.png")
cmd = f"lottie_convert.py --frame 0 -if lottie -of png {downloaded_file_name} {image_name20}"
stdout, stderr = (await runcmd(cmd))[:2]
os.remove(rpath)
speedo_final = image_name20
elif speedo.sticker and speedo.sticker.mime_type == "image/webp":
pathofsticker2 = downloaded_file_name
image_new_path = dwlpath + "image.png"
im = Image.open(pathofsticker2)
im.save(image_new_path, "PNG")
if not os.path.exists(image_new_path):
await eor(event, "`Unable To Fetch Shot.`")
return
speedo_final = image_new_path
elif speedo.audio:
omk_p = downloaded_file_name
hmmyes = dwlpath + "semx.mp3"
imgpath = dwlpath + "semxy.jpg"
os.rename(omk_p, hmmyes)
await runcmd(f"ffmpeg -i {hmmyes} -filter:v scale=500:500 -an {imgpath}")
os.remove(omk_p)
if not os.path.exists(imgpath):
await eor(event, "`Unable To Fetch Shot.`")
return
speedo_final = imgpath
elif speedo.gif or speedo.video or speedo.video_note:
omk_p2 = downloaded_file_name
jpg_file = os.path.join(dwlpath, "image.jpg")
await take_screen_shot(omk_p2, 0, jpg_file)
os.remove(omk_p2)
if not os.path.exists(jpg_file):
await eor(event, "`Couldn't Fetch shot`")
return
speedo_final = jpg_file
return speedo_final
async def take_ss(
video_file: str, duration: int, path: str = ""
) -> Optional[str]:
LOGS.info(
"[[[Extracting a frame from %s ||| Video duration => %s]]]",
video_file,
duration,
)
ttl = duration // 2
thumb_image_path = path or os.path.join(dwlpath, f"{basename(video_file)}.jpg")
command = f'''ffmpeg -ss {ttl} -i "{video_file}" -vframes 1 "{thumb_image_path}"'''
err = (await runcmd(command))[1]
if err:
LOGS.error(err)
return thumb_image_path if os.path.exists(thumb_image_path) else None
def tgs_to_gif(sticker_path: str, quality: int = 256) -> str:
semx = os.path.join(dwlpath, "Speedotgs.gif")
with open(semx, 'wb') as t_g:
lottie.exporters.gif.export_gif(lottie.parsers.tgs.parse_tgs(sticker_path), t_g, quality, 1)
os.remove(sticker_path)
return semx
# deal with it...
EMOJI_PATTERN = re.compile(
"["
"\U0001F1E0-\U0001F1FF" # flags (iOS)
"\U0001F300-\U0001F5FF" # symbols & pictographs
"\U0001F600-\U0001F64F" # emoticons
"\U0001F680-\U0001F6FF" # transport & map symbols
"\U0001F700-\U0001F77F" # alchemical symbols
"\U0001F780-\U0001F7FF" # Geometric Shapes Extended
"\U0001F800-\U0001F8FF" # Supplemental Arrows-C
"\U0001F900-\U0001F9FF" # Supplemental Symbols and Pictographs
"\U0001FA00-\U0001FA6F" # Chess Symbols
"\U0001FA70-\U0001FAFF" # Symbols and Pictographs Extended-A
"\U00002702-\U000027B0" # Dingbats
"]+"
)
def deEmojify(inputString: str) -> str:
"""Remove emojis and other non-safe characters from string"""
return re.sub(EMOJI_PATTERN, "", inputString)
# Speedo
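# Illustrative sketch (not part of the original module): deEmojify in isolation. The
# module uses relative imports, so it cannot be run directly as a script; this helper
# only shows the expected behaviour of the regex above.
def _deemojify_demo() -> str:
    # The emoji run is removed, surrounding spaces remain: "video ready  at 5pm"
    return deEmojify("video ready \U0001F600\U0001F680 at 5pm")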
| 33.346939
| 101
| 0.625663
| 611
| 4,902
| 4.870704
| 0.346972
| 0.020161
| 0.054435
| 0.014785
| 0.130376
| 0.079301
| 0.079301
| 0.05914
| 0.031586
| 0.031586
| 0
| 0.047075
| 0.267646
| 4,902
| 146
| 102
| 33.575342
| 0.781894
| 0.077111
| 0
| 0.071429
| 0
| 0
| 0.172774
| 0.067066
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015873
| false
| 0
| 0.119048
| 0
| 0.206349
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
003c4c33a5e695c65aafeb6d426e17f7228d37ef
| 12,953
|
py
|
Python
|
assemblyline_service_server/api/v1/task.py
|
CybercentreCanada/assemblyline-service-server
|
f4fbc7dcab122fa63fcc598db1c23a770c10145f
|
[
"MIT"
] | 6
|
2020-06-29T14:32:24.000Z
|
2022-01-03T19:40:39.000Z
|
assemblyline_service_server/api/v1/task.py
|
CybercentreCanada/assemblyline-service-server
|
f4fbc7dcab122fa63fcc598db1c23a770c10145f
|
[
"MIT"
] | null | null | null |
assemblyline_service_server/api/v1/task.py
|
CybercentreCanada/assemblyline-service-server
|
f4fbc7dcab122fa63fcc598db1c23a770c10145f
|
[
"MIT"
] | 2
|
2021-01-15T18:31:17.000Z
|
2021-05-29T15:57:08.000Z
|
import time
from typing import cast, Dict, Any
from flask import request
from assemblyline.common import forge
from assemblyline.common.constants import SERVICE_STATE_HASH, ServiceStatus
from assemblyline.common.dict_utils import flatten, unflatten
from assemblyline.common.forge import CachedObject
from assemblyline.common.heuristics import HeuristicHandler, InvalidHeuristicException
from assemblyline.common.isotime import now_as_iso
from assemblyline.odm import construct_safe
from assemblyline.odm.messages.service_heartbeat import Metrics
from assemblyline.odm.messages.task import Task as ServiceTask
from assemblyline.odm.models.error import Error
from assemblyline.odm.models.heuristic import Heuristic
from assemblyline.odm.models.result import Result
from assemblyline.odm.models.tagging import Tagging
from assemblyline.remote.datatypes.exporting_counter import export_metrics_once
from assemblyline.remote.datatypes.hash import ExpiringHash
from assemblyline_core.dispatching.client import DispatchClient
from assemblyline_service_server.api.base import make_subapi_blueprint, make_api_response, api_login
from assemblyline_service_server.config import FILESTORE, LOGGER, STORAGE, config
from assemblyline_service_server.helper.heuristics import get_heuristics
status_table = ExpiringHash(SERVICE_STATE_HASH, ttl=60*30)
dispatch_client = DispatchClient(STORAGE)
heuristics = cast(Dict[str, Heuristic], CachedObject(get_heuristics, refresh=300))
heuristic_hander = HeuristicHandler(STORAGE)
tag_safelister = CachedObject(forge.get_tag_safelister,
kwargs=dict(log=LOGGER, config=config, datastore=STORAGE),
refresh=300)
SUB_API = 'task'
task_api = make_subapi_blueprint(SUB_API, api_version=1)
task_api._doc = "Perform operations on service tasks"
@task_api.route("/", methods=["GET"])
@api_login()
def get_task(client_info):
"""
Header:
{'container_id': abcd...123
'service_name': 'Extract',
'service_version': '4.0.1',
'service_tool_version': '
'timeout': '30'}
Result example:
{'keep_alive': true}
"""
service_name = client_info['service_name']
service_version = client_info['service_version']
service_tool_version = client_info['service_tool_version']
client_id = client_info['client_id']
remaining_time = timeout = int(float(request.headers.get('timeout', 30)))
try:
service_data = dispatch_client.service_data[service_name]
except KeyError:
return make_api_response({}, "The service you're asking task for does not exist, try later", 404)
start_time = time.time()
stats = {
"execute": 0,
"cache_miss": 0,
"cache_hit": 0,
"cache_skipped": 0,
"scored": 0,
"not_scored": 0
}
try:
while remaining_time > 0:
cache_found = False
# Set the service status to Idle since we will be waiting for a task
status_table.set(client_id, (service_name, ServiceStatus.Idle, start_time + timeout))
# Getting a new task
task = dispatch_client.request_work(client_id, service_name, service_version, timeout=remaining_time)
if not task:
# We've reached the timeout and no task found in service queue
return make_api_response(dict(task=False))
# We've got a task to process, consider us busy
status_table.set(client_id, (service_name, ServiceStatus.Running, time.time() + service_data.timeout))
stats['execute'] += 1
result_key = Result.help_build_key(sha256=task.fileinfo.sha256,
service_name=service_name,
service_version=service_version,
service_tool_version=service_tool_version,
is_empty=False,
task=task)
# If we are allowed, try to see if the result has been cached
if not task.ignore_cache and not service_data.disable_cache:
# Checking for previous results for this key
result = STORAGE.result.get_if_exists(result_key)
if result:
stats['cache_hit'] += 1
if result.result.score:
stats['scored'] += 1
else:
stats['not_scored'] += 1
result.archive_ts = now_as_iso(config.datastore.ilm.days_until_archive * 24 * 60 * 60)
if task.ttl:
result.expiry_ts = now_as_iso(task.ttl * 24 * 60 * 60)
dispatch_client.service_finished(task.sid, result_key, result)
cache_found = True
if not cache_found:
# Checking for previous empty results for this key
result = STORAGE.emptyresult.get_if_exists(f"{result_key}.e")
if result:
stats['cache_hit'] += 1
stats['not_scored'] += 1
result = STORAGE.create_empty_result_from_key(result_key)
dispatch_client.service_finished(task.sid, f"{result_key}.e", result)
cache_found = True
if not cache_found:
stats['cache_miss'] += 1
else:
stats['cache_skipped'] += 1
if not cache_found:
                # No luck with the cache, let's dispatch the task to a client
return make_api_response(dict(task=task.as_primitives()))
# Recalculating how much time we have left before we reach the timeout
remaining_time = start_time + timeout - time.time()
# We've been processing cache hit for the length of the timeout... bailing out!
return make_api_response(dict(task=False))
finally:
export_metrics_once(service_name, Metrics, stats, host=client_id, counter_type='service')
@task_api.route("/", methods=["POST"])
@api_login()
def task_finished(client_info):
"""
Header:
{'container_id': abcd...123
'service_name': 'Extract',
'service_version': '4.0.1',
'service_tool_version': '
}
Data Block:
{
"exec_time": 300,
"task": <Original Task Dict>,
"result": <AL Result Dict>,
"freshen": true
}
"""
data = request.json
exec_time = data.get('exec_time')
try:
task = ServiceTask(data['task'])
if 'result' in data: # Task created a result
missing_files = handle_task_result(exec_time, task, data['result'], client_info, data['freshen'])
if missing_files:
return make_api_response(dict(success=False, missing_files=missing_files))
return make_api_response(dict(success=True))
elif 'error' in data: # Task created an error
error = data['error']
handle_task_error(exec_time, task, error, client_info)
return make_api_response(dict(success=True))
else:
return make_api_response("", "No result or error provided by service.", 400)
except ValueError as e: # Catch errors when building Task or Result model
return make_api_response("", e, 400)
def handle_task_result(exec_time: int, task: ServiceTask, result: Dict[str, Any], client_info: Dict[str, str],
freshen: bool):
archive_ts = now_as_iso(config.datastore.ilm.days_until_archive * 24 * 60 * 60)
if task.ttl:
expiry_ts = now_as_iso(task.ttl * 24 * 60 * 60)
else:
expiry_ts = None
# Check if all files are in the filestore
if freshen:
missing_files = []
for f in result['response']['extracted'] + result['response']['supplementary']:
cur_file_info = STORAGE.file.get_if_exists(f['sha256'], as_obj=False)
if cur_file_info is None or not FILESTORE.exists(f['sha256']):
missing_files.append(f['sha256'])
else:
cur_file_info['archive_ts'] = archive_ts
cur_file_info['expiry_ts'] = expiry_ts
cur_file_info['classification'] = f['classification']
STORAGE.save_or_freshen_file(f['sha256'], cur_file_info,
cur_file_info['expiry_ts'], cur_file_info['classification'],
is_section_image=f.get('is_section_image', False))
if missing_files:
return missing_files
service_name = client_info['service_name']
client_id = client_info['client_id']
# Add scores to the heuristics, if any section set a heuristic
total_score = 0
for section in result['result']['sections']:
zeroize_on_sig_safe = section.pop('zeroize_on_sig_safe', True)
section['tags'] = flatten(section['tags'])
if section.get('heuristic'):
heur_id = f"{client_info['service_name'].upper()}.{str(section['heuristic']['heur_id'])}"
section['heuristic']['heur_id'] = heur_id
try:
section['heuristic'], new_tags = heuristic_hander.service_heuristic_to_result_heuristic(
section['heuristic'], heuristics, zeroize_on_sig_safe)
for tag in new_tags:
section['tags'].setdefault(tag[0], [])
if tag[1] not in section['tags'][tag[0]]:
section['tags'][tag[0]].append(tag[1])
total_score += section['heuristic']['score']
except InvalidHeuristicException:
section['heuristic'] = None
# Update the total score of the result
result['result']['score'] = total_score
# Add timestamps for creation, archive and expiry
result['created'] = now_as_iso()
result['archive_ts'] = archive_ts
result['expiry_ts'] = expiry_ts
# Pop the temporary submission data
temp_submission_data = result.pop('temp_submission_data', None)
# Process the tag values
for section in result['result']['sections']:
# Perform tag safelisting
tags, safelisted_tags = tag_safelister.get_validated_tag_map(section['tags'])
section['tags'] = unflatten(tags)
section['safelisted_tags'] = safelisted_tags
section['tags'], dropped = construct_safe(Tagging, section.get('tags', {}))
# Set section score to zero and lower total score if service is set to zeroize score
# and all tags were safelisted
if section.pop('zeroize_on_tag_safe', False) and \
section.get('heuristic') and \
len(tags) == 0 and \
len(safelisted_tags) != 0:
result['result']['score'] -= section['heuristic']['score']
section['heuristic']['score'] = 0
if dropped:
LOGGER.warning(f"[{task.sid}] Invalid tag data from {client_info['service_name']}: {dropped}")
result = Result(result)
result_key = result.build_key(service_tool_version=result.response.service_tool_version, task=task)
dispatch_client.service_finished(task.sid, result_key, result, temp_submission_data)
# Metrics
if result.result.score > 0:
export_metrics_once(service_name, Metrics, dict(scored=1), host=client_id, counter_type='service')
else:
export_metrics_once(service_name, Metrics, dict(not_scored=1), host=client_id, counter_type='service')
LOGGER.info(f"[{task.sid}] {client_info['client_id']} - {client_info['service_name']} "
f"successfully completed task {f' in {exec_time}ms' if exec_time else ''}")
def handle_task_error(exec_time: int, task: ServiceTask, error: Dict[str, Any], client_info: Dict[str, str]) -> None:
service_name = client_info['service_name']
client_id = client_info['client_id']
LOGGER.info(f"[{task.sid}] {client_info['client_id']} - {client_info['service_name']} "
f"failed to complete task {f' in {exec_time}ms' if exec_time else ''}")
# Add timestamps for creation, archive and expiry
error['created'] = now_as_iso()
error['archive_ts'] = now_as_iso(config.datastore.ilm.days_until_archive * 24 * 60 * 60)
if task.ttl:
error['expiry_ts'] = now_as_iso(task.ttl * 24 * 60 * 60)
error = Error(error)
error_key = error.build_key(service_tool_version=error.response.service_tool_version, task=task)
dispatch_client.service_failed(task.sid, error_key, error)
# Metrics
if error.response.status == 'FAIL_RECOVERABLE':
export_metrics_once(service_name, Metrics, dict(fail_recoverable=1), host=client_id, counter_type='service')
else:
export_metrics_once(service_name, Metrics, dict(fail_nonrecoverable=1), host=client_id, counter_type='service')
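# Illustrative client sketch only (not part of this service): polling for a task with
# the headers documented in get_task() above. The base URL and any authentication
# required by @api_login() are assumptions; they depend on how the blueprint is
# registered and how the deployment is configured.
import requests  # assumed available; not imported by the module above

def poll_for_task(base_url: str = "http://localhost:5003/api/v1"):
    headers = {
        "container_id": "abcd...123",
        "service_name": "Extract",
        "service_version": "4.0.1",
        "service_tool_version": "",
        "timeout": "30",
    }
    resp = requests.get(f"{base_url}/task/", headers=headers)
    return resp.json()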
| 42.608553
| 119
| 0.63576
| 1,583
| 12,953
| 4.968414
| 0.18446
| 0.032168
| 0.019072
| 0.024031
| 0.31583
| 0.275397
| 0.221742
| 0.188938
| 0.138843
| 0.114558
| 0
| 0.012787
| 0.263414
| 12,953
| 303
| 120
| 42.749175
| 0.81155
| 0.115649
| 0
| 0.211823
| 0
| 0
| 0.123366
| 0.019075
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019704
| false
| 0
| 0.108374
| 0
| 0.17734
| 0.009852
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
003c669ec9b5a285ed0283853c1db390654e252e
| 3,627
|
py
|
Python
|
install/app_store/tk-framework-qtwidgets/v2.6.5/python/search_completer/global_search_result_delegate.py
|
JoanAzpeitia/lp_sg
|
e0ee79555e419dd2ae3a5f31e5515b3f40b22a62
|
[
"MIT"
] | null | null | null |
install/app_store/tk-framework-qtwidgets/v2.6.5/python/search_completer/global_search_result_delegate.py
|
JoanAzpeitia/lp_sg
|
e0ee79555e419dd2ae3a5f31e5515b3f40b22a62
|
[
"MIT"
] | null | null | null |
install/app_store/tk-framework-qtwidgets/v2.6.5/python/search_completer/global_search_result_delegate.py
|
JoanAzpeitia/lp_sg
|
e0ee79555e419dd2ae3a5f31e5515b3f40b22a62
|
[
"MIT"
] | 1
|
2020-02-15T10:42:56.000Z
|
2020-02-15T10:42:56.000Z
|
# Copyright (c) 2017 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
import sgtk
from sgtk.platform.qt import QtCore
from .search_result_delegate import SearchResultDelegate
# import the shotgun_model and view modules from the shotgun utils framework
shotgun_model = sgtk.platform.import_framework("tk-framework-shotgunutils", "shotgun_model")
shotgun_globals = sgtk.platform.import_framework("tk-framework-shotgunutils", "shotgun_globals")
views = sgtk.platform.current_bundle().import_module("views")
class GlobalSearchResultDelegate(SearchResultDelegate):
"""
Delegate which renders search match entries in the global
search completer.
"""
def _render_result(self, widget, model_index):
"""
Renders a result from the model into the provided widget.
:param widget: Widget used to render the result.
:type widget: ``SearchResultWidget``
:param model_index: Index of the item to render.
:type model_index: :class:`~PySide.QtCore.QModelIndex`
"""
from .global_search_completer import GlobalSearchCompleter
icon = shotgun_model.get_sanitized_data(model_index, QtCore.Qt.DecorationRole)
if icon:
thumb = icon.pixmap(512)
widget.set_thumbnail(thumb)
else:
# probably won't hit here, but just in case, use default/empty
# thumbnail
widget.set_thumbnail(self._pixmaps.no_thumbnail)
data = shotgun_model.get_sanitized_data(model_index, GlobalSearchCompleter.SG_DATA_ROLE)
# Example of data stored in the data role:
# {'status': 'vwd',
# 'name': 'bunny_010_0050_comp_v001',
# 'links': ['Shot', 'bunny_010_0050'],
# 'image': 'https://xxx',
# 'project_id': 65,
# 'type': 'Version',
# 'id': 99}
entity_type_display_name = shotgun_globals.get_type_display_name(data["type"])
content = ""
et_url = shotgun_globals.get_entity_type_icon_url(data["type"])
underlined_name = self._underline_search_term(data["name"])
if et_url:
# present thumbnail icon and name
content += "<img src='%s'/> <b style='color: rgb(48, 167, 227)';>%s</b>" % (
et_url, underlined_name
)
else:
            # no icon available; present the name only
content += "%s" % underlined_name
content += "<br>%s" % entity_type_display_name
links = data["links"]
        # note: HumanUser/ClientUser links carry odd data, so ignore them.
if links and links[0] != "" and links[0] != "HumanUser" and links[0] != "ClientUser":
underlined_link = self._underline_search_term(links[1])
# there is a referenced entity
et_url = shotgun_globals.get_entity_type_icon_url(links[0])
if et_url:
# present thumbnail icon and name
content += " on <img align=absmiddle src='%s'/> %s" % (et_url, underlined_link)
else:
                # no icon available; present the type display name with the link name
link_entity_type = links[0]
content += " on %s %s" % (shotgun_globals.get_type_display_name(link_entity_type), underlined_link)
widget.set_text(content)
| 38.585106
| 115
| 0.650675
| 441
| 3,627
| 5.156463
| 0.401361
| 0.036939
| 0.026385
| 0.01759
| 0.240985
| 0.220756
| 0.192612
| 0.159191
| 0.070361
| 0
| 0
| 0.015504
| 0.253102
| 3,627
| 93
| 116
| 39
| 0.82392
| 0.355942
| 0
| 0.135135
| 0
| 0.027027
| 0.109571
| 0.033095
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027027
| false
| 0
| 0.189189
| 0
| 0.243243
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
003e8e2269ee664c19971a3c8f0ad03c45a56847
| 2,485
|
py
|
Python
|
preprocessing/generate_diffs.py
|
windysavage/dfdc_deepfake_challenge
|
d10b54cf933282366157a031954b046d87d57009
|
[
"MIT"
] | null | null | null |
preprocessing/generate_diffs.py
|
windysavage/dfdc_deepfake_challenge
|
d10b54cf933282366157a031954b046d87d57009
|
[
"MIT"
] | null | null | null |
preprocessing/generate_diffs.py
|
windysavage/dfdc_deepfake_challenge
|
d10b54cf933282366157a031954b046d87d57009
|
[
"MIT"
] | null | null | null |
import numpy as np
import cv2
from preprocessing.utils import get_original_with_fakes
from tqdm import tqdm
from multiprocessing.pool import Pool
from functools import partial
# from skimage.measure import compare_ssim
from skimage import metrics
import argparse
import os
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
os.environ["OMP_NUM_THREADS"] = "1"
cv2.ocl.setUseOpenCL(False)
cv2.setNumThreads(0)
cache = {}
def save_diffs(pair, root_dir):
ori_id, fake_id = pair
ori_dir = os.path.join(root_dir, "crops", ori_id)
fake_dir = os.path.join(root_dir, "crops", fake_id)
diff_dir = os.path.join(root_dir, "diffs", fake_id)
os.makedirs(diff_dir, exist_ok=True)
for frame in range(320):
if frame % 10 != 0:
continue
for actor in range(2):
image_id = "{}_{}.png".format(frame, actor)
diff_image_id = "{}_{}_diff.png".format(frame, actor)
ori_path = os.path.join(ori_dir, image_id)
fake_path = os.path.join(fake_dir, image_id)
diff_path = os.path.join(diff_dir, diff_image_id)
# some frames didn't exist...
if os.path.exists(ori_path) and os.path.exists(fake_path):
img1 = cv2.imread(ori_path, cv2.IMREAD_COLOR)
img2 = cv2.imread(fake_path, cv2.IMREAD_COLOR)
try:
d, a = metrics.structural_similarity(
img1, img2, multichannel=True, full=True)
a = 1 - a
diff = (a * 255).astype(np.uint8)
diff = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
cv2.imwrite(diff_path, diff)
except Exception as e:
print(e)
def parse_args():
parser = argparse.ArgumentParser(
description="Extract image diffs")
parser.add_argument("--root-dir", help="root directory",
default="/mnt/sota/datasets/deepfake")
args = parser.parse_args()
return args
def main():
args = parse_args()
pairs = get_original_with_fakes(args.root_dir)
os.makedirs(os.path.join(args.root_dir, "diffs"), exist_ok=True)
with Pool(processes=os.cpu_count() - 2) as p:
with tqdm(total=len(pairs)) as pbar:
func = partial(save_diffs, root_dir=args.root_dir)
for v in p.imap_unordered(func, pairs):
pbar.update()
if __name__ == '__main__':
main()
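# Illustrative single-pair sketch (not part of the original script): the same SSIM
# diff computed for one frame, mirroring the core of save_diffs() above. The file
# paths are placeholders. Typical invocation of the script itself would be:
#   python -m preprocessing.generate_diffs --root-dir /path/to/dfdc_data
def diff_one_frame(ori_path: str, fake_path: str, out_path: str) -> None:
    img1 = cv2.imread(ori_path, cv2.IMREAD_COLOR)
    img2 = cv2.imread(fake_path, cv2.IMREAD_COLOR)
    # full=True returns the per-pixel similarity map alongside the scalar score
    _, a = metrics.structural_similarity(img1, img2, multichannel=True, full=True)
    diff = ((1 - a) * 255).astype(np.uint8)
    cv2.imwrite(out_path, cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY))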
| 31.858974
| 70
| 0.612475
| 337
| 2,485
| 4.299703
| 0.37092
| 0.043478
| 0.048309
| 0.026915
| 0.075914
| 0.048309
| 0.034507
| 0
| 0
| 0
| 0
| 0.017758
| 0.274849
| 2,485
| 77
| 71
| 32.272727
| 0.786349
| 0.027364
| 0
| 0
| 0
| 0
| 0.071665
| 0.011185
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.15
| 0
| 0.216667
| 0.016667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
003fa9552b48b33e85beb33eaf261aef70d7ae40
| 18,905
|
py
|
Python
|
Model/model_v2.py
|
Insper-Data/data_bcg_news
|
49986db18095759adea00bb0dedc149acebb683b
|
[
"MIT"
] | null | null | null |
Model/model_v2.py
|
Insper-Data/data_bcg_news
|
49986db18095759adea00bb0dedc149acebb683b
|
[
"MIT"
] | null | null | null |
Model/model_v2.py
|
Insper-Data/data_bcg_news
|
49986db18095759adea00bb0dedc149acebb683b
|
[
"MIT"
] | null | null | null |
import ast
import community
import datetime
import lightgbm as lgb
import math
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
import pickle
import plotly.express as px
import os
from sklearn.metrics import accuracy_score, confusion_matrix, roc_auc_score
from tqdm import tqdm
from make_boruta import *
class Zeus:
"""
    Class created to build the model
"""
def __init__(self, termo, user, treino_id, test_id):
"""
        Constructor; the model's default information is stored here:
        - User
        - run_id of the dataset
"""
self.term = termo
self.data = datetime.date.today()
self.user = str(user).upper()
self.path_user = ''
self.treino_id = treino_id
self.test_id = test_id
self.var_treino = ''
self.var_teste = ''
self.filtro_local = False
self.filtro_data = False
self.local = ''
self.data_start = ''
self.data_end = ''
self.filtro = ''
self.random_state = 101
self.base_sintetica = ''
self.data_active = False
self.data_local = False
self.mm = ''
self.ids = ''
self.train = ''
self.numero_de_amostras_sinteticas_para_criar = ''
self.porcentagem_para_criacao_de_amostras = ''
self.df_cluster = ''
self.clusters = ''
self.var_teste_original = ''
self.pega_variaveis()
self.agregado = ''
self.df_agregado = ''
self.informacoes = ''
self.sentimento = ''
self.data_df = ''
self.load_df = ''
def pega_path_user(self):
"""
        Method that gets the path according to the user who initialized the class
"""
os.chdir(os.path.dirname(
r'C:\Users\wilgn\Desktop\Faculdade\3° Semestre\Insper Data\Projeto\Git projeto\Data_BCG_News\Model\\'))
path_atual = os.getcwd()
#print(os.listdir())
if self.user == 'WILGNER':
path_aux_funcs = path_atual.replace('Model', r'aux_funcs\\')
else:
path_aux_funcs = path_atual.replace('Model', r'aux_funcs/')
os.chdir(os.path.dirname(path_aux_funcs))
#print(os.listdir())
with open('set_path.py', 'r') as arquivo_path:
ler_arquivo = arquivo_path.read()
dicionario = ast.literal_eval(ler_arquivo)
lista_users = list(dicionario.keys())
if self.user in lista_users:
print('USUARIO VALIDO !')
self.path_user = dicionario[self.user]
else:
raise TypeError(
'O USUARIO SELECIONADO NÃO TEM UM ENDEREÇO VALIDO CADASTRADO')
os.chdir(os.path.dirname(
r'C:\Users\wilgn\Desktop\Faculdade\3° Semestre\Insper Data\Projeto\Git projeto\Data_BCG_News\Model\\'))
#print(os.listdir())
# arquivo_path.close()
def valida_acesso_path_user(self):
self.pega_path_user()
try:
os.path.exists(self.path_user)
print('PATH VALIDO PARA ACESSO')
except:
raise TypeError('IMPOSSIVEL ACESSAR O PATH')
def pega_variaveis(self, teste=False, load_df=False, file_name_load=False):
if teste:
path_name = f'{self.path_user}Variables/{self.term}/{self.test_id}.parquet'
os.chdir(os.path.dirname(path_name))
self.var_teste = pd.read_parquet(os.path.basename(path_name))
elif load_df:
path_name = f'{self.path_user}Model/{file_name_load}'
os.chdir(os.path.dirname(path_name))
self.load_df = pd.read_parquet(os.path.basename(path_name))
else:
self.valida_acesso_path_user()
path_name = f'{self.path_user}Variables/{self.term}/{self.treino_id}.parquet'
os.chdir(os.path.dirname(path_name))
self.var_treino = pd.read_parquet(os.path.basename(path_name))
def seleciona_filtros(self, local=False, data_start=False, data_end=False):
"""
        isinstance checks whether a filter was requested for any of the variables
"""
estado_local = isinstance(local, bool)
estado_data_start = isinstance(data_start, bool)
if not estado_local:
self.local = local
self.filtro_local = True
if not estado_data_start:
self.data_start = data_start
self.data_end = data_end
self.filtro_data = True
def construir_filtro(self, teste=False):
self.filtro = ''
if not teste:
if self.filtro_local and self.filtro_data:
self.filtro = (self.var_treino.sigla == self.local.upper())
self.data_local = True
self.data_active = True
elif self.filtro_data and not self.filtro_local:
self.data_active = True
elif self.filtro_local and not self.filtro_data:
self.filtro = (self.var_treino.sigla == self.local.upper())
else:
if self.filtro_local and self.filtro_data:
self.filtro = (self.var_teste.sigla == self.local.upper())
self.data_local = True
self.data_active = True
elif self.filtro_data and not self.filtro_local:
self.data_active = True
elif self.filtro_local and not self.filtro_data:
self.filtro = (self.var_teste.sigla == self.local.upper())
def filtrar_treino(self, local=False, data_start=False, data_end=False):
self.seleciona_filtros(
local=local, data_start=data_start, data_end=data_end)
self.construir_filtro()
self.var_treino.data = pd.to_datetime(self.var_treino.data)
if self.data_active and self.data_local:
self.var_treino = self.var_treino[self.filtro]
self.var_treino = self.var_treino[(self.var_treino.data > self.data_start) & (
self.var_treino.data < self.data_end)]
elif self.data_active and not self.data_local:
self.var_treino = self.var_treino[
(self.var_treino.data > self.data_start) & (self.var_treino.data < self.data_end)]
else:
self.var_treino = self.var_treino[self.filtro]
def filtrar_teste(self, local=False, data_start=False, data_end=False):
self.seleciona_filtros(
local=local, data_start=data_start, data_end=data_end)
self.construir_filtro(teste=True)
self.var_teste.data = pd.to_datetime(self.var_teste.data)
if self.data_active and self.data_local:
self.var_teste = self.var_teste[self.filtro]
self.var_teste = self.var_teste[
(self.var_teste.data > self.data_start) & (self.var_teste.data < self.data_end)]
elif self.data_active and not self.data_local:
self.var_teste = self.var_teste[
(self.var_teste.data > self.data_start) & (self.var_teste.data < self.data_end)]
else:
self.var_teste = self.var_teste[self.filtro]
def criar_base_sintetica(self, numero_de_amostras=3, porcentagem_para_criacao=.25):
bases_sinteticas = []
self.numero_de_amostras_sinteticas_para_criar = numero_de_amostras
self.porcentagem_para_criacao_de_amostras = porcentagem_para_criacao
colunas_pro_drop = ['unique_identifier', 'sigla', 'data']
for i in range(numero_de_amostras):
unique_identifier = self.var_treino['unique_identifier']
df_com_drop = self.var_treino.drop(columns=colunas_pro_drop)
df_com_colunas_sorteadas = df_com_drop.sample(
frac=porcentagem_para_criacao, replace=True, random_state=self.random_state, axis=1)
amostra = pd.concat(
[unique_identifier, df_com_colunas_sorteadas], axis=1)
amostra_sintetica = pd.DataFrame()
amostra = amostra.loc[:, ~amostra.columns.duplicated()]
for coluna in amostra.columns.tolist():
amostra_sintetica[coluna] = amostra[coluna].sample(frac=1, replace=True,
random_state=self.random_state).tolist()
amostra_sintetica['label'] = 1
amostra['label'] = 0
amostra_concluida = pd.concat([amostra, amostra_sintetica])
amostra_concluida.reset_index(inplace=True, drop=True)
bases_sinteticas.append(amostra_concluida)
self.base_sintetica = bases_sinteticas
def treina_lightGBM(self, boruta_percs=[10], thr_bor_good=.5, thr_bor_ok=.9):
numero_de_amostras = len(self.base_sintetica)
x_list = []
y_list = []
col_lists = []
model_list = []
trained_models = []
dfs = self.base_sintetica
for i in range(numero_de_amostras):
numero_de_colunas = dfs[i].shape[1]
self.Y = dfs[i]['label']
self.X = dfs[i].drop(columns=['unique_identifier', 'label'])
self.take_out_cols_0 = []
self.take_out_cols = []
self.full_cols = self.X.columns.tolist()
self.thr_bor_good = thr_bor_good
self.thr_bor_ok = thr_bor_ok
self.boruta_percs = boruta_percs
self.boruta_res = boruta_select(
X_df=self.X[[
col for col in self.full_cols if col not in self.take_out_cols]],
Y=self.Y, perc_list=self.boruta_percs, allowed_perc_good=self.thr_bor_good,
allowed_perc_med=self.thr_bor_ok)
self.take_out_cols_irrelevant = self.boruta_res[0].loc[~self.boruta_res[0]['use']].index.tolist(
)
self.take_out_cols += self.take_out_cols_irrelevant
self.use_cols = self.X[[col for col in self.X.columns.tolist(
) if col not in self.take_out_cols]].columns.tolist()
y_list += [dfs[i]['label'].values]
x_list += [dfs[i].drop(columns=['unique_identifier', 'label'])]
col_lists += [self.use_cols]
model_list += [{'type': 'LGBM',
'params': {'num_leaves': 25, 'n_estimators': 300, 'boosting_type': 'rf',
'bagging_fraction': .8, 'bagging_freq': 1, 'random_state': self.random_state}}]
        # Training the model
for (model, x, y, cols) in zip(model_list, x_list, y_list, col_lists):
X = x[cols]
Y = y
if model['type'] == 'LGBM':
model_to_train = lgb.LGBMClassifier(**model['params'])
trained_models += [model_to_train.fit(X=X.values, y=Y)]
self.models = trained_models
self.rf_models = self.models
self.col_lists = col_lists
def coleta_folhas(self, porcentagem_do_sample=0.1):
self.df_random = self.var_treino.sample(
frac=porcentagem_do_sample, replace=True, random_state=self.random_state, axis=0).copy()
print(self.df_random.shape)
# frame_list = []
model_c = 0
self.mm = set()
self.ids = self.df_random['unique_identifier'].tolist()
print('start with list values')
        # Collecting the leaf outputs of the model
for (model, cols) in zip(self.rf_models, self.col_lists):
if cols == 'label':
continue
else:
raw_leafs = model.predict(
self.df_random[cols].values, pred_leaf=True)
# return raw_leafs
if model_c == 0:
full_leafs = raw_leafs
else:
full_leafs = np.concatenate(
(full_leafs, raw_leafs), axis=1)
model_c += 1
self.raw = raw_leafs
def criando_matriz_de_similaridade(self, porcentagem_do_sample=0.1):
self.porcentagem_para_matriz = porcentagem_do_sample
self.coleta_folhas(porcentagem_do_sample=porcentagem_do_sample)
print('CRIANDO EDGES')
edges = []
        # Building the similarity matrix
for cc1, i in tqdm(enumerate(self.raw), 'FOLHAS:'):
if cc1 % 100 == 0:
print(cc1, datetime.datetime.now())
for cc2_, j in enumerate(self.raw[cc1 + 1:]):
cc2 = cc2_ + cc1 + 1
if (cc1, cc2) not in self.mm and (cc2, cc1) not in self.mm:
leaf_count = sum(i == j)
# TODO: Fix similarity matrix with the square root
edges += [(self.ids[cc1], self.ids[cc2],
math.sqrt(leaf_count / len(self.raw[0])))]
self.mm.add((cc1, cc2))
print('done with list values')
# YOU ARE HERE
G = nx.Graph()
G.add_weighted_edges_from(edges)
self.G = G
def rodando_louvain(self, porcentagem_do_sample):
self.criando_matriz_de_similaridade(
porcentagem_do_sample=porcentagem_do_sample)
self.clusters = (community.best_partition(
self.G, weight='weight', randomize=True))
def desenha_cluster_no_edges(self):
plt.figure(figsize=(12, 8), dpi=150)
plt.title('Louvain Tets', fontsize=20, loc='left', pad=15)
self.pos = nx.spring_layout(self.G)
nx.draw_networkx_nodes(self.G, self.pos, self.clusters.keys(), node_size=150,
node_color=list(self.clusters.values()))
plt.show()
def classifica_agrupamento(self, boruta_percs=[10], thr_bor_good=.5, thr_bor_ok=.9, take_out_cols=False):
self.df_cluster = pd.DataFrame({'Rotulo': self.clusters.keys(),
'Label': self.clusters.values()})
print(f'Tamanho dos dados de cluster {self.df_cluster.shape}')
self.train = self.var_treino[self.var_treino['unique_identifier'].isin(
self.df_cluster.Rotulo.values.tolist())]
self.train['label'] = self.df_cluster.Label.values.tolist()
self.train.reset_index(drop=True)
colunas_pro_drop = ['unique_identifier', 'sigla', 'data', 'artigo_original']
self.var_teste_original = self.var_teste
self.sentimento = self.var_teste_original['sentimento']
self.data_df = self.var_teste_original['data']
self.var_teste = self.var_teste.drop(columns=colunas_pro_drop)
self.train = self.train.drop(columns=['sigla', 'data', 'artigo_original'])
self.var_teste.reset_index(drop=True)
self.train.reset_index(drop=True)
print(f'Tamanho dos dados de treinamento {self.train.shape}')
print(f'Tamanho dos dados de teste {self.var_teste.shape}')
self.x_list = []
self.y_list = []
self.col_lists = []
model_list = []
trained_models = 0
self.Y = self.train['label']
self.X = self.train.drop(columns=['unique_identifier', 'label'])
self.take_out_cols_0 = []
self.take_out_cols = []
self.full_cols = self.X.columns.tolist()
self.thr_bor_good = thr_bor_good
self.thr_bor_ok = thr_bor_ok
self.boruta_percs = boruta_percs
self.boruta_res = boruta_select(X_df=self.X[[col for col in self.full_cols if col not in self.take_out_cols]],
Y=self.Y, perc_list=self.boruta_percs, allowed_perc_good=self.thr_bor_good,
allowed_perc_med=self.thr_bor_ok)
self.take_out_cols_irrelevant = self.boruta_res[0].loc[~self.boruta_res[0]['use']].index.tolist(
)
self.take_out_cols += self.take_out_cols_irrelevant
self.use_cols = self.X[[col for col in self.X.columns.tolist(
) if col not in self.take_out_cols]].columns.tolist()
if len(self.use_cols) < 1:
self.use_cols = self.X.columns.tolist()
self.y_list += [self.train['label'].values]
self.x_list += [self.train.drop(columns=['unique_identifier', 'label'])]
self.col_lists += [self.use_cols]
model_list += [{'type': 'LGBM',
'params': {'num_leaves': 30, 'n_estimators': 500, 'boosting_type': 'rf',
'bagging_fraction': .8, 'bagging_freq': 1, 'random_state': self.random_state}}]
for (model, x, y, cols) in zip(model_list, self.x_list, self.y_list, self.col_lists):
X = x[cols]
print(X.shape)
Y = y
print(Y.shape)
if model['type'] == 'LGBM':
model_to_train = lgb.LGBMClassifier(**model['params'])
trained_models = model_to_train.fit(X=X.values, y=Y)
self.models = trained_models
self.previsão = trained_models.predict(
self.var_teste[self.col_lists[0]])
self.resultado = self.previsão
self.var_teste['label'] = self.resultado
self.faz_agregacao()
print('FREQUENCIA CLUSTER')
print(self.df_cluster.Label.value_counts(sort=False))
print('********************')
print('FREQUENCIA CLASSIFICADO')
print(self.var_teste.label.value_counts(sort=False))
def plota_palavras_maiores(self, numero):
for i in range(len(self.var_teste.label.unique())):
df_data = pd.DataFrame({'word': self.var_teste[self.var_teste.label == i].drop(
columns=['label']).sum(axis=0).nlargest(numero).index.tolist(),
'value': self.var_teste[self.var_teste.label == i].drop(
columns=['label']).sum(axis=0).nlargest(
numero).values.tolist()})
fig = px.bar(df_data, x='word', y='value', color='value', color_continuous_scale='Blues')
fig.show()
def salva_parametros(self):
self.informacoes = {
'user': self.user,
'data': self.data,
'run_id_treino': self.treino_id,
'run_id_teste': self.test_id,
'path_user': self.path_user,
'filtro_nome': self.term,
'filtro_data': self.data,
'filtro_local': self.local,
'numero_de_amostras_bases_sinteticas': self.numero_de_amostras_sinteticas_para_criar,
'porcentagem_para_criacao_de_amostras': self.porcentagem_para_criacao_de_amostras,
'porcentagem_para_matriz': self.porcentagem_para_matriz
}
def faz_agregacao(self):
        # Aggregate the results
self.var_teste_original['label'] = self.var_teste['label']
self.agregado = self.var_teste_original[['unique_identifier', 'sigla', 'data', 'label']]
self.df_agregado = pd.crosstab(self.agregado.sigla, self.agregado.label, normalize='index')
return self.df_agregado
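# Illustrative end-to-end sketch only (not part of the original file): the order in
# which the Zeus methods above appear intended to run. The term, user and run ids are
# placeholders, and the user must be registered in aux_funcs/set_path.py for
# pega_path_user() to succeed.
def _example_pipeline():
    zeus = Zeus(termo="greve", user="WILGNER", treino_id="run_001", test_id="run_002")
    zeus.filtrar_treino(data_start="2020-01-01", data_end="2020-06-30")
    zeus.criar_base_sintetica(numero_de_amostras=3, porcentagem_para_criacao=0.25)
    zeus.treina_lightGBM()
    zeus.rodando_louvain(porcentagem_do_sample=0.1)
    zeus.pega_variaveis(teste=True)          # load the test variables before classifying
    zeus.classifica_agrupamento()
    return zeus.df_agregado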
| 41.187364
| 118
| 0.598678
| 2,394
| 18,905
| 4.477026
| 0.155806
| 0.03984
| 0.042545
| 0.020899
| 0.509237
| 0.474156
| 0.426759
| 0.391771
| 0.358649
| 0.334018
| 0
| 0.006843
| 0.288865
| 18,905
| 458
| 119
| 41.277293
| 0.790241
| 0.029992
| 0
| 0.256906
| 0
| 0.005525
| 0.087622
| 0.023428
| 0
| 0
| 0
| 0.00655
| 0
| 1
| 0.049724
| false
| 0
| 0.041436
| 0
| 0.096685
| 0.046961
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0044e564b42a943f2371101ac16d4cb0e1aee8d7
| 42,840
|
py
|
Python
|
roosterize/data/DataMiner.py
|
EngineeringSoftware/roosterize
|
2990f7bdef8889045a26f3e9aaaca96d9c92e0bc
|
[
"MIT"
] | 16
|
2020-06-05T20:01:56.000Z
|
2022-02-09T16:10:09.000Z
|
roosterize/data/DataMiner.py
|
EngineeringSoftware/roosterize
|
2990f7bdef8889045a26f3e9aaaca96d9c92e0bc
|
[
"MIT"
] | 6
|
2020-07-02T15:22:36.000Z
|
2020-12-16T13:04:16.000Z
|
roosterize/data/DataMiner.py
|
EngineeringSoftware/roosterize
|
2990f7bdef8889045a26f3e9aaaca96d9c92e0bc
|
[
"MIT"
] | 3
|
2020-07-21T17:37:51.000Z
|
2020-12-10T05:36:32.000Z
|
from typing import *
import collections
import copy
import hashlib
import math
import numpy as np
from pathlib import Path
import random
import re
from tqdm import tqdm
import traceback
import sys
from seutil import LoggingUtils, IOUtils, BashUtils
from seutil.project import Project
from roosterize.data.CoqDocument import CoqDocument
from roosterize.FilesManager import FilesManager
from roosterize.data.Definition import Definition
from roosterize.data.Lemma import Lemma
from roosterize.data.LemmaBackendSexpTransformers import LemmaBackendSexpTransformers
from roosterize.data.LemmaForeendSexpTransformers import LemmaForeendSexpTransformers
from roosterize.Environment import Environment
from roosterize.Macros import Macros
from roosterize.parser.CoqParser import CoqParser
from roosterize.parser.ParserUtils import ParserUtils
from roosterize.parser.SexpAnalyzer import SexpAnalyzer, SexpInfo
from roosterize.sexp import *
from roosterize.Utils import Utils
class DataMiner:
logger = LoggingUtils.get_logger(__name__, LoggingUtils.DEBUG)
from roosterize.Debug import Debug
if Debug.is_debug: logger.setLevel(LoggingUtils.DEBUG)
Project.set_downloads_dir(Macros.downloads_dir)
TASK_COQ_DOCUMENTS = FilesManager.COQ_DOCUMENTS # "coq-documents"
TASK_DATA_INDEXES = FilesManager.DATA_INDEXES # "data-indexes"
TASK_DEFINITIONS = FilesManager.DEFINITIONS # "definitions"
TASK_INSTALL_COQ_PROJECTS = "install-coq-projects"
TASK_LEMMA = FilesManager.LEMMAS # "lemmas"
TASK_LEMMA_BACKEND_SEXP_TRANSFORMATIONS = FilesManager.LEMMAS_BACKEND_SEXP_TRANSFORMATIONS # "lemmas-bsexp-transformations"
TASK_LEMMA_FILTERED = FilesManager.LEMMAS_FILTERED # "lemmas-filtered"
TASK_LEMMA_FOREEND_SEXP_TRANSFORMATIONS = FilesManager.LEMMAS_FOREEND_SEXP_TRANSFORMATIONS # "lemmas-fsexp-transformations"
dataset_dir = Macros.project_dir.parent / "math-comp-corpus"
@classmethod
def collect_data(cls, **options) -> NoReturn:
data_mgr = FilesManager(cls.dataset_dir)
task = options["task"]
projects_path = Path(options.get("corpus", cls.dataset_dir / "projects-standalone-8.10.yml"))
projects: List[Project] = IOUtils.dejsonfy(IOUtils.load(projects_path, "json"), Project)
if task == cls.TASK_COQ_DOCUMENTS:
files = Utils.get_option_as_list(options, "files", None)
is_verifying_tokenizer = Utils.get_option_as_boolean(options, "verify-tokenizer")
cls.collect_coq_documents_projects(data_mgr, projects, files, is_verifying_tokenizer)
elif task == cls.TASK_DATA_INDEXES:
cls.collect_data_indexes(data_mgr, projects)
elif task == cls.TASK_DEFINITIONS:
cls.collect_definitions(data_mgr)
elif task == cls.TASK_INSTALL_COQ_PROJECTS:
cls.install_coq_projects(projects)
elif task == cls.TASK_LEMMA:
files = Utils.get_option_as_list(options, "files", None)
cls.collect_lemmas(data_mgr, projects, files)
elif task == cls.TASK_LEMMA_BACKEND_SEXP_TRANSFORMATIONS:
cls.collect_lemmas_backend_sexp_transformations(data_mgr)
elif task == cls.TASK_LEMMA_FILTERED:
cls.filter_lemmas(data_mgr)
elif task == cls.TASK_LEMMA_FOREEND_SEXP_TRANSFORMATIONS:
cls.collect_lemmas_foreend_sexp_transformations(data_mgr)
else:
LoggingUtils.log_and_raise(cls.logger, f"Unknown task {task}", ValueError)
# end if
return
@classmethod
def collect_coq_documents_projects(cls,
data_mgr: FilesManager,
projects: List[Project],
files: List[str] = None,
is_verifying_tokenizer: bool = False,
) -> NoReturn:
# Prepare the used directories (coq-documents, raw-files, original-files)
for rel_path in [
[FilesManager.COQ_DOCUMENTS],
[FilesManager.RAW_FILES],
[FilesManager.ORIGINAL_FILES],
]:
data_mgr.clean_path(rel_path)
data_mgr.resolve(rel_path).mkdir(parents=True)
# end for
coq_documents: List[CoqDocument] = list()
names_projects = {p.full_name: p for p in projects}
for i, project in enumerate(projects):
try:
cls.logger.info(f"Project {i + 1}/{len(projects)}: {project.full_name}")
coq_documents_project = cls.collect_coq_documents_project(data_mgr, project, names_projects=names_projects, files=files, is_verifying_tokenizer=is_verifying_tokenizer)
except KeyboardInterrupt:
raise
except:
cls.logger.warning(f"Error while processing project {project.full_name}: {traceback.format_exc()}")
continue
else:
coq_documents.extend(coq_documents_project)
# end try
# end for
# Save datasets
data_mgr.dump_data([FilesManager.COQ_DOCUMENTS, FilesManager.COQ_DOCUMENTS], coq_documents, IOUtils.Format.json, is_batched=True)
return
@classmethod
def load_coq_documents(cls, data_mgr: FilesManager) -> List[CoqDocument]:
return data_mgr.load_data([FilesManager.COQ_DOCUMENTS, FilesManager.COQ_DOCUMENTS], IOUtils.Format.json, is_batched=True, clz=CoqDocument)
@classmethod
def collect_coq_documents_project(cls,
data_mgr: FilesManager,
project: Project,
names_projects: Dict[str, Project],
files: List[str] = None,
is_verifying_tokenizer: bool = False,
) -> List[CoqDocument]:
coq_documents: List[CoqDocument] = list()
# Clone and checkout repo
project.clone()
project.checkout(project.data["sha"], is_forced=True)
# Build the project
cls.install_coq_project(project, names_projects)
# For each file, parse code to tokens
with IOUtils.cd(project.checkout_dir):
coq_files: List[str] = BashUtils.run(f"find -name '*.v' -type f").stdout.split("\n")[:-1]
if files is not None:
coq_files = [f for f in coq_files if f[2:] in files] # [2:] is to remove the ./
# end if
re_ignore_path = re.compile(project.data["ignore_path_regex"]) if "ignore_path_regex" in project.data else None
for i, coq_file in enumerate(coq_files):
try:
coq_file = coq_file[2:]
cls.logger.debug(f"File {i + 1}/{len(coq_files)}: {coq_file}")
# Check if file is ignored
if re_ignore_path is not None and re_ignore_path.fullmatch(coq_file):
cls.logger.info(f"Ignoring file {coq_file}")
continue
# end if
# Read file
with open(coq_file, "r", newline="") as f:
source_code = f.read()
# end with
# Get unicode offsets
unicode_offsets = ParserUtils.get_unicode_offsets(source_code)
# Save original file to original_files
data_mgr.dump_data([FilesManager.ORIGINAL_FILES, project.full_name, coq_file], source_code, IOUtils.Format.txt)
# Call SerAPI
serapi_options = project.data.get("serapi_options", "")
ast_sexp_str: str = BashUtils.run(f"sercomp {serapi_options} --mode=sexp -- {coq_file}", expected_return_code=0).stdout
tok_sexp_str: str = BashUtils.run(f"sertok {serapi_options} -- {coq_file}", expected_return_code=0).stdout
# Save ast sexp to dataset (.ast.sexp)
data_mgr.dump_data([FilesManager.RAW_FILES, project.full_name, coq_file[:-2] + ".ast.sexp"], ast_sexp_str, IOUtils.Format.txt)
# Save tok sexp to dataset (.tok.sexp)
data_mgr.dump_data([FilesManager.RAW_FILES, project.full_name, coq_file[:-2] + ".tok.sexp"], tok_sexp_str, IOUtils.Format.txt)
# Parse ast sexp
ast_sexp_list: List[SexpNode] = SexpParser.parse_list(ast_sexp_str)
tok_sexp_list: List[SexpNode] = SexpParser.parse_list(tok_sexp_str)
# Verify the tokenizer if requested
if is_verifying_tokenizer:
if not cls.verify_tokenizer(tok_sexp_list, source_code, unicode_offsets):
LoggingUtils.log_and_raise(cls.logger, "Tokenized content doesn't match original file!", Exception)
# end if
# end if
# Parse the document
coq_document = CoqParser.parse_document(source_code, ast_sexp_list, tok_sexp_list, unicode_offsets=unicode_offsets)
# Save the parsed document (printed format) to raw_files
data_mgr.dump_data([FilesManager.RAW_FILES, project.full_name, coq_file], coq_document.str_with_space(), IOUtils.Format.txt)
# Set meta data
coq_document.file_name = coq_file
coq_document.project_name = project.full_name
coq_document.revision = project.revision
coq_documents.append(coq_document)
except KeyboardInterrupt:
cls.logger.warning("Keyboard interrupt!")
raise
except:
cls.logger.warning(f"File {coq_file} failed! Exception was: {traceback.format_exc()}")
continue
# end try
# end for
# end with
return coq_documents
@classmethod
def verify_tokenizer(cls, tok_sexp_list: List[SexpNode], source_code: str, unicode_offsets: List[int]) -> bool:
sertok_sentences = SexpAnalyzer.analyze_sertok_sentences(tok_sexp_list, unicode_offsets)
vernac_sentences = CoqParser.parse_sertok_sentences(sertok_sentences, source_code)
code_i = 0
has_error: bool = False
for sent_i, sentence in enumerate(vernac_sentences):
for token_i, token in enumerate(sentence.tokens):
# Check space/comment
if token.beg_charno != code_i:
if not ParserUtils.is_ws_or_comment(source_code[code_i:token.beg_charno]):
cls.logger.error(f"Unresolved characters at charno {code_i} to {token.beg_charno}; next expect token {token.content} beginning at charno {token.beg_charno} (lineno {token.lineno}); file content {source_code[code_i:token.beg_charno]};")
cls.logger.error(f"assotiated sexp: \n{tok_sexp_list[sent_i][1][token_i].pretty_format()}")
has_error = True
# end if
# end if
# Check token
code_i = token.beg_charno
if token.content != source_code[code_i:token.end_charno]:
cls.logger.error(f"Mismatch token at charno {code_i} to {token.end_charno}; expect token {token.content} beginning at charno {token.beg_charno} (lineno {token.lineno}); file content {source_code[code_i:token.end_charno]};")
cls.logger.error(f"assotiated sexp: \n{tok_sexp_list[sent_i][1][token_i].pretty_format()}")
has_error = True
# end if
code_i = token.end_charno
# end for, for
# Check space/comment at end of file
if code_i != len(source_code):
if not ParserUtils.is_ws_or_comment(source_code[code_i:len(source_code)]):
cls.logger.error(f"Unresolved characters at charno {code_i} to {len(source_code)} (end of file); file content {source_code[code_i:len(source_code)]}")
has_error = True
# end if
# end if
return not has_error
@classmethod
def install_coq_projects(cls, projects: List[Project]) -> None:
names_projects = {p.full_name: p for p in projects}
for i, p in enumerate(projects):
cls.logger.info(f"Installing {p.full_name} ({i}/{len(projects)})")
cls.install_coq_project(p, names_projects)
# end for
return
@classmethod
def install_coq_project(cls, project: Project, names_projects: Dict[str, Project]) -> None:
"""
:requires: the project is cloned and checked-out to the desired version.
"""
if not project.is_cloned:
project.clone()
project.checkout(project.data["sha"], is_forced=True)
# end if
# Check if the project is already compiled
confirmation_file = "lpc-installed.txt"
confirmation_content = project.revision + " " + BashUtils.run("opam list coq -s", expected_return_code=0).stdout.strip()
if (project.checkout_dir/confirmation_file).is_file() and IOUtils.load(project.checkout_dir/confirmation_file, "txt") == confirmation_content:
cls.logger.debug(f"Project {project.full_name} already installed")
return
# end if
project.clean()
# Install dependencies
for dependency in project.data.get("dependencies", []):
dependency_project = names_projects.get(dependency)
if dependency_project is None: raise Exception(f"Cannot find dependency {dependency}")
cls.logger.info(f"For Project {project.full_name}, installing dependency {dependency}")
cls.install_coq_project(dependency_project, names_projects)
# end for
if "build_cmd" not in project.data: raise Exception(f"Project {project.full_name} does not have build_cmd")
if "install_cmd" not in project.data: raise Exception(f"Project {project.full_name} does not have install_cmd")
with IOUtils.cd(project.checkout_dir):
# Build
cls.logger.info(f"Project {project.full_name}: Building with {project.data['build_cmd']}")
r = BashUtils.run(project.data["build_cmd"])
if r.return_code != 0:
raise Exception(f"Compilation failed! Return code is {r.return_code}! stdout:\n{r.stdout}\n; stderr:\n{r.stderr}")
else:
cls.logger.debug(f"Compilation finished. Return code is {r.return_code}. stdout:\n{r.stdout}\n; stderr:\n{r.stderr}")
# end if
# Install
cls.logger.info(f"Project {project.full_name}: Installing with {project.data['install_cmd']}")
r = BashUtils.run(project.data["install_cmd"])
if r.return_code != 0:
raise Exception(f"Installation failed! Return code is {r.return_code}! stdout:\n{r.stdout}\n; stderr:\n{r.stderr}")
else:
cls.logger.debug(f"Installation finished. Return code is {r.return_code}. stdout:\n{r.stdout}\n; stderr:\n{r.stderr}")
# end if
IOUtils.dump(project.checkout_dir / confirmation_file, confirmation_content, "txt")
# end with
return
@classmethod
def collect_data_indexes(cls, data_mgr: FilesManager, projects: List[Project]) -> NoReturn:
"""
Split the dataset and record the data indexes for {t1, t2, t3, lo, ta, allgroup} * {train, val, test, all} dataset parts.
"""
data_mgr.clean_path([FilesManager.DATA_INDEXES])
data_mgr.resolve([FilesManager.DATA_INDEXES]).mkdir(parents=True)
# (Random) Split by train/val/test
cls.logger.info(f"Splitting regular dataset info train/val/test sets with ratio of {Macros.DS_TRAIN_RATIO}/{Macros.DS_VAL_RATIO}/{Macros.DS_TEST_RATIO}")
cls.logger.info(f"Splitting leave-out dataset info train/val/test sets with ratio of {Macros.DS_LO_TRAIN_RATIO}/{Macros.DS_LO_VAL_RATIO}/{Macros.DS_LO_TEST_RATIO}")
# Load and sort coq-documents data
coq_documents: List[CoqDocument] = cls.load_coq_documents(data_mgr)
coq_documents.sort(key=lambda d: d.get_data_index())
cls.logger.info(f"Total dataset #doc = {len(coq_documents)}")
if len(coq_documents) < 10:
cls.logger.warning(f"Dataset is probably too small: {len(coq_documents)}")
# end if
trainevals_data_indexes: Dict[str, Set[str]] = collections.defaultdict(set)
# Split data for each project, using the same random seed salted with the project name
for project in projects:
documents_this_project: List[CoqDocument] = sorted([d for d in coq_documents if d.project_name == project.full_name])
hasher = hashlib.sha256()
hasher.update(str.encode(project.full_name))
hasher.update(str.encode(str(Environment.random_seed)))
salted_seed = int.from_bytes(hasher.digest(), "big")
random.seed(salted_seed)
random.shuffle(documents_this_project)
if project.data["group"] in [Macros.DS_GROUP_T1, Macros.DS_GROUP_T2, Macros.DS_GROUP_T3]:
train_ratio, val_ratio, test_ratio = Macros.DS_TRAIN_RATIO, Macros.DS_VAL_RATIO, Macros.DS_TEST_RATIO
elif project.data["group"] in [Macros.DS_GROUP_LO]:
train_ratio, val_ratio, test_ratio = Macros.DS_LO_TRAIN_RATIO, Macros.DS_LO_VAL_RATIO, Macros.DS_LO_TEST_RATIO
else:
LoggingUtils.log_and_raise(cls.logger, f"Invalid group name {project.data['group']} for {project.full_name}", Exception)
# end if
train_val_split_point = int(math.ceil(train_ratio * len(documents_this_project)))
val_test_split_point = int(math.ceil((train_ratio + val_ratio) * len(documents_this_project)))
trainevals_data_indexes[Macros.DS_TRAIN].update(set([d.get_data_index() for d in documents_this_project[:train_val_split_point]]))
trainevals_data_indexes[Macros.DS_VAL].update(set([d.get_data_index() for d in documents_this_project[train_val_split_point:val_test_split_point]]))
trainevals_data_indexes[Macros.DS_TEST].update(set([d.get_data_index() for d in documents_this_project[val_test_split_point:]]))
# end for
trainevals_data_indexes[Macros.DS_TRAINEVAL_ALL] = set.union(*trainevals_data_indexes.values())
cls.logger.info(f"Train/eval split #doc:\n" + ";\n".join([
f"{traineval}: {len(data_indexes)}"
for traineval, data_indexes in trainevals_data_indexes.items()
]))
# Split by groups
groups_project_names: Dict[str, List[str]] = {group: [p.full_name for p in projects if p.data["group"] == group] for group in Macros.DS_GROUPS}
groups_data_indexes: Dict[str, Set[str]] = dict()
for group, project_names in groups_project_names.items():
documents_this_group: List[CoqDocument] = [d for d in coq_documents if d.project_name in project_names]
groups_data_indexes[group] = set([d.get_data_index() for d in documents_this_group])
# end for
groups_data_indexes[Macros.DS_GROUP_TA] = set.union(groups_data_indexes[Macros.DS_GROUP_T1], groups_data_indexes[Macros.DS_GROUP_T2], groups_data_indexes[Macros.DS_GROUP_T3])
groups_data_indexes[Macros.DS_GROUP_ALL] = set.union(groups_data_indexes[Macros.DS_GROUP_T1], groups_data_indexes[Macros.DS_GROUP_T2], groups_data_indexes[Macros.DS_GROUP_T3], groups_data_indexes[Macros.DS_GROUP_LO])
cls.logger.info(f"Groups split #doc:\n" + ";\n".join([
f"{group}: {len(data_indexes)}"
for group, data_indexes in groups_data_indexes.items()
]))
# The final data indexes are the cross product of the two splits
for traineval in Macros.DS_TRAINEVALS + [Macros.DS_TRAINEVAL_ALL]:
for group in Macros.DS_GROUPS + [Macros.DS_GROUP_TA, Macros.DS_GROUP_ALL]:
data_indexes = list(set.intersection(groups_data_indexes[group], trainevals_data_indexes[traineval]))
cls.logger.info(f"{group}-{traineval} #doc = {len(data_indexes)}")
data_mgr.dump_data([FilesManager.DATA_INDEXES, f"{group}-{traineval}.json"], data_indexes, IOUtils.Format.jsonPretty)
# end for
# end for
return
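# Matches SerAPI "-Q <physical path>,<logical prefix>" / "-R <physical path>,<logical prefix>" options; used in collect_lemmas_doc to map a file's path to its qualified module prefix.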
RE_PATH_TO_QUALIFIED_PREFIX = re.compile(r"-[QR] (?P<path>[^,]+),(?P<qprefix>\S+)")
@classmethod
def collect_lemmas(cls, data_mgr: FilesManager, projects: List[Project], files: List[str] = None):
data_mgr.clean_path([FilesManager.LEMMAS])
data_mgr.resolve([FilesManager.LEMMAS]).mkdir(parents=True)
# Increase recursion limit because the backend sexps are CRAZZZZY deep
sys.setrecursionlimit(10000)
# Load coq-documents
coq_documents: List[CoqDocument] = cls.load_coq_documents(data_mgr)
if files is not None: coq_documents = [d for d in coq_documents if d.file_name in files]
lemmas: List[Lemma] = list()
# Prepare serapi_options
project_2_serapi_options: Dict[str, str] = {p.full_name: p.data["serapi_options"] for p in projects}
errors: List[Tuple[str, str]] = list()
for doc_i, doc in enumerate(tqdm(coq_documents)):
try:
cls.logger.info(f"Collecting from file {doc.get_data_index()} ({doc_i}/{len(coq_documents)}). Collected: {len(lemmas)}")
# Load AST sexp
ast_sexp_list: List[SexpNode] = SexpParser.parse_list(data_mgr.load_data([FilesManager.RAW_FILES, doc.get_data_index()[:-2] + ".ast.sexp"], IOUtils.Format.txt))
# Collect lemmas from this doc
lemmas_doc: List[Lemma] = cls.collect_lemmas_doc(doc, ast_sexp_list, project_2_serapi_options[doc.project_name])
lemmas.extend(lemmas_doc)
except KeyboardInterrupt:
cls.logger.warning(f"Keyboard Interrupt!")
raise
except:
cls.logger.warning(f"Error while parsing {doc.get_data_index()}: {traceback.format_exc()}")
cls.logger.warning(f"The script will continue on other files before it returns with failure. Use Ctrl+C to cut it early.")
errors.append((doc.get_data_index(), traceback.format_exc()))
continue
# end try
# end for
if len(errors) > 0:
LoggingUtils.log_and_raise(cls.logger, f"There were {len(errors)} errors during collection.", Exception)
data_mgr.dump_data([FilesManager.LEMMAS, "errors.txt"], errors, IOUtils.Format.jsonPretty)
# end if
# Assign uids
for lemma_i, lemma in enumerate(lemmas): lemma.uid = lemma_i
data_mgr.dump_data([FilesManager.LEMMAS], lemmas, IOUtils.Format.json, is_batched=True, per_batch=5000)
return
@classmethod
def filter_lemmas(cls, data_mgr: FilesManager):
# Increase recursion limit because the backend sexps are CRAZZZZY deep
sys.setrecursionlimit(10000)
data_mgr.clean_path([FilesManager.LEMMAS_FILTERED])
data_mgr.resolve([FilesManager.LEMMAS_FILTERED]).mkdir(parents=True)
# Load lemmas
lemmas: List[Lemma] = data_mgr.load_data([FilesManager.LEMMAS], IOUtils.Format.json, is_batched=True, clz=Lemma)
heights: List[int] = [l.backend_sexp.height() for l in lemmas]
depth_cutoff_point = sorted(heights)[int(np.ceil(Macros.LEMMAS_DEPTH_CUTOFF * len(lemmas)))]
data_indexes_names: List[Tuple[str, str]] = [(l.data_index, l.name) for l in lemmas if l.backend_sexp.height() <= depth_cutoff_point]
cls.logger.info(f"Cutoff depth is {depth_cutoff_point}, and {len(data_indexes_names)} data are included")
lemmas_filtered: List[Lemma] = [l for l in lemmas if (l.data_index, l.name) in data_indexes_names]
# Assign uids
for lemma_i, lemma in enumerate(lemmas_filtered): lemma.uid = lemma_i
data_mgr.dump_data([FilesManager.LEMMAS_FILTERED], lemmas_filtered, IOUtils.Format.json, is_batched=True, per_batch=5000)
return
@classmethod
def collect_definitions(cls, data_mgr: FilesManager):
data_mgr.clean_path([FilesManager.DEFINITIONS])
data_mgr.resolve([FilesManager.DEFINITIONS]).mkdir(parents=True)
# Load coq-documents
coq_documents: List[CoqDocument] = cls.load_coq_documents(data_mgr)
definitions: List[Definition] = list()
errors: List[Tuple[str, str]] = list()
for doc_i, doc in enumerate(tqdm(coq_documents)):
try:
# Load AST sexp
ast_sexp_list: List[SexpNode] = SexpParser.parse_list(data_mgr.load_data([FilesManager.RAW_FILES, doc.get_data_index()[:-2] + ".ast.sexp"], IOUtils.Format.txt))
definitions_doc: List[Definition] = cls.collect_definitions_doc(doc, ast_sexp_list)
definitions.extend(definitions_doc)
except KeyboardInterrupt:
cls.logger.warning(f"Keyboard Interrupt!")
raise
except:
cls.logger.warning(f"Error while parsing {doc.get_data_index()}: {traceback.format_exc()}")
cls.logger.warning(f"The script will continue on other files before it returns with failure. Use Ctrl+C to cut it early.")
errors.append((doc.get_data_index(), traceback.format_exc()))
continue
# end try
# end for
if len(errors) > 0:
LoggingUtils.log_and_raise(cls.logger, f"There were {len(errors)} errors during collection.", Exception)
data_mgr.dump_data([FilesManager.DEFINITIONS, "errors.txt"], errors, IOUtils.Format.jsonPretty)
# end if
data_mgr.dump_data([FilesManager.DEFINITIONS, "definitions.json"], definitions, IOUtils.Format.json)
return
@classmethod
def collect_lemmas_backend_sexp_transformations(cls, data_mgr: FilesManager):
data_mgr.clean_path([cls.TASK_LEMMA_BACKEND_SEXP_TRANSFORMATIONS])
data_mgr.resolve([cls.TASK_LEMMA_BACKEND_SEXP_TRANSFORMATIONS]).mkdir(parents=True)
# Increase recursion limit because the backend sexps are CRAZZZZY deep
sys.setrecursionlimit(10000)
lemmas_filtered: List[Lemma] = data_mgr.load_data([FilesManager.LEMMAS_FILTERED], IOUtils.Format.json, is_batched=True, clz=Lemma)
# Main stream transformations, applied one after another
levels_lemmas_bsexp_transformed: Dict[str, List[SexpNode]] = dict()
last_level: Optional[str] = None # None means original
for level in LemmaBackendSexpTransformers.LEVELS:
cls.logger.info(f"Doing {last_level if last_level is not None else 'orig'} -> {level} transformation")
levels_lemmas_bsexp_transformed[level] = list()
for lemma_i, lemma in enumerate(tqdm(lemmas_filtered)):
orig_sexp = lemma.backend_sexp if last_level is None else levels_lemmas_bsexp_transformed[last_level][lemma_i]
bsexp_transformed = LemmaBackendSexpTransformers.transform(level, copy.deepcopy(orig_sexp))
levels_lemmas_bsexp_transformed[level].append(bsexp_transformed)
# end for
last_level = level
data_mgr.dump_data([cls.TASK_LEMMA_BACKEND_SEXP_TRANSFORMATIONS, level, "transformed"], levels_lemmas_bsexp_transformed[level], IOUtils.Format.json, is_batched=True, per_batch=5000)
# end for
# Other special transformation, directly applied on original trees
for tr_name in LemmaBackendSexpTransformers.SPECIALS:
cls.logger.info(f"Doing orig -> {tr_name} transformation")
bsexp_transformed_list = list()
for lemma_i, lemma in enumerate(tqdm(lemmas_filtered)):
orig_sexp = lemma.backend_sexp
bsexp_transformed = LemmaBackendSexpTransformers.transform(tr_name, copy.deepcopy(orig_sexp))
bsexp_transformed_list.append(bsexp_transformed)
# end for
data_mgr.dump_data([cls.TASK_LEMMA_BACKEND_SEXP_TRANSFORMATIONS, tr_name, "transformed"], bsexp_transformed_list, IOUtils.Format.json, is_batched=True, per_batch=5000)
# end for
return
@classmethod
def collect_lemmas_foreend_sexp_transformations(cls, data_mgr: FilesManager):
data_mgr.clean_path([cls.TASK_LEMMA_FOREEND_SEXP_TRANSFORMATIONS])
data_mgr.resolve([cls.TASK_LEMMA_FOREEND_SEXP_TRANSFORMATIONS]).mkdir(parents=True)
# Increase recursion limit because the backend sexps are CRAZZZZY deep
sys.setrecursionlimit(10000)
lemmas_filtered: List[Lemma] = data_mgr.load_data([FilesManager.LEMMAS_FILTERED], IOUtils.Format.json, is_batched=True, clz=Lemma)
# Main stream transformations, applied one after another
levels_lemmas_fsexp_transformed: Dict[str, List[SexpNode]] = dict()
last_level: Optional[str] = None # None means original
for level in LemmaForeendSexpTransformers.LEVELS:
cls.logger.info(f"Doing {last_level if last_level is not None else 'orig'} -> {level} transformation")
levels_lemmas_fsexp_transformed[level] = list()
for lemma_i, lemma in enumerate(tqdm(lemmas_filtered)):
orig_sexp = lemma.ast_sexp if last_level is None else levels_lemmas_fsexp_transformed[last_level][lemma_i]
fsexp_transformed = LemmaForeendSexpTransformers.transform(level, copy.deepcopy(orig_sexp))
levels_lemmas_fsexp_transformed[level].append(fsexp_transformed)
# end for
last_level = level
data_mgr.dump_data([cls.TASK_LEMMA_FOREEND_SEXP_TRANSFORMATIONS, level, "transformed"], levels_lemmas_fsexp_transformed[level], IOUtils.Format.json, is_batched=True, per_batch=5000)
# end for
# Other special transformation, directly applied on level 0 trees
for tr_name in LemmaForeendSexpTransformers.SPECIALS:
cls.logger.info(f"Doing {LemmaForeendSexpTransformers.LEVEL_0} -> {tr_name} transformation")
fsexp_transformed_list = list()
for lemma_i, lemma in enumerate(tqdm(lemmas_filtered)):
orig_sexp = levels_lemmas_fsexp_transformed[LemmaForeendSexpTransformers.LEVEL_0][lemma_i]
fsexp_transformed = LemmaForeendSexpTransformers.transform(tr_name, copy.deepcopy(orig_sexp))
fsexp_transformed_list.append(fsexp_transformed)
# end for
data_mgr.dump_data([cls.TASK_LEMMA_FOREEND_SEXP_TRANSFORMATIONS, tr_name, "transformed"], fsexp_transformed_list, IOUtils.Format.json, is_batched=True, per_batch=5000)
# end for
return
VTYPES_LEMMA = [SexpInfo.VernacConsts.type_start_theorem_proof]
VTYPES_MODULE_BEG = [SexpInfo.VernacConsts.type_define_module]
VTYPES_MODULE_END = [SexpInfo.VernacConsts.type_end_segment]
VTYPES_DEFINITIONS = [SexpInfo.VernacConsts.type_definition]
@classmethod
def collect_lemmas_doc(
cls,
doc: CoqDocument,
ast_sexp_list: List[SexpNode],
serapi_options: str,
) -> List[Lemma]:
lemmas_doc: List[Lemma] = list()
data_index = doc.get_data_index()
# Maintain a stack of module
modules: List[str] = list()
# Prepare qualified name prefix
qprefix_this_doc = "./" + doc.file_name[:-2] # Remove .v
for m in cls.RE_PATH_TO_QUALIFIED_PREFIX.finditer(serapi_options):
path = m.group("path")
if path != ".": path = "./" + path
qprefix = m.group("qprefix")
if qprefix_this_doc.startswith(path):
qprefix_this_doc = qprefix + qprefix_this_doc[len(path):]
break
# end if
# end for
if qprefix_this_doc.startswith("./"): qprefix_this_doc = qprefix_this_doc[len("./"):]
qprefix_this_doc = qprefix_this_doc.replace("/", ".")
for sent_i, sent in enumerate(doc.sentences):
ast_sexp = ast_sexp_list[sent_i]
vernac = SexpAnalyzer.analyze_vernac(ast_sexp)
if vernac.vernac_type in cls.VTYPES_MODULE_BEG:
# (VernacExpr()(VernacDefineModule() ( ( v ( Id <module name>)) ...
# 0 1 2 20 21 22 220 2201 22011
module_name = vernac.vernac_sexp[2][2][0][1][1].content_no_quote
modules.append(module_name)
elif vernac.vernac_type in cls.VTYPES_MODULE_END:
# (VernacExpr()(VernacEndSegment ( ( v ( Id <module name>)) ...
# 0 1 2 20 21 210 2101 21011
try:
module_name = vernac.vernac_sexp[2][1][0][1][1].content_no_quote
except:
print(vernac.vernac_sexp.pretty_format())
raise
# end try
if len(modules) > 0 and module_name == modules[-1]: modules.pop() # EndModule and EndSection share the same vernac type
elif vernac.vernac_type in cls.VTYPES_LEMMA:
# (VernacExpr()(VernacStartTheoremProof Lemma ( ( ( ( ( v ( Id <lemma name>))
# 0 1 2 20 21 22 2200000 2200001 22000011
lemma = Lemma()
lemma.data_index = data_index
lemma.name = vernac.vernac_sexp[2][2][0][0][0][0][1][1].content_no_quote
lemma.qname = qprefix_this_doc + "." + ".".join(modules + [lemma.name])
# Find lemma content, after the first token matching the lemma name
tok_i = 0
for tok in sent.tokens:
if tok.content == lemma.name: break
tok_i += 1
# end for
if tok_i == len(sent.tokens): LoggingUtils.log_and_raise(cls.logger, f"Lemma name {lemma.name} didn't appear in the source code {sent.str_with_space()}", Exception)
lemma.vernac_command = sent.tokens[:tok_i]
lemma.statement = sent.tokens[tok_i + 1:]
lemma.ast_sexp = vernac.vernac_sexp
lemmas_doc.append(lemma)
# end if
# end for
# Use sername to get the backend representations
lemma_qnames: str = "".join([l.qname + "\n" for l in lemmas_doc])
lemma_qnames_file = BashUtils.get_temp_file()
IOUtils.dump(lemma_qnames_file, lemma_qnames, IOUtils.Format.txt)
lemma_qnames_backend_sexps_str: str = BashUtils.run(f"sername {serapi_options} --require-lib={qprefix_this_doc} {lemma_qnames_file}", expected_return_code=0).stdout
IOUtils.rm(lemma_qnames_file)
for qname_backend_sexp_str in lemma_qnames_backend_sexps_str.splitlines():
qname, backend_sexp_str = qname_backend_sexp_str.split(":", 1)
backend_sexp = SexpParser.parse(backend_sexp_str)
for lemma in lemmas_doc:
if lemma.qname == qname:
lemma.backend_sexp = backend_sexp
break
# end if
# end for
# end for
lemmas_doc = [l for l in lemmas_doc if l.backend_sexp is not None]
return lemmas_doc
@classmethod
def collect_definitions_doc(cls,
doc: CoqDocument,
ast_sexp_list: List[SexpNode],
) -> List[Definition]:
definitions_doc: List[Definition] = list()
data_index = doc.get_data_index()
for sent_i, sent in enumerate(doc.sentences):
ast_sexp = ast_sexp_list[sent_i]
vernac = SexpAnalyzer.analyze_vernac(ast_sexp)
if vernac.vernac_type in cls.VTYPES_DEFINITIONS:
# (VernacExpr()( VernacDefinition ( NoDischarge Definition) ( ( ( v ( Name ( Id codom ))) ...
# 0 1 2 20 21 210 211 22 220 2200 22000 22001 220010 220011 2200110 2200111
try:
if vernac.vernac_sexp[2][1][0].content == "NoDischarge" and vernac.vernac_sexp[2][1][1].content == "Definition":
definition = Definition()
definition.data_index = data_index
definition.name = vernac.vernac_sexp[2][2][0][0][1][1][1].content_no_quote
definitions_doc.append(definition)
# end if
except IllegalSexpOperationException:
continue
# end try
# end if
# end for
return definitions_doc
@classmethod
def extract_data_project(cls,
project_path: Path,
files: Optional[List[str]],
exclude_files: Optional[List[str]],
exclude_pattern: Optional[str],
serapi_options: str,
output_path: Path,
):
# 1. Prepare output path
if output_path.is_dir():
cls.logger.warning(f"{output_path} already exists, will overwrite the files.")
elif output_path.is_file():
LoggingUtils.log_and_raise(cls.logger, f"{output_path} already exists as a file. Aborting.", Exception)
else:
IOUtils.mk_dir(output_path)
# end if
# 2. Extract documents, tok.sexp and ast.sexp
coq_documents: Dict[str, CoqDocument] = collections.OrderedDict()
ast_sexp_lists: Dict[str, List[SexpNode]] = dict()
tok_sexp_lists: Dict[str, List[SexpNode]] = dict()
with IOUtils.cd(project_path):
coq_files: List[str] = BashUtils.run(f"find -name '*.v' -type f").stdout.split("\n")[:-1]
coq_files = [coq_file[2:] for coq_file in coq_files]
if files is not None:
coq_files = [f for f in coq_files if f in files]
# end if
if exclude_files is not None:
coq_files = [f for f in coq_files if f not in exclude_files]
# end if
if exclude_pattern is not None:
re_exclude_pattern = re.compile(exclude_pattern)
coq_files = [f for f in coq_files if not re_exclude_pattern.fullmatch(f)]
# end if
for i, coq_file in enumerate(tqdm(coq_files)):
try:
# Read file
with open(coq_file, "r", newline="") as f:
source_code = f.read()
# end with
# Get unicode offsets
unicode_offsets = ParserUtils.get_unicode_offsets(source_code)
# Call SerAPI
ast_sexp_str: str = BashUtils.run(f"sercomp {serapi_options} --mode=sexp -- {coq_file}", expected_return_code=0).stdout
tok_sexp_str: str = BashUtils.run(f"sertok {serapi_options} -- {coq_file}", expected_return_code=0).stdout
# Parse ast sexp
ast_sexp_list: List[SexpNode] = SexpParser.parse_list(ast_sexp_str)
tok_sexp_list: List[SexpNode] = SexpParser.parse_list(tok_sexp_str)
# Parse the document
coq_document = CoqParser.parse_document(source_code, ast_sexp_list, tok_sexp_list, unicode_offsets=unicode_offsets)
# Set meta data
coq_document.file_name = coq_file
coq_document.project_name = project_path.name
coq_documents[coq_file] = coq_document
ast_sexp_lists[coq_file] = ast_sexp_list
tok_sexp_lists[coq_file] = tok_sexp_list
except KeyboardInterrupt:
cls.logger.warning("Keyboard interrupt!")
raise
except:
cls.logger.warning(f"File {coq_file} failed! Exception was: {traceback.format_exc()}")
continue
# end try
# end for
# 3. Extract and save lemmas and definitions
lemmas: List[Lemma] = list()
definitions: List[Definition] = list()
# Increase recursion limit because the backend sexps are CRAZZZZY deep
sys.setrecursionlimit(10000)
for file_path, doc in tqdm(coq_documents.items()):
ast_sexp_list = ast_sexp_lists[file_path]
lemmas_doc = cls.collect_lemmas_doc(doc, ast_sexp_list, serapi_options)
lemmas.extend(lemmas_doc)
definitions_doc = cls.collect_definitions_doc(doc, ast_sexp_list)
definitions.extend(definitions_doc)
# end for
IOUtils.dump(output_path/"lemmas.json", IOUtils.jsonfy(lemmas), IOUtils.Format.json)
IOUtils.dump(output_path/"definitions.json", IOUtils.jsonfy(definitions), IOUtils.Format.json)
# end with
return
@classmethod
def extract_data_from_corpus(cls,
corpus_path: Path,
trainevals: List[str],
groups: List[str],
output_path: Path,
):
# 1. Prepare output path
if output_path.is_dir():
cls.logger.warning(f"{output_path} already exists, will overwrite the files.")
elif output_path.is_file():
LoggingUtils.log_and_raise(cls.logger, f"{output_path} already exists as a file. Aborting.", Exception)
else:
IOUtils.mk_dir(output_path)
# end if
assert all([traineval in Macros.DS_TRAINEVALS for traineval in trainevals])
assert all([group in Macros.DS_GROUPS+[Macros.DS_GROUP_TA] for group in groups])
data_mgr = FilesManager(corpus_path)
# 2. Load lemmas and definitions
lemmas_filtered: List[Lemma] = data_mgr.load_data([FilesManager.LEMMAS_FILTERED], IOUtils.Format.json, is_batched=True, clz=Lemma)
definitions: List[Definition] = data_mgr.load_data([FilesManager.DEFINITIONS, "definitions.json"], IOUtils.Format.json, clz=Definition)
# 3. Output to output_path for each combination of traineval and group
for traineval in trainevals:
for group in groups:
IOUtils.mk_dir(output_path/f"{group}-{traineval}")
data_indexes = IOUtils.load(Macros.project_dir/"training"/f"{group}-{traineval}.json", IOUtils.Format.json)
IOUtils.dump(output_path/f"{group}-{traineval}/lemmas.json", IOUtils.jsonfy([l for l in lemmas_filtered if l.data_index in data_indexes]), IOUtils.Format.json)
IOUtils.dump(output_path/f"{group}-{traineval}/definitions.json", IOUtils.jsonfy([d for d in definitions if d.data_index in data_indexes]), IOUtils.Format.json)
# end for
# end for
return
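# Illustrative sketch (not part of the original module), showing one way the TASK_* entry
# points above could be chained to rebuild the corpus; the default corpus description file
# under dataset_dir is assumed to exist.
if __name__ == "__main__":
    DataMiner.collect_data(task=DataMiner.TASK_INSTALL_COQ_PROJECTS)
    DataMiner.collect_data(task=DataMiner.TASK_COQ_DOCUMENTS, **{"verify-tokenizer": True})
    DataMiner.collect_data(task=DataMiner.TASK_DATA_INDEXES)
    DataMiner.collect_data(task=DataMiner.TASK_LEMMA)
    DataMiner.collect_data(task=DataMiner.TASK_LEMMA_FILTERED)
    DataMiner.collect_data(task=DataMiner.TASK_DEFINITIONS)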
| 49.016018
| 259
| 0.639519
| 5,252
| 42,840
| 4.977342
| 0.091013
| 0.015799
| 0.012356
| 0.00964
| 0.568226
| 0.487778
| 0.442179
| 0.404078
| 0.368922
| 0.348992
| 0
| 0.008656
| 0.266527
| 42,840
| 873
| 260
| 49.072165
| 0.823277
| 0.086835
| 0
| 0.352313
| 0
| 0.021352
| 0.121117
| 0.027954
| 0
| 0
| 0
| 0
| 0.003559
| 1
| 0.030249
| false
| 0
| 0.049822
| 0.001779
| 0.140569
| 0.001779
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
00464d29c3ee1cf1c9de61907d49f9253edbd2f3
| 965
|
py
|
Python
|
venv/Lib/site-packages/gensim/similarities/__init__.py
|
saritmaitra/nlp_ner_topic_modeling
|
70914b4ae4cd7d3b9cb10776161132216394883c
|
[
"MIT"
] | 3
|
2021-03-29T19:21:08.000Z
|
2021-12-31T09:30:11.000Z
|
VisionAPI/lib/python3.8/site-packages/gensim/similarities/__init__.py
|
aniruddhakj/AnswerScriptEvaluation
|
7b039b84355ecda1d55dc037ccfc4a4d661ad5e3
|
[
"BSD-3-Clause"
] | 1
|
2021-08-30T08:53:09.000Z
|
2021-08-30T08:53:09.000Z
|
venv/Lib/site-packages/gensim/similarities/__init__.py
|
saritmaitra/nlp_ner_topic_modeling
|
70914b4ae4cd7d3b9cb10776161132216394883c
|
[
"MIT"
] | 1
|
2021-03-30T05:02:53.000Z
|
2021-03-30T05:02:53.000Z
|
"""
This package contains implementations of pairwise similarity queries.
"""
# bring classes directly into package namespace, to save some typing
import warnings
try:
import Levenshtein # noqa:F401
except ImportError:
msg = (
"The gensim.similarities.levenshtein submodule is disabled, because the optional "
"Levenshtein package <https://pypi.org/project/python-Levenshtein/> is unavailable. "
"Install Levenhstein (e.g. `pip install python-Levenshtein`) to suppress this warning."
)
warnings.warn(msg)
LevenshteinSimilarityIndex = None
else:
from .levenshtein import LevenshteinSimilarityIndex # noqa:F401
from .docsim import ( # noqa:F401
Similarity,
MatrixSimilarity,
SparseMatrixSimilarity,
SoftCosineSimilarity,
WmdSimilarity)
from .termsim import ( # noqa:F401
TermSimilarityIndex,
UniformTermSimilarityIndex,
WordEmbeddingSimilarityIndex,
SparseTermSimilarityMatrix)
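# Illustrative sketch (not part of this module): a minimal pairwise similarity query with
# the classes re-exported above; the toy corpus below is an assumption.
if __name__ == "__main__":
    from gensim import corpora
    texts = [["human", "computer", "interface"], ["graph", "trees"], ["graph", "minors", "survey"]]
    dictionary = corpora.Dictionary(texts)
    bow_corpus = [dictionary.doc2bow(text) for text in texts]
    index = MatrixSimilarity(bow_corpus, num_features=len(dictionary))
    # Cosine similarity of a query against every document in the corpus
    print(index[dictionary.doc2bow(["graph", "survey"])])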
| 32.166667
| 95
| 0.741969
| 91
| 965
| 7.868132
| 0.681319
| 0.044693
| 0.039106
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015287
| 0.186529
| 965
| 29
| 96
| 33.275862
| 0.896815
| 0.18342
| 0
| 0
| 0
| 0
| 0.319588
| 0.039948
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
004abcc62f87b11013e726a8b69a3c514744935d
| 1,858
|
py
|
Python
|
src/PIDController.py
|
methylDragon/momo-emotions
|
137161632cc45227884d1a7a46dbd75d261de371
|
[
"BSD-2-Clause"
] | 11
|
2019-05-24T00:25:59.000Z
|
2021-05-17T07:08:58.000Z
|
src/PIDController.py
|
methylDragon/momo-emotions
|
137161632cc45227884d1a7a46dbd75d261de371
|
[
"BSD-2-Clause"
] | null | null | null |
src/PIDController.py
|
methylDragon/momo-emotions
|
137161632cc45227884d1a7a46dbd75d261de371
|
[
"BSD-2-Clause"
] | 10
|
2019-06-21T02:38:45.000Z
|
2021-07-07T04:50:39.000Z
|
import time
class PIDController:
def __init__(self, Kp=0.25, Ki=0.0, Kd=0.0, anti_windup=10.0, cmd_freq=0.0):
self.Kp = Kp
self.Ki = Ki
self.Kd = Kd
# Set max integral correction per timestep
self.anti_windup = anti_windup
# Set delay between updates (seconds)
self.cmd_freq = cmd_freq
self.current_time = time.time()
self.prev_time = self.current_time
self.reset()
def reset(self):
self.setpoint = 0.0
self.p_ = 0.0
self.i_ = 0.0
self.d_ = 0.0
self.prev_error = 0.0
def compute(self, setpoint, measured_value):
''' Compute PID correction wrt. measured_value - setpoint '''
self.current_time = time.time()
delta_time = self.current_time - self.prev_time
if delta_time >= self.cmd_freq:
self.setpoint = setpoint
error = self.setpoint - measured_value
delta_error = error - self.prev_error
self.accumulated_error = error * delta_time
# Limit the integration to prevent absolutely wrecking yourself
if self.accumulated_error < -self.anti_windup:
self.accumulated_error = -self.anti_windup
if self.accumulated_error > self.anti_windup:
self.accumulated_error = self.anti_windup
self.i_ = self.i_ + self.accumulated_error
self.d_ = delta_error / delta_time
self.prev_error = error
self.prev_time = self.current_time
return self.Kp * error + self.Ki * self.i_ + self.Kd * self.d_
def set_kp(self, kp):
self.Kp = kp
def set_ki(self, ki):
self.Ki = ki
def set_kd(self, kd):
self.Kd = kd
def set_anti_windup(self, anti_windup):
self.anti_windup = anti_windup
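# Illustrative sketch (not part of the original module): closing the loop around a crude
# first-order plant; the gains, plant response, and step size are assumptions.
if __name__ == "__main__":
    pid = PIDController(Kp=0.8, Ki=0.1, Kd=0.05, cmd_freq=0.0)
    measured = 0.0
    for _ in range(100):
        correction = pid.compute(setpoint=1.0, measured_value=measured)
        if correction is not None:  # compute() returns None until cmd_freq seconds have passed
            measured += 0.05 * correction  # crude plant: move a fraction of the correction
        time.sleep(0.01)
    print("final value: %.3f" % measured)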
| 27.731343
| 80
| 0.595264
| 248
| 1,858
| 4.229839
| 0.209677
| 0.104862
| 0.093422
| 0.114395
| 0.326978
| 0.188751
| 0.137274
| 0.13346
| 0.13346
| 0.13346
| 0
| 0.017336
| 0.317008
| 1,858
| 66
| 81
| 28.151515
| 0.809299
| 0.104413
| 0
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.02381
| 0
| 0.238095
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
004b29aa8664031730012b088eba2ecb4c151bb6
| 1,420
|
py
|
Python
|
tilty/emitters/sqlite.py
|
heresurpizza/tilty
|
758fc2513b5fb660ac11163941340e4c10f61081
|
[
"MIT"
] | 13
|
2020-02-27T03:07:50.000Z
|
2022-01-02T20:01:57.000Z
|
tilty/emitters/sqlite.py
|
heresurpizza/tilty
|
758fc2513b5fb660ac11163941340e4c10f61081
|
[
"MIT"
] | 10
|
2020-03-04T14:57:59.000Z
|
2021-07-23T03:54:17.000Z
|
tilty/emitters/sqlite.py
|
heresurpizza/tilty
|
758fc2513b5fb660ac11163941340e4c10f61081
|
[
"MIT"
] | 8
|
2020-03-15T02:23:10.000Z
|
2020-11-25T12:42:37.000Z
|
# -*- coding: utf-8 -*-
""" SQLite emitter """
import logging
import sqlite3
LOGGER = logging.getLogger()
def __type__() -> str:
return 'SQLite'
class SQLite: # pylint: disable=too-few-public-methods
""" SQLite wrapper class """
def __init__(self, config: dict) -> None:
""" Initializer
Args:
config: (dict) represents the configuration for the emitter
"""
# <start config sample>
# [sqlite]
# file = /etc/tilty/tilt.sqlite
self.conn = sqlite3.connect(config['file'])
self.conn.execute('''
CREATE TABLE IF NOT EXISTS data(
id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
gravity INTEGER,
temp INTEGER,
color VARCHAR(16),
mac VARCHAR(17),
timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL)
''')
def emit(self, tilt_data: dict) -> None:
""" Initializer
Args:
tilt_data (dict): data returned from valid tilt device scan
"""
LOGGER.info('[sqlite] creating row')
self.conn.execute(
"insert into data (gravity,temp,color,mac) values (?,?,?,?)",
(
tilt_data['gravity'],
tilt_data['temp'],
tilt_data['color'],
tilt_data['mac']
)
)
self.conn.commit()
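# Illustrative sketch (not part of the original module): wiring up the emitter; the database
# path and reading values are assumptions, while the dict keys are the ones emit() reads above.
if __name__ == "__main__":
    emitter = SQLite(config={'file': '/tmp/tilt.sqlite'})
    emitter.emit({'gravity': 1050, 'temp': 68, 'color': 'Black', 'mac': 'aa:bb:cc:dd:ee:ff'})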
| 26.792453
| 73
| 0.527465
| 142
| 1,420
| 5.169014
| 0.542254
| 0.065395
| 0.051771
| 0.06267
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007634
| 0.354225
| 1,420
| 52
| 74
| 27.307692
| 0.792803
| 0.229577
| 0
| 0
| 0
| 0
| 0.409852
| 0.023645
| 0
| 0
| 0
| 0
| 0
| 1
| 0.103448
| false
| 0
| 0.068966
| 0.034483
| 0.241379
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
004de3fdbd877d9dca8c67922ef30d1cf30e4c3c
| 6,043
|
py
|
Python
|
IFishFarm.py
|
HussamElden/IFishFarm
|
c49acc997229b9ae0649d9e4765255cb2db02bfc
|
[
"CECILL-B"
] | 1
|
2021-08-03T13:24:38.000Z
|
2021-08-03T13:24:38.000Z
|
IFishFarm.py
|
HussamElden/IFishFarm
|
c49acc997229b9ae0649d9e4765255cb2db02bfc
|
[
"CECILL-B"
] | null | null | null |
IFishFarm.py
|
HussamElden/IFishFarm
|
c49acc997229b9ae0649d9e4765255cb2db02bfc
|
[
"CECILL-B"
] | 2
|
2021-01-12T11:25:11.000Z
|
2022-03-11T21:25:53.000Z
|
import cv2
import numpy as np
from numpy.linalg import norm
import math
import csv
from operator import itemgetter
from datetime import datetime
import VideoEnhancement
import fishpredictor
import detector
import kmeancluster
import preproccesing
import randomforst
cluster = kmeancluster.kmeans()
classifier = randomforst.randomforst()
samak = []
framenum = 0
sum = 0
max = 0
mylist = [[]]
yolo = detector.detector()
cap = cv2.VideoCapture('chaos1.avi')
ret, frame = cap.read()
fheight, fwidth, channels = frame.shape
resize = False
if (fheight > 352 or fwidth > 640):
resize = True
fwidth = 640
fheight = 352
frame = cv2.resize(frame, (640, 352))
mask = np.zeros_like(frame)
# Needed for saving video
fps = cap.get(cv2.CAP_PROP_FPS)
fourcc = cv2.VideoWriter_fourcc(*'DIVX')
dt_string = datetime.now().strftime("%H_%M_%S_%d_%m_%y")
num_seconds = 10
video = cv2.VideoWriter('videonormal/' +str(num_seconds*round(fps))+'_'+str(dt_string)+'.avi', fourcc, fps, (fwidth, fheight))
# Read until video is completed
counter = 0
buffer = [[]]
apperance = [[]]
last_changed = []
top = 0
frms = 0
# Needed to track objects
n_frame = 8
ref_n_frame_axies = []
ref_n_frame_label = []
ref_n_frame_axies_flatten = []
ref_n_frame_label_flatten = []
frm_num = 1
coloredLine = np.random.randint(0, 255, (10000, 3))
arr = []
label_cnt = 1
min_distance = 50
while (cap.isOpened()):
ret, img = cap.read()
if ret == True:
if frms % 2 == 0:
img = VideoEnhancement.enhanceVideo(img, resize)
v = 0
cur_frame_axies = []
cur_frame_label = []
height, width, channels = img.shape
boxes, confidences, centers, colors = yolo.detect(img)
counter += 1
indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.1, 0.4)
font = cv2.FONT_HERSHEY_PLAIN
fishcounter = 1
for i in range(len(boxes)):
if i in indexes:
lbl = float('nan')
x, y, w, h, = boxes[i]
center_x, center_y = centers[i]
color = colors[0]
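# Nearest-neighbour association: reuse the label of the closest detection center from the last n_frame frames if it lies within min_distance pixels; otherwise a new label is assigned below.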
if (len(ref_n_frame_label_flatten) > 0):
b = np.array([(center_x, center_y)])
a = np.array(ref_n_frame_axies_flatten)
distance = norm(a - b, axis=1)
min_value = distance.min()
if (min_value < min_distance):
idx = np.where(distance == min_value)[0][0]
lbl = ref_n_frame_label_flatten[idx]
points = (int(ref_n_frame_axies_flatten[idx][0]), int(ref_n_frame_axies_flatten[idx][1]))
mask = cv2.line(mask, (center_x, center_y), points, coloredLine[lbl].tolist(), 2)
cv2.circle(img, points, 5, coloredLine[lbl].tolist(), -1)
if (math.isnan(lbl)):
lbl = label_cnt
label_cnt += 1
arr.append([counter, lbl, center_x, center_y])
cur_frame_label.append(lbl)
cur_frame_axies.append((center_x, center_y))
samak.append([lbl, x, y, w, h])
cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)
cv2.putText(img, '{}{}'.format("Fish", lbl), (x, y - 5), font, 1, (255, 255, 255), 2)
if (len(ref_n_frame_axies) == n_frame):
del ref_n_frame_axies[0]
del ref_n_frame_label[0]
ref_n_frame_label.append(cur_frame_label)
ref_n_frame_axies.append(cur_frame_axies)
ref_n_frame_axies_flatten = [a for ref_n_frame_axie in ref_n_frame_axies for a in ref_n_frame_axie]
ref_n_frame_label_flatten = [b for ref_n_frame_lbl in ref_n_frame_label for b in ref_n_frame_lbl]
z = sorted(samak, key=itemgetter(0))
samak = []
if (len(z) != 0):
fishpredictor.predictfish(z, apperance, buffer, last_changed, top, img, color, mylist, framenum)
img = cv2.add(img, mask)
# cv2.imshow("Image", img)
mylist.append([])
framenum += 1
print(frms)
print("----------")
# cap.set(1,frms)
video.write(img)
if (frms % (round(fps) * num_seconds) == 0 and frms!=0):
result = cluster.classify(mask)
print(classifier.classify(z, mask,fps))
if (result == 1):
with open('exceltext/' + str(frms)+'_'+str(dt_string)+ '.csv', 'w', newline='') as file:
writer = csv.writer(file)
writer.writerows(mylist)
# writer.writerows(preproccesing.featuresCalc(mylist))
cv2.imwrite("trajecstest" + str(frms)+'_'+str(dt_string) + ".png", mask)
video.release()
dt_string = datetime.now().strftime("%H_%M_%S_%d_%m_%y")
video = cv2.VideoWriter('videotest/' + str(frms+(num_seconds*round(fps)))+'_'+str(dt_string)+'.avi', fourcc, fps,
(fwidth, fheight))
print("result " + str(result))
mask = np.zeros_like(frame)
ref_n_frame_axies = []
ref_n_frame_label = []
ref_n_frame_axies_flatten = []
ref_n_frame_label_flatten = []
buffer = [[]]
apperance = [[]]
last_changed = []
# frms = 0
counter = 0
mylist = [[]]
framenum = 0
fishcounter = 1
label_cnt = 1
top = 0
if cv2.waitKey(25) & 0xFF == ord('q'):
break
# Break the loop
else:
break
frms += 1
cap.release()
cv2.destroyAllWindows()
video.release()
| 35.547059
| 133
| 0.529869
| 721
| 6,043
| 4.223301
| 0.273232
| 0.055172
| 0.076847
| 0.055172
| 0.197701
| 0.128736
| 0.120854
| 0.10312
| 0.10312
| 0.10312
| 0
| 0.02855
| 0.350819
| 6,043
| 170
| 134
| 35.547059
| 0.747642
| 0.032269
| 0
| 0.25
| 0
| 0
| 0.024144
| 0
| 0
| 0
| 0.000685
| 0
| 0
| 1
| 0
| false
| 0
| 0.090278
| 0
| 0.090278
| 0.027778
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|